aboutsummaryrefslogtreecommitdiff
path: root/clang/lib
diff options
context:
space:
mode:
Diffstat (limited to 'clang/lib')
-rw-r--r--clang/lib/ARCMigrate/ARCMT.cpp7
-rw-r--r--clang/lib/ARCMigrate/FileRemapper.cpp6
-rw-r--r--clang/lib/ARCMigrate/Internals.h4
-rw-r--r--clang/lib/ARCMigrate/ObjCMT.cpp50
-rw-r--r--clang/lib/ARCMigrate/TransGCAttrs.cpp9
-rw-r--r--clang/lib/ARCMigrate/TransProperties.cpp26
-rw-r--r--clang/lib/ARCMigrate/TransProtectedScope.cpp3
-rw-r--r--clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp12
-rw-r--r--clang/lib/ARCMigrate/Transforms.cpp1
-rw-r--r--clang/lib/AST/APValue.cpp84
-rw-r--r--clang/lib/AST/ASTConcept.cpp13
-rw-r--r--clang/lib/AST/ASTContext.cpp940
-rw-r--r--clang/lib/AST/ASTDiagnostic.cpp5
-rw-r--r--clang/lib/AST/ASTDumper.cpp95
-rw-r--r--clang/lib/AST/ASTImporter.cpp2152
-rw-r--r--clang/lib/AST/ASTImporterLookupTable.cpp6
-rw-r--r--clang/lib/AST/ASTStructuralEquivalence.cpp84
-rw-r--r--clang/lib/AST/ASTTypeTraits.cpp34
-rw-r--r--clang/lib/AST/AttrImpl.cpp151
-rw-r--r--clang/lib/AST/CXXInheritance.cpp26
-rw-r--r--clang/lib/AST/CommentCommandTraits.cpp1
-rw-r--r--clang/lib/AST/CommentSema.cpp50
-rw-r--r--clang/lib/AST/ComparisonCategories.cpp1
-rw-r--r--clang/lib/AST/ComputeDependence.cpp796
-rw-r--r--clang/lib/AST/DataCollection.cpp2
-rw-r--r--clang/lib/AST/Decl.cpp146
-rw-r--r--clang/lib/AST/DeclBase.cpp67
-rw-r--r--clang/lib/AST/DeclCXX.cpp268
-rw-r--r--clang/lib/AST/DeclObjC.cpp29
-rw-r--r--clang/lib/AST/DeclPrinter.cpp131
-rw-r--r--clang/lib/AST/DeclTemplate.cpp52
-rw-r--r--clang/lib/AST/DeclarationName.cpp16
-rw-r--r--clang/lib/AST/Expr.cpp1164
-rw-r--r--clang/lib/AST/ExprCXX.cpp488
-rw-r--r--clang/lib/AST/ExprClassification.cpp14
-rw-r--r--clang/lib/AST/ExprConcepts.cpp215
-rw-r--r--clang/lib/AST/ExprConstant.cpp864
-rw-r--r--clang/lib/AST/ExprObjC.cpp61
-rw-r--r--clang/lib/AST/ExternalASTSource.cpp19
-rw-r--r--clang/lib/AST/FormatString.cpp3
-rw-r--r--clang/lib/AST/Interp/Boolean.h7
-rw-r--r--clang/lib/AST/Interp/ByteCodeExprGen.h1
-rw-r--r--clang/lib/AST/Interp/Context.cpp1
-rw-r--r--clang/lib/AST/Interp/Disasm.cpp1
-rw-r--r--clang/lib/AST/Interp/Integral.h33
-rw-r--r--clang/lib/AST/Interp/Interp.cpp2
-rw-r--r--clang/lib/AST/Interp/Interp.h2
-rw-r--r--clang/lib/AST/Interp/InterpBlock.cpp (renamed from clang/lib/AST/Interp/Block.cpp)2
-rw-r--r--clang/lib/AST/Interp/InterpBlock.h (renamed from clang/lib/AST/Interp/Block.h)2
-rw-r--r--clang/lib/AST/Interp/InterpFrame.h8
-rw-r--r--clang/lib/AST/Interp/Pointer.cpp2
-rw-r--r--clang/lib/AST/Interp/Pointer.h4
-rw-r--r--clang/lib/AST/Interp/Source.h4
-rw-r--r--clang/lib/AST/ItaniumMangle.cpp373
-rw-r--r--clang/lib/AST/JSONNodeDumper.cpp91
-rw-r--r--clang/lib/AST/Linkage.h1
-rw-r--r--clang/lib/AST/Mangle.cpp74
-rw-r--r--clang/lib/AST/MicrosoftMangle.cpp170
-rw-r--r--clang/lib/AST/NSAPI.cpp4
-rw-r--r--clang/lib/AST/NestedNameSpecifier.cpp89
-rw-r--r--clang/lib/AST/ODRHash.cpp15
-rw-r--r--clang/lib/AST/OSLog.cpp6
-rw-r--r--clang/lib/AST/OpenMPClause.cpp655
-rw-r--r--clang/lib/AST/ParentMapContext.cpp321
-rw-r--r--clang/lib/AST/PrintfFormatString.cpp4
-rw-r--r--clang/lib/AST/RawCommentList.cpp3
-rw-r--r--clang/lib/AST/RecordLayoutBuilder.cpp38
-rw-r--r--clang/lib/AST/Stmt.cpp19
-rw-r--r--clang/lib/AST/StmtOpenMP.cpp101
-rw-r--r--clang/lib/AST/StmtPrinter.cpp200
-rw-r--r--clang/lib/AST/StmtProfile.cpp152
-rw-r--r--clang/lib/AST/TemplateBase.cpp142
-rw-r--r--clang/lib/AST/TemplateName.cpp70
-rw-r--r--clang/lib/AST/TextNodeDumper.cpp407
-rw-r--r--clang/lib/AST/Type.cpp479
-rw-r--r--clang/lib/AST/TypeLoc.cpp99
-rw-r--r--clang/lib/AST/TypePrinter.cpp114
-rw-r--r--clang/lib/AST/VTableBuilder.cpp154
-rw-r--r--clang/lib/ASTMatchers/ASTMatchFinder.cpp128
-rw-r--r--clang/lib/ASTMatchers/ASTMatchersInternal.cpp282
-rw-r--r--clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp4
-rw-r--r--clang/lib/ASTMatchers/Dynamic/Marshallers.cpp172
-rw-r--r--clang/lib/ASTMatchers/Dynamic/Marshallers.h264
-rw-r--r--clang/lib/ASTMatchers/Dynamic/Registry.cpp33
-rw-r--r--clang/lib/ASTMatchers/Dynamic/VariantValue.cpp11
-rw-r--r--clang/lib/ASTMatchers/GtestMatchers.cpp104
-rw-r--r--clang/lib/Analysis/AnalysisDeclContext.cpp117
-rw-r--r--clang/lib/Analysis/BodyFarm.cpp44
-rw-r--r--clang/lib/Analysis/CFG.cpp40
-rw-r--r--clang/lib/Analysis/CallGraph.cpp24
-rw-r--r--clang/lib/Analysis/CloneDetection.cpp1
-rw-r--r--clang/lib/Analysis/ExprMutationAnalyzer.cpp43
-rw-r--r--clang/lib/Analysis/LiveVariables.cpp62
-rw-r--r--clang/lib/Analysis/PathDiagnostic.cpp7
-rw-r--r--clang/lib/Analysis/PostOrderCFGView.cpp5
-rw-r--r--clang/lib/Analysis/ProgramPoint.cpp1
-rw-r--r--clang/lib/Analysis/ReachableCode.cpp4
-rw-r--r--clang/lib/Analysis/RetainSummaryManager.cpp8
-rw-r--r--clang/lib/Analysis/ThreadSafety.cpp59
-rw-r--r--clang/lib/Analysis/UninitializedValues.cpp142
-rw-r--r--clang/lib/Analysis/plugins/CheckerOptionHandling/CheckerOptionHandling.cpp2
-rw-r--r--clang/lib/Basic/Attributes.cpp51
-rw-r--r--clang/lib/Basic/CodeGenOptions.cpp2
-rw-r--r--clang/lib/Basic/Cuda.cpp364
-rw-r--r--clang/lib/Basic/Diagnostic.cpp6
-rw-r--r--clang/lib/Basic/DiagnosticIDs.cpp3
-rw-r--r--clang/lib/Basic/ExpressionTraits.cpp36
-rw-r--r--clang/lib/Basic/FileManager.cpp26
-rw-r--r--clang/lib/Basic/FixedPoint.cpp136
-rw-r--r--clang/lib/Basic/IdentifierTable.cpp23
-rw-r--r--clang/lib/Basic/LangOptions.cpp22
-rw-r--r--clang/lib/Basic/Module.cpp92
-rw-r--r--clang/lib/Basic/OpenMPKinds.cpp728
-rw-r--r--clang/lib/Basic/SanitizerBlacklist.cpp7
-rw-r--r--clang/lib/Basic/SourceManager.cpp165
-rw-r--r--clang/lib/Basic/TargetInfo.cpp26
-rw-r--r--clang/lib/Basic/Targets.cpp9
-rw-r--r--clang/lib/Basic/Targets/AArch64.cpp183
-rw-r--r--clang/lib/Basic/Targets/AArch64.h28
-rw-r--r--clang/lib/Basic/Targets/AMDGPU.cpp84
-rw-r--r--clang/lib/Basic/Targets/AMDGPU.h51
-rw-r--r--clang/lib/Basic/Targets/ARC.h2
-rw-r--r--clang/lib/Basic/Targets/ARM.cpp48
-rw-r--r--clang/lib/Basic/Targets/ARM.h8
-rw-r--r--clang/lib/Basic/Targets/AVR.cpp1
-rw-r--r--clang/lib/Basic/Targets/BPF.h4
-rw-r--r--clang/lib/Basic/Targets/Hexagon.cpp46
-rw-r--r--clang/lib/Basic/Targets/Hexagon.h17
-rw-r--r--clang/lib/Basic/Targets/Lanai.h2
-rw-r--r--clang/lib/Basic/Targets/MSP430.h10
-rw-r--r--clang/lib/Basic/Targets/Mips.h1
-rw-r--r--clang/lib/Basic/Targets/NVPTX.cpp7
-rw-r--r--clang/lib/Basic/Targets/NVPTX.h2
-rw-r--r--clang/lib/Basic/Targets/OSTargets.cpp4
-rw-r--r--clang/lib/Basic/Targets/OSTargets.h6
-rw-r--r--clang/lib/Basic/Targets/PNaCl.h2
-rw-r--r--clang/lib/Basic/Targets/PPC.cpp113
-rw-r--r--clang/lib/Basic/Targets/PPC.h69
-rw-r--r--clang/lib/Basic/Targets/RISCV.cpp6
-rw-r--r--clang/lib/Basic/Targets/RISCV.h5
-rw-r--r--clang/lib/Basic/Targets/SPIR.cpp2
-rw-r--r--clang/lib/Basic/Targets/SPIR.h2
-rw-r--r--clang/lib/Basic/Targets/Sparc.h3
-rw-r--r--clang/lib/Basic/Targets/SystemZ.h15
-rw-r--r--clang/lib/Basic/Targets/VE.cpp39
-rw-r--r--clang/lib/Basic/Targets/VE.h170
-rw-r--r--clang/lib/Basic/Targets/WebAssembly.cpp25
-rw-r--r--clang/lib/Basic/Targets/WebAssembly.h21
-rw-r--r--clang/lib/Basic/Targets/X86.cpp840
-rw-r--r--clang/lib/Basic/Targets/X86.h58
-rw-r--r--clang/lib/Basic/Targets/XCore.h2
-rw-r--r--clang/lib/Basic/TypeTraits.cpp86
-rw-r--r--clang/lib/Basic/Version.cpp51
-rw-r--r--clang/lib/Basic/Warnings.cpp5
-rw-r--r--clang/lib/Basic/XRayInstr.cpp18
-rw-r--r--clang/lib/Basic/XRayLists.cpp6
-rw-r--r--clang/lib/CodeGen/ABIInfo.h6
-rw-r--r--clang/lib/CodeGen/BackendUtil.cpp266
-rw-r--r--clang/lib/CodeGen/CGAtomic.cpp13
-rw-r--r--clang/lib/CodeGen/CGBlocks.cpp223
-rw-r--r--clang/lib/CodeGen/CGBlocks.h4
-rw-r--r--clang/lib/CodeGen/CGBuilder.h32
-rw-r--r--clang/lib/CodeGen/CGBuiltin.cpp3300
-rw-r--r--clang/lib/CodeGen/CGCUDANV.cpp148
-rw-r--r--clang/lib/CodeGen/CGCUDARuntime.h39
-rw-r--r--clang/lib/CodeGen/CGCXX.cpp4
-rw-r--r--clang/lib/CodeGen/CGCXXABI.cpp19
-rw-r--r--clang/lib/CodeGen/CGCXXABI.h57
-rw-r--r--clang/lib/CodeGen/CGCall.cpp853
-rw-r--r--clang/lib/CodeGen/CGCall.h34
-rw-r--r--clang/lib/CodeGen/CGClass.cpp56
-rw-r--r--clang/lib/CodeGen/CGCleanup.cpp11
-rw-r--r--clang/lib/CodeGen/CGCleanup.h23
-rw-r--r--clang/lib/CodeGen/CGCoroutine.cpp6
-rw-r--r--clang/lib/CodeGen/CGDebugInfo.cpp287
-rw-r--r--clang/lib/CodeGen/CGDebugInfo.h22
-rw-r--r--clang/lib/CodeGen/CGDecl.cpp77
-rw-r--r--clang/lib/CodeGen/CGDeclCXX.cpp194
-rw-r--r--clang/lib/CodeGen/CGException.cpp104
-rw-r--r--clang/lib/CodeGen/CGExpr.cpp322
-rw-r--r--clang/lib/CodeGen/CGExprAgg.cpp73
-rw-r--r--clang/lib/CodeGen/CGExprCXX.cpp40
-rw-r--r--clang/lib/CodeGen/CGExprComplex.cpp13
-rw-r--r--clang/lib/CodeGen/CGExprConstant.cpp72
-rw-r--r--clang/lib/CodeGen/CGExprScalar.cpp449
-rw-r--r--clang/lib/CodeGen/CGGPUBuiltin.cpp36
-rw-r--r--clang/lib/CodeGen/CGLoopInfo.cpp11
-rw-r--r--clang/lib/CodeGen/CGLoopInfo.h2
-rw-r--r--clang/lib/CodeGen/CGNonTrivialStruct.cpp50
-rw-r--r--clang/lib/CodeGen/CGObjC.cpp80
-rw-r--r--clang/lib/CodeGen/CGObjCGNU.cpp53
-rw-r--r--clang/lib/CodeGen/CGObjCMac.cpp120
-rw-r--r--clang/lib/CodeGen/CGObjCRuntime.cpp13
-rw-r--r--clang/lib/CodeGen/CGObjCRuntime.h5
-rw-r--r--clang/lib/CodeGen/CGOpenMPRuntime.cpp3650
-rw-r--r--clang/lib/CodeGen/CGOpenMPRuntime.h311
-rw-r--r--clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp94
-rw-r--r--clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.h14
-rw-r--r--clang/lib/CodeGen/CGRecordLayoutBuilder.cpp25
-rw-r--r--clang/lib/CodeGen/CGStmt.cpp113
-rw-r--r--clang/lib/CodeGen/CGStmtOpenMP.cpp1726
-rw-r--r--clang/lib/CodeGen/CGVTables.cpp372
-rw-r--r--clang/lib/CodeGen/CGVTables.h40
-rw-r--r--clang/lib/CodeGen/CGValue.h38
-rw-r--r--clang/lib/CodeGen/CodeGenABITypes.cpp44
-rw-r--r--clang/lib/CodeGen/CodeGenAction.cpp30
-rw-r--r--clang/lib/CodeGen/CodeGenFunction.cpp355
-rw-r--r--clang/lib/CodeGen/CodeGenFunction.h359
-rw-r--r--clang/lib/CodeGen/CodeGenModule.cpp589
-rw-r--r--clang/lib/CodeGen/CodeGenModule.h102
-rw-r--r--clang/lib/CodeGen/CodeGenPGO.cpp30
-rw-r--r--clang/lib/CodeGen/CodeGenPGO.h4
-rw-r--r--clang/lib/CodeGen/CodeGenTBAA.cpp37
-rw-r--r--clang/lib/CodeGen/CodeGenTypeCache.h4
-rw-r--r--clang/lib/CodeGen/CodeGenTypes.cpp197
-rw-r--r--clang/lib/CodeGen/CodeGenTypes.h8
-rw-r--r--clang/lib/CodeGen/ConstantEmitter.h2
-rw-r--r--clang/lib/CodeGen/ConstantInitBuilder.cpp22
-rw-r--r--clang/lib/CodeGen/CoverageMappingGen.cpp151
-rw-r--r--clang/lib/CodeGen/CoverageMappingGen.h18
-rw-r--r--clang/lib/CodeGen/EHScopeStack.h14
-rw-r--r--clang/lib/CodeGen/ItaniumCXXABI.cpp367
-rw-r--r--clang/lib/CodeGen/MicrosoftCXXABI.cpp69
-rw-r--r--clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp4
-rw-r--r--clang/lib/CodeGen/PatternInit.cpp12
-rw-r--r--clang/lib/CodeGen/SanitizerMetadata.cpp15
-rw-r--r--clang/lib/CodeGen/SanitizerMetadata.h2
-rw-r--r--clang/lib/CodeGen/SwiftCallingConv.cpp7
-rw-r--r--clang/lib/CodeGen/TargetInfo.cpp1734
-rw-r--r--clang/lib/CodeGen/TargetInfo.h47
-rw-r--r--clang/lib/CrossTU/CrossTranslationUnit.cpp283
-rw-r--r--clang/lib/DirectoryWatcher/DirectoryScanner.cpp2
-rw-r--r--clang/lib/DirectoryWatcher/DirectoryScanner.h2
-rw-r--r--clang/lib/DirectoryWatcher/default/DirectoryWatcher-not-implemented.cpp2
-rw-r--r--clang/lib/DirectoryWatcher/mac/DirectoryWatcher-mac.cpp53
-rw-r--r--clang/lib/Driver/Action.cpp7
-rw-r--r--clang/lib/Driver/Compilation.cpp34
-rw-r--r--clang/lib/Driver/Distro.cpp4
-rw-r--r--clang/lib/Driver/Driver.cpp300
-rw-r--r--clang/lib/Driver/Job.cpp80
-rw-r--r--clang/lib/Driver/Multilib.cpp8
-rw-r--r--clang/lib/Driver/SanitizerArgs.cpp293
-rw-r--r--clang/lib/Driver/Tool.cpp9
-rw-r--r--clang/lib/Driver/ToolChain.cpp238
-rw-r--r--clang/lib/Driver/ToolChains/AIX.cpp53
-rw-r--r--clang/lib/Driver/ToolChains/AIX.h7
-rw-r--r--clang/lib/Driver/ToolChains/AMDGPU.cpp483
-rw-r--r--clang/lib/Driver/ToolChains/AMDGPU.h41
-rw-r--r--clang/lib/Driver/ToolChains/AVR.cpp9
-rw-r--r--clang/lib/Driver/ToolChains/AVR.h4
-rw-r--r--clang/lib/Driver/ToolChains/Ananas.cpp8
-rw-r--r--clang/lib/Driver/ToolChains/Ananas.h9
-rw-r--r--clang/lib/Driver/ToolChains/Arch/AArch64.cpp68
-rw-r--r--clang/lib/Driver/ToolChains/Arch/ARM.cpp59
-rw-r--r--clang/lib/Driver/ToolChains/Arch/ARM.h6
-rw-r--r--clang/lib/Driver/ToolChains/Arch/PPC.cpp22
-rw-r--r--clang/lib/Driver/ToolChains/Arch/RISCV.cpp193
-rw-r--r--clang/lib/Driver/ToolChains/Arch/SystemZ.cpp30
-rw-r--r--clang/lib/Driver/ToolChains/Arch/SystemZ.h10
-rw-r--r--clang/lib/Driver/ToolChains/Arch/VE.cpp26
-rw-r--r--clang/lib/Driver/ToolChains/Arch/VE.h33
-rw-r--r--clang/lib/Driver/ToolChains/Arch/X86.cpp44
-rw-r--r--clang/lib/Driver/ToolChains/BareMetal.cpp10
-rw-r--r--clang/lib/Driver/ToolChains/Clang.cpp763
-rw-r--r--clang/lib/Driver/ToolChains/Clang.h5
-rw-r--r--clang/lib/Driver/ToolChains/CloudABI.cpp7
-rw-r--r--clang/lib/Driver/ToolChains/CloudABI.h4
-rw-r--r--clang/lib/Driver/ToolChains/CommonArgs.cpp344
-rw-r--r--clang/lib/Driver/ToolChains/CommonArgs.h28
-rw-r--r--clang/lib/Driver/ToolChains/CrossWindows.cpp6
-rw-r--r--clang/lib/Driver/ToolChains/CrossWindows.h3
-rw-r--r--clang/lib/Driver/ToolChains/Cuda.cpp242
-rw-r--r--clang/lib/Driver/ToolChains/Cuda.h21
-rw-r--r--clang/lib/Driver/ToolChains/Darwin.cpp448
-rw-r--r--clang/lib/Driver/ToolChains/Darwin.h44
-rw-r--r--clang/lib/Driver/ToolChains/DragonFly.cpp6
-rw-r--r--clang/lib/Driver/ToolChains/DragonFly.h8
-rw-r--r--clang/lib/Driver/ToolChains/Flang.cpp6
-rw-r--r--clang/lib/Driver/ToolChains/FreeBSD.cpp22
-rw-r--r--clang/lib/Driver/ToolChains/FreeBSD.h22
-rw-r--r--clang/lib/Driver/ToolChains/Fuchsia.cpp27
-rw-r--r--clang/lib/Driver/ToolChains/Fuchsia.h3
-rw-r--r--clang/lib/Driver/ToolChains/Gnu.cpp250
-rw-r--r--clang/lib/Driver/ToolChains/Gnu.h52
-rw-r--r--clang/lib/Driver/ToolChains/HIP.cpp404
-rw-r--r--clang/lib/Driver/ToolChains/HIP.h38
-rw-r--r--clang/lib/Driver/ToolChains/Hexagon.cpp122
-rw-r--r--clang/lib/Driver/ToolChains/Hexagon.h18
-rw-r--r--clang/lib/Driver/ToolChains/Hurd.cpp48
-rw-r--r--clang/lib/Driver/ToolChains/Hurd.h4
-rw-r--r--clang/lib/Driver/ToolChains/InterfaceStubs.cpp5
-rw-r--r--clang/lib/Driver/ToolChains/Linux.cpp175
-rw-r--r--clang/lib/Driver/ToolChains/Linux.h13
-rw-r--r--clang/lib/Driver/ToolChains/MSP430.cpp7
-rw-r--r--clang/lib/Driver/ToolChains/MSP430.h7
-rw-r--r--clang/lib/Driver/ToolChains/MSVC.cpp58
-rw-r--r--clang/lib/Driver/ToolChains/MSVC.h12
-rw-r--r--clang/lib/Driver/ToolChains/MinGW.cpp53
-rw-r--r--clang/lib/Driver/ToolChains/MinGW.h8
-rw-r--r--clang/lib/Driver/ToolChains/Minix.cpp6
-rw-r--r--clang/lib/Driver/ToolChains/Minix.h9
-rw-r--r--clang/lib/Driver/ToolChains/MipsLinux.cpp2
-rw-r--r--clang/lib/Driver/ToolChains/Myriad.cpp15
-rw-r--r--clang/lib/Driver/ToolChains/Myriad.h4
-rw-r--r--clang/lib/Driver/ToolChains/NaCl.cpp3
-rw-r--r--clang/lib/Driver/ToolChains/NaCl.h4
-rw-r--r--clang/lib/Driver/ToolChains/NetBSD.cpp6
-rw-r--r--clang/lib/Driver/ToolChains/NetBSD.h9
-rw-r--r--clang/lib/Driver/ToolChains/OpenBSD.cpp6
-rw-r--r--clang/lib/Driver/ToolChains/OpenBSD.h8
-rw-r--r--clang/lib/Driver/ToolChains/PS4CPU.cpp251
-rw-r--r--clang/lib/Driver/ToolChains/PS4CPU.h19
-rw-r--r--clang/lib/Driver/ToolChains/RISCVToolchain.cpp9
-rw-r--r--clang/lib/Driver/ToolChains/RISCVToolchain.h6
-rw-r--r--clang/lib/Driver/ToolChains/ROCm.h228
-rw-r--r--clang/lib/Driver/ToolChains/Solaris.cpp8
-rw-r--r--clang/lib/Driver/ToolChains/VEToolchain.cpp119
-rw-r--r--clang/lib/Driver/ToolChains/VEToolchain.h66
-rw-r--r--clang/lib/Driver/ToolChains/WebAssembly.cpp37
-rw-r--r--clang/lib/Driver/ToolChains/WebAssembly.h5
-rw-r--r--clang/lib/Driver/ToolChains/XCore.cpp6
-rw-r--r--clang/lib/Driver/Types.cpp77
-rw-r--r--clang/lib/Driver/XRayArgs.cpp298
-rw-r--r--clang/lib/Format/BreakableToken.cpp9
-rw-r--r--clang/lib/Format/ContinuationIndenter.cpp174
-rw-r--r--clang/lib/Format/ContinuationIndenter.h44
-rw-r--r--clang/lib/Format/Format.cpp271
-rw-r--r--clang/lib/Format/FormatToken.cpp5
-rw-r--r--clang/lib/Format/FormatToken.h104
-rw-r--r--clang/lib/Format/FormatTokenLexer.cpp291
-rw-r--r--clang/lib/Format/FormatTokenLexer.h20
-rw-r--r--clang/lib/Format/NamespaceEndCommentsFixer.cpp37
-rw-r--r--clang/lib/Format/SortJavaScriptImports.cpp2
-rw-r--r--clang/lib/Format/TokenAnalyzer.cpp17
-rw-r--r--clang/lib/Format/TokenAnnotator.cpp697
-rw-r--r--clang/lib/Format/UnwrappedLineFormatter.cpp38
-rw-r--r--clang/lib/Format/UnwrappedLineParser.cpp382
-rw-r--r--clang/lib/Format/UnwrappedLineParser.h9
-rw-r--r--clang/lib/Format/WhitespaceManager.cpp187
-rw-r--r--clang/lib/Format/WhitespaceManager.h30
-rw-r--r--clang/lib/Frontend/ASTConsumers.cpp35
-rw-r--r--clang/lib/Frontend/ASTUnit.cpp32
-rw-r--r--clang/lib/Frontend/ChainedIncludesSource.cpp2
-rw-r--r--clang/lib/Frontend/CompilerInstance.cpp63
-rw-r--r--clang/lib/Frontend/CompilerInvocation.cpp757
-rw-r--r--clang/lib/Frontend/CreateInvocationFromCommandLine.cpp2
-rw-r--r--clang/lib/Frontend/DependencyFile.cpp13
-rw-r--r--clang/lib/Frontend/DependencyGraph.cpp3
-rw-r--r--clang/lib/Frontend/FrontendAction.cpp81
-rw-r--r--clang/lib/Frontend/FrontendActions.cpp20
-rw-r--r--clang/lib/Frontend/FrontendOptions.cpp3
-rw-r--r--clang/lib/Frontend/HeaderIncludeGen.cpp7
-rw-r--r--clang/lib/Frontend/InitHeaderSearch.cpp15
-rw-r--r--clang/lib/Frontend/InitPreprocessor.cpp79
-rw-r--r--clang/lib/Frontend/InterfaceStubFunctionsConsumer.cpp15
-rw-r--r--clang/lib/Frontend/LogDiagnosticPrinter.cpp9
-rw-r--r--clang/lib/Frontend/ModuleDependencyCollector.cpp2
-rw-r--r--clang/lib/Frontend/PrecompiledPreamble.cpp119
-rw-r--r--clang/lib/Frontend/Rewrite/FixItRewriter.cpp6
-rw-r--r--clang/lib/Frontend/Rewrite/FrontendActions.cpp12
-rw-r--r--clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp110
-rw-r--r--clang/lib/Frontend/Rewrite/RewriteObjC.cpp105
-rw-r--r--clang/lib/Frontend/SerializedDiagnosticPrinter.cpp26
-rw-r--r--clang/lib/Frontend/TextDiagnosticBuffer.cpp8
-rw-r--r--clang/lib/Frontend/VerifyDiagnosticConsumer.cpp91
-rw-r--r--clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp50
-rw-r--r--clang/lib/Headers/__clang_cuda_cmath.h41
-rw-r--r--clang/lib/Headers/__clang_cuda_complex_builtins.h268
-rw-r--r--clang/lib/Headers/__clang_cuda_device_functions.h333
-rw-r--r--clang/lib/Headers/__clang_cuda_intrinsics.h4
-rw-r--r--clang/lib/Headers/__clang_cuda_libdevice_declares.h2
-rw-r--r--clang/lib/Headers/__clang_cuda_math.h347
-rw-r--r--clang/lib/Headers/__clang_cuda_math_forward_declares.h41
-rw-r--r--clang/lib/Headers/__clang_cuda_runtime_wrapper.h19
-rw-r--r--clang/lib/Headers/__clang_hip_libdevice_declares.h326
-rw-r--r--clang/lib/Headers/__clang_hip_math.h1185
-rw-r--r--clang/lib/Headers/__clang_hip_runtime_wrapper.h64
-rw-r--r--clang/lib/Headers/altivec.h402
-rw-r--r--clang/lib/Headers/amxintrin.h225
-rw-r--r--clang/lib/Headers/arm_acle.h16
-rw-r--r--clang/lib/Headers/avx2intrin.h2
-rw-r--r--clang/lib/Headers/avx512bwintrin.h19
-rw-r--r--clang/lib/Headers/avx512fintrin.h42
-rw-r--r--clang/lib/Headers/avx512vlbwintrin.h18
-rw-r--r--clang/lib/Headers/avx512vlintrin.h53
-rw-r--r--clang/lib/Headers/bmiintrin.h50
-rw-r--r--clang/lib/Headers/cet.h66
-rw-r--r--clang/lib/Headers/cldemoteintrin.h8
-rw-r--r--clang/lib/Headers/cpuid.h9
-rw-r--r--clang/lib/Headers/emmintrin.h6
-rw-r--r--clang/lib/Headers/immintrin.h207
-rw-r--r--clang/lib/Headers/intrin.h3
-rw-r--r--clang/lib/Headers/module.modulemap6
-rw-r--r--clang/lib/Headers/msa.h4
-rw-r--r--clang/lib/Headers/opencl-c.h698
-rw-r--r--clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h (renamed from clang/lib/Headers/openmp_wrappers/__clang_openmp_math_declares.h)25
-rw-r--r--clang/lib/Headers/openmp_wrappers/__clang_openmp_math.h35
-rw-r--r--clang/lib/Headers/openmp_wrappers/cmath69
-rw-r--r--clang/lib/Headers/openmp_wrappers/complex25
-rw-r--r--clang/lib/Headers/openmp_wrappers/complex.h25
-rw-r--r--clang/lib/Headers/openmp_wrappers/math.h46
-rw-r--r--clang/lib/Headers/openmp_wrappers/new70
-rw-r--r--clang/lib/Headers/openmp_wrappers/time.h32
-rw-r--r--clang/lib/Headers/ppc_wrappers/emmintrin.h4
-rw-r--r--clang/lib/Headers/serializeintrin.h30
-rw-r--r--clang/lib/Headers/tsxldtrkintrin.h56
-rw-r--r--clang/lib/Headers/vecintrin.h8962
-rw-r--r--clang/lib/Headers/wasm_simd128.h1133
-rw-r--r--clang/lib/Headers/x86intrin.h27
-rw-r--r--clang/lib/Headers/xmmintrin.h54
-rw-r--r--clang/lib/Index/CommentToXML.cpp2
-rw-r--r--clang/lib/Index/FileIndexRecord.cpp1
-rw-r--r--clang/lib/Index/IndexBody.cpp2
-rw-r--r--clang/lib/Index/IndexDecl.cpp18
-rw-r--r--clang/lib/Index/IndexSymbol.cpp12
-rw-r--r--clang/lib/Index/IndexTypeSourceInfo.cpp5
-rw-r--r--clang/lib/Index/IndexingAction.cpp15
-rw-r--r--clang/lib/Index/IndexingContext.cpp11
-rw-r--r--clang/lib/Index/USRGeneration.cpp20
-rw-r--r--clang/lib/Lex/DependencyDirectivesSourceMinimizer.cpp1
-rw-r--r--clang/lib/Lex/HeaderSearch.cpp68
-rw-r--r--clang/lib/Lex/Lexer.cpp22
-rw-r--r--clang/lib/Lex/LiteralSupport.cpp111
-rw-r--r--clang/lib/Lex/ModuleMap.cpp71
-rw-r--r--clang/lib/Lex/PPCallbacks.cpp19
-rw-r--r--clang/lib/Lex/PPDirectives.cpp236
-rw-r--r--clang/lib/Lex/PPExpressions.cpp26
-rw-r--r--clang/lib/Lex/PPLexerChange.cpp9
-rw-r--r--clang/lib/Lex/PPMacroExpansion.cpp6
-rw-r--r--clang/lib/Lex/Pragma.cpp70
-rw-r--r--clang/lib/Lex/Preprocessor.cpp36
-rw-r--r--clang/lib/Lex/TokenConcatenation.cpp4
-rw-r--r--clang/lib/Parse/ParseCXXInlineMethods.cpp280
-rw-r--r--clang/lib/Parse/ParseDecl.cpp702
-rw-r--r--clang/lib/Parse/ParseDeclCXX.cpp169
-rw-r--r--clang/lib/Parse/ParseExpr.cpp412
-rw-r--r--clang/lib/Parse/ParseExprCXX.cpp713
-rw-r--r--clang/lib/Parse/ParseInit.cpp25
-rw-r--r--clang/lib/Parse/ParseObjc.cpp66
-rw-r--r--clang/lib/Parse/ParseOpenMP.cpp1503
-rw-r--r--clang/lib/Parse/ParsePragma.cpp330
-rw-r--r--clang/lib/Parse/ParseStmt.cpp53
-rw-r--r--clang/lib/Parse/ParseStmtAsm.cpp166
-rw-r--r--clang/lib/Parse/ParseTemplate.cpp429
-rw-r--r--clang/lib/Parse/ParseTentative.cpp360
-rw-r--r--clang/lib/Parse/Parser.cpp137
-rw-r--r--clang/lib/Sema/AnalysisBasedWarnings.cpp67
-rw-r--r--clang/lib/Sema/CodeCompleteConsumer.cpp44
-rw-r--r--clang/lib/Sema/DeclSpec.cpp65
-rw-r--r--clang/lib/Sema/JumpDiagnostics.cpp25
-rw-r--r--clang/lib/Sema/MultiplexExternalSemaSource.cpp6
-rw-r--r--clang/lib/Sema/OpenCLBuiltins.td331
-rw-r--r--clang/lib/Sema/ParsedAttr.cpp86
-rw-r--r--clang/lib/Sema/Sema.cpp372
-rw-r--r--clang/lib/Sema/SemaAttr.cpp250
-rw-r--r--clang/lib/Sema/SemaAvailability.cpp964
-rw-r--r--clang/lib/Sema/SemaCUDA.cpp120
-rw-r--r--clang/lib/Sema/SemaCast.cpp196
-rw-r--r--clang/lib/Sema/SemaChecking.cpp2536
-rw-r--r--clang/lib/Sema/SemaCodeComplete.cpp782
-rw-r--r--clang/lib/Sema/SemaConcept.cpp433
-rw-r--r--clang/lib/Sema/SemaCoroutine.cpp140
-rw-r--r--clang/lib/Sema/SemaDecl.cpp845
-rw-r--r--clang/lib/Sema/SemaDeclAttr.cpp1408
-rw-r--r--clang/lib/Sema/SemaDeclCXX.cpp1221
-rw-r--r--clang/lib/Sema/SemaDeclObjC.cpp117
-rw-r--r--clang/lib/Sema/SemaExceptionSpec.cpp27
-rw-r--r--clang/lib/Sema/SemaExpr.cpp1845
-rw-r--r--clang/lib/Sema/SemaExprCXX.cpp979
-rw-r--r--clang/lib/Sema/SemaExprObjC.cpp236
-rw-r--r--clang/lib/Sema/SemaInit.cpp156
-rw-r--r--clang/lib/Sema/SemaLambda.cpp27
-rw-r--r--clang/lib/Sema/SemaLookup.cpp225
-rw-r--r--clang/lib/Sema/SemaObjCProperty.cpp617
-rw-r--r--clang/lib/Sema/SemaOpenMP.cpp3883
-rw-r--r--clang/lib/Sema/SemaOverload.cpp625
-rw-r--r--clang/lib/Sema/SemaPseudoObject.cpp107
-rw-r--r--clang/lib/Sema/SemaSYCL.cpp49
-rw-r--r--clang/lib/Sema/SemaStmt.cpp127
-rw-r--r--clang/lib/Sema/SemaStmtAsm.cpp27
-rw-r--r--clang/lib/Sema/SemaStmtAttr.cpp41
-rw-r--r--clang/lib/Sema/SemaTemplate.cpp888
-rw-r--r--clang/lib/Sema/SemaTemplateDeduction.cpp444
-rw-r--r--clang/lib/Sema/SemaTemplateInstantiate.cpp565
-rw-r--r--clang/lib/Sema/SemaTemplateInstantiateDecl.cpp357
-rw-r--r--clang/lib/Sema/SemaTemplateVariadic.cpp4
-rw-r--r--clang/lib/Sema/SemaType.cpp717
-rw-r--r--clang/lib/Sema/TreeTransform.h1322
-rw-r--r--clang/lib/Sema/UsedDeclVisitor.h102
-rw-r--r--clang/lib/Serialization/ASTCommon.cpp14
-rw-r--r--clang/lib/Serialization/ASTReader.cpp2037
-rw-r--r--clang/lib/Serialization/ASTReaderDecl.cpp133
-rw-r--r--clang/lib/Serialization/ASTReaderStmt.cpp459
-rw-r--r--clang/lib/Serialization/ASTWriter.cpp345
-rw-r--r--clang/lib/Serialization/ASTWriterDecl.cpp49
-rw-r--r--clang/lib/Serialization/ASTWriterStmt.cpp270
-rw-r--r--clang/lib/Serialization/GeneratePCH.cpp5
-rw-r--r--clang/lib/Serialization/GlobalModuleIndex.cpp15
-rw-r--r--clang/lib/Serialization/ModuleManager.cpp11
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp80
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp12
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp22
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp14
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp81
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp19
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp623
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp10
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp357
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp16
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp12
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp3
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/CheckPlacementNew.cpp299
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/CloneChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp1083
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp26
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/DebugContainerModeling.cpp150
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/DebugIteratorModeling.cpp54
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp17
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp241
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp95
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp47
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp10
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/GTestChecker.cpp3
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp344
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h12
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/InvalidatedIteratorChecker.cpp53
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/Iterator.cpp110
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/Iterator.h22
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp1410
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp138
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp6
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp21
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp14
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp1406
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/MismatchedIteratorChecker.cpp18
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp3
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp33
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp149
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp181
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp6
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/OSObjectCStyleCast.cpp5
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp3
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp66
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp9
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/PointerIterationChecker.cpp3
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/PointerSortingChecker.cpp10
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp562
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp101
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h32
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp21
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h17
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp14
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp8
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp180
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/SmartPtr.h33
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/SmartPtrChecker.cpp80
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp189
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp26
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp2223
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp1048
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp11
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp21
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp3
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp298
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp7
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp9
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp93
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.h84
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/WebKit/DiagOutputUtils.h36
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/WebKit/NoUncountedMembersChecker.cpp155
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp172
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h59
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp167
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp195
-rwxr-xr-xclang/lib/StaticAnalyzer/Checkers/Yaml.h1
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/cert/PutenvWithAutoChecker.cpp66
-rw-r--r--clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Core/BugReporter.cpp71
-rw-r--r--clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp45
-rw-r--r--clang/lib/StaticAnalyzer/Core/CallEvent.cpp363
-rw-r--r--clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp42
-rw-r--r--clang/lib/StaticAnalyzer/Core/CheckerManager.cpp59
-rw-r--r--clang/lib/StaticAnalyzer/Core/CheckerRegistryData.cpp241
-rw-r--r--clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp23
-rw-r--r--clang/lib/StaticAnalyzer/Core/CoreEngine.cpp43
-rw-r--r--clang/lib/StaticAnalyzer/Core/DynamicSize.cpp71
-rw-r--r--clang/lib/StaticAnalyzer/Core/DynamicType.cpp206
-rw-r--r--clang/lib/StaticAnalyzer/Core/Environment.cpp12
-rw-r--r--clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp5
-rw-r--r--clang/lib/StaticAnalyzer/Core/ExprEngine.cpp51
-rw-r--r--clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp14
-rw-r--r--clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp354
-rw-r--r--clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp36
-rw-r--r--clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp67
-rw-r--r--clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp18
-rw-r--r--clang/lib/StaticAnalyzer/Core/LoopWidening.cpp6
-rw-r--r--clang/lib/StaticAnalyzer/Core/MemRegion.cpp296
-rw-r--r--clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp30
-rw-r--r--clang/lib/StaticAnalyzer/Core/ProgramState.cpp17
-rw-r--r--clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp842
-rw-r--r--clang/lib/StaticAnalyzer/Core/RegionStore.cpp51
-rw-r--r--clang/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Core/SValBuilder.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp13
-rw-r--r--clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp7
-rw-r--r--clang/lib/StaticAnalyzer/Core/Store.cpp3
-rw-r--r--clang/lib/StaticAnalyzer/Core/SubEngine.cpp13
-rw-r--r--clang/lib/StaticAnalyzer/Core/SymbolManager.cpp71
-rw-r--r--clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp156
-rw-r--r--clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp175
-rw-r--r--clang/lib/StaticAnalyzer/Frontend/AnalyzerHelpFlags.cpp (renamed from clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp)62
-rw-r--r--clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp494
-rw-r--r--clang/lib/StaticAnalyzer/Frontend/CreateCheckerManager.cpp50
-rw-r--r--clang/lib/Testing/CommandLineArgs.cpp70
-rw-r--r--clang/lib/Tooling/ASTDiff/ASTDiff.cpp29
-rw-r--r--clang/lib/Tooling/AllTUsExecution.cpp6
-rw-r--r--clang/lib/Tooling/ArgumentsAdjusters.cpp11
-rw-r--r--clang/lib/Tooling/CompilationDatabase.cpp28
-rw-r--r--clang/lib/Tooling/Core/Diagnostic.cpp18
-rw-r--r--clang/lib/Tooling/Core/Lookup.cpp11
-rw-r--r--clang/lib/Tooling/Core/Replacement.cpp12
-rw-r--r--clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp3
-rw-r--r--clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp131
-rw-r--r--clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp13
-rw-r--r--clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp101
-rw-r--r--clang/lib/Tooling/Execution.cpp10
-rw-r--r--clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp1
-rw-r--r--clang/lib/Tooling/FileMatchTrie.cpp2
-rw-r--r--clang/lib/Tooling/Inclusions/HeaderIncludes.cpp6
-rw-r--r--clang/lib/Tooling/InterpolatingCompilationDatabase.cpp9
-rw-r--r--clang/lib/Tooling/JSONCompilationDatabase.cpp2
-rw-r--r--clang/lib/Tooling/Refactoring/ASTSelection.cpp1
-rw-r--r--clang/lib/Tooling/Refactoring/AtomicChange.cpp12
-rw-r--r--clang/lib/Tooling/Refactoring/Rename/RenamingAction.cpp3
-rw-r--r--clang/lib/Tooling/Refactoring/Rename/USRFinder.cpp1
-rw-r--r--clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp15
-rw-r--r--clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp9
-rw-r--r--clang/lib/Tooling/RefactoringCallbacks.cpp22
-rw-r--r--clang/lib/Tooling/Syntax/BuildTree.cpp1108
-rw-r--r--clang/lib/Tooling/Syntax/Mutations.cpp2
-rw-r--r--clang/lib/Tooling/Syntax/Nodes.cpp281
-rw-r--r--clang/lib/Tooling/Syntax/Tokens.cpp609
-rw-r--r--clang/lib/Tooling/Syntax/Tree.cpp23
-rw-r--r--clang/lib/Tooling/Tooling.cpp16
-rw-r--r--clang/lib/Tooling/Transformer/Parsing.cpp279
-rw-r--r--clang/lib/Tooling/Transformer/RangeSelector.cpp9
-rw-r--r--clang/lib/Tooling/Transformer/RewriteRule.cpp68
-rw-r--r--clang/lib/Tooling/Transformer/SourceCode.cpp370
-rw-r--r--clang/lib/Tooling/Transformer/Stencil.cpp66
-rw-r--r--clang/lib/Tooling/Transformer/Transformer.cpp44
702 files changed, 78149 insertions, 36342 deletions
diff --git a/clang/lib/ARCMigrate/ARCMT.cpp b/clang/lib/ARCMigrate/ARCMT.cpp
index a9018c1c4bdf..e18def8a0b19 100644
--- a/clang/lib/ARCMigrate/ARCMT.cpp
+++ b/clang/lib/ARCMigrate/ARCMT.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "Internals.h"
+#include "clang/ARCMigrate/ARCMT.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/Basic/DiagnosticCategories.h"
#include "clang/Frontend/ASTUnit.h"
@@ -189,7 +190,7 @@ createInvocationForMigration(CompilerInvocation &origCI,
PPOpts.Includes.insert(PPOpts.Includes.begin(), OriginalFile);
PPOpts.ImplicitPCHInclude.clear();
}
- std::string define = getARCMTMacroName();
+ std::string define = std::string(getARCMTMacroName());
define += '=';
CInvok->getPreprocessorOpts().addMacroDef(define);
CInvok->getLangOpts()->ObjCAutoRefCount = true;
@@ -296,7 +297,7 @@ bool arcmt::checkForManualIssues(
for (CapturedDiagList::iterator
I = capturedDiags.begin(), E = capturedDiags.end(); I != E; ++I)
arcDiags.push_back(*I);
- writeARCDiagsToPlist(plistOut, arcDiags,
+ writeARCDiagsToPlist(std::string(plistOut), arcDiags,
Ctx.getSourceManager(), Ctx.getLangOpts());
}
@@ -598,7 +599,7 @@ bool MigrationProcess::applyTransform(TransformFn trans,
RewriteBuffer &buf = I->second;
const FileEntry *file = Ctx.getSourceManager().getFileEntryForID(FID);
assert(file);
- std::string newFname = file->getName();
+ std::string newFname = std::string(file->getName());
newFname += "-trans";
SmallString<512> newText;
llvm::raw_svector_ostream vecOS(newText);
diff --git a/clang/lib/ARCMigrate/FileRemapper.cpp b/clang/lib/ARCMigrate/FileRemapper.cpp
index a031fe22ac13..0222583c015b 100644
--- a/clang/lib/ARCMigrate/FileRemapper.cpp
+++ b/clang/lib/ARCMigrate/FileRemapper.cpp
@@ -43,7 +43,7 @@ std::string FileRemapper::getRemapInfoFile(StringRef outputDir) {
assert(!outputDir.empty());
SmallString<128> InfoFile = outputDir;
llvm::sys::path::append(InfoFile, "remap");
- return InfoFile.str();
+ return std::string(InfoFile.str());
}
bool FileRemapper::initFromDisk(StringRef outputDir, DiagnosticsEngine &Diag,
@@ -56,7 +56,7 @@ bool FileRemapper::initFromFile(StringRef filePath, DiagnosticsEngine &Diag,
bool ignoreIfFilesChanged) {
assert(FromToMappings.empty() &&
"initFromDisk should be called before any remap calls");
- std::string infoFile = filePath;
+ std::string infoFile = std::string(filePath);
if (!llvm::sys::fs::exists(infoFile))
return false;
@@ -120,7 +120,7 @@ bool FileRemapper::flushToFile(StringRef outputPath, DiagnosticsEngine &Diag) {
using namespace llvm::sys;
std::error_code EC;
- std::string infoFile = outputPath;
+ std::string infoFile = std::string(outputPath);
llvm::raw_fd_ostream infoOut(infoFile, EC, llvm::sys::fs::OF_None);
if (EC)
return report(EC.message(), Diag);
diff --git a/clang/lib/ARCMigrate/Internals.h b/clang/lib/ARCMigrate/Internals.h
index 47fc09317500..ed0136e4867a 100644
--- a/clang/lib/ARCMigrate/Internals.h
+++ b/clang/lib/ARCMigrate/Internals.h
@@ -9,13 +9,15 @@
#ifndef LLVM_CLANG_LIB_ARCMIGRATE_INTERNALS_H
#define LLVM_CLANG_LIB_ARCMIGRATE_INTERNALS_H
-#include "clang/ARCMigrate/ARCMT.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Diagnostic.h"
+#include "clang/Frontend/MigratorOptions.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include <list>
namespace clang {
+ class ASTContext;
class Sema;
class Stmt;
diff --git a/clang/lib/ARCMigrate/ObjCMT.cpp b/clang/lib/ARCMigrate/ObjCMT.cpp
index 4c6e9f2432f6..51c4a460cc25 100644
--- a/clang/lib/ARCMigrate/ObjCMT.cpp
+++ b/clang/lib/ARCMigrate/ObjCMT.cpp
@@ -114,21 +114,15 @@ public:
return *Summaries;
}
- ObjCMigrateASTConsumer(StringRef migrateDir,
- unsigned astMigrateActions,
- FileRemapper &remapper,
- FileManager &fileMgr,
+ ObjCMigrateASTConsumer(StringRef migrateDir, unsigned astMigrateActions,
+ FileRemapper &remapper, FileManager &fileMgr,
const PPConditionalDirectiveRecord *PPRec,
- Preprocessor &PP,
- bool isOutputFile,
+ Preprocessor &PP, bool isOutputFile,
ArrayRef<std::string> WhiteList)
- : MigrateDir(migrateDir),
- ASTMigrateActions(astMigrateActions),
- NSIntegerTypedefed(nullptr), NSUIntegerTypedefed(nullptr),
- Remapper(remapper), FileMgr(fileMgr), PPRec(PPRec), PP(PP),
- IsOutputFile(isOutputFile),
- FoundationIncluded(false){
-
+ : MigrateDir(migrateDir), ASTMigrateActions(astMigrateActions),
+ NSIntegerTypedefed(nullptr), NSUIntegerTypedefed(nullptr),
+ Remapper(remapper), FileMgr(fileMgr), PPRec(PPRec), PP(PP),
+ IsOutputFile(isOutputFile), FoundationIncluded(false) {
// FIXME: StringSet should have insert(iter, iter) to use here.
for (const std::string &Val : WhiteList)
WhiteListFilenames.insert(Val);
@@ -191,12 +185,10 @@ protected:
} // end anonymous namespace
ObjCMigrateAction::ObjCMigrateAction(
- std::unique_ptr<FrontendAction> WrappedAction,
- StringRef migrateDir,
- unsigned migrateAction)
- : WrapperFrontendAction(std::move(WrappedAction)), MigrateDir(migrateDir),
- ObjCMigAction(migrateAction),
- CompInst(nullptr) {
+ std::unique_ptr<FrontendAction> WrappedAction, StringRef migrateDir,
+ unsigned migrateAction)
+ : WrapperFrontendAction(std::move(WrappedAction)), MigrateDir(migrateDir),
+ ObjCMigAction(migrateAction), CompInst(nullptr) {
if (MigrateDir.empty())
MigrateDir = "."; // user current directory if none is given.
}
@@ -533,7 +525,7 @@ static void rewriteToObjCProperty(const ObjCMethodDecl *Getter,
// after that; e.g. isContinuous will become continuous.
StringRef PropertyNameStringRef(PropertyNameString);
PropertyNameStringRef = PropertyNameStringRef.drop_front(LengthOfPrefix);
- PropertyNameString = PropertyNameStringRef;
+ PropertyNameString = std::string(PropertyNameStringRef);
bool NoLowering = (isUppercase(PropertyNameString[0]) &&
PropertyNameString.size() > 1 &&
isUppercase(PropertyNameString[1]));
@@ -994,7 +986,7 @@ static void ReplaceWithClasstype(const ObjCMigrateASTConsumer &ASTC,
if (TypeSourceInfo *TSInfo = OM->getReturnTypeSourceInfo()) {
TypeLoc TL = TSInfo->getTypeLoc();
R = SourceRange(TL.getBeginLoc(), TL.getEndLoc()); {
- ClassString = IDecl->getName();
+ ClassString = std::string(IDecl->getName());
ClassString += "*";
}
}
@@ -1320,7 +1312,7 @@ void ObjCMigrateASTConsumer::migrateFactoryMethod(ASTContext &Ctx,
if (!IDecl)
return;
- std::string StringClassName = IDecl->getName();
+ std::string StringClassName = std::string(IDecl->getName());
StringRef LoweredClassName(StringClassName);
std::string StringLoweredClassName = LoweredClassName.lower();
LoweredClassName = StringLoweredClassName;
@@ -1330,7 +1322,7 @@ void ObjCMigrateASTConsumer::migrateFactoryMethod(ASTContext &Ctx,
if (!MethodIdName)
return;
- std::string MethodName = MethodIdName->getName();
+ std::string MethodName = std::string(MethodIdName->getName());
if (OIT_Family == OIT_Singleton || OIT_Family == OIT_ReturnsSelf) {
StringRef STRefMethodName(MethodName);
size_t len = 0;
@@ -1342,7 +1334,7 @@ void ObjCMigrateASTConsumer::migrateFactoryMethod(ASTContext &Ctx,
len = strlen("default");
else
return;
- MethodName = STRefMethodName.substr(len);
+ MethodName = std::string(STRefMethodName.substr(len));
}
std::string MethodNameSubStr = MethodName.substr(0, 3);
StringRef MethodNamePrefix(MethodNameSubStr);
@@ -1351,7 +1343,7 @@ void ObjCMigrateASTConsumer::migrateFactoryMethod(ASTContext &Ctx,
size_t Ix = LoweredClassName.rfind(MethodNamePrefix);
if (Ix == StringRef::npos)
return;
- std::string ClassNamePostfix = LoweredClassName.substr(Ix);
+ std::string ClassNamePostfix = std::string(LoweredClassName.substr(Ix));
StringRef LoweredMethodName(MethodName);
std::string StringLoweredMethodName = LoweredMethodName.lower();
LoweredMethodName = StringLoweredMethodName;
@@ -2010,7 +2002,7 @@ static std::vector<std::string> getWhiteListFilenames(StringRef DirPath) {
directory_iterator DE;
for (; !EC && DI != DE; DI = DI.increment(EC)) {
if (is_regular_file(DI->path()))
- Filenames.push_back(filename(DI->path()));
+ Filenames.push_back(std::string(filename(DI->path())));
}
return Filenames;
@@ -2153,7 +2145,7 @@ private:
if (Val.getAsInteger(10, Entry.RemoveLen))
Ignore = true;
} else if (Key == "text") {
- Entry.Text = Val;
+ Entry.Text = std::string(Val);
}
}
@@ -2224,7 +2216,7 @@ static std::string applyEditsToTemp(const FileEntry *FE,
TmpOut.write(NewText.data(), NewText.size());
TmpOut.close();
- return TempPath.str();
+ return std::string(TempPath.str());
}
bool arcmt::getFileRemappingsFromFileList(
@@ -2277,7 +2269,7 @@ bool arcmt::getFileRemappingsFromFileList(
continue;
}
- remap.emplace_back(I->first->getName(), TempFile);
+ remap.emplace_back(std::string(I->first->getName()), TempFile);
}
return hasErrorOccurred;
diff --git a/clang/lib/ARCMigrate/TransGCAttrs.cpp b/clang/lib/ARCMigrate/TransGCAttrs.cpp
index 5e3162197ed1..8f5f3cff17cb 100644
--- a/clang/lib/ARCMigrate/TransGCAttrs.cpp
+++ b/clang/lib/ARCMigrate/TransGCAttrs.cpp
@@ -231,8 +231,7 @@ static void checkAllAtProps(MigrationContext &MigrateCtx,
SmallVector<std::pair<AttributedTypeLoc, ObjCPropertyDecl *>, 4> ATLs;
bool hasWeak = false, hasStrong = false;
- ObjCPropertyDecl::PropertyAttributeKind
- Attrs = ObjCPropertyDecl::OBJC_PR_noattr;
+ ObjCPropertyAttribute::Kind Attrs = ObjCPropertyAttribute::kind_noattr;
for (IndivPropsTy::iterator
PI = IndProps.begin(), PE = IndProps.end(); PI != PE; ++PI) {
ObjCPropertyDecl *PD = *PI;
@@ -274,7 +273,7 @@ static void checkAllAtProps(MigrationContext &MigrateCtx,
else
toAttr = "unsafe_unretained";
}
- if (Attrs & ObjCPropertyDecl::OBJC_PR_assign)
+ if (Attrs & ObjCPropertyAttribute::kind_assign)
MigrateCtx.rewritePropertyAttribute("assign", toAttr, AtLoc);
else
MigrateCtx.addPropertyAttribute(toAttr, AtLoc);
@@ -302,8 +301,8 @@ static void checkAllProps(MigrationContext &MigrateCtx,
for (unsigned i = 0, e = AllProps.size(); i != e; ++i) {
ObjCPropertyDecl *PD = AllProps[i];
if (PD->getPropertyAttributesAsWritten() &
- (ObjCPropertyDecl::OBJC_PR_assign |
- ObjCPropertyDecl::OBJC_PR_readonly)) {
+ (ObjCPropertyAttribute::kind_assign |
+ ObjCPropertyAttribute::kind_readonly)) {
SourceLocation AtLoc = PD->getAtLoc();
if (AtLoc.isInvalid())
continue;
diff --git a/clang/lib/ARCMigrate/TransProperties.cpp b/clang/lib/ARCMigrate/TransProperties.cpp
index 0675fb0baeb8..cba2256ef97b 100644
--- a/clang/lib/ARCMigrate/TransProperties.cpp
+++ b/clang/lib/ARCMigrate/TransProperties.cpp
@@ -168,22 +168,22 @@ private:
}
void rewriteProperty(PropsTy &props, SourceLocation atLoc) {
- ObjCPropertyDecl::PropertyAttributeKind propAttrs = getPropertyAttrs(props);
+ ObjCPropertyAttribute::Kind propAttrs = getPropertyAttrs(props);
- if (propAttrs & (ObjCPropertyDecl::OBJC_PR_copy |
- ObjCPropertyDecl::OBJC_PR_unsafe_unretained |
- ObjCPropertyDecl::OBJC_PR_strong |
- ObjCPropertyDecl::OBJC_PR_weak))
+ if (propAttrs &
+ (ObjCPropertyAttribute::kind_copy |
+ ObjCPropertyAttribute::kind_unsafe_unretained |
+ ObjCPropertyAttribute::kind_strong | ObjCPropertyAttribute::kind_weak))
return;
- if (propAttrs & ObjCPropertyDecl::OBJC_PR_retain) {
+ if (propAttrs & ObjCPropertyAttribute::kind_retain) {
// strong is the default.
return doPropAction(PropAction_RetainReplacedWithStrong, props, atLoc);
}
bool HasIvarAssignedAPlusOneObject = hasIvarAssignedAPlusOneObject(props);
- if (propAttrs & ObjCPropertyDecl::OBJC_PR_assign) {
+ if (propAttrs & ObjCPropertyAttribute::kind_assign) {
if (HasIvarAssignedAPlusOneObject)
return doPropAction(PropAction_AssignRemoved, props, atLoc);
return doPropAction(PropAction_AssignRewritten, props, atLoc);
@@ -287,7 +287,10 @@ private:
public:
PlusOneAssign(ObjCIvarDecl *D) : Ivar(D) {}
- bool VisitBinAssign(BinaryOperator *E) {
+ bool VisitBinaryOperator(BinaryOperator *E) {
+ if (E->getOpcode() != BO_Assign)
+ return true;
+
Expr *lhs = E->getLHS()->IgnoreParenImpCasts();
if (ObjCIvarRefExpr *RE = dyn_cast<ObjCIvarRefExpr>(lhs)) {
if (RE->getDecl() != Ivar)
@@ -354,11 +357,10 @@ private:
return ty;
}
- ObjCPropertyDecl::PropertyAttributeKind
- getPropertyAttrs(PropsTy &props) const {
+ ObjCPropertyAttribute::Kind getPropertyAttrs(PropsTy &props) const {
assert(!props.empty());
- ObjCPropertyDecl::PropertyAttributeKind
- attrs = props[0].PropD->getPropertyAttributesAsWritten();
+ ObjCPropertyAttribute::Kind attrs =
+ props[0].PropD->getPropertyAttributesAsWritten();
#ifndef NDEBUG
for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I)
diff --git a/clang/lib/ARCMigrate/TransProtectedScope.cpp b/clang/lib/ARCMigrate/TransProtectedScope.cpp
index 9e9e9cb7a96d..154e0b54800f 100644
--- a/clang/lib/ARCMigrate/TransProtectedScope.cpp
+++ b/clang/lib/ARCMigrate/TransProtectedScope.cpp
@@ -11,9 +11,10 @@
//
//===----------------------------------------------------------------------===//
-#include "Transforms.h"
#include "Internals.h"
+#include "Transforms.h"
#include "clang/AST/ASTContext.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Sema/SemaDiagnostic.h"
using namespace clang;
diff --git a/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp b/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
index d28bd378acc1..81e67628fb1f 100644
--- a/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
+++ b/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
@@ -118,13 +118,11 @@ public:
ObjCPropertyDecl *PD = PID->getPropertyDecl();
ObjCMethodDecl *setterM = PD->getSetterMethodDecl();
if (!(setterM && setterM->isDefined())) {
- ObjCPropertyDecl::PropertyAttributeKind AttrKind =
- PD->getPropertyAttributes();
- if (AttrKind &
- (ObjCPropertyDecl::OBJC_PR_retain |
- ObjCPropertyDecl::OBJC_PR_copy |
- ObjCPropertyDecl::OBJC_PR_strong))
- SynthesizedProperties[PD] = PID;
+ ObjCPropertyAttribute::Kind AttrKind = PD->getPropertyAttributes();
+ if (AttrKind & (ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_copy |
+ ObjCPropertyAttribute::kind_strong))
+ SynthesizedProperties[PD] = PID;
}
}
}
diff --git a/clang/lib/ARCMigrate/Transforms.cpp b/clang/lib/ARCMigrate/Transforms.cpp
index 59b80a917e56..e274a540e408 100644
--- a/clang/lib/ARCMigrate/Transforms.cpp
+++ b/clang/lib/ARCMigrate/Transforms.cpp
@@ -8,6 +8,7 @@
#include "Transforms.h"
#include "Internals.h"
+#include "clang/ARCMigrate/ARCMT.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
diff --git a/clang/lib/AST/APValue.cpp b/clang/lib/AST/APValue.cpp
index 50f8d05dacb4..f3828bb54c1d 100644
--- a/clang/lib/AST/APValue.cpp
+++ b/clang/lib/AST/APValue.cpp
@@ -378,11 +378,6 @@ void APValue::swap(APValue &RHS) {
memcpy(RHS.Data.buffer, TmpData, DataSize);
}
-LLVM_DUMP_METHOD void APValue::dump() const {
- dump(llvm::errs());
- llvm::errs() << '\n';
-}
-
static double GetApproxValue(const llvm::APFloat &F) {
llvm::APFloat V = F;
bool ignored;
@@ -391,85 +386,6 @@ static double GetApproxValue(const llvm::APFloat &F) {
return V.convertToDouble();
}
-void APValue::dump(raw_ostream &OS) const {
- switch (getKind()) {
- case None:
- OS << "None";
- return;
- case Indeterminate:
- OS << "Indeterminate";
- return;
- case Int:
- OS << "Int: " << getInt();
- return;
- case Float:
- OS << "Float: " << GetApproxValue(getFloat());
- return;
- case FixedPoint:
- OS << "FixedPoint : " << getFixedPoint();
- return;
- case Vector:
- OS << "Vector: ";
- getVectorElt(0).dump(OS);
- for (unsigned i = 1; i != getVectorLength(); ++i) {
- OS << ", ";
- getVectorElt(i).dump(OS);
- }
- return;
- case ComplexInt:
- OS << "ComplexInt: " << getComplexIntReal() << ", " << getComplexIntImag();
- return;
- case ComplexFloat:
- OS << "ComplexFloat: " << GetApproxValue(getComplexFloatReal())
- << ", " << GetApproxValue(getComplexFloatImag());
- return;
- case LValue:
- OS << "LValue: <todo>";
- return;
- case Array:
- OS << "Array: ";
- for (unsigned I = 0, N = getArrayInitializedElts(); I != N; ++I) {
- getArrayInitializedElt(I).dump(OS);
- if (I != getArraySize() - 1) OS << ", ";
- }
- if (hasArrayFiller()) {
- OS << getArraySize() - getArrayInitializedElts() << " x ";
- getArrayFiller().dump(OS);
- }
- return;
- case Struct:
- OS << "Struct ";
- if (unsigned N = getStructNumBases()) {
- OS << " bases: ";
- getStructBase(0).dump(OS);
- for (unsigned I = 1; I != N; ++I) {
- OS << ", ";
- getStructBase(I).dump(OS);
- }
- }
- if (unsigned N = getStructNumFields()) {
- OS << " fields: ";
- getStructField(0).dump(OS);
- for (unsigned I = 1; I != N; ++I) {
- OS << ", ";
- getStructField(I).dump(OS);
- }
- }
- return;
- case Union:
- OS << "Union: ";
- getUnionValue().dump(OS);
- return;
- case MemberPointer:
- OS << "MemberPointer: <todo>";
- return;
- case AddrLabelDiff:
- OS << "AddrLabelDiff: <todo>";
- return;
- }
- llvm_unreachable("Unknown APValue kind!");
-}
-
void APValue::printPretty(raw_ostream &Out, const ASTContext &Ctx,
QualType Ty) const {
switch (getKind()) {
diff --git a/clang/lib/AST/ASTConcept.cpp b/clang/lib/AST/ASTConcept.cpp
index fc32e768d92f..549088ad4a8a 100644
--- a/clang/lib/AST/ASTConcept.cpp
+++ b/clang/lib/AST/ASTConcept.cpp
@@ -14,6 +14,10 @@
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/TemplateBase.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/FoldingSet.h"
using namespace clang;
ASTConstraintSatisfaction::ASTConstraintSatisfaction(const ASTContext &C,
@@ -53,3 +57,12 @@ ASTConstraintSatisfaction::Create(const ASTContext &C,
void *Mem = C.Allocate(size, alignof(ASTConstraintSatisfaction));
return new (Mem) ASTConstraintSatisfaction(C, Satisfaction);
}
+
+void ConstraintSatisfaction::Profile(
+ llvm::FoldingSetNodeID &ID, const ASTContext &C,
+ const NamedDecl *ConstraintOwner, ArrayRef<TemplateArgument> TemplateArgs) {
+ ID.AddPointer(ConstraintOwner);
+ ID.AddInteger(TemplateArgs.size());
+ for (auto &Arg : TemplateArgs)
+ Arg.Profile(ID, C);
+}
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
index a51429264dbe..2ba643f12a82 100644
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -29,15 +29,17 @@
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RawCommentList.h"
#include "clang/AST/RecordLayout.h"
-#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
@@ -54,6 +56,7 @@
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Linkage.h"
+#include "clang/Basic/Module.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/SanitizerBlacklist.h"
#include "clang/Basic/SourceLocation.h"
@@ -97,34 +100,8 @@
using namespace clang;
enum FloatingRank {
- Float16Rank, HalfRank, FloatRank, DoubleRank, LongDoubleRank, Float128Rank
+ BFloat16Rank, Float16Rank, HalfRank, FloatRank, DoubleRank, LongDoubleRank, Float128Rank
};
-const Expr *ASTContext::traverseIgnored(const Expr *E) const {
- return traverseIgnored(const_cast<Expr *>(E));
-}
-
-Expr *ASTContext::traverseIgnored(Expr *E) const {
- if (!E)
- return nullptr;
-
- switch (Traversal) {
- case ast_type_traits::TK_AsIs:
- return E;
- case ast_type_traits::TK_IgnoreImplicitCastsAndParentheses:
- return E->IgnoreParenImpCasts();
- case ast_type_traits::TK_IgnoreUnlessSpelledInSource:
- return E->IgnoreUnlessSpelledInSource();
- }
- llvm_unreachable("Invalid Traversal type!");
-}
-
-ast_type_traits::DynTypedNode
-ASTContext::traverseIgnored(const ast_type_traits::DynTypedNode &N) const {
- if (const auto *E = N.get<Expr>()) {
- return ast_type_traits::DynTypedNode::create(*traverseIgnored(E));
- }
- return N;
-}
/// \returns location that is relevant when searching for Doc comments related
/// to \p D.
@@ -321,6 +298,12 @@ RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
return getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile);
}
+void ASTContext::addComment(const RawComment &RC) {
+ assert(LangOpts.RetainCommentsFromSystemHeaders ||
+ !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
+ Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
+}
+
/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to template.
@@ -493,10 +476,20 @@ void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
if (Comments.empty() || Decls.empty())
return;
- // See if there are any new comments that are not attached to a decl.
- // The location doesn't have to be precise - we care only about the file.
- const FileID File =
- SourceMgr.getDecomposedLoc((*Decls.begin())->getLocation()).first;
+ FileID File;
+ for (Decl *D : Decls) {
+ SourceLocation Loc = D->getLocation();
+ if (Loc.isValid()) {
+ // See if there are any new comments that are not attached to a decl.
+ // The location doesn't have to be precise - we care only about the file.
+ File = SourceMgr.getDecomposedLoc(Loc).first;
+ break;
+ }
+ }
+
+ if (File.isInvalid())
+ return;
+
auto CommentsInThisFile = Comments.getCommentsInFile(File);
if (!CommentsInThisFile || CommentsInThisFile->empty() ||
CommentsInThisFile->rbegin()->second->isAttached())
@@ -661,7 +654,7 @@ comments::FullComment *ASTContext::getCommentForDecl(
return FC;
}
-void
+void
ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
const ASTContext &C,
TemplateTemplateParmDecl *Parm) {
@@ -716,6 +709,57 @@ ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
RequiresClause->Profile(ID, C, /*Canonical=*/true);
}
+static Expr *
+canonicalizeImmediatelyDeclaredConstraint(const ASTContext &C, Expr *IDC,
+ QualType ConstrainedType) {
+ // This is a bit ugly - we need to form a new immediately-declared
+ // constraint that references the new parameter; this would ideally
+ // require semantic analysis (e.g. template<C T> struct S {}; - the
+ // converted arguments of C<T> could be an argument pack if C is
+ // declared as template<typename... T> concept C = ...).
+ // We don't have semantic analysis here so we dig deep into the
+ // ready-made constraint expr and change the thing manually.
+ ConceptSpecializationExpr *CSE;
+ if (const auto *Fold = dyn_cast<CXXFoldExpr>(IDC))
+ CSE = cast<ConceptSpecializationExpr>(Fold->getLHS());
+ else
+ CSE = cast<ConceptSpecializationExpr>(IDC);
+ ArrayRef<TemplateArgument> OldConverted = CSE->getTemplateArguments();
+ SmallVector<TemplateArgument, 3> NewConverted;
+ NewConverted.reserve(OldConverted.size());
+ if (OldConverted.front().getKind() == TemplateArgument::Pack) {
+ // The case:
+ // template<typename... T> concept C = true;
+ // template<C<int> T> struct S; -> constraint is C<{T, int}>
+ NewConverted.push_back(ConstrainedType);
+ for (auto &Arg : OldConverted.front().pack_elements().drop_front(1))
+ NewConverted.push_back(Arg);
+ TemplateArgument NewPack(NewConverted);
+
+ NewConverted.clear();
+ NewConverted.push_back(NewPack);
+ assert(OldConverted.size() == 1 &&
+ "Template parameter pack should be the last parameter");
+ } else {
+ assert(OldConverted.front().getKind() == TemplateArgument::Type &&
+ "Unexpected first argument kind for immediately-declared "
+ "constraint");
+ NewConverted.push_back(ConstrainedType);
+ for (auto &Arg : OldConverted.drop_front(1))
+ NewConverted.push_back(Arg);
+ }
+ Expr *NewIDC = ConceptSpecializationExpr::Create(
+ C, CSE->getNamedConcept(), NewConverted, nullptr,
+ CSE->isInstantiationDependent(), CSE->containsUnexpandedParameterPack());
+
+ if (auto *OrigFold = dyn_cast<CXXFoldExpr>(IDC))
+ NewIDC = new (C) CXXFoldExpr(OrigFold->getType(), SourceLocation(), NewIDC,
+ BinaryOperatorKind::BO_LAnd,
+ SourceLocation(), /*RHS=*/nullptr,
+ SourceLocation(), /*NumExpansions=*/None);
+ return NewIDC;
+}
+
TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
TemplateTemplateParmDecl *TTP) const {
@@ -743,68 +787,23 @@ ASTContext::getCanonicalTemplateTemplateParmDecl(
TTP->isExpandedParameterPack() ?
llvm::Optional<unsigned>(TTP->getNumExpansionParameters()) : None);
if (const auto *TC = TTP->getTypeConstraint()) {
- // This is a bit ugly - we need to form a new immediately-declared
- // constraint that references the new parameter; this would ideally
- // require semantic analysis (e.g. template<C T> struct S {}; - the
- // converted arguments of C<T> could be an argument pack if C is
- // declared as template<typename... T> concept C = ...).
- // We don't have semantic analysis here so we dig deep into the
- // ready-made constraint expr and change the thing manually.
- Expr *IDC = TC->getImmediatelyDeclaredConstraint();
- ConceptSpecializationExpr *CSE;
- if (const auto *Fold = dyn_cast<CXXFoldExpr>(IDC))
- CSE = cast<ConceptSpecializationExpr>(Fold->getLHS());
- else
- CSE = cast<ConceptSpecializationExpr>(IDC);
- ArrayRef<TemplateArgument> OldConverted = CSE->getTemplateArguments();
- SmallVector<TemplateArgument, 3> NewConverted;
- NewConverted.reserve(OldConverted.size());
-
QualType ParamAsArgument(NewTTP->getTypeForDecl(), 0);
- if (OldConverted.front().getKind() == TemplateArgument::Pack) {
- // The case:
- // template<typename... T> concept C = true;
- // template<C<int> T> struct S; -> constraint is C<{T, int}>
- NewConverted.push_back(ParamAsArgument);
- for (auto &Arg : OldConverted.front().pack_elements().drop_front(1))
- NewConverted.push_back(Arg);
- TemplateArgument NewPack(NewConverted);
-
- NewConverted.clear();
- NewConverted.push_back(NewPack);
- assert(OldConverted.size() == 1 &&
- "Template parameter pack should be the last parameter");
- } else {
- assert(OldConverted.front().getKind() == TemplateArgument::Type &&
- "Unexpected first argument kind for immediately-declared "
- "constraint");
- NewConverted.push_back(ParamAsArgument);
- for (auto &Arg : OldConverted.drop_front(1))
- NewConverted.push_back(Arg);
- }
- Expr *NewIDC = ConceptSpecializationExpr::Create(*this,
- NestedNameSpecifierLoc(), /*TemplateKWLoc=*/SourceLocation(),
- CSE->getConceptNameInfo(), /*FoundDecl=*/CSE->getNamedConcept(),
- CSE->getNamedConcept(),
- // Actually canonicalizing a TemplateArgumentLoc is difficult so we
- // simply omit the ArgsAsWritten
- /*ArgsAsWritten=*/nullptr, NewConverted, nullptr);
-
- if (auto *OrigFold = dyn_cast<CXXFoldExpr>(IDC))
- NewIDC = new (*this) CXXFoldExpr(OrigFold->getType(),
- SourceLocation(), NewIDC,
- BinaryOperatorKind::BO_LAnd,
- SourceLocation(), /*RHS=*/nullptr,
- SourceLocation(),
- /*NumExpansions=*/None);
-
+ Expr *NewIDC = canonicalizeImmediatelyDeclaredConstraint(
+ *this, TC->getImmediatelyDeclaredConstraint(),
+ ParamAsArgument);
+ TemplateArgumentListInfo CanonArgsAsWritten;
+ if (auto *Args = TC->getTemplateArgsAsWritten())
+ for (const auto &ArgLoc : Args->arguments())
+ CanonArgsAsWritten.addArgument(
+ TemplateArgumentLoc(ArgLoc.getArgument(),
+ TemplateArgumentLocInfo()));
NewTTP->setTypeConstraint(
NestedNameSpecifierLoc(),
DeclarationNameInfo(TC->getNamedConcept()->getDeclName(),
SourceLocation()), /*FoundDecl=*/nullptr,
// Actually canonicalizing a TemplateArgumentLoc is difficult so we
// simply omit the ArgsAsWritten
- CSE->getNamedConcept(), /*ArgsAsWritten=*/nullptr, NewIDC);
+ TC->getNamedConcept(), /*ArgsAsWritten=*/nullptr, NewIDC);
}
CanonParams.push_back(NewTTP);
} else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
@@ -839,6 +838,13 @@ ASTContext::getCanonicalTemplateTemplateParmDecl(
NTTP->isParameterPack(),
TInfo);
}
+ if (AutoType *AT = T->getContainedAutoType()) {
+ if (AT->isConstrained()) {
+ Param->setPlaceholderTypeConstraint(
+ canonicalizeImmediatelyDeclaredConstraint(
+ *this, NTTP->getPlaceholderTypeConstraint(), T));
+ }
+ }
CanonParams.push_back(Param);
} else
@@ -886,6 +892,7 @@ CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
case TargetCXXABI::GenericMIPS:
case TargetCXXABI::GenericItanium:
case TargetCXXABI::WebAssembly:
+ case TargetCXXABI::XL:
return CreateItaniumCXXABI(*this);
case TargetCXXABI::Microsoft:
return CreateMicrosoftCXXABI(*this);
@@ -900,6 +907,12 @@ interp::Context &ASTContext::getInterpContext() {
return *InterpContext.get();
}
+ParentMapContext &ASTContext::getParentMapContext() {
+ if (!ParentMapCtx)
+ ParentMapCtx.reset(new ParentMapContext(*this));
+ return *ParentMapCtx.get();
+}
+
static const LangASMap *getAddressSpaceMap(const TargetInfo &T,
const LangOptions &LOpts) {
if (LOpts.FakeAddressSpaceMap) {
@@ -943,7 +956,7 @@ ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
Builtin::Context &builtins)
: ConstantArrayTypes(this_()), FunctionProtoTypes(this_()),
TemplateSpecializationTypes(this_()),
- DependentTemplateSpecializationTypes(this_()),
+ DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
SubstTemplateTemplateParmPacks(this_()),
CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
SanitizerBL(new SanitizerBlacklist(LangOpts.SanitizerBlacklistFiles, SM)),
@@ -995,80 +1008,9 @@ ASTContext::~ASTContext() {
Value->~APValue();
}
-class ASTContext::ParentMap {
- /// Contains parents of a node.
- using ParentVector = llvm::SmallVector<ast_type_traits::DynTypedNode, 2>;
-
- /// Maps from a node to its parents. This is used for nodes that have
- /// pointer identity only, which are more common and we can save space by
- /// only storing a unique pointer to them.
- using ParentMapPointers = llvm::DenseMap<
- const void *,
- llvm::PointerUnion<const Decl *, const Stmt *,
- ast_type_traits::DynTypedNode *, ParentVector *>>;
-
- /// Parent map for nodes without pointer identity. We store a full
- /// DynTypedNode for all keys.
- using ParentMapOtherNodes = llvm::DenseMap<
- ast_type_traits::DynTypedNode,
- llvm::PointerUnion<const Decl *, const Stmt *,
- ast_type_traits::DynTypedNode *, ParentVector *>>;
-
- ParentMapPointers PointerParents;
- ParentMapOtherNodes OtherParents;
- class ASTVisitor;
-
- static ast_type_traits::DynTypedNode
- getSingleDynTypedNodeFromParentMap(ParentMapPointers::mapped_type U) {
- if (const auto *D = U.dyn_cast<const Decl *>())
- return ast_type_traits::DynTypedNode::create(*D);
- if (const auto *S = U.dyn_cast<const Stmt *>())
- return ast_type_traits::DynTypedNode::create(*S);
- return *U.get<ast_type_traits::DynTypedNode *>();
- }
-
- template <typename NodeTy, typename MapTy>
- static ASTContext::DynTypedNodeList getDynNodeFromMap(const NodeTy &Node,
- const MapTy &Map) {
- auto I = Map.find(Node);
- if (I == Map.end()) {
- return llvm::ArrayRef<ast_type_traits::DynTypedNode>();
- }
- if (const auto *V = I->second.template dyn_cast<ParentVector *>()) {
- return llvm::makeArrayRef(*V);
- }
- return getSingleDynTypedNodeFromParentMap(I->second);
- }
-
-public:
- ParentMap(ASTContext &Ctx);
- ~ParentMap() {
- for (const auto &Entry : PointerParents) {
- if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
- delete Entry.second.get<ast_type_traits::DynTypedNode *>();
- } else if (Entry.second.is<ParentVector *>()) {
- delete Entry.second.get<ParentVector *>();
- }
- }
- for (const auto &Entry : OtherParents) {
- if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
- delete Entry.second.get<ast_type_traits::DynTypedNode *>();
- } else if (Entry.second.is<ParentVector *>()) {
- delete Entry.second.get<ParentVector *>();
- }
- }
- }
-
- DynTypedNodeList getParents(const ast_type_traits::DynTypedNode &Node) {
- if (Node.getNodeKind().hasPointerIdentity())
- return getDynNodeFromMap(Node.getMemoizationData(), PointerParents);
- return getDynNodeFromMap(Node, OtherParents);
- }
-};
-
void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
TraversalScope = TopLevelDecls;
- Parents.clear();
+ getParentMapContext().clear();
}
void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
@@ -1163,6 +1105,15 @@ void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
Merged.erase(std::remove(Merged.begin(), Merged.end(), nullptr), Merged.end());
}
+ArrayRef<Module *>
+ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
+ auto MergedIt =
+ MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl()));
+ if (MergedIt == MergedDefModules.end())
+ return None;
+ return MergedIt->second;
+}
+
void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
if (LazyInitializers.empty())
return;
@@ -1432,8 +1383,13 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);
// Placeholder type for OMP array sections.
- if (LangOpts.OpenMP)
+ if (LangOpts.OpenMP) {
InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
+ InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
+ InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
+ }
+ if (LangOpts.MatrixTypes)
+ InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);
// C99 6.2.5p11.
FloatComplexTy = getComplexType(FloatTy);
@@ -1492,8 +1448,16 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
// half type (OpenCL 6.1.1.1) / ARM NEON __fp16
InitBuiltinType(HalfTy, BuiltinType::Half);
+ InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16);
+
// Builtin type used to help define __builtin_va_list.
VaListTagDecl = nullptr;
+
+ // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
+ if (LangOpts.MicrosoftExt || LangOpts.Borland) {
+ MSGuidTagDecl = buildImplicitRecord("_GUID");
+ TUDecl->addDecl(MSGuidTagDecl);
+ }
}
DiagnosticsEngine &ASTContext::getDiagnostics() const {
@@ -1666,7 +1630,8 @@ void ASTContext::getOverriddenMethods(
}
void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
- assert(!Import->NextLocalImport && "Import declaration already in the chain");
+ assert(!Import->getNextLocalImport() &&
+ "Import declaration already in the chain");
assert(!Import->isFromASTFile() && "Non-local import declaration");
if (!FirstLocalImport) {
FirstLocalImport = Import;
@@ -1674,7 +1639,7 @@ void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
return;
}
- LastLocalImport->NextLocalImport = Import;
+ LastLocalImport->setNextLocalImport(Import);
LastLocalImport = Import;
}
@@ -1688,6 +1653,8 @@ const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
switch (T->castAs<BuiltinType>()->getKind()) {
default:
llvm_unreachable("Not a floating point type!");
+ case BuiltinType::BFloat16:
+ return Target->getBFloat16Format();
case BuiltinType::Float16:
case BuiltinType::Half:
return Target->getHalfFormat();
@@ -1800,6 +1767,10 @@ CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
return toCharUnitsFromBits(Align);
}
+CharUnits ASTContext::getExnObjectAlignment() const {
+ return toCharUnitsFromBits(Target->getExnObjectAlignment());
+}
+
// getTypeInfoDataSizeInChars - Return the size of a type, in
// chars. If the type is a record, its data size is returned. This is
// the size of the memcpy that's performed when assigning this type
@@ -1930,24 +1901,24 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
case Type::IncompleteArray:
case Type::VariableArray:
- Width = 0;
- Align = getTypeAlign(cast<ArrayType>(T)->getElementType());
- break;
-
case Type::ConstantArray: {
- const auto *CAT = cast<ConstantArrayType>(T);
+ // Model non-constant sized arrays as size zero, but track the alignment.
+ uint64_t Size = 0;
+ if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
+ Size = CAT->getSize().getZExtValue();
- TypeInfo EltInfo = getTypeInfo(CAT->getElementType());
- uint64_t Size = CAT->getSize().getZExtValue();
+ TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType());
assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
"Overflow in array type bit size evaluation");
Width = EltInfo.Width * Size;
Align = EltInfo.Align;
+ AlignIsRequired = EltInfo.AlignIsRequired;
if (!getTargetInfo().getCXXABI().isMicrosoft() ||
getTargetInfo().getPointerWidth(0) == 64)
Width = llvm::alignTo(Width, Align);
break;
}
+
case Type::ExtVector:
case Type::Vector: {
const auto *VT = cast<VectorType>(T);
@@ -1967,6 +1938,17 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
break;
}
+ case Type::ConstantMatrix: {
+ const auto *MT = cast<ConstantMatrixType>(T);
+ TypeInfo ElementInfo = getTypeInfo(MT->getElementType());
+ // The internal layout of a matrix value is implementation defined.
+ // Initially be ABI compatible with arrays with respect to alignment and
+ // size.
+ Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns();
+ Align = ElementInfo.Align;
+ break;
+ }
+
case Type::Builtin:
switch (cast<BuiltinType>(T)->getKind()) {
default: llvm_unreachable("Unknown builtin type!");
@@ -2067,6 +2049,10 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
Width = Target->getLongFractWidth();
Align = Target->getLongFractAlign();
break;
+ case BuiltinType::BFloat16:
+ Width = Target->getBFloat16Width();
+ Align = Target->getBFloat16Align();
+ break;
case BuiltinType::Float16:
case BuiltinType::Half:
if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
@@ -2145,16 +2131,17 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
// Because the length is only known at runtime, we use a dummy value
// of 0 for the static length. The alignment values are those defined
// by the Procedure Call Standard for the Arm Architecture.
-#define SVE_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, IsSigned, IsFP)\
- case BuiltinType::Id: \
- Width = 0; \
- Align = 128; \
- break;
-#define SVE_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \
- case BuiltinType::Id: \
- Width = 0; \
- Align = 16; \
- break;
+#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \
+ IsSigned, IsFP, IsBF) \
+ case BuiltinType::Id: \
+ Width = 0; \
+ Align = 128; \
+ break;
+#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \
+ case BuiltinType::Id: \
+ Width = 0; \
+ Align = 16; \
+ break;
#include "clang/Basic/AArch64SVEACLETypes.def"
}
break;
@@ -2202,11 +2189,25 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
case Type::ObjCInterface: {
const auto *ObjCI = cast<ObjCInterfaceType>(T);
+ if (ObjCI->getDecl()->isInvalidDecl()) {
+ Width = 8;
+ Align = 8;
+ break;
+ }
const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
Width = toBits(Layout.getSize());
Align = toBits(Layout.getAlignment());
break;
}
+ case Type::ExtInt: {
+ const auto *EIT = cast<ExtIntType>(T);
+ Align =
+ std::min(static_cast<unsigned>(std::max(
+ getCharWidth(), llvm::PowerOf2Ceil(EIT->getNumBits()))),
+ Target->getLongLongAlign());
+ Width = llvm::alignTo(EIT->getNumBits(), Align);
+ break;
+ }
case Type::Record:
case Type::Enum: {
const auto *TT = cast<TagType>(T);
@@ -3383,6 +3384,8 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
case Type::DependentVector:
case Type::ExtVector:
case Type::DependentSizedExtVector:
+ case Type::ConstantMatrix:
+ case Type::DependentSizedMatrix:
case Type::DependentAddressSpace:
case Type::ObjCObject:
case Type::ObjCInterface:
@@ -3403,6 +3406,8 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
case Type::Auto:
case Type::DeducedTemplateSpecialization:
case Type::PackExpansion:
+ case Type::ExtInt:
+ case Type::DependentExtInt:
llvm_unreachable("type should never be variably-modified");
// These types can be variably-modified but should never need to
@@ -3629,6 +3634,33 @@ QualType ASTContext::getIncompleteArrayType(QualType elementType,
return QualType(newType, 0);
}
+/// getScalableVectorType - Return the unique reference to a scalable vector
+/// type of the specified element type and size. VectorType must be a built-in
+/// type.
+QualType ASTContext::getScalableVectorType(QualType EltTy,
+ unsigned NumElts) const {
+ if (Target->hasAArch64SVETypes()) {
+ uint64_t EltTySize = getTypeSize(EltTy);
+#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \
+ IsSigned, IsFP, IsBF) \
+ if (!EltTy->isBooleanType() && \
+ ((EltTy->hasIntegerRepresentation() && \
+ EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
+ (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \
+ IsFP && !IsBF) || \
+ (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \
+ IsBF && !IsFP)) && \
+ EltTySize == ElBits && NumElts == NumEls) { \
+ return SingletonId; \
+ }
+#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \
+ if (EltTy->isBooleanType() && NumElts == NumEls) \
+ return SingletonId;
+#include "clang/Basic/AArch64SVEACLETypes.def"
+ }
+ return QualType();
+}
+
/// getVectorType - Return the unique reference to a vector type of
/// the specified element type and size. VectorType must be a built-in type.
QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
@@ -3688,10 +3720,10 @@ ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
(void)CanonCheck;
DependentVectorTypes.InsertNode(New, InsertPos);
} else {
- QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
- SourceLocation());
+ QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr,
+ SourceLocation(), VecKind);
New = new (*this, TypeAlignment) DependentVectorType(
- *this, VecType, CanonExtTy, SizeExpr, AttrLoc, VecKind);
+ *this, VecType, CanonTy, SizeExpr, AttrLoc, VecKind);
}
}
@@ -3772,6 +3804,78 @@ ASTContext::getDependentSizedExtVectorType(QualType vecType,
return QualType(New, 0);
}
+QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows,
+ unsigned NumColumns) const {
+ llvm::FoldingSetNodeID ID;
+ ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns,
+ Type::ConstantMatrix);
+
+ assert(MatrixType::isValidElementType(ElementTy) &&
+ "need a valid element type");
+ assert(ConstantMatrixType::isDimensionValid(NumRows) &&
+ ConstantMatrixType::isDimensionValid(NumColumns) &&
+ "need valid matrix dimensions");
+ void *InsertPos = nullptr;
+ if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(MTP, 0);
+
+ QualType Canonical;
+ if (!ElementTy.isCanonical()) {
+ Canonical =
+ getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns);
+
+ ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!NewIP && "Matrix type shouldn't already exist in the map");
+ (void)NewIP;
+ }
+
+ auto *New = new (*this, TypeAlignment)
+ ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical);
+ MatrixTypes.InsertNode(New, InsertPos);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy,
+ Expr *RowExpr,
+ Expr *ColumnExpr,
+ SourceLocation AttrLoc) const {
+ QualType CanonElementTy = getCanonicalType(ElementTy);
+ llvm::FoldingSetNodeID ID;
+ DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr,
+ ColumnExpr);
+
+ void *InsertPos = nullptr;
+ DependentSizedMatrixType *Canon =
+ DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!Canon) {
+ Canon = new (*this, TypeAlignment) DependentSizedMatrixType(
+ *this, CanonElementTy, QualType(), RowExpr, ColumnExpr, AttrLoc);
+#ifndef NDEBUG
+ DependentSizedMatrixType *CanonCheck =
+ DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
+#endif
+ DependentSizedMatrixTypes.InsertNode(Canon, InsertPos);
+ Types.push_back(Canon);
+ }
+
+ // Already have a canonical version of the matrix type
+ //
+ // If it exactly matches the requested type, use it directly.
+ if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
+ Canon->getRowExpr() == ColumnExpr)
+ return QualType(Canon, 0);
+
+ // Use Canon as the canonical type for newly-built type.
+ DependentSizedMatrixType *New = new (*this, TypeAlignment)
+ DependentSizedMatrixType(*this, ElementTy, QualType(Canon, 0), RowExpr,
+ ColumnExpr, AttrLoc);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
Expr *AddrSpaceExpr,
SourceLocation AttrLoc) const {
@@ -4075,6 +4179,39 @@ QualType ASTContext::getWritePipeType(QualType T) const {
return getPipeType(T, false);
}
+QualType ASTContext::getExtIntType(bool IsUnsigned, unsigned NumBits) const {
+ llvm::FoldingSetNodeID ID;
+ ExtIntType::Profile(ID, IsUnsigned, NumBits);
+
+ void *InsertPos = nullptr;
+ if (ExtIntType *EIT = ExtIntTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(EIT, 0);
+
+ auto *New = new (*this, TypeAlignment) ExtIntType(IsUnsigned, NumBits);
+ ExtIntTypes.InsertNode(New, InsertPos);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+QualType ASTContext::getDependentExtIntType(bool IsUnsigned,
+ Expr *NumBitsExpr) const {
+ assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent");
+ llvm::FoldingSetNodeID ID;
+ DependentExtIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr);
+
+ void *InsertPos = nullptr;
+ if (DependentExtIntType *Existing =
+ DependentExtIntTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(Existing, 0);
+
+ auto *New = new (*this, TypeAlignment)
+ DependentExtIntType(*this, IsUnsigned, NumBitsExpr);
+ DependentExtIntTypes.InsertNode(New, InsertPos);
+
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
#ifndef NDEBUG
static bool NeedsInjectedClassNameType(const RecordDecl *D) {
if (!isa<CXXRecordDecl>(D)) return false;
@@ -4587,7 +4724,7 @@ TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) {
} else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
Expr *E = new (*this) DeclRefExpr(
*this, NTTP, /*enclosing*/ false,
- NTTP->getType().getNonLValueExprType(*this),
+ NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this),
Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation());
if (NTTP->isParameterPack())
@@ -4859,7 +4996,7 @@ ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
ArrayRef<ObjCProtocolDecl *> protocols) const {
// Look in the folding set for an existing type.
llvm::FoldingSetNodeID ID;
- ObjCTypeParamType::Profile(ID, Decl, protocols);
+ ObjCTypeParamType::Profile(ID, Decl, Decl->getUnderlyingType(), protocols);
void *InsertPos = nullptr;
if (ObjCTypeParamType *TypeParam =
ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos))
@@ -4885,6 +5022,17 @@ ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
return QualType(newType, 0);
}
+void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig,
+ ObjCTypeParamDecl *New) const {
+ New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType()));
+ // Update TypeForDecl after updating TypeSourceInfo.
+ auto NewTypeParamTy = cast<ObjCTypeParamType>(New->getTypeForDecl());
+ SmallVector<ObjCProtocolDecl *, 8> protocols;
+ protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end());
+ QualType UpdatedTy = getObjCTypeParamType(New, protocols);
+ New->setTypeForDecl(UpdatedTy.getTypePtr());
+}
+
/// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
/// protocol list adopt all protocols in QT's qualified-id protocol
/// list.
@@ -5124,21 +5272,33 @@ QualType ASTContext::getUnaryTransformType(QualType BaseType,
/// getAutoType - Return the uniqued reference to the 'auto' type which has been
/// deduced to the given type, or to the canonical undeduced 'auto' type, or the
/// canonical deduced-but-dependent 'auto' type.
-QualType ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
- bool IsDependent, bool IsPack) const {
+QualType
+ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
+ bool IsDependent, bool IsPack,
+ ConceptDecl *TypeConstraintConcept,
+ ArrayRef<TemplateArgument> TypeConstraintArgs) const {
assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack");
- if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && !IsDependent)
+ if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto &&
+ !TypeConstraintConcept && !IsDependent)
return getAutoDeductType();
// Look in the folding set for an existing type.
void *InsertPos = nullptr;
llvm::FoldingSetNodeID ID;
- AutoType::Profile(ID, DeducedType, Keyword, IsDependent, IsPack);
+ AutoType::Profile(ID, *this, DeducedType, Keyword, IsDependent,
+ TypeConstraintConcept, TypeConstraintArgs);
if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(AT, 0);
- auto *AT = new (*this, TypeAlignment)
- AutoType(DeducedType, Keyword, IsDependent, IsPack);
+ void *Mem = Allocate(sizeof(AutoType) +
+ sizeof(TemplateArgument) * TypeConstraintArgs.size(),
+ TypeAlignment);
+ auto *AT = new (Mem) AutoType(
+ DeducedType, Keyword,
+ (IsDependent ? TypeDependence::DependentInstantiation
+ : TypeDependence::None) |
+ (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None),
+ TypeConstraintConcept, TypeConstraintArgs);
Types.push_back(AT);
if (InsertPos)
AutoTypes.InsertNode(AT, InsertPos);
@@ -5198,10 +5358,11 @@ QualType ASTContext::getAtomicType(QualType T) const {
/// getAutoDeductType - Get type pattern for deducing against 'auto'.
QualType ASTContext::getAutoDeductType() const {
if (AutoDeductTy.isNull())
- AutoDeductTy = QualType(
- new (*this, TypeAlignment) AutoType(QualType(), AutoTypeKeyword::Auto,
- /*dependent*/false, /*pack*/false),
- 0);
+ AutoDeductTy = QualType(new (*this, TypeAlignment)
+ AutoType(QualType(), AutoTypeKeyword::Auto,
+ TypeDependence::None,
+ /*concept*/ nullptr, /*args*/ {}),
+ 0);
return AutoDeductTy;
}
@@ -5837,6 +5998,7 @@ static FloatingRank getFloatingRank(QualType T) {
case BuiltinType::Double: return DoubleRank;
case BuiltinType::LongDouble: return LongDoubleRank;
case BuiltinType::Float128: return Float128Rank;
+ case BuiltinType::BFloat16: return BFloat16Rank;
}
}
@@ -5849,6 +6011,7 @@ QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size,
FloatingRank EltRank = getFloatingRank(Size);
if (Domain->isComplexType()) {
switch (EltRank) {
+ case BFloat16Rank: llvm_unreachable("Complex bfloat16 is not supported");
case Float16Rank:
case HalfRank: llvm_unreachable("Complex half is not supported");
case FloatRank: return FloatComplexTy;
@@ -5861,6 +6024,7 @@ QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size,
assert(Domain->isRealFloatingType() && "Unknown domain!");
switch (EltRank) {
case Float16Rank: return HalfTy;
+ case BFloat16Rank: return BFloat16Ty;
case HalfRank: return HalfTy;
case FloatRank: return FloatTy;
case DoubleRank: return DoubleTy;
@@ -5897,6 +6061,11 @@ int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const {
unsigned ASTContext::getIntegerRank(const Type *T) const {
assert(T->isCanonicalUnqualified() && "T should be canonicalized");
+ // Results in this 'losing' to any type of the same size, but winning if
+ // larger.
+ if (const auto *EIT = dyn_cast<ExtIntType>(T))
+ return 0 + (EIT->getNumBits() << 3);
+
switch (cast<BuiltinType>(T)->getKind()) {
default: llvm_unreachable("getIntegerRank(): not a built-in integer");
case BuiltinType::Bool:
@@ -6287,39 +6456,39 @@ QualType ASTContext::getBlockDescriptorExtendedType() const {
return getTagDeclType(BlockDescriptorExtendedType);
}
-TargetInfo::OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const {
+OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const {
const auto *BT = dyn_cast<BuiltinType>(T);
if (!BT) {
if (isa<PipeType>(T))
- return TargetInfo::OCLTK_Pipe;
+ return OCLTK_Pipe;
- return TargetInfo::OCLTK_Default;
+ return OCLTK_Default;
}
switch (BT->getKind()) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id: \
- return TargetInfo::OCLTK_Image;
+ return OCLTK_Image;
#include "clang/Basic/OpenCLImageTypes.def"
case BuiltinType::OCLClkEvent:
- return TargetInfo::OCLTK_ClkEvent;
+ return OCLTK_ClkEvent;
case BuiltinType::OCLEvent:
- return TargetInfo::OCLTK_Event;
+ return OCLTK_Event;
case BuiltinType::OCLQueue:
- return TargetInfo::OCLTK_Queue;
+ return OCLTK_Queue;
case BuiltinType::OCLReserveID:
- return TargetInfo::OCLTK_ReserveID;
+ return OCLTK_ReserveID;
case BuiltinType::OCLSampler:
- return TargetInfo::OCLTK_Sampler;
+ return OCLTK_Sampler;
default:
- return TargetInfo::OCLTK_Default;
+ return OCLTK_Default;
}
}
@@ -6392,6 +6561,24 @@ bool ASTContext::getByrefLifetime(QualType Ty,
return true;
}
+CanQualType ASTContext::getNSUIntegerType() const {
+ assert(Target && "Expected target to be initialized");
+ const llvm::Triple &T = Target->getTriple();
+ // Windows is LLP64 rather than LP64
+ if (T.isOSWindows() && T.isArch64Bit())
+ return UnsignedLongLongTy;
+ return UnsignedLongTy;
+}
+
+CanQualType ASTContext::getNSIntegerType() const {
+ assert(Target && "Expected target to be initialized");
+ const llvm::Triple &T = Target->getTriple();
+ // Windows is LLP64 rather than LP64
+ if (T.isOSWindows() && T.isArch64Bit())
+ return LongLongTy;
+ return LongTy;
+}
+
TypedefDecl *ASTContext::getObjCInstanceTypeDecl() {
if (!ObjCInstanceTypeDecl)
ObjCInstanceTypeDecl =
@@ -6695,11 +6882,11 @@ ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
if (PD->isReadOnly()) {
S += ",R";
- if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_copy)
+ if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy)
S += ",C";
- if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_retain)
+ if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain)
S += ",&";
- if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak)
+ if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak)
S += ",W";
} else {
switch (PD->getSetterKind()) {
@@ -6715,15 +6902,15 @@ ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
if (Dynamic)
S += ",D";
- if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic)
S += ",N";
- if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_getter) {
+ if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) {
S += ",G";
S += PD->getGetterName().getAsString();
}
- if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter) {
+ if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) {
S += ",S";
S += PD->getSetterName().getAsString();
}
@@ -6815,6 +7002,7 @@ static char getObjCEncodingForPrimitiveType(const ASTContext *C,
case BuiltinType::LongDouble: return 'D';
case BuiltinType::NullPtr: return '*'; // like char*
+ case BuiltinType::BFloat16:
case BuiltinType::Float16:
case BuiltinType::Float128:
case BuiltinType::Half:
@@ -7255,6 +7443,11 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
*NotEncodedT = T;
return;
+ case Type::ConstantMatrix:
+ if (NotEncodedT)
+ *NotEncodedT = T;
+ return;
+
// We could see an undeduced auto type here during error recovery.
// Just ignore it.
case Type::Auto:
@@ -7262,6 +7455,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
return;
case Type::Pipe:
+ case Type::ExtInt:
#define ABSTRACT_TYPE(KIND, BASE)
#define TYPE(KIND, BASE)
#define DEPENDENT_TYPE(KIND, BASE) \
@@ -7783,6 +7977,57 @@ CreateSystemZBuiltinVaListDecl(const ASTContext *Context) {
return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
}
+static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) {
+ // typedef struct __va_list_tag {
+ RecordDecl *VaListTagDecl;
+ VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
+ VaListTagDecl->startDefinition();
+
+ const size_t NumFields = 3;
+ QualType FieldTypes[NumFields];
+ const char *FieldNames[NumFields];
+
+ // void *CurrentSavedRegisterArea;
+ FieldTypes[0] = Context->getPointerType(Context->VoidTy);
+ FieldNames[0] = "__current_saved_reg_area_pointer";
+
+ // void *SavedRegAreaEnd;
+ FieldTypes[1] = Context->getPointerType(Context->VoidTy);
+ FieldNames[1] = "__saved_reg_area_end_pointer";
+
+ // void *OverflowArea;
+ FieldTypes[2] = Context->getPointerType(Context->VoidTy);
+ FieldNames[2] = "__overflow_area_pointer";
+
+ // Create fields
+ for (unsigned i = 0; i < NumFields; ++i) {
+ FieldDecl *Field = FieldDecl::Create(
+ const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(),
+ SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i],
+ /*TInfo=*/0,
+ /*BitWidth=*/0,
+ /*Mutable=*/false, ICIS_NoInit);
+ Field->setAccess(AS_public);
+ VaListTagDecl->addDecl(Field);
+ }
+ VaListTagDecl->completeDefinition();
+ Context->VaListTagDecl = VaListTagDecl;
+ QualType VaListTagType = Context->getRecordType(VaListTagDecl);
+
+ // } __va_list_tag;
+ TypedefDecl *VaListTagTypedefDecl =
+ Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");
+
+ QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl);
+
+ // typedef __va_list_tag __builtin_va_list[1];
+ llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
+ QualType VaListTagArrayType = Context->getConstantArrayType(
+ VaListTagTypedefType, Size, nullptr, ArrayType::Normal, 0);
+
+ return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
+}
+
static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
TargetInfo::BuiltinVaListKind Kind) {
switch (Kind) {
@@ -7802,6 +8047,8 @@ static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
return CreateAAPCSABIBuiltinVaListDecl(Context);
case TargetInfo::SystemZBuiltinVaList:
return CreateSystemZBuiltinVaListDecl(Context);
+ case TargetInfo::HexagonBuiltinVaList:
+ return CreateHexagonBuiltinVaListDecl(Context);
}
llvm_unreachable("Unhandled __builtin_va_list type kind");
@@ -8080,6 +8327,16 @@ static bool areCompatVectorTypes(const VectorType *LHS,
LHS->getNumElements() == RHS->getNumElements();
}
+/// areCompatMatrixTypes - Return true if the two specified matrix types are
+/// compatible.
+static bool areCompatMatrixTypes(const ConstantMatrixType *LHS,
+ const ConstantMatrixType *RHS) {
+ assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
+ return LHS->getElementType() == RHS->getElementType() &&
+ LHS->getNumRows() == RHS->getNumRows() &&
+ LHS->getNumColumns() == RHS->getNumColumns();
+}
+
bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
QualType SecondVec) {
assert(FirstVec->isVectorType() && "FirstVec should be a vector type");
@@ -8362,10 +8619,18 @@ bool ASTContext::canAssignObjCInterfacesInBlockPointer(
RHSOPT->isObjCQualifiedIdType());
}
- if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType())
- return finish(ObjCQualifiedIdTypesAreCompatible(
- (BlockReturnType ? LHSOPT : RHSOPT),
- (BlockReturnType ? RHSOPT : LHSOPT), false));
+ if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) {
+ if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking)
+ // Use for block parameters previous type checking for compatibility.
+ return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false) ||
+ // Or corrected type checking as in non-compat mode.
+ (!BlockReturnType &&
+ ObjCQualifiedIdTypesAreCompatible(RHSOPT, LHSOPT, false)));
+ else
+ return finish(ObjCQualifiedIdTypesAreCompatible(
+ (BlockReturnType ? LHSOPT : RHSOPT),
+ (BlockReturnType ? RHSOPT : LHSOPT), false));
+ }
const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType();
const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType();
@@ -8716,8 +8981,8 @@ bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
return canAssignObjCInterfaces(
- getObjCObjectPointerType(To)->getAs<ObjCObjectPointerType>(),
- getObjCObjectPointerType(From)->getAs<ObjCObjectPointerType>());
+ getObjCObjectPointerType(To)->castAs<ObjCObjectPointerType>(),
+ getObjCObjectPointerType(From)->castAs<ObjCObjectPointerType>());
}
/// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
@@ -8783,8 +9048,8 @@ QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs,
}
QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
- bool OfBlockPointer,
- bool Unqualified) {
+ bool OfBlockPointer, bool Unqualified,
+ bool AllowCXX) {
const auto *lbase = lhs->castAs<FunctionType>();
const auto *rbase = rhs->castAs<FunctionType>();
const auto *lproto = dyn_cast<FunctionProtoType>(lbase);
@@ -8858,7 +9123,8 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn);
if (lproto && rproto) { // two C99 style function prototypes
- assert(!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec() &&
+ assert((AllowCXX ||
+ (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) &&
"C++ shouldn't be here");
// Compatible functions must have the same number of parameters
if (lproto->getNumParams() != rproto->getNumParams())
@@ -8922,7 +9188,7 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
const FunctionProtoType *proto = lproto ? lproto : rproto;
if (proto) {
- assert(!proto->hasExceptionSpec() && "C++ shouldn't be here");
+ assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here");
if (proto->isVariadic())
return {};
// Check that the types are compatible with the types that
@@ -9276,6 +9542,11 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
RHSCan->castAs<VectorType>()))
return LHS;
return {};
+ case Type::ConstantMatrix:
+ if (areCompatMatrixTypes(LHSCan->castAs<ConstantMatrixType>(),
+ RHSCan->castAs<ConstantMatrixType>()))
+ return LHS;
+ return {};
case Type::ObjCObject: {
// Check if the types are assignment compatible.
// FIXME: This should be type compatibility, e.g. whether
@@ -9301,6 +9572,21 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
assert(LHS != RHS &&
"Equivalent pipe types should have already been handled!");
return {};
+ case Type::ExtInt: {
+ // Merge two ext-int types, while trying to preserve typedef info.
+ bool LHSUnsigned = LHS->castAs<ExtIntType>()->isUnsigned();
+ bool RHSUnsigned = RHS->castAs<ExtIntType>()->isUnsigned();
+ unsigned LHSBits = LHS->castAs<ExtIntType>()->getNumBits();
+ unsigned RHSBits = RHS->castAs<ExtIntType>()->getNumBits();
+
+ // Like unsigned/int, shouldn't have a type if they dont match.
+ if (LHSUnsigned != RHSUnsigned)
+ return {};
+
+ if (LHSBits != RHSBits)
+ return {};
+ return LHS;
+ }
}
llvm_unreachable("Invalid Type::Class!");
@@ -9441,6 +9727,8 @@ unsigned ASTContext::getIntWidth(QualType T) const {
T = ET->getDecl()->getIntegerType();
if (T->isBooleanType())
return 1;
+ if(const auto *EIT = T->getAs<ExtIntType>())
+ return EIT->getNumBits();
// For builtin types, just use the standard type sizing method
return (unsigned)getTypeSize(T);
}
@@ -9622,6 +9910,11 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
// Read the base type.
switch (*Str++) {
default: llvm_unreachable("Unknown builtin type letter!");
+ case 'y':
+ assert(HowLong == 0 && !Signed && !Unsigned &&
+ "Bad modifiers used with 'y'!");
+ Type = Context.BFloat16Ty;
+ break;
case 'v':
assert(HowLong == 0 && !Signed && !Unsigned &&
"Bad modifiers used with 'v'!");
@@ -9717,6 +10010,19 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
else
Type = Context.getLValueReferenceType(Type);
break;
+ case 'q': {
+ char *End;
+ unsigned NumElements = strtoul(Str, &End, 10);
+ assert(End != Str && "Missing vector size");
+ Str = End;
+
+ QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
+ RequiresICE, false);
+ assert(!RequiresICE && "Can't require vector ICE");
+
+ Type = Context.getScalableVectorType(ElementType, NumElements);
+ break;
+ }
case 'V': {
char *End;
unsigned NumElements = strtoul(Str, &End, 10);
@@ -10109,6 +10415,8 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
return true;
else if (isa<PragmaDetectMismatchDecl>(D))
return true;
+ else if (isa<OMPRequiresDecl>(D))
+ return true;
else if (isa<OMPThreadPrivateDecl>(D))
return !D->getDeclContext()->isDependentContext();
else if (isa<OMPAllocateDecl>(D))
@@ -10298,10 +10606,15 @@ bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
VTableContextBase *ASTContext::getVTableContext() {
if (!VTContext.get()) {
- if (Target->getCXXABI().isMicrosoft())
+ auto ABI = Target->getCXXABI();
+ if (ABI.isMicrosoft())
VTContext.reset(new MicrosoftVTableContext(*this));
- else
- VTContext.reset(new ItaniumVTableContext(*this));
+ else {
+ auto ComponentLayout = getLangOpts().RelativeCXXABIVTables
+ ? ItaniumVTableContext::Relative
+ : ItaniumVTableContext::Pointer;
+ VTContext.reset(new ItaniumVTableContext(*this, ComponentLayout));
+ }
}
return VTContext.get();
}
@@ -10319,6 +10632,7 @@ MangleContext *ASTContext::createMangleContext(const TargetInfo *T) {
case TargetCXXABI::iOS64:
case TargetCXXABI::WebAssembly:
case TargetCXXABI::WatchOS:
+ case TargetCXXABI::XL:
return ItaniumMangleContext::create(*this, getDiagnostics());
case TargetCXXABI::Microsoft:
return MicrosoftMangleContext::create(*this, getDiagnostics());
@@ -10360,8 +10674,10 @@ QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth,
/// getRealTypeForBitwidth -
/// sets floating point QualTy according to specified bitwidth.
/// Returns empty type if there is no appropriate target types.
-QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth) const {
- TargetInfo::RealType Ty = getTargetInfo().getRealTypeByWidth(DestWidth);
+QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth,
+ bool ExplicitIEEE) const {
+ TargetInfo::RealType Ty =
+ getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitIEEE);
switch (Ty) {
case TargetInfo::Float:
return FloatTy;
@@ -10490,6 +10806,23 @@ ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
return Result;
}
+MSGuidDecl *
+ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const {
+ assert(MSGuidTagDecl && "building MS GUID without MS extensions?");
+
+ llvm::FoldingSetNodeID ID;
+ MSGuidDecl::Profile(ID, Parts);
+
+ void *InsertPos;
+ if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos))
+ return Existing;
+
+ QualType GUIDType = getMSGuidType().withConst();
+ MSGuidDecl *New = MSGuidDecl::Create(*this, GUIDType, Parts);
+ MSGuidDecls.InsertNode(New, InsertPos);
+ return New;
+}
+
bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
const llvm::Triple &T = getTargetInfo().getTriple();
if (!T.isOSDarwin())
@@ -10508,146 +10841,6 @@ bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits);
}
-/// Template specializations to abstract away from pointers and TypeLocs.
-/// @{
-template <typename T>
-static ast_type_traits::DynTypedNode createDynTypedNode(const T &Node) {
- return ast_type_traits::DynTypedNode::create(*Node);
-}
-template <>
-ast_type_traits::DynTypedNode createDynTypedNode(const TypeLoc &Node) {
- return ast_type_traits::DynTypedNode::create(Node);
-}
-template <>
-ast_type_traits::DynTypedNode
-createDynTypedNode(const NestedNameSpecifierLoc &Node) {
- return ast_type_traits::DynTypedNode::create(Node);
-}
-/// @}
-
-/// A \c RecursiveASTVisitor that builds a map from nodes to their
-/// parents as defined by the \c RecursiveASTVisitor.
-///
-/// Note that the relationship described here is purely in terms of AST
-/// traversal - there are other relationships (for example declaration context)
-/// in the AST that are better modeled by special matchers.
-///
-/// FIXME: Currently only builds up the map using \c Stmt and \c Decl nodes.
-class ASTContext::ParentMap::ASTVisitor
- : public RecursiveASTVisitor<ASTVisitor> {
-public:
- ASTVisitor(ParentMap &Map, ASTContext &Context)
- : Map(Map), Context(Context) {}
-
-private:
- friend class RecursiveASTVisitor<ASTVisitor>;
-
- using VisitorBase = RecursiveASTVisitor<ASTVisitor>;
-
- bool shouldVisitTemplateInstantiations() const { return true; }
-
- bool shouldVisitImplicitCode() const { return true; }
-
- template <typename T, typename MapNodeTy, typename BaseTraverseFn,
- typename MapTy>
- bool TraverseNode(T Node, MapNodeTy MapNode, BaseTraverseFn BaseTraverse,
- MapTy *Parents) {
- if (!Node)
- return true;
- if (ParentStack.size() > 0) {
- // FIXME: Currently we add the same parent multiple times, but only
- // when no memoization data is available for the type.
- // For example when we visit all subexpressions of template
- // instantiations; this is suboptimal, but benign: the only way to
- // visit those is with hasAncestor / hasParent, and those do not create
- // new matches.
- // The plan is to enable DynTypedNode to be storable in a map or hash
- // map. The main problem there is to implement hash functions /
- // comparison operators for all types that DynTypedNode supports that
- // do not have pointer identity.
- auto &NodeOrVector = (*Parents)[MapNode];
- if (NodeOrVector.isNull()) {
- if (const auto *D = ParentStack.back().get<Decl>())
- NodeOrVector = D;
- else if (const auto *S = ParentStack.back().get<Stmt>())
- NodeOrVector = S;
- else
- NodeOrVector = new ast_type_traits::DynTypedNode(ParentStack.back());
- } else {
- if (!NodeOrVector.template is<ParentVector *>()) {
- auto *Vector = new ParentVector(
- 1, getSingleDynTypedNodeFromParentMap(NodeOrVector));
- delete NodeOrVector
- .template dyn_cast<ast_type_traits::DynTypedNode *>();
- NodeOrVector = Vector;
- }
-
- auto *Vector = NodeOrVector.template get<ParentVector *>();
- // Skip duplicates for types that have memoization data.
- // We must check that the type has memoization data before calling
- // std::find() because DynTypedNode::operator== can't compare all
- // types.
- bool Found = ParentStack.back().getMemoizationData() &&
- std::find(Vector->begin(), Vector->end(),
- ParentStack.back()) != Vector->end();
- if (!Found)
- Vector->push_back(ParentStack.back());
- }
- }
- ParentStack.push_back(createDynTypedNode(Node));
- bool Result = BaseTraverse();
- ParentStack.pop_back();
- return Result;
- }
-
- bool TraverseDecl(Decl *DeclNode) {
- return TraverseNode(
- DeclNode, DeclNode, [&] { return VisitorBase::TraverseDecl(DeclNode); },
- &Map.PointerParents);
- }
-
- bool TraverseStmt(Stmt *StmtNode) {
- Stmt *FilteredNode = StmtNode;
- if (auto *ExprNode = dyn_cast_or_null<Expr>(FilteredNode))
- FilteredNode = Context.traverseIgnored(ExprNode);
- return TraverseNode(FilteredNode, FilteredNode,
- [&] { return VisitorBase::TraverseStmt(FilteredNode); },
- &Map.PointerParents);
- }
-
- bool TraverseTypeLoc(TypeLoc TypeLocNode) {
- return TraverseNode(
- TypeLocNode, ast_type_traits::DynTypedNode::create(TypeLocNode),
- [&] { return VisitorBase::TraverseTypeLoc(TypeLocNode); },
- &Map.OtherParents);
- }
-
- bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNSLocNode) {
- return TraverseNode(
- NNSLocNode, ast_type_traits::DynTypedNode::create(NNSLocNode),
- [&] { return VisitorBase::TraverseNestedNameSpecifierLoc(NNSLocNode); },
- &Map.OtherParents);
- }
-
- ParentMap &Map;
- ASTContext &Context;
- llvm::SmallVector<ast_type_traits::DynTypedNode, 16> ParentStack;
-};
-
-ASTContext::ParentMap::ParentMap(ASTContext &Ctx) {
- ASTVisitor(*this, Ctx).TraverseAST(Ctx);
-}
-
-ASTContext::DynTypedNodeList
-ASTContext::getParents(const ast_type_traits::DynTypedNode &Node) {
- std::unique_ptr<ParentMap> &P = Parents[Traversal];
- if (!P)
- // We build the parent map for the traversal scope (usually whole TU), as
- // hasAncestor can escape any subtree.
- P = std::make_unique<ParentMap>(*this);
- return P->getParents(Node);
-}
-
bool
ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
const ObjCMethodDecl *MethodImpl) {
@@ -10958,3 +11151,16 @@ void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
Target->getTargetOpts().Features);
}
}
+
+OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
+ OMPTraitInfoVector.emplace_back(new OMPTraitInfo());
+ return *OMPTraitInfoVector.back();
+}
+
+const DiagnosticBuilder &
+clang::operator<<(const DiagnosticBuilder &DB,
+ const ASTContext::SectionInfo &Section) {
+ if (Section.Decl)
+ return DB << Section.Decl;
+ return DB << "a prior #pragma section";
+}
diff --git a/clang/lib/AST/ASTDiagnostic.cpp b/clang/lib/AST/ASTDiagnostic.cpp
index ea4d0dea58a3..05adf226bae3 100644
--- a/clang/lib/AST/ASTDiagnostic.cpp
+++ b/clang/lib/AST/ASTDiagnostic.cpp
@@ -1715,8 +1715,9 @@ class TemplateDiff {
bool FromDefault, bool ToDefault, bool Same) {
assert((FromTD || ToTD) && "Only one template argument may be missing.");
- std::string FromName = FromTD ? FromTD->getName() : "(no argument)";
- std::string ToName = ToTD ? ToTD->getName() : "(no argument)";
+ std::string FromName =
+ std::string(FromTD ? FromTD->getName() : "(no argument)");
+ std::string ToName = std::string(ToTD ? ToTD->getName() : "(no argument)");
if (FromTD && ToTD && FromName == ToName) {
FromName = FromTD->getQualifiedNameAsString();
ToName = ToTD->getQualifiedNameAsString();
diff --git a/clang/lib/AST/ASTDumper.cpp b/clang/lib/AST/ASTDumper.cpp
index 22196a1a2600..284e5bdbc6b0 100644
--- a/clang/lib/AST/ASTDumper.cpp
+++ b/clang/lib/AST/ASTDumper.cpp
@@ -54,7 +54,7 @@ void ASTDumper::dumpLookups(const DeclContext *DC, bool DumpDecls) {
NodeDumper.AddChild([=] {
NodeDumper.dumpBareDeclRef(*RI);
- if ((*RI)->isHidden())
+ if (!(*RI)->isUnconditionallyVisible())
OS << " hidden";
// If requested, dump the redecl chain for this lookup.
@@ -159,17 +159,22 @@ void QualType::dump(const char *msg) const {
dump();
}
-LLVM_DUMP_METHOD void QualType::dump() const { dump(llvm::errs()); }
+LLVM_DUMP_METHOD void QualType::dump() const {
+ ASTDumper Dumper(llvm::errs(), /*ShowColors=*/false);
+ Dumper.Visit(*this);
+}
-LLVM_DUMP_METHOD void QualType::dump(llvm::raw_ostream &OS) const {
- ASTDumper Dumper(OS, nullptr, nullptr);
+LLVM_DUMP_METHOD void QualType::dump(llvm::raw_ostream &OS,
+ const ASTContext &Context) const {
+ ASTDumper Dumper(OS, Context, Context.getDiagnostics().getShowColors());
Dumper.Visit(*this);
}
-LLVM_DUMP_METHOD void Type::dump() const { dump(llvm::errs()); }
+LLVM_DUMP_METHOD void Type::dump() const { QualType(this, 0).dump(); }
-LLVM_DUMP_METHOD void Type::dump(llvm::raw_ostream &OS) const {
- QualType(this, 0).dump(OS);
+LLVM_DUMP_METHOD void Type::dump(llvm::raw_ostream &OS,
+ const ASTContext &Context) const {
+ QualType(this, 0).dump(OS, Context);
}
//===----------------------------------------------------------------------===//
@@ -189,8 +194,7 @@ LLVM_DUMP_METHOD void Decl::dump(raw_ostream &OS, bool Deserialize,
(void)Deserialize; // FIXME?
P.Visit(this);
} else {
- ASTDumper P(OS, &Ctx.getCommentCommandTraits(), &SM,
- SM.getDiagnostics().getShowColors(), Ctx.getPrintingPolicy());
+ ASTDumper P(OS, Ctx, Ctx.getDiagnostics().getShowColors());
P.setDeserialize(Deserialize);
P.Visit(this);
}
@@ -198,9 +202,7 @@ LLVM_DUMP_METHOD void Decl::dump(raw_ostream &OS, bool Deserialize,
LLVM_DUMP_METHOD void Decl::dumpColor() const {
const ASTContext &Ctx = getASTContext();
- ASTDumper P(llvm::errs(), &Ctx.getCommentCommandTraits(),
- &Ctx.getSourceManager(), /*ShowColors*/ true,
- Ctx.getPrintingPolicy());
+ ASTDumper P(llvm::errs(), Ctx, /*ShowColors=*/true);
P.Visit(this);
}
@@ -214,10 +216,8 @@ LLVM_DUMP_METHOD void DeclContext::dumpLookups(raw_ostream &OS,
const DeclContext *DC = this;
while (!DC->isTranslationUnit())
DC = DC->getParent();
- ASTContext &Ctx = cast<TranslationUnitDecl>(DC)->getASTContext();
- const SourceManager &SM = Ctx.getSourceManager();
- ASTDumper P(OS, &Ctx.getCommentCommandTraits(), &Ctx.getSourceManager(),
- SM.getDiagnostics().getShowColors(), Ctx.getPrintingPolicy());
+ const ASTContext &Ctx = cast<TranslationUnitDecl>(DC)->getASTContext();
+ ASTDumper P(OS, Ctx, Ctx.getDiagnostics().getShowColors());
P.setDeserialize(Deserialize);
P.dumpLookups(this, DumpDecls);
}
@@ -226,27 +226,19 @@ LLVM_DUMP_METHOD void DeclContext::dumpLookups(raw_ostream &OS,
// Stmt method implementations
//===----------------------------------------------------------------------===//
-LLVM_DUMP_METHOD void Stmt::dump(SourceManager &SM) const {
- dump(llvm::errs(), SM);
-}
-
-LLVM_DUMP_METHOD void Stmt::dump(raw_ostream &OS, SourceManager &SM) const {
- ASTDumper P(OS, nullptr, &SM);
- P.Visit(this);
-}
-
-LLVM_DUMP_METHOD void Stmt::dump(raw_ostream &OS) const {
- ASTDumper P(OS, nullptr, nullptr);
+LLVM_DUMP_METHOD void Stmt::dump() const {
+ ASTDumper P(llvm::errs(), /*ShowColors=*/false);
P.Visit(this);
}
-LLVM_DUMP_METHOD void Stmt::dump() const {
- ASTDumper P(llvm::errs(), nullptr, nullptr);
+LLVM_DUMP_METHOD void Stmt::dump(raw_ostream &OS,
+ const ASTContext &Context) const {
+ ASTDumper P(OS, Context, Context.getDiagnostics().getShowColors());
P.Visit(this);
}
LLVM_DUMP_METHOD void Stmt::dumpColor() const {
- ASTDumper P(llvm::errs(), nullptr, nullptr, /*ShowColors*/true);
+ ASTDumper P(llvm::errs(), /*ShowColors=*/true);
P.Visit(this);
}
@@ -255,27 +247,42 @@ LLVM_DUMP_METHOD void Stmt::dumpColor() const {
//===----------------------------------------------------------------------===//
LLVM_DUMP_METHOD void Comment::dump() const {
- dump(llvm::errs(), nullptr, nullptr);
-}
-
-LLVM_DUMP_METHOD void Comment::dump(const ASTContext &Context) const {
- dump(llvm::errs(), &Context.getCommentCommandTraits(),
- &Context.getSourceManager());
+ const auto *FC = dyn_cast<FullComment>(this);
+ if (!FC)
+ return;
+ ASTDumper Dumper(llvm::errs(), /*ShowColors=*/false);
+ Dumper.Visit(FC, FC);
}
-void Comment::dump(raw_ostream &OS, const CommandTraits *Traits,
- const SourceManager *SM) const {
- const FullComment *FC = dyn_cast<FullComment>(this);
+LLVM_DUMP_METHOD void Comment::dump(raw_ostream &OS,
+ const ASTContext &Context) const {
+ const auto *FC = dyn_cast<FullComment>(this);
if (!FC)
return;
- ASTDumper D(OS, Traits, SM);
- D.Visit(FC, FC);
+ ASTDumper Dumper(OS, Context, Context.getDiagnostics().getShowColors());
+ Dumper.Visit(FC, FC);
}
LLVM_DUMP_METHOD void Comment::dumpColor() const {
- const FullComment *FC = dyn_cast<FullComment>(this);
+ const auto *FC = dyn_cast<FullComment>(this);
if (!FC)
return;
- ASTDumper D(llvm::errs(), nullptr, nullptr, /*ShowColors*/true);
- D.Visit(FC, FC);
+ ASTDumper Dumper(llvm::errs(), /*ShowColors=*/true);
+ Dumper.Visit(FC, FC);
+}
+
+//===----------------------------------------------------------------------===//
+// APValue method implementations
+//===----------------------------------------------------------------------===//
+
+LLVM_DUMP_METHOD void APValue::dump() const {
+ ASTDumper Dumper(llvm::errs(), /*ShowColors=*/false);
+ Dumper.Visit(*this, /*Ty=*/QualType());
+}
+
+LLVM_DUMP_METHOD void APValue::dump(raw_ostream &OS,
+ const ASTContext &Context) const {
+ ASTDumper Dumper(llvm::errs(), Context,
+ Context.getDiagnostics().getShowColors());
+ Dumper.Visit(*this, /*Ty=*/Context.getPointerType(Context.CharTy));
}
diff --git a/clang/lib/AST/ASTImporter.cpp b/clang/lib/AST/ASTImporter.cpp
index 22fb67478c96..3779e0cb872b 100644
--- a/clang/lib/AST/ASTImporter.cpp
+++ b/clang/lib/AST/ASTImporter.cpp
@@ -186,36 +186,25 @@ namespace clang {
return import(*From);
}
- template <class T>
- Expected<std::tuple<T>>
- importSeq(const T &From) {
- Expected<T> ToOrErr = import(From);
- if (!ToOrErr)
- return ToOrErr.takeError();
- return std::make_tuple<T>(std::move(*ToOrErr));
- }
-
- // Import multiple objects with a single function call.
- // This should work for every type for which a variant of `import` exists.
- // The arguments are processed from left to right and import is stopped on
- // first error.
- template <class THead, class... TTail>
- Expected<std::tuple<THead, TTail...>>
- importSeq(const THead &FromHead, const TTail &...FromTail) {
- Expected<std::tuple<THead>> ToHeadOrErr = importSeq(FromHead);
- if (!ToHeadOrErr)
- return ToHeadOrErr.takeError();
- Expected<std::tuple<TTail...>> ToTailOrErr = importSeq(FromTail...);
- if (!ToTailOrErr)
- return ToTailOrErr.takeError();
- return std::tuple_cat(*ToHeadOrErr, *ToTailOrErr);
+ // Helper for chaining together multiple imports. If an error is detected,
+ // subsequent imports will return default constructed nodes, so that failure
+ // can be detected with a single conditional branch after a sequence of
+ // imports.
+ template <typename T> T importChecked(Error &Err, const T &From) {
+ // Don't attempt to import nodes if we hit an error earlier.
+ if (Err)
+ return T{};
+ Expected<T> MaybeVal = import(From);
+ if (!MaybeVal) {
+ Err = MaybeVal.takeError();
+ return T{};
+ }
+ return *MaybeVal;
}
-// Wrapper for an overload set.
+ // Wrapper for an overload set.
template <typename ToDeclT> struct CallOverloadedCreateFun {
- template <typename... Args>
- auto operator()(Args &&... args)
- -> decltype(ToDeclT::Create(std::forward<Args>(args)...)) {
+ template <typename... Args> decltype(auto) operator()(Args &&... args) {
return ToDeclT::Create(std::forward<Args>(args)...);
}
};
@@ -474,7 +463,7 @@ namespace clang {
ParmVarDecl *ToParam);
template <typename T>
- bool hasSameVisibilityContext(T *Found, T *From);
+ bool hasSameVisibilityContextAndLinkage(T *Found, T *From);
bool IsStructuralMatch(Decl *From, Decl *To, bool Complain);
bool IsStructuralMatch(RecordDecl *FromRecord, RecordDecl *ToRecord,
@@ -599,6 +588,7 @@ namespace clang {
ExpectedStmt VisitIntegerLiteral(IntegerLiteral *E);
ExpectedStmt VisitFloatingLiteral(FloatingLiteral *E);
ExpectedStmt VisitImaginaryLiteral(ImaginaryLiteral *E);
+ ExpectedStmt VisitFixedPointLiteral(FixedPointLiteral *E);
ExpectedStmt VisitCharacterLiteral(CharacterLiteral *E);
ExpectedStmt VisitStringLiteral(StringLiteral *E);
ExpectedStmt VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
@@ -659,7 +649,7 @@ namespace clang {
template<typename IIter, typename OIter>
Error ImportArrayChecked(IIter Ibegin, IIter Iend, OIter Obegin) {
- using ItemT = typename std::remove_reference<decltype(*Obegin)>::type;
+ using ItemT = std::remove_reference_t<decltype(*Obegin)>;
for (; Ibegin != Iend; ++Ibegin, ++Obegin) {
Expected<ItemT> ToOrErr = import(*Ibegin);
if (!ToOrErr)
@@ -984,7 +974,10 @@ Expected<LambdaCapture> ASTNodeImporter::import(const LambdaCapture &From) {
}
template <typename T>
-bool ASTNodeImporter::hasSameVisibilityContext(T *Found, T *From) {
+bool ASTNodeImporter::hasSameVisibilityContextAndLinkage(T *Found, T *From) {
+ if (Found->getLinkageInternal() != From->getLinkageInternal())
+ return false;
+
if (From->hasExternalFormalLinkage())
return Found->hasExternalFormalLinkage();
if (Importer.GetFromTU(Found) != From->getTranslationUnitDecl())
@@ -997,8 +990,11 @@ bool ASTNodeImporter::hasSameVisibilityContext(T *Found, T *From) {
}
template <>
-bool ASTNodeImporter::hasSameVisibilityContext(TypedefNameDecl *Found,
+bool ASTNodeImporter::hasSameVisibilityContextAndLinkage(TypedefNameDecl *Found,
TypedefNameDecl *From) {
+ if (Found->getLinkageInternal() != From->getLinkageInternal())
+ return false;
+
if (From->isInAnonymousNamespace() && Found->isInAnonymousNamespace())
return Importer.GetFromTU(Found) == From->getTranslationUnitDecl();
return From->isInAnonymousNamespace() == Found->isInAnonymousNamespace();
@@ -1149,12 +1145,11 @@ ASTNodeImporter::VisitMemberPointerType(const MemberPointerType *T) {
ExpectedType
ASTNodeImporter::VisitConstantArrayType(const ConstantArrayType *T) {
- QualType ToElementType;
- const Expr *ToSizeExpr;
- if (auto Imp = importSeq(T->getElementType(), T->getSizeExpr()))
- std::tie(ToElementType, ToSizeExpr) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToElementType = importChecked(Err, T->getElementType());
+ auto ToSizeExpr = importChecked(Err, T->getSizeExpr());
+ if (Err)
+ return std::move(Err);
return Importer.getToContext().getConstantArrayType(
ToElementType, T->getSize(), ToSizeExpr, T->getSizeModifier(),
@@ -1174,15 +1169,12 @@ ASTNodeImporter::VisitIncompleteArrayType(const IncompleteArrayType *T) {
ExpectedType
ASTNodeImporter::VisitVariableArrayType(const VariableArrayType *T) {
- QualType ToElementType;
- Expr *ToSizeExpr;
- SourceRange ToBracketsRange;
- if (auto Imp = importSeq(
- T->getElementType(), T->getSizeExpr(), T->getBracketsRange()))
- std::tie(ToElementType, ToSizeExpr, ToBracketsRange) = *Imp;
- else
- return Imp.takeError();
-
+ Error Err = Error::success();
+ QualType ToElementType = importChecked(Err, T->getElementType());
+ Expr *ToSizeExpr = importChecked(Err, T->getSizeExpr());
+ SourceRange ToBracketsRange = importChecked(Err, T->getBracketsRange());
+ if (Err)
+ return std::move(Err);
return Importer.getToContext().getVariableArrayType(
ToElementType, ToSizeExpr, T->getSizeModifier(),
T->getIndexTypeCVRQualifiers(), ToBracketsRange);
@@ -1190,14 +1182,12 @@ ASTNodeImporter::VisitVariableArrayType(const VariableArrayType *T) {
ExpectedType ASTNodeImporter::VisitDependentSizedArrayType(
const DependentSizedArrayType *T) {
- QualType ToElementType;
- Expr *ToSizeExpr;
- SourceRange ToBracketsRange;
- if (auto Imp = importSeq(
- T->getElementType(), T->getSizeExpr(), T->getBracketsRange()))
- std::tie(ToElementType, ToSizeExpr, ToBracketsRange) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ QualType ToElementType = importChecked(Err, T->getElementType());
+ Expr *ToSizeExpr = importChecked(Err, T->getSizeExpr());
+ SourceRange ToBracketsRange = importChecked(Err, T->getBracketsRange());
+ if (Err)
+ return std::move(Err);
// SizeExpr may be null if size is not specified directly.
// For example, 'int a[]'.
@@ -1262,26 +1252,24 @@ ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) {
}
FunctionProtoType::ExtProtoInfo FromEPI = T->getExtProtoInfo();
+ Error Err = Error::success();
FunctionProtoType::ExtProtoInfo ToEPI;
-
- auto Imp = importSeq(
- FromEPI.ExceptionSpec.NoexceptExpr,
- FromEPI.ExceptionSpec.SourceDecl,
- FromEPI.ExceptionSpec.SourceTemplate);
- if (!Imp)
- return Imp.takeError();
-
ToEPI.ExtInfo = FromEPI.ExtInfo;
ToEPI.Variadic = FromEPI.Variadic;
ToEPI.HasTrailingReturn = FromEPI.HasTrailingReturn;
ToEPI.TypeQuals = FromEPI.TypeQuals;
ToEPI.RefQualifier = FromEPI.RefQualifier;
ToEPI.ExceptionSpec.Type = FromEPI.ExceptionSpec.Type;
+ ToEPI.ExceptionSpec.NoexceptExpr =
+ importChecked(Err, FromEPI.ExceptionSpec.NoexceptExpr);
+ ToEPI.ExceptionSpec.SourceDecl =
+ importChecked(Err, FromEPI.ExceptionSpec.SourceDecl);
+ ToEPI.ExceptionSpec.SourceTemplate =
+ importChecked(Err, FromEPI.ExceptionSpec.SourceTemplate);
ToEPI.ExceptionSpec.Exceptions = ExceptionTypes;
- std::tie(
- ToEPI.ExceptionSpec.NoexceptExpr,
- ToEPI.ExceptionSpec.SourceDecl,
- ToEPI.ExceptionSpec.SourceTemplate) = *Imp;
+
+ if (Err)
+ return std::move(Err);
return Importer.getToContext().getFunctionType(
*ToReturnTypeOrErr, ArgTypes, ToEPI);
@@ -1289,12 +1277,11 @@ ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) {
ExpectedType ASTNodeImporter::VisitUnresolvedUsingType(
const UnresolvedUsingType *T) {
- UnresolvedUsingTypenameDecl *ToD;
- Decl *ToPrevD;
- if (auto Imp = importSeq(T->getDecl(), T->getDecl()->getPreviousDecl()))
- std::tie(ToD, ToPrevD) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToD = importChecked(Err, T->getDecl());
+ auto ToPrevD = importChecked(Err, T->getDecl()->getPreviousDecl());
+ if (Err)
+ return std::move(Err);
return Importer.getToContext().getTypeDeclType(
ToD, cast_or_null<TypeDecl>(ToPrevD));
@@ -1366,9 +1353,21 @@ ExpectedType ASTNodeImporter::VisitAutoType(const AutoType *T) {
if (!ToDeducedTypeOrErr)
return ToDeducedTypeOrErr.takeError();
- return Importer.getToContext().getAutoType(*ToDeducedTypeOrErr,
- T->getKeyword(),
- /*IsDependent*/false);
+ ExpectedDecl ToTypeConstraintConcept = import(T->getTypeConstraintConcept());
+ if (!ToTypeConstraintConcept)
+ return ToTypeConstraintConcept.takeError();
+
+ SmallVector<TemplateArgument, 2> ToTemplateArgs;
+ ArrayRef<TemplateArgument> FromTemplateArgs = T->getTypeConstraintArguments();
+ if (Error Err = ImportTemplateArguments(FromTemplateArgs.data(),
+ FromTemplateArgs.size(),
+ ToTemplateArgs))
+ return std::move(Err);
+
+ return Importer.getToContext().getAutoType(
+ *ToDeducedTypeOrErr, T->getKeyword(), /*IsDependent*/false,
+ /*IsPack=*/false, cast_or_null<ConceptDecl>(*ToTypeConstraintConcept),
+ ToTemplateArgs);
}
ExpectedType ASTNodeImporter::VisitInjectedClassNameType(
@@ -1741,7 +1740,7 @@ ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
// fix since operations such as code generation will expect this to be so.
if (ImportedOrErr) {
FieldDecl *FieldFrom = dyn_cast_or_null<FieldDecl>(From);
- Decl *ImportedDecl = (Decl*)*ImportedOrErr;
+ Decl *ImportedDecl = *ImportedOrErr;
FieldDecl *FieldTo = dyn_cast_or_null<FieldDecl>(ImportedDecl);
if (FieldFrom && FieldTo) {
const RecordType *RecordFrom = FieldFrom->getType()->getAs<RecordType>();
@@ -1891,6 +1890,19 @@ Error ASTNodeImporter::ImportDefinition(
// set in CXXRecordDecl::CreateLambda. We must import the contained
// decls here and finish the definition.
(To->isLambda() && shouldForceImportDeclContext(Kind))) {
+ if (To->isLambda()) {
+ auto *FromCXXRD = cast<CXXRecordDecl>(From);
+ SmallVector<LambdaCapture, 8> ToCaptures;
+ ToCaptures.reserve(FromCXXRD->capture_size());
+ for (const auto &FromCapture : FromCXXRD->captures()) {
+ if (auto ToCaptureOrErr = import(FromCapture))
+ ToCaptures.push_back(*ToCaptureOrErr);
+ else
+ return ToCaptureOrErr.takeError();
+ }
+ cast<CXXRecordDecl>(To)->setCaptures(ToCaptures);
+ }
+
Error Result = ImportDeclContext(From, /*ForceImport=*/true);
// Finish the definition of the lambda, set isBeingDefined to false.
if (To->isLambda())
@@ -2236,14 +2248,13 @@ ExpectedDecl ASTNodeImporter::VisitStaticAssertDecl(StaticAssertDecl *D) {
DeclContext *DC = *DCOrErr;
DeclContext *LexicalDC = DC;
- SourceLocation ToLocation, ToRParenLoc;
- Expr *ToAssertExpr;
- StringLiteral *ToMessage;
- if (auto Imp = importSeq(
- D->getLocation(), D->getAssertExpr(), D->getMessage(), D->getRParenLoc()))
- std::tie(ToLocation, ToAssertExpr, ToMessage, ToRParenLoc) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToLocation = importChecked(Err, D->getLocation());
+ auto ToRParenLoc = importChecked(Err, D->getRParenLoc());
+ auto ToAssertExpr = importChecked(Err, D->getAssertExpr());
+ auto ToMessage = importChecked(Err, D->getMessage());
+ if (Err)
+ return std::move(Err);
StaticAssertDecl *ToD;
if (GetImportedOrCreateDecl(
@@ -2352,17 +2363,15 @@ ExpectedDecl ASTNodeImporter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
// NOTE: No conflict resolution is done for namespace aliases now.
- SourceLocation ToNamespaceLoc, ToAliasLoc, ToTargetNameLoc;
- NestedNameSpecifierLoc ToQualifierLoc;
- NamespaceDecl *ToNamespace;
- if (auto Imp = importSeq(
- D->getNamespaceLoc(), D->getAliasLoc(), D->getQualifierLoc(),
- D->getTargetNameLoc(), D->getNamespace()))
- std::tie(
- ToNamespaceLoc, ToAliasLoc, ToQualifierLoc, ToTargetNameLoc,
- ToNamespace) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToNamespaceLoc = importChecked(Err, D->getNamespaceLoc());
+ auto ToAliasLoc = importChecked(Err, D->getAliasLoc());
+ auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc());
+ auto ToTargetNameLoc = importChecked(Err, D->getTargetNameLoc());
+ auto ToNamespace = importChecked(Err, D->getNamespace());
+ if (Err)
+ return std::move(Err);
+
IdentifierInfo *ToIdentifier = Importer.Import(D->getIdentifier());
NamespaceAliasDecl *ToD;
@@ -2403,7 +2412,7 @@ ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
if (!FoundDecl->isInIdentifierNamespace(IDNS))
continue;
if (auto *FoundTypedef = dyn_cast<TypedefNameDecl>(FoundDecl)) {
- if (!hasSameVisibilityContext(FoundTypedef, D))
+ if (!hasSameVisibilityContextAndLinkage(FoundTypedef, D))
continue;
QualType FromUT = D->getUnderlyingType();
@@ -2431,17 +2440,16 @@ ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
}
}
- QualType ToUnderlyingType;
- TypeSourceInfo *ToTypeSourceInfo;
- SourceLocation ToBeginLoc;
- if (auto Imp = importSeq(
- D->getUnderlyingType(), D->getTypeSourceInfo(), D->getBeginLoc()))
- std::tie(ToUnderlyingType, ToTypeSourceInfo, ToBeginLoc) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToUnderlyingType = importChecked(Err, D->getUnderlyingType());
+ auto ToTypeSourceInfo = importChecked(Err, D->getTypeSourceInfo());
+ auto ToBeginLoc = importChecked(Err, D->getBeginLoc());
+ if (Err)
+ return std::move(Err);
// Create the new typedef node.
// FIXME: ToUnderlyingType is not used.
+ (void)ToUnderlyingType;
TypedefNameDecl *ToTypedef;
if (IsAlias) {
if (GetImportedOrCreateDecl<TypeAliasDecl>(
@@ -2509,12 +2517,11 @@ ASTNodeImporter::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
}
}
- TemplateParameterList *ToTemplateParameters;
- TypeAliasDecl *ToTemplatedDecl;
- if (auto Imp = importSeq(D->getTemplateParameters(), D->getTemplatedDecl()))
- std::tie(ToTemplateParameters, ToTemplatedDecl) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToTemplateParameters = importChecked(Err, D->getTemplateParameters());
+ auto ToTemplatedDecl = importChecked(Err, D->getTemplatedDecl());
+ if (Err)
+ return std::move(Err);
TypeAliasTemplateDecl *ToAlias;
if (GetImportedOrCreateDecl(ToAlias, D, Importer.getToContext(), DC, Loc,
@@ -2591,6 +2598,7 @@ ExpectedDecl ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
IDNS |= Decl::IDNS_Ordinary;
// We may already have an enum of the same name; try to find and match it.
+ EnumDecl *PrevDecl = nullptr;
if (!DC->isFunctionOrMethod() && SearchName) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
auto FoundDecls =
@@ -2605,10 +2613,15 @@ ExpectedDecl ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
}
if (auto *FoundEnum = dyn_cast<EnumDecl>(FoundDecl)) {
- if (!hasSameVisibilityContext(FoundEnum, D))
+ if (!hasSameVisibilityContextAndLinkage(FoundEnum, D))
continue;
- if (IsStructuralMatch(D, FoundEnum))
- return Importer.MapImported(D, FoundEnum);
+ if (IsStructuralMatch(D, FoundEnum)) {
+ EnumDecl *FoundDef = FoundEnum->getDefinition();
+ if (D->isThisDeclarationADefinition() && FoundDef)
+ return Importer.MapImported(D, FoundDef);
+ PrevDecl = FoundEnum->getMostRecentDecl();
+ break;
+ }
ConflictingDecls.push_back(FoundDecl);
}
}
@@ -2624,21 +2637,19 @@ ExpectedDecl ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
}
}
- SourceLocation ToBeginLoc;
- NestedNameSpecifierLoc ToQualifierLoc;
- QualType ToIntegerType;
- SourceRange ToBraceRange;
- if (auto Imp = importSeq(D->getBeginLoc(), D->getQualifierLoc(),
- D->getIntegerType(), D->getBraceRange()))
- std::tie(ToBeginLoc, ToQualifierLoc, ToIntegerType, ToBraceRange) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToBeginLoc = importChecked(Err, D->getBeginLoc());
+ auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc());
+ auto ToIntegerType = importChecked(Err, D->getIntegerType());
+ auto ToBraceRange = importChecked(Err, D->getBraceRange());
+ if (Err)
+ return std::move(Err);
// Create the enum declaration.
EnumDecl *D2;
if (GetImportedOrCreateDecl(
D2, D, Importer.getToContext(), DC, ToBeginLoc,
- Loc, Name.getAsIdentifierInfo(), nullptr, D->isScoped(),
+ Loc, Name.getAsIdentifierInfo(), PrevDecl, D->isScoped(),
D->isScopedUsingClassTag(), D->isFixed()))
return D2;
@@ -2724,7 +2735,7 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
if (!IsStructuralMatch(D, FoundRecord, false))
continue;
- if (!hasSameVisibilityContext(FoundRecord, D))
+ if (!hasSameVisibilityContextAndLinkage(FoundRecord, D))
continue;
if (IsStructuralMatch(D, FoundRecord)) {
@@ -2784,7 +2795,7 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
return CDeclOrErr.takeError();
D2CXX->setLambdaMangling(DCXX->getLambdaManglingNumber(), *CDeclOrErr,
DCXX->hasKnownLambdaInternalLinkage());
- } else if (DCXX->isInjectedClassName()) {
+ } else if (DCXX->isInjectedClassName()) {
// We have to be careful to do a similar dance to the one in
// Sema::ActOnStartCXXMemberDeclarations
const bool DelayTypeCreation = true;
@@ -3172,7 +3183,7 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
continue;
if (auto *FoundFunction = dyn_cast<FunctionDecl>(FoundDecl)) {
- if (!hasSameVisibilityContext(FoundFunction, D))
+ if (!hasSameVisibilityContextAndLinkage(FoundFunction, D))
continue;
if (IsStructuralMatch(D, FoundFunction)) {
@@ -3275,18 +3286,16 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
FromReturnTy, FromFPT->getParamTypes(), FromEPI);
}
- QualType T;
- TypeSourceInfo *TInfo;
- SourceLocation ToInnerLocStart, ToEndLoc;
- NestedNameSpecifierLoc ToQualifierLoc;
- Expr *TrailingRequiresClause;
- if (auto Imp = importSeq(
- FromTy, D->getTypeSourceInfo(), D->getInnerLocStart(),
- D->getQualifierLoc(), D->getEndLoc(), D->getTrailingRequiresClause()))
- std::tie(T, TInfo, ToInnerLocStart, ToQualifierLoc, ToEndLoc,
- TrailingRequiresClause) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto T = importChecked(Err, FromTy);
+ auto TInfo = importChecked(Err, D->getTypeSourceInfo());
+ auto ToInnerLocStart = importChecked(Err, D->getInnerLocStart());
+ auto ToEndLoc = importChecked(Err, D->getEndLoc());
+ auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc());
+ auto TrailingRequiresClause =
+ importChecked(Err, D->getTrailingRequiresClause());
+ if (Err)
+ return std::move(Err);
// Import the function parameters.
SmallVector<ParmVarDecl *, 8> Parameters;
@@ -3302,10 +3311,10 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
if (auto *FromConstructor = dyn_cast<CXXConstructorDecl>(D)) {
Expr *ExplicitExpr = nullptr;
if (FromConstructor->getExplicitSpecifier().getExpr()) {
- auto Imp = importSeq(FromConstructor->getExplicitSpecifier().getExpr());
+ auto Imp = import(FromConstructor->getExplicitSpecifier().getExpr());
if (!Imp)
return Imp.takeError();
- std::tie(ExplicitExpr) = *Imp;
+ ExplicitExpr = *Imp;
}
if (GetImportedOrCreateDecl<CXXConstructorDecl>(
ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
@@ -3320,16 +3329,12 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
return ToFunction;
} else if (CXXDestructorDecl *FromDtor = dyn_cast<CXXDestructorDecl>(D)) {
- auto Imp =
- importSeq(const_cast<FunctionDecl *>(FromDtor->getOperatorDelete()),
- FromDtor->getOperatorDeleteThisArg());
-
- if (!Imp)
- return Imp.takeError();
-
- FunctionDecl *ToOperatorDelete;
- Expr *ToThisArg;
- std::tie(ToOperatorDelete, ToThisArg) = *Imp;
+ Error Err = Error::success();
+ auto ToOperatorDelete = importChecked(
+ Err, const_cast<FunctionDecl *>(FromDtor->getOperatorDelete()));
+ auto ToThisArg = importChecked(Err, FromDtor->getOperatorDeleteThisArg());
+ if (Err)
+ return std::move(Err);
if (GetImportedOrCreateDecl<CXXDestructorDecl>(
ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
@@ -3344,10 +3349,10 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
dyn_cast<CXXConversionDecl>(D)) {
Expr *ExplicitExpr = nullptr;
if (FromConversion->getExplicitSpecifier().getExpr()) {
- auto Imp = importSeq(FromConversion->getExplicitSpecifier().getExpr());
+ auto Imp = import(FromConversion->getExplicitSpecifier().getExpr());
if (!Imp)
return Imp.takeError();
- std::tie(ExplicitExpr) = *Imp;
+ ExplicitExpr = *Imp;
}
if (GetImportedOrCreateDecl<CXXConversionDecl>(
ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
@@ -3546,18 +3551,14 @@ ExpectedDecl ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
}
}
- QualType ToType;
- TypeSourceInfo *ToTInfo;
- Expr *ToBitWidth;
- SourceLocation ToInnerLocStart;
- Expr *ToInitializer;
- if (auto Imp = importSeq(
- D->getType(), D->getTypeSourceInfo(), D->getBitWidth(),
- D->getInnerLocStart(), D->getInClassInitializer()))
- std::tie(
- ToType, ToTInfo, ToBitWidth, ToInnerLocStart, ToInitializer) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, D->getType());
+ auto ToTInfo = importChecked(Err, D->getTypeSourceInfo());
+ auto ToBitWidth = importChecked(Err, D->getBitWidth());
+ auto ToInnerLocStart = importChecked(Err, D->getInnerLocStart());
+ auto ToInitializer = importChecked(Err, D->getInClassInitializer());
+ if (Err)
+ return std::move(Err);
FieldDecl *ToField;
if (GetImportedOrCreateDecl(ToField, D, Importer.getToContext(), DC,
@@ -3645,6 +3646,54 @@ ExpectedDecl ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
return ToIndirectField;
}
+/// Used as return type of getFriendCountAndPosition.
+struct FriendCountAndPosition {
+ /// Number of similar looking friends.
+ unsigned int TotalCount;
+ /// Index of the specific FriendDecl.
+ unsigned int IndexOfDecl;
+};
+
+template <class T>
+static FriendCountAndPosition getFriendCountAndPosition(
+ const FriendDecl *FD,
+ llvm::function_ref<T(const FriendDecl *)> GetCanTypeOrDecl) {
+ unsigned int FriendCount = 0;
+ llvm::Optional<unsigned int> FriendPosition;
+ const auto *RD = cast<CXXRecordDecl>(FD->getLexicalDeclContext());
+
+ T TypeOrDecl = GetCanTypeOrDecl(FD);
+
+ for (const FriendDecl *FoundFriend : RD->friends()) {
+ if (FoundFriend == FD) {
+ FriendPosition = FriendCount;
+ ++FriendCount;
+ } else if (!FoundFriend->getFriendDecl() == !FD->getFriendDecl() &&
+ GetCanTypeOrDecl(FoundFriend) == TypeOrDecl) {
+ ++FriendCount;
+ }
+ }
+
+ assert(FriendPosition && "Friend decl not found in own parent.");
+
+ return {FriendCount, *FriendPosition};
+}
+
+static FriendCountAndPosition getFriendCountAndPosition(const FriendDecl *FD) {
+ if (FD->getFriendType())
+ return getFriendCountAndPosition<QualType>(FD, [](const FriendDecl *F) {
+ if (TypeSourceInfo *TSI = F->getFriendType())
+ return TSI->getType().getCanonicalType();
+ llvm_unreachable("Wrong friend object type.");
+ });
+ else
+ return getFriendCountAndPosition<Decl *>(FD, [](const FriendDecl *F) {
+ if (Decl *D = F->getFriendDecl())
+ return D->getCanonicalDecl();
+ llvm_unreachable("Wrong friend object type.");
+ });
+}
+
ExpectedDecl ASTNodeImporter::VisitFriendDecl(FriendDecl *D) {
// Import the major distinguishing characteristics of a declaration.
DeclContext *DC, *LexicalDC;
@@ -3653,25 +3702,37 @@ ExpectedDecl ASTNodeImporter::VisitFriendDecl(FriendDecl *D) {
// Determine whether we've already imported this decl.
// FriendDecl is not a NamedDecl so we cannot use lookup.
- auto *RD = cast<CXXRecordDecl>(DC);
+ // We try to maintain order and count of redundant friend declarations.
+ const auto *RD = cast<CXXRecordDecl>(DC);
FriendDecl *ImportedFriend = RD->getFirstFriend();
+ SmallVector<FriendDecl *, 2> ImportedEquivalentFriends;
while (ImportedFriend) {
+ bool Match = false;
if (D->getFriendDecl() && ImportedFriend->getFriendDecl()) {
- if (IsStructuralMatch(D->getFriendDecl(), ImportedFriend->getFriendDecl(),
- /*Complain=*/false))
- return Importer.MapImported(D, ImportedFriend);
-
+ Match =
+ IsStructuralMatch(D->getFriendDecl(), ImportedFriend->getFriendDecl(),
+ /*Complain=*/false);
} else if (D->getFriendType() && ImportedFriend->getFriendType()) {
- if (Importer.IsStructurallyEquivalent(
- D->getFriendType()->getType(),
- ImportedFriend->getFriendType()->getType(), true))
- return Importer.MapImported(D, ImportedFriend);
+ Match = Importer.IsStructurallyEquivalent(
+ D->getFriendType()->getType(),
+ ImportedFriend->getFriendType()->getType(), /*Complain=*/false);
}
+ if (Match)
+ ImportedEquivalentFriends.push_back(ImportedFriend);
+
ImportedFriend = ImportedFriend->getNextFriend();
}
+ FriendCountAndPosition CountAndPosition = getFriendCountAndPosition(D);
+
+ assert(ImportedEquivalentFriends.size() <= CountAndPosition.TotalCount &&
+ "Class with non-matching friends is imported, ODR check wrong?");
+ if (ImportedEquivalentFriends.size() == CountAndPosition.TotalCount)
+ return Importer.MapImported(
+ D, ImportedEquivalentFriends[CountAndPosition.IndexOfDecl]);
// Not found. Create it.
+ // The declarations will be put into order later by ImportDeclContext.
FriendDecl::FriendUnion ToFU;
if (NamedDecl *FriendD = D->getFriendDecl()) {
NamedDecl *ToFriendD;
@@ -3748,15 +3809,13 @@ ExpectedDecl ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
}
}
- QualType ToType;
- TypeSourceInfo *ToTypeSourceInfo;
- Expr *ToBitWidth;
- SourceLocation ToInnerLocStart;
- if (auto Imp = importSeq(
- D->getType(), D->getTypeSourceInfo(), D->getBitWidth(), D->getInnerLocStart()))
- std::tie(ToType, ToTypeSourceInfo, ToBitWidth, ToInnerLocStart) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, D->getType());
+ auto ToTypeSourceInfo = importChecked(Err, D->getTypeSourceInfo());
+ auto ToBitWidth = importChecked(Err, D->getBitWidth());
+ auto ToInnerLocStart = importChecked(Err, D->getInnerLocStart());
+ if (Err)
+ return std::move(Err);
ObjCIvarDecl *ToIvar;
if (GetImportedOrCreateDecl(
@@ -3806,7 +3865,7 @@ ExpectedDecl ASTNodeImporter::VisitVarDecl(VarDecl *D) {
continue;
if (auto *FoundVar = dyn_cast<VarDecl>(FoundDecl)) {
- if (!hasSameVisibilityContext(FoundVar, D))
+ if (!hasSameVisibilityContextAndLinkage(FoundVar, D))
continue;
if (Importer.IsStructurallyEquivalent(D->getType(),
FoundVar->getType())) {
@@ -3870,16 +3929,13 @@ ExpectedDecl ASTNodeImporter::VisitVarDecl(VarDecl *D) {
}
}
- QualType ToType;
- TypeSourceInfo *ToTypeSourceInfo;
- SourceLocation ToInnerLocStart;
- NestedNameSpecifierLoc ToQualifierLoc;
- if (auto Imp = importSeq(
- D->getType(), D->getTypeSourceInfo(), D->getInnerLocStart(),
- D->getQualifierLoc()))
- std::tie(ToType, ToTypeSourceInfo, ToInnerLocStart, ToQualifierLoc) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, D->getType());
+ auto ToTypeSourceInfo = importChecked(Err, D->getTypeSourceInfo());
+ auto ToInnerLocStart = importChecked(Err, D->getInnerLocStart());
+ auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc());
+ if (Err)
+ return std::move(Err);
// Create the imported variable.
VarDecl *ToVar;
@@ -3899,6 +3955,13 @@ ExpectedDecl ASTNodeImporter::VisitVarDecl(VarDecl *D) {
ToVar->setPreviousDecl(Recent);
}
+ // Import the described template, if any.
+ if (D->getDescribedVarTemplate()) {
+ auto ToVTOrErr = import(D->getDescribedVarTemplate());
+ if (!ToVTOrErr)
+ return ToVTOrErr.takeError();
+ }
+
if (Error Err = ImportInitializer(D, ToVar))
return std::move(Err);
@@ -3922,13 +3985,12 @@ ExpectedDecl ASTNodeImporter::VisitImplicitParamDecl(ImplicitParamDecl *D) {
// into the function declaration's context afterward.
DeclContext *DC = Importer.getToContext().getTranslationUnitDecl();
- DeclarationName ToDeclName;
- SourceLocation ToLocation;
- QualType ToType;
- if (auto Imp = importSeq(D->getDeclName(), D->getLocation(), D->getType()))
- std::tie(ToDeclName, ToLocation, ToType) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToDeclName = importChecked(Err, D->getDeclName());
+ auto ToLocation = importChecked(Err, D->getLocation());
+ auto ToType = importChecked(Err, D->getType());
+ if (Err)
+ return std::move(Err);
// Create the imported parameter.
ImplicitParamDecl *ToParm = nullptr;
@@ -3966,18 +4028,14 @@ ExpectedDecl ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) {
// into the function declaration's context afterward.
DeclContext *DC = Importer.getToContext().getTranslationUnitDecl();
- DeclarationName ToDeclName;
- SourceLocation ToLocation, ToInnerLocStart;
- QualType ToType;
- TypeSourceInfo *ToTypeSourceInfo;
- if (auto Imp = importSeq(
- D->getDeclName(), D->getLocation(), D->getType(), D->getInnerLocStart(),
- D->getTypeSourceInfo()))
- std::tie(
- ToDeclName, ToLocation, ToType, ToInnerLocStart,
- ToTypeSourceInfo) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToDeclName = importChecked(Err, D->getDeclName());
+ auto ToLocation = importChecked(Err, D->getLocation());
+ auto ToInnerLocStart = importChecked(Err, D->getInnerLocStart());
+ auto ToType = importChecked(Err, D->getType());
+ auto ToTypeSourceInfo = importChecked(Err, D->getTypeSourceInfo());
+ if (Err)
+ return std::move(Err);
ParmVarDecl *ToParm;
if (GetImportedOrCreateDecl(ToParm, D, Importer.getToContext(), DC,
@@ -4080,14 +4138,13 @@ ExpectedDecl ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
}
}
- SourceLocation ToEndLoc;
- QualType ToReturnType;
- TypeSourceInfo *ToReturnTypeSourceInfo;
- if (auto Imp = importSeq(
- D->getEndLoc(), D->getReturnType(), D->getReturnTypeSourceInfo()))
- std::tie(ToEndLoc, ToReturnType, ToReturnTypeSourceInfo) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToEndLoc = importChecked(Err, D->getEndLoc());
+ auto ToReturnType = importChecked(Err, D->getReturnType());
+ auto ToReturnTypeSourceInfo =
+ importChecked(Err, D->getReturnTypeSourceInfo());
+ if (Err)
+ return std::move(Err);
ObjCMethodDecl *ToMethod;
if (GetImportedOrCreateDecl(
@@ -4148,14 +4205,13 @@ ExpectedDecl ASTNodeImporter::VisitObjCTypeParamDecl(ObjCTypeParamDecl *D) {
if (ToD)
return ToD;
- SourceLocation ToVarianceLoc, ToLocation, ToColonLoc;
- TypeSourceInfo *ToTypeSourceInfo;
- if (auto Imp = importSeq(
- D->getVarianceLoc(), D->getLocation(), D->getColonLoc(),
- D->getTypeSourceInfo()))
- std::tie(ToVarianceLoc, ToLocation, ToColonLoc, ToTypeSourceInfo) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToVarianceLoc = importChecked(Err, D->getVarianceLoc());
+ auto ToLocation = importChecked(Err, D->getLocation());
+ auto ToColonLoc = importChecked(Err, D->getColonLoc());
+ auto ToTypeSourceInfo = importChecked(Err, D->getTypeSourceInfo());
+ if (Err)
+ return std::move(Err);
ObjCTypeParamDecl *Result;
if (GetImportedOrCreateDecl(
@@ -4189,16 +4245,14 @@ ExpectedDecl ASTNodeImporter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
= ToInterface->FindCategoryDeclaration(Name.getAsIdentifierInfo());
ObjCCategoryDecl *ToCategory = MergeWithCategory;
if (!ToCategory) {
- SourceLocation ToAtStartLoc, ToCategoryNameLoc;
- SourceLocation ToIvarLBraceLoc, ToIvarRBraceLoc;
- if (auto Imp = importSeq(
- D->getAtStartLoc(), D->getCategoryNameLoc(),
- D->getIvarLBraceLoc(), D->getIvarRBraceLoc()))
- std::tie(
- ToAtStartLoc, ToCategoryNameLoc,
- ToIvarLBraceLoc, ToIvarRBraceLoc) = *Imp;
- else
- return Imp.takeError();
+
+ Error Err = Error::success();
+ auto ToAtStartLoc = importChecked(Err, D->getAtStartLoc());
+ auto ToCategoryNameLoc = importChecked(Err, D->getCategoryNameLoc());
+ auto ToIvarLBraceLoc = importChecked(Err, D->getIvarLBraceLoc());
+ auto ToIvarRBraceLoc = importChecked(Err, D->getIvarRBraceLoc());
+ if (Err)
+ return std::move(Err);
if (GetImportedOrCreateDecl(ToCategory, D, Importer.getToContext(), DC,
ToAtStartLoc, Loc,
@@ -4407,13 +4461,12 @@ ExpectedDecl ASTNodeImporter::VisitUsingDecl(UsingDecl *D) {
if (ToD)
return ToD;
- SourceLocation ToLoc, ToUsingLoc;
- NestedNameSpecifierLoc ToQualifierLoc;
- if (auto Imp = importSeq(
- D->getNameInfo().getLoc(), D->getUsingLoc(), D->getQualifierLoc()))
- std::tie(ToLoc, ToUsingLoc, ToQualifierLoc) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToLoc = importChecked(Err, D->getNameInfo().getLoc());
+ auto ToUsingLoc = importChecked(Err, D->getUsingLoc());
+ auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc());
+ if (Err)
+ return std::move(Err);
DeclarationNameInfo NameInfo(Name, ToLoc);
if (Error Err = ImportDeclarationNameLoc(D->getNameInfo(), NameInfo))
@@ -4504,18 +4557,15 @@ ExpectedDecl ASTNodeImporter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
if (!ToComAncestorOrErr)
return ToComAncestorOrErr.takeError();
- NamespaceDecl *ToNominatedNamespace;
- SourceLocation ToUsingLoc, ToNamespaceKeyLocation, ToIdentLocation;
- NestedNameSpecifierLoc ToQualifierLoc;
- if (auto Imp = importSeq(
- D->getNominatedNamespace(), D->getUsingLoc(),
- D->getNamespaceKeyLocation(), D->getQualifierLoc(),
- D->getIdentLocation()))
- std::tie(
- ToNominatedNamespace, ToUsingLoc, ToNamespaceKeyLocation,
- ToQualifierLoc, ToIdentLocation) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToNominatedNamespace = importChecked(Err, D->getNominatedNamespace());
+ auto ToUsingLoc = importChecked(Err, D->getUsingLoc());
+ auto ToNamespaceKeyLocation =
+ importChecked(Err, D->getNamespaceKeyLocation());
+ auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc());
+ auto ToIdentLocation = importChecked(Err, D->getIdentLocation());
+ if (Err)
+ return std::move(Err);
UsingDirectiveDecl *ToUsingDir;
if (GetImportedOrCreateDecl(ToUsingDir, D, Importer.getToContext(), DC,
@@ -4543,14 +4593,13 @@ ExpectedDecl ASTNodeImporter::VisitUnresolvedUsingValueDecl(
if (ToD)
return ToD;
- SourceLocation ToLoc, ToUsingLoc, ToEllipsisLoc;
- NestedNameSpecifierLoc ToQualifierLoc;
- if (auto Imp = importSeq(
- D->getNameInfo().getLoc(), D->getUsingLoc(), D->getQualifierLoc(),
- D->getEllipsisLoc()))
- std::tie(ToLoc, ToUsingLoc, ToQualifierLoc, ToEllipsisLoc) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToLoc = importChecked(Err, D->getNameInfo().getLoc());
+ auto ToUsingLoc = importChecked(Err, D->getUsingLoc());
+ auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc());
+ auto ToEllipsisLoc = importChecked(Err, D->getEllipsisLoc());
+ if (Err)
+ return std::move(Err);
DeclarationNameInfo NameInfo(Name, ToLoc);
if (Error Err = ImportDeclarationNameLoc(D->getNameInfo(), NameInfo))
@@ -4580,14 +4629,13 @@ ExpectedDecl ASTNodeImporter::VisitUnresolvedUsingTypenameDecl(
if (ToD)
return ToD;
- SourceLocation ToUsingLoc, ToTypenameLoc, ToEllipsisLoc;
- NestedNameSpecifierLoc ToQualifierLoc;
- if (auto Imp = importSeq(
- D->getUsingLoc(), D->getTypenameLoc(), D->getQualifierLoc(),
- D->getEllipsisLoc()))
- std::tie(ToUsingLoc, ToTypenameLoc, ToQualifierLoc, ToEllipsisLoc) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToUsingLoc = importChecked(Err, D->getUsingLoc());
+ auto ToTypenameLoc = importChecked(Err, D->getTypenameLoc());
+ auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc());
+ auto ToEllipsisLoc = importChecked(Err, D->getEllipsisLoc());
+ if (Err)
+ return std::move(Err);
UnresolvedUsingTypenameDecl *ToUsing;
if (GetImportedOrCreateDecl(ToUsing, D, Importer.getToContext(), DC,
@@ -4822,12 +4870,12 @@ ASTNodeImporter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
if (Error Err = ImportDeclContext(D, DC, LexicalDC))
return std::move(Err);
- SourceLocation ToLocation, ToAtStartLoc, ToCategoryNameLoc;
- if (auto Imp = importSeq(
- D->getLocation(), D->getAtStartLoc(), D->getCategoryNameLoc()))
- std::tie(ToLocation, ToAtStartLoc, ToCategoryNameLoc) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToLocation = importChecked(Err, D->getLocation());
+ auto ToAtStartLoc = importChecked(Err, D->getAtStartLoc());
+ auto ToCategoryNameLoc = importChecked(Err, D->getCategoryNameLoc());
+ if (Err)
+ return std::move(Err);
if (GetImportedOrCreateDecl(
ToImpl, D, Importer.getToContext(), DC,
@@ -4867,16 +4915,14 @@ ASTNodeImporter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
if (Error Err = ImportDeclContext(D, DC, LexicalDC))
return std::move(Err);
- SourceLocation ToLocation, ToAtStartLoc, ToSuperClassLoc;
- SourceLocation ToIvarLBraceLoc, ToIvarRBraceLoc;
- if (auto Imp = importSeq(
- D->getLocation(), D->getAtStartLoc(), D->getSuperClassLoc(),
- D->getIvarLBraceLoc(), D->getIvarRBraceLoc()))
- std::tie(
- ToLocation, ToAtStartLoc, ToSuperClassLoc,
- ToIvarLBraceLoc, ToIvarRBraceLoc) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToLocation = importChecked(Err, D->getLocation());
+ auto ToAtStartLoc = importChecked(Err, D->getAtStartLoc());
+ auto ToSuperClassLoc = importChecked(Err, D->getSuperClassLoc());
+ auto ToIvarLBraceLoc = importChecked(Err, D->getIvarLBraceLoc());
+ auto ToIvarRBraceLoc = importChecked(Err, D->getIvarRBraceLoc());
+ if (Err)
+ return std::move(Err);
if (GetImportedOrCreateDecl(Impl, D, Importer.getToContext(),
DC, Iface, Super,
@@ -4966,14 +5012,13 @@ ExpectedDecl ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
}
}
- QualType ToType;
- TypeSourceInfo *ToTypeSourceInfo;
- SourceLocation ToAtLoc, ToLParenLoc;
- if (auto Imp = importSeq(
- D->getType(), D->getTypeSourceInfo(), D->getAtLoc(), D->getLParenLoc()))
- std::tie(ToType, ToTypeSourceInfo, ToAtLoc, ToLParenLoc) = *Imp;
- else
- return Imp.takeError();
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, D->getType());
+ auto ToTypeSourceInfo = importChecked(Err, D->getTypeSourceInfo());
+ auto ToAtLoc = importChecked(Err, D->getAtLoc());
+ auto ToLParenLoc = importChecked(Err, D->getLParenLoc());
+ if (Err)
+ return std::move(Err);
// Create the new property.
ObjCPropertyDecl *ToProperty;
@@ -4984,22 +5029,15 @@ ExpectedDecl ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
ToTypeSourceInfo, D->getPropertyImplementation()))
return ToProperty;
- Selector ToGetterName, ToSetterName;
- SourceLocation ToGetterNameLoc, ToSetterNameLoc;
- ObjCMethodDecl *ToGetterMethodDecl, *ToSetterMethodDecl;
- ObjCIvarDecl *ToPropertyIvarDecl;
- if (auto Imp = importSeq(
- D->getGetterName(), D->getSetterName(),
- D->getGetterNameLoc(), D->getSetterNameLoc(),
- D->getGetterMethodDecl(), D->getSetterMethodDecl(),
- D->getPropertyIvarDecl()))
- std::tie(
- ToGetterName, ToSetterName,
- ToGetterNameLoc, ToSetterNameLoc,
- ToGetterMethodDecl, ToSetterMethodDecl,
- ToPropertyIvarDecl) = *Imp;
- else
- return Imp.takeError();
+ auto ToGetterName = importChecked(Err, D->getGetterName());
+ auto ToSetterName = importChecked(Err, D->getSetterName());
+ auto ToGetterNameLoc = importChecked(Err, D->getGetterNameLoc());
+ auto ToSetterNameLoc = importChecked(Err, D->getSetterNameLoc());
+ auto ToGetterMethodDecl = importChecked(Err, D->getGetterMethodDecl());
+ auto ToSetterMethodDecl = importChecked(Err, D->getSetterMethodDecl());
+ auto ToPropertyIvarDecl = importChecked(Err, D->getPropertyIvarDecl());
+ if (Err)
+ return std::move(Err);
ToProperty->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToProperty);
@@ -5036,12 +5074,14 @@ ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
= InImpl->FindPropertyImplDecl(Property->getIdentifier(),
Property->getQueryKind());
if (!ToImpl) {
- SourceLocation ToBeginLoc, ToLocation, ToPropertyIvarDeclLoc;
- if (auto Imp = importSeq(
- D->getBeginLoc(), D->getLocation(), D->getPropertyIvarDeclLoc()))
- std::tie(ToBeginLoc, ToLocation, ToPropertyIvarDeclLoc) = *Imp;
- else
- return Imp.takeError();
+
+ Error Err = Error::success();
+ auto ToBeginLoc = importChecked(Err, D->getBeginLoc());
+ auto ToLocation = importChecked(Err, D->getLocation());
+ auto ToPropertyIvarDeclLoc =
+ importChecked(Err, D->getPropertyIvarDeclLoc());
+ if (Err)
+ return std::move(Err);
if (GetImportedOrCreateDecl(ToImpl, D, Importer.getToContext(), DC,
ToBeginLoc,
@@ -5119,20 +5159,16 @@ ASTNodeImporter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
// Import the type-constraint
if (const TypeConstraint *TC = D->getTypeConstraint()) {
- NestedNameSpecifierLoc ToNNS;
- DeclarationName ToName;
- SourceLocation ToNameLoc;
- NamedDecl *ToFoundDecl;
- ConceptDecl *ToNamedConcept;
- Expr *ToIDC;
- if (auto Imp = importSeq(TC->getNestedNameSpecifierLoc(),
- TC->getConceptNameInfo().getName(), TC->getConceptNameInfo().getLoc(),
- TC->getFoundDecl(), TC->getNamedConcept(),
- TC->getImmediatelyDeclaredConstraint()))
- std::tie(ToNNS, ToName, ToNameLoc, ToFoundDecl, ToNamedConcept,
- ToIDC) = *Imp;
- else
- return Imp.takeError();
+
+ Error Err = Error::success();
+ auto ToNNS = importChecked(Err, TC->getNestedNameSpecifierLoc());
+ auto ToName = importChecked(Err, TC->getConceptNameInfo().getName());
+ auto ToNameLoc = importChecked(Err, TC->getConceptNameInfo().getLoc());
+ auto ToFoundDecl = importChecked(Err, TC->getFoundDecl());
+ auto ToNamedConcept = importChecked(Err, TC->getNamedConcept());
+ auto ToIDC = importChecked(Err, TC->getImmediatelyDeclaredConstraint());
+ if (Err)
+ return std::move(Err);
TemplateArgumentListInfo ToTAInfo;
const auto *ASTTemplateArgs = TC->getTemplateArgsAsWritten();
@@ -5154,18 +5190,15 @@ ASTNodeImporter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
ExpectedDecl
ASTNodeImporter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
- DeclarationName ToDeclName;
- SourceLocation ToLocation, ToInnerLocStart;
- QualType ToType;
- TypeSourceInfo *ToTypeSourceInfo;
- if (auto Imp = importSeq(
- D->getDeclName(), D->getLocation(), D->getType(), D->getTypeSourceInfo(),
- D->getInnerLocStart()))
- std::tie(
- ToDeclName, ToLocation, ToType, ToTypeSourceInfo,
- ToInnerLocStart) = *Imp;
- else
- return Imp.takeError();
+
+ Error Err = Error::success();
+ auto ToDeclName = importChecked(Err, D->getDeclName());
+ auto ToLocation = importChecked(Err, D->getLocation());
+ auto ToType = importChecked(Err, D->getType());
+ auto ToTypeSourceInfo = importChecked(Err, D->getTypeSourceInfo());
+ auto ToInnerLocStart = importChecked(Err, D->getInnerLocStart());
+ if (Err)
+ return std::move(Err);
// FIXME: Import default argument.
@@ -5245,7 +5278,7 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
Decl *Found = FoundDecl;
auto *FoundTemplate = dyn_cast<ClassTemplateDecl>(Found);
if (FoundTemplate) {
- if (!hasSameVisibilityContext(FoundTemplate, D))
+ if (!hasSameVisibilityContextAndLinkage(FoundTemplate, D))
continue;
if (IsStructuralMatch(D, FoundTemplate)) {
@@ -5508,20 +5541,6 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl(
}
ExpectedDecl ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
- // If this variable has a definition in the translation unit we're coming
- // from,
- // but this particular declaration is not that definition, import the
- // definition and map to that.
- auto *Definition =
- cast_or_null<VarDecl>(D->getTemplatedDecl()->getDefinition());
- if (Definition && Definition != D->getTemplatedDecl()) {
- if (ExpectedDecl ImportedDefOrErr = import(
- Definition->getDescribedVarTemplate()))
- return Importer.MapImported(D, *ImportedDefOrErr);
- else
- return ImportedDefOrErr.takeError();
- }
-
// Import the major distinguishing characteristics of this variable template.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
@@ -5535,19 +5554,30 @@ ExpectedDecl ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
// We may already have a template of the same name; try to find and match it.
assert(!DC->isFunctionOrMethod() &&
"Variable templates cannot be declared at function scope");
+
SmallVector<NamedDecl *, 4> ConflictingDecls;
auto FoundDecls = Importer.findDeclsInToCtx(DC, Name);
+ VarTemplateDecl *FoundByLookup = nullptr;
for (auto *FoundDecl : FoundDecls) {
if (!FoundDecl->isInIdentifierNamespace(Decl::IDNS_Ordinary))
continue;
- Decl *Found = FoundDecl;
- if (VarTemplateDecl *FoundTemplate = dyn_cast<VarTemplateDecl>(Found)) {
+ if (VarTemplateDecl *FoundTemplate = dyn_cast<VarTemplateDecl>(FoundDecl)) {
+ // Use the templated decl, some linkage flags are set only there.
+ if (!hasSameVisibilityContextAndLinkage(FoundTemplate->getTemplatedDecl(),
+ D->getTemplatedDecl()))
+ continue;
if (IsStructuralMatch(D, FoundTemplate)) {
- // The variable templates structurally match; call it the same template.
- Importer.MapImported(D->getTemplatedDecl(),
- FoundTemplate->getTemplatedDecl());
- return Importer.MapImported(D, FoundTemplate);
+ // The Decl in the "From" context has a definition, but in the
+ // "To" context we already have a definition.
+ VarTemplateDecl *FoundDef = getTemplateDefinition(FoundTemplate);
+ if (D->isThisDeclarationADefinition() && FoundDef)
+ // FIXME Check for ODR error if the two definitions have
+ // different initializers?
+ return Importer.MapImported(D, FoundDef);
+
+ FoundByLookup = FoundTemplate;
+ break;
}
ConflictingDecls.push_back(FoundDecl);
}
@@ -5592,6 +5622,18 @@ ExpectedDecl ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
ToVarTD->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToVarTD);
+ if (FoundByLookup) {
+ auto *Recent =
+ const_cast<VarTemplateDecl *>(FoundByLookup->getMostRecentDecl());
+ if (!ToTemplated->getPreviousDecl()) {
+ auto *PrevTemplated =
+ FoundByLookup->getTemplatedDecl()->getMostRecentDecl();
+ if (ToTemplated != PrevTemplated)
+ ToTemplated->setPreviousDecl(PrevTemplated);
+ }
+ ToVarTD->setPreviousDecl(Recent);
+ }
+
if (DTemplated->isThisDeclarationADefinition() &&
!ToTemplated->isThisDeclarationADefinition()) {
// FIXME: Import definition!
@@ -5773,7 +5815,7 @@ ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
continue;
if (auto *FoundTemplate = dyn_cast<FunctionTemplateDecl>(FoundDecl)) {
- if (!hasSameVisibilityContext(FoundTemplate, D))
+ if (!hasSameVisibilityContextAndLinkage(FoundTemplate, D))
continue;
if (IsStructuralMatch(D, FoundTemplate)) {
FunctionTemplateDecl *TemplateWithDef =
@@ -5919,14 +5961,13 @@ ExpectedStmt ASTNodeImporter::VisitGCCAsmStmt(GCCAsmStmt *S) {
}
ExpectedStmt ASTNodeImporter::VisitDeclStmt(DeclStmt *S) {
- auto Imp = importSeq(S->getDeclGroup(), S->getBeginLoc(), S->getEndLoc());
- if (!Imp)
- return Imp.takeError();
-
- DeclGroupRef ToDG;
- SourceLocation ToBeginLoc, ToEndLoc;
- std::tie(ToDG, ToBeginLoc, ToEndLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToDG = importChecked(Err, S->getDeclGroup());
+ auto ToBeginLoc = importChecked(Err, S->getBeginLoc());
+ auto ToEndLoc = importChecked(Err, S->getEndLoc());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) DeclStmt(ToDG, ToBeginLoc, ToEndLoc);
}
@@ -5958,17 +5999,16 @@ ExpectedStmt ASTNodeImporter::VisitCompoundStmt(CompoundStmt *S) {
}
ExpectedStmt ASTNodeImporter::VisitCaseStmt(CaseStmt *S) {
- auto Imp = importSeq(
- S->getLHS(), S->getRHS(), S->getSubStmt(), S->getCaseLoc(),
- S->getEllipsisLoc(), S->getColonLoc());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToLHS, *ToRHS;
- Stmt *ToSubStmt;
- SourceLocation ToCaseLoc, ToEllipsisLoc, ToColonLoc;
- std::tie(ToLHS, ToRHS, ToSubStmt, ToCaseLoc, ToEllipsisLoc, ToColonLoc) =
- *Imp;
+
+ Error Err = Error::success();
+ auto ToLHS = importChecked(Err, S->getLHS());
+ auto ToRHS = importChecked(Err, S->getRHS());
+ auto ToSubStmt = importChecked(Err, S->getSubStmt());
+ auto ToCaseLoc = importChecked(Err, S->getCaseLoc());
+ auto ToEllipsisLoc = importChecked(Err, S->getEllipsisLoc());
+ auto ToColonLoc = importChecked(Err, S->getColonLoc());
+ if (Err)
+ return std::move(Err);
auto *ToStmt = CaseStmt::Create(Importer.getToContext(), ToLHS, ToRHS,
ToCaseLoc, ToEllipsisLoc, ToColonLoc);
@@ -5978,27 +6018,26 @@ ExpectedStmt ASTNodeImporter::VisitCaseStmt(CaseStmt *S) {
}
ExpectedStmt ASTNodeImporter::VisitDefaultStmt(DefaultStmt *S) {
- auto Imp = importSeq(S->getDefaultLoc(), S->getColonLoc(), S->getSubStmt());
- if (!Imp)
- return Imp.takeError();
- SourceLocation ToDefaultLoc, ToColonLoc;
- Stmt *ToSubStmt;
- std::tie(ToDefaultLoc, ToColonLoc, ToSubStmt) = *Imp;
+ Error Err = Error::success();
+ auto ToDefaultLoc = importChecked(Err, S->getDefaultLoc());
+ auto ToColonLoc = importChecked(Err, S->getColonLoc());
+ auto ToSubStmt = importChecked(Err, S->getSubStmt());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) DefaultStmt(
ToDefaultLoc, ToColonLoc, ToSubStmt);
}
ExpectedStmt ASTNodeImporter::VisitLabelStmt(LabelStmt *S) {
- auto Imp = importSeq(S->getIdentLoc(), S->getDecl(), S->getSubStmt());
- if (!Imp)
- return Imp.takeError();
- SourceLocation ToIdentLoc;
- LabelDecl *ToLabelDecl;
- Stmt *ToSubStmt;
- std::tie(ToIdentLoc, ToLabelDecl, ToSubStmt) = *Imp;
+ Error Err = Error::success();
+ auto ToIdentLoc = importChecked(Err, S->getIdentLoc());
+ auto ToLabelDecl = importChecked(Err, S->getDecl());
+ auto ToSubStmt = importChecked(Err, S->getSubStmt());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) LabelStmt(
ToIdentLoc, ToLabelDecl, ToSubStmt);
@@ -6021,19 +6060,17 @@ ExpectedStmt ASTNodeImporter::VisitAttributedStmt(AttributedStmt *S) {
}
ExpectedStmt ASTNodeImporter::VisitIfStmt(IfStmt *S) {
- auto Imp = importSeq(
- S->getIfLoc(), S->getInit(), S->getConditionVariable(), S->getCond(),
- S->getThen(), S->getElseLoc(), S->getElse());
- if (!Imp)
- return Imp.takeError();
-
- SourceLocation ToIfLoc, ToElseLoc;
- Stmt *ToInit, *ToThen, *ToElse;
- VarDecl *ToConditionVariable;
- Expr *ToCond;
- std::tie(
- ToIfLoc, ToInit, ToConditionVariable, ToCond, ToThen, ToElseLoc, ToElse) =
- *Imp;
+
+ Error Err = Error::success();
+ auto ToIfLoc = importChecked(Err, S->getIfLoc());
+ auto ToInit = importChecked(Err, S->getInit());
+ auto ToConditionVariable = importChecked(Err, S->getConditionVariable());
+ auto ToCond = importChecked(Err, S->getCond());
+ auto ToThen = importChecked(Err, S->getThen());
+ auto ToElseLoc = importChecked(Err, S->getElseLoc());
+ auto ToElse = importChecked(Err, S->getElse());
+ if (Err)
+ return std::move(Err);
return IfStmt::Create(Importer.getToContext(), ToIfLoc, S->isConstexpr(),
ToInit, ToConditionVariable, ToCond, ToThen, ToElseLoc,
@@ -6041,17 +6078,15 @@ ExpectedStmt ASTNodeImporter::VisitIfStmt(IfStmt *S) {
}
ExpectedStmt ASTNodeImporter::VisitSwitchStmt(SwitchStmt *S) {
- auto Imp = importSeq(
- S->getInit(), S->getConditionVariable(), S->getCond(),
- S->getBody(), S->getSwitchLoc());
- if (!Imp)
- return Imp.takeError();
-
- Stmt *ToInit, *ToBody;
- VarDecl *ToConditionVariable;
- Expr *ToCond;
- SourceLocation ToSwitchLoc;
- std::tie(ToInit, ToConditionVariable, ToCond, ToBody, ToSwitchLoc) = *Imp;
+
+ Error Err = Error::success();
+ auto ToInit = importChecked(Err, S->getInit());
+ auto ToConditionVariable = importChecked(Err, S->getConditionVariable());
+ auto ToCond = importChecked(Err, S->getCond());
+ auto ToBody = importChecked(Err, S->getBody());
+ auto ToSwitchLoc = importChecked(Err, S->getSwitchLoc());
+ if (Err)
+ return std::move(Err);
auto *ToStmt = SwitchStmt::Create(Importer.getToContext(), ToInit,
ToConditionVariable, ToCond);
@@ -6076,52 +6111,49 @@ ExpectedStmt ASTNodeImporter::VisitSwitchStmt(SwitchStmt *S) {
}
ExpectedStmt ASTNodeImporter::VisitWhileStmt(WhileStmt *S) {
- auto Imp = importSeq(
- S->getConditionVariable(), S->getCond(), S->getBody(), S->getWhileLoc());
- if (!Imp)
- return Imp.takeError();
- VarDecl *ToConditionVariable;
- Expr *ToCond;
- Stmt *ToBody;
- SourceLocation ToWhileLoc;
- std::tie(ToConditionVariable, ToCond, ToBody, ToWhileLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToConditionVariable = importChecked(Err, S->getConditionVariable());
+ auto ToCond = importChecked(Err, S->getCond());
+ auto ToBody = importChecked(Err, S->getBody());
+ auto ToWhileLoc = importChecked(Err, S->getWhileLoc());
+ auto ToLParenLoc = importChecked(Err, S->getLParenLoc());
+ auto ToRParenLoc = importChecked(Err, S->getRParenLoc());
+ if (Err)
+ return std::move(Err);
return WhileStmt::Create(Importer.getToContext(), ToConditionVariable, ToCond,
- ToBody, ToWhileLoc);
+ ToBody, ToWhileLoc, ToLParenLoc, ToRParenLoc);
}
ExpectedStmt ASTNodeImporter::VisitDoStmt(DoStmt *S) {
- auto Imp = importSeq(
- S->getBody(), S->getCond(), S->getDoLoc(), S->getWhileLoc(),
- S->getRParenLoc());
- if (!Imp)
- return Imp.takeError();
- Stmt *ToBody;
- Expr *ToCond;
- SourceLocation ToDoLoc, ToWhileLoc, ToRParenLoc;
- std::tie(ToBody, ToCond, ToDoLoc, ToWhileLoc, ToRParenLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToBody = importChecked(Err, S->getBody());
+ auto ToCond = importChecked(Err, S->getCond());
+ auto ToDoLoc = importChecked(Err, S->getDoLoc());
+ auto ToWhileLoc = importChecked(Err, S->getWhileLoc());
+ auto ToRParenLoc = importChecked(Err, S->getRParenLoc());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) DoStmt(
ToBody, ToCond, ToDoLoc, ToWhileLoc, ToRParenLoc);
}
ExpectedStmt ASTNodeImporter::VisitForStmt(ForStmt *S) {
- auto Imp = importSeq(
- S->getInit(), S->getCond(), S->getConditionVariable(), S->getInc(),
- S->getBody(), S->getForLoc(), S->getLParenLoc(), S->getRParenLoc());
- if (!Imp)
- return Imp.takeError();
-
- Stmt *ToInit;
- Expr *ToCond, *ToInc;
- VarDecl *ToConditionVariable;
- Stmt *ToBody;
- SourceLocation ToForLoc, ToLParenLoc, ToRParenLoc;
- std::tie(
- ToInit, ToCond, ToConditionVariable, ToInc, ToBody, ToForLoc,
- ToLParenLoc, ToRParenLoc) = *Imp;
+
+ Error Err = Error::success();
+ auto ToInit = importChecked(Err, S->getInit());
+ auto ToCond = importChecked(Err, S->getCond());
+ auto ToConditionVariable = importChecked(Err, S->getConditionVariable());
+ auto ToInc = importChecked(Err, S->getInc());
+ auto ToBody = importChecked(Err, S->getBody());
+ auto ToForLoc = importChecked(Err, S->getForLoc());
+ auto ToLParenLoc = importChecked(Err, S->getLParenLoc());
+ auto ToRParenLoc = importChecked(Err, S->getRParenLoc());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) ForStmt(
Importer.getToContext(),
@@ -6130,26 +6162,26 @@ ExpectedStmt ASTNodeImporter::VisitForStmt(ForStmt *S) {
}
ExpectedStmt ASTNodeImporter::VisitGotoStmt(GotoStmt *S) {
- auto Imp = importSeq(S->getLabel(), S->getGotoLoc(), S->getLabelLoc());
- if (!Imp)
- return Imp.takeError();
- LabelDecl *ToLabel;
- SourceLocation ToGotoLoc, ToLabelLoc;
- std::tie(ToLabel, ToGotoLoc, ToLabelLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToLabel = importChecked(Err, S->getLabel());
+ auto ToGotoLoc = importChecked(Err, S->getGotoLoc());
+ auto ToLabelLoc = importChecked(Err, S->getLabelLoc());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) GotoStmt(
ToLabel, ToGotoLoc, ToLabelLoc);
}
ExpectedStmt ASTNodeImporter::VisitIndirectGotoStmt(IndirectGotoStmt *S) {
- auto Imp = importSeq(S->getGotoLoc(), S->getStarLoc(), S->getTarget());
- if (!Imp)
- return Imp.takeError();
- SourceLocation ToGotoLoc, ToStarLoc;
- Expr *ToTarget;
- std::tie(ToGotoLoc, ToStarLoc, ToTarget) = *Imp;
+ Error Err = Error::success();
+ auto ToGotoLoc = importChecked(Err, S->getGotoLoc());
+ auto ToStarLoc = importChecked(Err, S->getStarLoc());
+ auto ToTarget = importChecked(Err, S->getTarget());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) IndirectGotoStmt(
ToGotoLoc, ToStarLoc, ToTarget);
@@ -6170,30 +6202,26 @@ ExpectedStmt ASTNodeImporter::VisitBreakStmt(BreakStmt *S) {
}
ExpectedStmt ASTNodeImporter::VisitReturnStmt(ReturnStmt *S) {
- auto Imp = importSeq(
- S->getReturnLoc(), S->getRetValue(), S->getNRVOCandidate());
- if (!Imp)
- return Imp.takeError();
- SourceLocation ToReturnLoc;
- Expr *ToRetValue;
- const VarDecl *ToNRVOCandidate;
- std::tie(ToReturnLoc, ToRetValue, ToNRVOCandidate) = *Imp;
+ Error Err = Error::success();
+ auto ToReturnLoc = importChecked(Err, S->getReturnLoc());
+ auto ToRetValue = importChecked(Err, S->getRetValue());
+ auto ToNRVOCandidate = importChecked(Err, S->getNRVOCandidate());
+ if (Err)
+ return std::move(Err);
return ReturnStmt::Create(Importer.getToContext(), ToReturnLoc, ToRetValue,
ToNRVOCandidate);
}
ExpectedStmt ASTNodeImporter::VisitCXXCatchStmt(CXXCatchStmt *S) {
- auto Imp = importSeq(
- S->getCatchLoc(), S->getExceptionDecl(), S->getHandlerBlock());
- if (!Imp)
- return Imp.takeError();
- SourceLocation ToCatchLoc;
- VarDecl *ToExceptionDecl;
- Stmt *ToHandlerBlock;
- std::tie(ToCatchLoc, ToExceptionDecl, ToHandlerBlock) = *Imp;
+ Error Err = Error::success();
+ auto ToCatchLoc = importChecked(Err, S->getCatchLoc());
+ auto ToExceptionDecl = importChecked(Err, S->getExceptionDecl());
+ auto ToHandlerBlock = importChecked(Err, S->getHandlerBlock());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) CXXCatchStmt (
ToCatchLoc, ToExceptionDecl, ToHandlerBlock);
@@ -6222,24 +6250,22 @@ ExpectedStmt ASTNodeImporter::VisitCXXTryStmt(CXXTryStmt *S) {
}
ExpectedStmt ASTNodeImporter::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
- auto Imp1 = importSeq(
- S->getInit(), S->getRangeStmt(), S->getBeginStmt(), S->getEndStmt(),
- S->getCond(), S->getInc(), S->getLoopVarStmt(), S->getBody());
- if (!Imp1)
- return Imp1.takeError();
- auto Imp2 = importSeq(
- S->getForLoc(), S->getCoawaitLoc(), S->getColonLoc(), S->getRParenLoc());
- if (!Imp2)
- return Imp2.takeError();
-
- DeclStmt *ToRangeStmt, *ToBeginStmt, *ToEndStmt, *ToLoopVarStmt;
- Expr *ToCond, *ToInc;
- Stmt *ToInit, *ToBody;
- std::tie(
- ToInit, ToRangeStmt, ToBeginStmt, ToEndStmt, ToCond, ToInc, ToLoopVarStmt,
- ToBody) = *Imp1;
- SourceLocation ToForLoc, ToCoawaitLoc, ToColonLoc, ToRParenLoc;
- std::tie(ToForLoc, ToCoawaitLoc, ToColonLoc, ToRParenLoc) = *Imp2;
+
+ Error Err = Error::success();
+ auto ToInit = importChecked(Err, S->getInit());
+ auto ToRangeStmt = importChecked(Err, S->getRangeStmt());
+ auto ToBeginStmt = importChecked(Err, S->getBeginStmt());
+ auto ToEndStmt = importChecked(Err, S->getEndStmt());
+ auto ToCond = importChecked(Err, S->getCond());
+ auto ToInc = importChecked(Err, S->getInc());
+ auto ToLoopVarStmt = importChecked(Err, S->getLoopVarStmt());
+ auto ToBody = importChecked(Err, S->getBody());
+ auto ToForLoc = importChecked(Err, S->getForLoc());
+ auto ToCoawaitLoc = importChecked(Err, S->getCoawaitLoc());
+ auto ToColonLoc = importChecked(Err, S->getColonLoc());
+ auto ToRParenLoc = importChecked(Err, S->getRParenLoc());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) CXXForRangeStmt(
ToInit, ToRangeStmt, ToBeginStmt, ToEndStmt, ToCond, ToInc, ToLoopVarStmt,
@@ -6248,16 +6274,14 @@ ExpectedStmt ASTNodeImporter::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
ExpectedStmt
ASTNodeImporter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
- auto Imp = importSeq(
- S->getElement(), S->getCollection(), S->getBody(),
- S->getForLoc(), S->getRParenLoc());
- if (!Imp)
- return Imp.takeError();
-
- Stmt *ToElement, *ToBody;
- Expr *ToCollection;
- SourceLocation ToForLoc, ToRParenLoc;
- std::tie(ToElement, ToCollection, ToBody, ToForLoc, ToRParenLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToElement = importChecked(Err, S->getElement());
+ auto ToCollection = importChecked(Err, S->getCollection());
+ auto ToBody = importChecked(Err, S->getBody());
+ auto ToForLoc = importChecked(Err, S->getForLoc());
+ auto ToRParenLoc = importChecked(Err, S->getRParenLoc());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) ObjCForCollectionStmt(ToElement,
ToCollection,
@@ -6267,16 +6291,14 @@ ASTNodeImporter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
}
ExpectedStmt ASTNodeImporter::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
- auto Imp = importSeq(
- S->getAtCatchLoc(), S->getRParenLoc(), S->getCatchParamDecl(),
- S->getCatchBody());
- if (!Imp)
- return Imp.takeError();
- SourceLocation ToAtCatchLoc, ToRParenLoc;
- VarDecl *ToCatchParamDecl;
- Stmt *ToCatchBody;
- std::tie(ToAtCatchLoc, ToRParenLoc, ToCatchParamDecl, ToCatchBody) = *Imp;
+ Error Err = Error::success();
+ auto ToAtCatchLoc = importChecked(Err, S->getAtCatchLoc());
+ auto ToRParenLoc = importChecked(Err, S->getRParenLoc());
+ auto ToCatchParamDecl = importChecked(Err, S->getCatchParamDecl());
+ auto ToCatchBody = importChecked(Err, S->getCatchBody());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) ObjCAtCatchStmt (
ToAtCatchLoc, ToRParenLoc, ToCatchParamDecl, ToCatchBody);
@@ -6294,14 +6316,13 @@ ExpectedStmt ASTNodeImporter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
}
ExpectedStmt ASTNodeImporter::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
- auto Imp = importSeq(
- S->getAtTryLoc(), S->getTryBody(), S->getFinallyStmt());
- if (!Imp)
- return Imp.takeError();
- SourceLocation ToAtTryLoc;
- Stmt *ToTryBody, *ToFinallyStmt;
- std::tie(ToAtTryLoc, ToTryBody, ToFinallyStmt) = *Imp;
+ Error Err = Error::success();
+ auto ToAtTryLoc = importChecked(Err, S->getAtTryLoc());
+ auto ToTryBody = importChecked(Err, S->getTryBody());
+ auto ToFinallyStmt = importChecked(Err, S->getFinallyStmt());
+ if (Err)
+ return std::move(Err);
SmallVector<Stmt *, 1> ToCatchStmts(S->getNumCatchStmts());
for (unsigned CI = 0, CE = S->getNumCatchStmts(); CI != CE; ++CI) {
@@ -6318,17 +6339,15 @@ ExpectedStmt ASTNodeImporter::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
ToFinallyStmt);
}
-ExpectedStmt ASTNodeImporter::VisitObjCAtSynchronizedStmt
- (ObjCAtSynchronizedStmt *S) {
- auto Imp = importSeq(
- S->getAtSynchronizedLoc(), S->getSynchExpr(), S->getSynchBody());
- if (!Imp)
- return Imp.takeError();
+ExpectedStmt
+ASTNodeImporter::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
- SourceLocation ToAtSynchronizedLoc;
- Expr *ToSynchExpr;
- Stmt *ToSynchBody;
- std::tie(ToAtSynchronizedLoc, ToSynchExpr, ToSynchBody) = *Imp;
+ Error Err = Error::success();
+ auto ToAtSynchronizedLoc = importChecked(Err, S->getAtSynchronizedLoc());
+ auto ToSynchExpr = importChecked(Err, S->getSynchExpr());
+ auto ToSynchBody = importChecked(Err, S->getSynchBody());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) ObjCAtSynchronizedStmt(
ToAtSynchronizedLoc, ToSynchExpr, ToSynchBody);
@@ -6367,18 +6386,15 @@ ExpectedStmt ASTNodeImporter::VisitExpr(Expr *E) {
}
ExpectedStmt ASTNodeImporter::VisitVAArgExpr(VAArgExpr *E) {
- auto Imp = importSeq(
- E->getBuiltinLoc(), E->getSubExpr(), E->getWrittenTypeInfo(),
- E->getRParenLoc(), E->getType());
- if (!Imp)
- return Imp.takeError();
-
- SourceLocation ToBuiltinLoc, ToRParenLoc;
- Expr *ToSubExpr;
- TypeSourceInfo *ToWrittenTypeInfo;
- QualType ToType;
- std::tie(ToBuiltinLoc, ToSubExpr, ToWrittenTypeInfo, ToRParenLoc, ToType) =
- *Imp;
+
+ Error Err = Error::success();
+ auto ToBuiltinLoc = importChecked(Err, E->getBuiltinLoc());
+ auto ToSubExpr = importChecked(Err, E->getSubExpr());
+ auto ToWrittenTypeInfo = importChecked(Err, E->getWrittenTypeInfo());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ auto ToType = importChecked(Err, E->getType());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) VAArgExpr(
ToBuiltinLoc, ToSubExpr, ToWrittenTypeInfo, ToRParenLoc, ToType,
@@ -6386,31 +6402,27 @@ ExpectedStmt ASTNodeImporter::VisitVAArgExpr(VAArgExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitChooseExpr(ChooseExpr *E) {
- auto Imp = importSeq(E->getCond(), E->getLHS(), E->getRHS(),
- E->getBuiltinLoc(), E->getRParenLoc(), E->getType());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToCond;
- Expr *ToLHS;
- Expr *ToRHS;
- SourceLocation ToBuiltinLoc, ToRParenLoc;
- QualType ToType;
- std::tie(ToCond, ToLHS, ToRHS, ToBuiltinLoc, ToRParenLoc, ToType) = *Imp;
+
+ Error Err = Error::success();
+ auto ToCond = importChecked(Err, E->getCond());
+ auto ToLHS = importChecked(Err, E->getLHS());
+ auto ToRHS = importChecked(Err, E->getRHS());
+ auto ToBuiltinLoc = importChecked(Err, E->getBuiltinLoc());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ auto ToType = importChecked(Err, E->getType());
+ if (Err)
+ return std::move(Err);
ExprValueKind VK = E->getValueKind();
ExprObjectKind OK = E->getObjectKind();
- bool TypeDependent = ToCond->isTypeDependent();
- bool ValueDependent = ToCond->isValueDependent();
-
// The value of CondIsTrue only matters if the value is not
// condition-dependent.
bool CondIsTrue = !E->isConditionDependent() && E->isConditionTrue();
return new (Importer.getToContext())
ChooseExpr(ToBuiltinLoc, ToCond, ToLHS, ToRHS, ToType, VK, OK,
- ToRParenLoc, CondIsTrue, TypeDependent, ValueDependent);
+ ToRParenLoc, CondIsTrue);
}
ExpectedStmt ASTNodeImporter::VisitGNUNullExpr(GNUNullExpr *E) {
@@ -6426,33 +6438,28 @@ ExpectedStmt ASTNodeImporter::VisitGNUNullExpr(GNUNullExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitPredefinedExpr(PredefinedExpr *E) {
- auto Imp = importSeq(
- E->getBeginLoc(), E->getType(), E->getFunctionName());
- if (!Imp)
- return Imp.takeError();
- SourceLocation ToBeginLoc;
- QualType ToType;
- StringLiteral *ToFunctionName;
- std::tie(ToBeginLoc, ToType, ToFunctionName) = *Imp;
+ Error Err = Error::success();
+ auto ToBeginLoc = importChecked(Err, E->getBeginLoc());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToFunctionName = importChecked(Err, E->getFunctionName());
+ if (Err)
+ return std::move(Err);
return PredefinedExpr::Create(Importer.getToContext(), ToBeginLoc, ToType,
E->getIdentKind(), ToFunctionName);
}
ExpectedStmt ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
- auto Imp = importSeq(
- E->getQualifierLoc(), E->getTemplateKeywordLoc(), E->getDecl(),
- E->getLocation(), E->getType());
- if (!Imp)
- return Imp.takeError();
-
- NestedNameSpecifierLoc ToQualifierLoc;
- SourceLocation ToTemplateKeywordLoc, ToLocation;
- ValueDecl *ToDecl;
- QualType ToType;
- std::tie(ToQualifierLoc, ToTemplateKeywordLoc, ToDecl, ToLocation, ToType) =
- *Imp;
+
+ Error Err = Error::success();
+ auto ToQualifierLoc = importChecked(Err, E->getQualifierLoc());
+ auto ToTemplateKeywordLoc = importChecked(Err, E->getTemplateKeywordLoc());
+ auto ToDecl = importChecked(Err, E->getDecl());
+ auto ToLocation = importChecked(Err, E->getLocation());
+ auto ToType = importChecked(Err, E->getType());
+ if (Err)
+ return std::move(Err);
NamedDecl *ToFoundD = nullptr;
if (E->getDecl() != E->getFoundDecl()) {
@@ -6572,6 +6579,20 @@ ExpectedStmt ASTNodeImporter::VisitImaginaryLiteral(ImaginaryLiteral *E) {
*ToSubExprOrErr, *ToTypeOrErr);
}
+ExpectedStmt ASTNodeImporter::VisitFixedPointLiteral(FixedPointLiteral *E) {
+ auto ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
+
+ ExpectedSLoc ToLocationOrErr = import(E->getLocation());
+ if (!ToLocationOrErr)
+ return ToLocationOrErr.takeError();
+
+ return new (Importer.getToContext()) FixedPointLiteral(
+ Importer.getToContext(), E->getValue(), *ToTypeOrErr, *ToLocationOrErr,
+ Importer.getToContext().getFixedPointScale(*ToTypeOrErr));
+}
+
ExpectedStmt ASTNodeImporter::VisitCharacterLiteral(CharacterLiteral *E) {
ExpectedType ToTypeOrErr = import(E->getType());
if (!ToTypeOrErr)
@@ -6601,17 +6622,14 @@ ExpectedStmt ASTNodeImporter::VisitStringLiteral(StringLiteral *E) {
}
ExpectedStmt ASTNodeImporter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
- auto Imp = importSeq(
- E->getLParenLoc(), E->getTypeSourceInfo(), E->getType(),
- E->getInitializer());
- if (!Imp)
- return Imp.takeError();
-
- SourceLocation ToLParenLoc;
- TypeSourceInfo *ToTypeSourceInfo;
- QualType ToType;
- Expr *ToInitializer;
- std::tie(ToLParenLoc, ToTypeSourceInfo, ToType, ToInitializer) = *Imp;
+
+ Error Err = Error::success();
+ auto ToLParenLoc = importChecked(Err, E->getLParenLoc());
+ auto ToTypeSourceInfo = importChecked(Err, E->getTypeSourceInfo());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToInitializer = importChecked(Err, E->getInitializer());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) CompoundLiteralExpr(
ToLParenLoc, ToTypeSourceInfo, ToType, E->getValueKind(),
@@ -6619,14 +6637,13 @@ ExpectedStmt ASTNodeImporter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitAtomicExpr(AtomicExpr *E) {
- auto Imp = importSeq(
- E->getBuiltinLoc(), E->getType(), E->getRParenLoc());
- if (!Imp)
- return Imp.takeError();
- SourceLocation ToBuiltinLoc, ToRParenLoc;
- QualType ToType;
- std::tie(ToBuiltinLoc, ToType, ToRParenLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToBuiltinLoc = importChecked(Err, E->getBuiltinLoc());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ if (Err)
+ return std::move(Err);
SmallVector<Expr *, 6> ToExprs(E->getNumSubExprs());
if (Error Err = ImportArrayChecked(
@@ -6635,33 +6652,30 @@ ExpectedStmt ASTNodeImporter::VisitAtomicExpr(AtomicExpr *E) {
return std::move(Err);
return new (Importer.getToContext()) AtomicExpr(
+
ToBuiltinLoc, ToExprs, ToType, E->getOp(), ToRParenLoc);
}
ExpectedStmt ASTNodeImporter::VisitAddrLabelExpr(AddrLabelExpr *E) {
- auto Imp = importSeq(
- E->getAmpAmpLoc(), E->getLabelLoc(), E->getLabel(), E->getType());
- if (!Imp)
- return Imp.takeError();
-
- SourceLocation ToAmpAmpLoc, ToLabelLoc;
- LabelDecl *ToLabel;
- QualType ToType;
- std::tie(ToAmpAmpLoc, ToLabelLoc, ToLabel, ToType) = *Imp;
+ Error Err = Error::success();
+ auto ToAmpAmpLoc = importChecked(Err, E->getAmpAmpLoc());
+ auto ToLabelLoc = importChecked(Err, E->getLabelLoc());
+ auto ToLabel = importChecked(Err, E->getLabel());
+ auto ToType = importChecked(Err, E->getType());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) AddrLabelExpr(
ToAmpAmpLoc, ToLabelLoc, ToLabel, ToType);
}
-
ExpectedStmt ASTNodeImporter::VisitConstantExpr(ConstantExpr *E) {
- auto Imp = importSeq(E->getSubExpr());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToSubExpr;
- std::tie(ToSubExpr) = *Imp;
+ Error Err = Error::success();
+ auto ToSubExpr = importChecked(Err, E->getSubExpr());
+ if (Err)
+ return std::move(Err);
// TODO : Handle APValue::ValueKind that require importing.
+
APValue::ValueKind Kind = E->getResultAPValueKind();
if (Kind == APValue::Int || Kind == APValue::Float ||
Kind == APValue::FixedPoint || Kind == APValue::ComplexFloat ||
@@ -6670,15 +6684,13 @@ ExpectedStmt ASTNodeImporter::VisitConstantExpr(ConstantExpr *E) {
E->getAPValueResult());
return ConstantExpr::Create(Importer.getToContext(), ToSubExpr);
}
-
ExpectedStmt ASTNodeImporter::VisitParenExpr(ParenExpr *E) {
- auto Imp = importSeq(E->getLParen(), E->getRParen(), E->getSubExpr());
- if (!Imp)
- return Imp.takeError();
-
- SourceLocation ToLParen, ToRParen;
- Expr *ToSubExpr;
- std::tie(ToLParen, ToRParen, ToSubExpr) = *Imp;
+ Error Err = Error::success();
+ auto ToLParen = importChecked(Err, E->getLParen());
+ auto ToRParen = importChecked(Err, E->getRParen());
+ auto ToSubExpr = importChecked(Err, E->getSubExpr());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext())
ParenExpr(ToLParen, ToRParen, ToSubExpr);
@@ -6702,45 +6714,42 @@ ExpectedStmt ASTNodeImporter::VisitParenListExpr(ParenListExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitStmtExpr(StmtExpr *E) {
- auto Imp = importSeq(
- E->getSubStmt(), E->getType(), E->getLParenLoc(), E->getRParenLoc());
- if (!Imp)
- return Imp.takeError();
-
- CompoundStmt *ToSubStmt;
- QualType ToType;
- SourceLocation ToLParenLoc, ToRParenLoc;
- std::tie(ToSubStmt, ToType, ToLParenLoc, ToRParenLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToSubStmt = importChecked(Err, E->getSubStmt());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToLParenLoc = importChecked(Err, E->getLParenLoc());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ if (Err)
+ return std::move(Err);
- return new (Importer.getToContext()) StmtExpr(
- ToSubStmt, ToType, ToLParenLoc, ToRParenLoc);
+ return new (Importer.getToContext())
+ StmtExpr(ToSubStmt, ToType, ToLParenLoc, ToRParenLoc,
+ E->getTemplateDepth());
}
ExpectedStmt ASTNodeImporter::VisitUnaryOperator(UnaryOperator *E) {
- auto Imp = importSeq(
- E->getSubExpr(), E->getType(), E->getOperatorLoc());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToSubExpr;
- QualType ToType;
- SourceLocation ToOperatorLoc;
- std::tie(ToSubExpr, ToType, ToOperatorLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToSubExpr = importChecked(Err, E->getSubExpr());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc());
+ if (Err)
+ return std::move(Err);
- return new (Importer.getToContext()) UnaryOperator(
- ToSubExpr, E->getOpcode(), ToType, E->getValueKind(), E->getObjectKind(),
- ToOperatorLoc, E->canOverflow());
+ return UnaryOperator::Create(
+ Importer.getToContext(), ToSubExpr, E->getOpcode(), ToType,
+ E->getValueKind(), E->getObjectKind(), ToOperatorLoc, E->canOverflow(),
+ E->getFPOptionsOverride());
}
ExpectedStmt
-ASTNodeImporter::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E) {
- auto Imp = importSeq(E->getType(), E->getOperatorLoc(), E->getRParenLoc());
- if (!Imp)
- return Imp.takeError();
- QualType ToType;
- SourceLocation ToOperatorLoc, ToRParenLoc;
- std::tie(ToType, ToOperatorLoc, ToRParenLoc) = *Imp;
+ASTNodeImporter::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E) {
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ if (Err)
+ return std::move(Err);
if (E->isArgumentType()) {
Expected<TypeSourceInfo *> ToArgumentTypeInfoOrErr =
@@ -6762,53 +6771,49 @@ ASTNodeImporter::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitBinaryOperator(BinaryOperator *E) {
- auto Imp = importSeq(
- E->getLHS(), E->getRHS(), E->getType(), E->getOperatorLoc());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToLHS, *ToRHS;
- QualType ToType;
- SourceLocation ToOperatorLoc;
- std::tie(ToLHS, ToRHS, ToType, ToOperatorLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToLHS = importChecked(Err, E->getLHS());
+ auto ToRHS = importChecked(Err, E->getRHS());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc());
+ if (Err)
+ return std::move(Err);
- return new (Importer.getToContext()) BinaryOperator(
- ToLHS, ToRHS, E->getOpcode(), ToType, E->getValueKind(),
- E->getObjectKind(), ToOperatorLoc, E->getFPFeatures());
+ return BinaryOperator::Create(
+ Importer.getToContext(), ToLHS, ToRHS, E->getOpcode(), ToType,
+ E->getValueKind(), E->getObjectKind(), ToOperatorLoc,
+ E->getFPFeatures(Importer.getFromContext().getLangOpts()));
}
ExpectedStmt ASTNodeImporter::VisitConditionalOperator(ConditionalOperator *E) {
- auto Imp = importSeq(
- E->getCond(), E->getQuestionLoc(), E->getLHS(), E->getColonLoc(),
- E->getRHS(), E->getType());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToCond, *ToLHS, *ToRHS;
- SourceLocation ToQuestionLoc, ToColonLoc;
- QualType ToType;
- std::tie(ToCond, ToQuestionLoc, ToLHS, ToColonLoc, ToRHS, ToType) = *Imp;
+ Error Err = Error::success();
+ auto ToCond = importChecked(Err, E->getCond());
+ auto ToQuestionLoc = importChecked(Err, E->getQuestionLoc());
+ auto ToLHS = importChecked(Err, E->getLHS());
+ auto ToColonLoc = importChecked(Err, E->getColonLoc());
+ auto ToRHS = importChecked(Err, E->getRHS());
+ auto ToType = importChecked(Err, E->getType());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) ConditionalOperator(
ToCond, ToQuestionLoc, ToLHS, ToColonLoc, ToRHS, ToType,
E->getValueKind(), E->getObjectKind());
}
-ExpectedStmt ASTNodeImporter::VisitBinaryConditionalOperator(
- BinaryConditionalOperator *E) {
- auto Imp = importSeq(
- E->getCommon(), E->getOpaqueValue(), E->getCond(), E->getTrueExpr(),
- E->getFalseExpr(), E->getQuestionLoc(), E->getColonLoc(), E->getType());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToCommon, *ToCond, *ToTrueExpr, *ToFalseExpr;
- OpaqueValueExpr *ToOpaqueValue;
- SourceLocation ToQuestionLoc, ToColonLoc;
- QualType ToType;
- std::tie(
- ToCommon, ToOpaqueValue, ToCond, ToTrueExpr, ToFalseExpr, ToQuestionLoc,
- ToColonLoc, ToType) = *Imp;
+ExpectedStmt
+ASTNodeImporter::VisitBinaryConditionalOperator(BinaryConditionalOperator *E) {
+ Error Err = Error::success();
+ auto ToCommon = importChecked(Err, E->getCommon());
+ auto ToOpaqueValue = importChecked(Err, E->getOpaqueValue());
+ auto ToCond = importChecked(Err, E->getCond());
+ auto ToTrueExpr = importChecked(Err, E->getTrueExpr());
+ auto ToFalseExpr = importChecked(Err, E->getFalseExpr());
+ auto ToQuestionLoc = importChecked(Err, E->getQuestionLoc());
+ auto ToColonLoc = importChecked(Err, E->getColonLoc());
+ auto ToType = importChecked(Err, E->getType());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) BinaryConditionalOperator(
ToCommon, ToOpaqueValue, ToCond, ToTrueExpr, ToFalseExpr,
@@ -6817,19 +6822,15 @@ ExpectedStmt ASTNodeImporter::VisitBinaryConditionalOperator(
}
ExpectedStmt ASTNodeImporter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
- auto Imp = importSeq(
- E->getBeginLoc(), E->getQueriedTypeSourceInfo(),
- E->getDimensionExpression(), E->getEndLoc(), E->getType());
- if (!Imp)
- return Imp.takeError();
-
- SourceLocation ToBeginLoc, ToEndLoc;
- TypeSourceInfo *ToQueriedTypeSourceInfo;
- Expr *ToDimensionExpression;
- QualType ToType;
- std::tie(
- ToBeginLoc, ToQueriedTypeSourceInfo, ToDimensionExpression, ToEndLoc,
- ToType) = *Imp;
+ Error Err = Error::success();
+ auto ToBeginLoc = importChecked(Err, E->getBeginLoc());
+ auto ToQueriedTypeSourceInfo =
+ importChecked(Err, E->getQueriedTypeSourceInfo());
+ auto ToDimensionExpression = importChecked(Err, E->getDimensionExpression());
+ auto ToEndLoc = importChecked(Err, E->getEndLoc());
+ auto ToType = importChecked(Err, E->getType());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) ArrayTypeTraitExpr(
ToBeginLoc, E->getTrait(), ToQueriedTypeSourceInfo, E->getValue(),
@@ -6837,15 +6838,13 @@ ExpectedStmt ASTNodeImporter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitExpressionTraitExpr(ExpressionTraitExpr *E) {
- auto Imp = importSeq(
- E->getBeginLoc(), E->getQueriedExpression(), E->getEndLoc(), E->getType());
- if (!Imp)
- return Imp.takeError();
-
- SourceLocation ToBeginLoc, ToEndLoc;
- Expr *ToQueriedExpression;
- QualType ToType;
- std::tie(ToBeginLoc, ToQueriedExpression, ToEndLoc, ToType) = *Imp;
+ Error Err = Error::success();
+ auto ToBeginLoc = importChecked(Err, E->getBeginLoc());
+ auto ToQueriedExpression = importChecked(Err, E->getQueriedExpression());
+ auto ToEndLoc = importChecked(Err, E->getEndLoc());
+ auto ToType = importChecked(Err, E->getType());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) ExpressionTraitExpr(
ToBeginLoc, E->getTrait(), ToQueriedExpression, E->getValue(),
@@ -6853,30 +6852,25 @@ ExpectedStmt ASTNodeImporter::VisitExpressionTraitExpr(ExpressionTraitExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
- auto Imp = importSeq(
- E->getLocation(), E->getType(), E->getSourceExpr());
- if (!Imp)
- return Imp.takeError();
-
- SourceLocation ToLocation;
- QualType ToType;
- Expr *ToSourceExpr;
- std::tie(ToLocation, ToType, ToSourceExpr) = *Imp;
+ Error Err = Error::success();
+ auto ToLocation = importChecked(Err, E->getLocation());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToSourceExpr = importChecked(Err, E->getSourceExpr());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) OpaqueValueExpr(
ToLocation, ToType, E->getValueKind(), E->getObjectKind(), ToSourceExpr);
}
ExpectedStmt ASTNodeImporter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
- auto Imp = importSeq(
- E->getLHS(), E->getRHS(), E->getType(), E->getRBracketLoc());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToLHS, *ToRHS;
- SourceLocation ToRBracketLoc;
- QualType ToType;
- std::tie(ToLHS, ToRHS, ToType, ToRBracketLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToLHS = importChecked(Err, E->getLHS());
+ auto ToRHS = importChecked(Err, E->getRHS());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToRBracketLoc = importChecked(Err, E->getRBracketLoc());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) ArraySubscriptExpr(
ToLHS, ToRHS, ToType, E->getValueKind(), E->getObjectKind(),
@@ -6885,22 +6879,22 @@ ExpectedStmt ASTNodeImporter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
ExpectedStmt
ASTNodeImporter::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
- auto Imp = importSeq(
- E->getLHS(), E->getRHS(), E->getType(), E->getComputationLHSType(),
- E->getComputationResultType(), E->getOperatorLoc());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToLHS, *ToRHS;
- QualType ToType, ToComputationLHSType, ToComputationResultType;
- SourceLocation ToOperatorLoc;
- std::tie(ToLHS, ToRHS, ToType, ToComputationLHSType, ToComputationResultType,
- ToOperatorLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToLHS = importChecked(Err, E->getLHS());
+ auto ToRHS = importChecked(Err, E->getRHS());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToComputationLHSType = importChecked(Err, E->getComputationLHSType());
+ auto ToComputationResultType =
+ importChecked(Err, E->getComputationResultType());
+ auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc());
+ if (Err)
+ return std::move(Err);
- return new (Importer.getToContext()) CompoundAssignOperator(
- ToLHS, ToRHS, E->getOpcode(), ToType, E->getValueKind(),
- E->getObjectKind(), ToComputationLHSType, ToComputationResultType,
- ToOperatorLoc, E->getFPFeatures());
+ return CompoundAssignOperator::Create(
+ Importer.getToContext(), ToLHS, ToRHS, E->getOpcode(), ToType,
+ E->getValueKind(), E->getObjectKind(), ToOperatorLoc,
+ E->getFPFeatures(Importer.getFromContext().getLangOpts()),
+ ToComputationLHSType, ToComputationResultType);
}
Expected<CXXCastPath>
@@ -6934,15 +6928,12 @@ ExpectedStmt ASTNodeImporter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitExplicitCastExpr(ExplicitCastExpr *E) {
- auto Imp1 = importSeq(
- E->getType(), E->getSubExpr(), E->getTypeInfoAsWritten());
- if (!Imp1)
- return Imp1.takeError();
-
- QualType ToType;
- Expr *ToSubExpr;
- TypeSourceInfo *ToTypeInfoAsWritten;
- std::tie(ToType, ToSubExpr, ToTypeInfoAsWritten) = *Imp1;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToSubExpr = importChecked(Err, E->getSubExpr());
+ auto ToTypeInfoAsWritten = importChecked(Err, E->getTypeInfoAsWritten());
+ if (Err)
+ return std::move(Err);
Expected<CXXCastPath> ToBasePathOrErr = ImportCastPath(E);
if (!ToBasePathOrErr)
@@ -7002,11 +6993,13 @@ ExpectedStmt ASTNodeImporter::VisitOffsetOfExpr(OffsetOfExpr *E) {
const OffsetOfNode &FromNode = E->getComponent(I);
SourceLocation ToBeginLoc, ToEndLoc;
+
if (FromNode.getKind() != OffsetOfNode::Base) {
- auto Imp = importSeq(FromNode.getBeginLoc(), FromNode.getEndLoc());
- if (!Imp)
- return Imp.takeError();
- std::tie(ToBeginLoc, ToEndLoc) = *Imp;
+ Error Err = Error::success();
+ ToBeginLoc = importChecked(Err, FromNode.getBeginLoc());
+ ToEndLoc = importChecked(Err, FromNode.getEndLoc());
+ if (Err)
+ return std::move(Err);
}
switch (FromNode.getKind()) {
@@ -7044,16 +7037,13 @@ ExpectedStmt ASTNodeImporter::VisitOffsetOfExpr(OffsetOfExpr *E) {
ToExprs[I] = *ToIndexExprOrErr;
}
- auto Imp = importSeq(
- E->getType(), E->getTypeSourceInfo(), E->getOperatorLoc(),
- E->getRParenLoc());
- if (!Imp)
- return Imp.takeError();
-
- QualType ToType;
- TypeSourceInfo *ToTypeSourceInfo;
- SourceLocation ToOperatorLoc, ToRParenLoc;
- std::tie(ToType, ToTypeSourceInfo, ToOperatorLoc, ToRParenLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToTypeSourceInfo = importChecked(Err, E->getTypeSourceInfo());
+ auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ if (Err)
+ return std::move(Err);
return OffsetOfExpr::Create(
Importer.getToContext(), ToType, ToOperatorLoc, ToTypeSourceInfo, ToNodes,
@@ -7061,15 +7051,13 @@ ExpectedStmt ASTNodeImporter::VisitOffsetOfExpr(OffsetOfExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitCXXNoexceptExpr(CXXNoexceptExpr *E) {
- auto Imp = importSeq(
- E->getType(), E->getOperand(), E->getBeginLoc(), E->getEndLoc());
- if (!Imp)
- return Imp.takeError();
-
- QualType ToType;
- Expr *ToOperand;
- SourceLocation ToBeginLoc, ToEndLoc;
- std::tie(ToType, ToOperand, ToBeginLoc, ToEndLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToOperand = importChecked(Err, E->getOperand());
+ auto ToBeginLoc = importChecked(Err, E->getBeginLoc());
+ auto ToEndLoc = importChecked(Err, E->getEndLoc());
+ if (Err)
+ return std::move(Err);
CanThrowResult ToCanThrow;
if (E->isValueDependent())
@@ -7082,14 +7070,12 @@ ExpectedStmt ASTNodeImporter::VisitCXXNoexceptExpr(CXXNoexceptExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitCXXThrowExpr(CXXThrowExpr *E) {
- auto Imp = importSeq(E->getSubExpr(), E->getType(), E->getThrowLoc());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToSubExpr;
- QualType ToType;
- SourceLocation ToThrowLoc;
- std::tie(ToSubExpr, ToType, ToThrowLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToSubExpr = importChecked(Err, E->getSubExpr());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToThrowLoc = importChecked(Err, E->getThrowLoc());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) CXXThrowExpr(
ToSubExpr, ToType, ToThrowLoc, E->isThrownVariableInScope());
@@ -7129,15 +7115,12 @@ ExpectedStmt ASTNodeImporter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
ExpectedStmt
ASTNodeImporter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
- auto Imp = importSeq(
- E->getType(), E->getTypeSourceInfo(), E->getRParenLoc());
- if (!Imp)
- return Imp.takeError();
-
- QualType ToType;
- TypeSourceInfo *ToTypeSourceInfo;
- SourceLocation ToRParenLoc;
- std::tie(ToType, ToTypeSourceInfo, ToRParenLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToTypeSourceInfo = importChecked(Err, E->getTypeSourceInfo());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) CXXScalarValueInitExpr(
ToType, ToTypeSourceInfo, ToRParenLoc);
@@ -7159,18 +7142,15 @@ ASTNodeImporter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
}
ExpectedStmt
+
ASTNodeImporter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E) {
- auto Imp = importSeq(
- E->getConstructor(), E->getType(), E->getTypeSourceInfo(),
- E->getParenOrBraceRange());
- if (!Imp)
- return Imp.takeError();
-
- CXXConstructorDecl *ToConstructor;
- QualType ToType;
- TypeSourceInfo *ToTypeSourceInfo;
- SourceRange ToParenOrBraceRange;
- std::tie(ToConstructor, ToType, ToTypeSourceInfo, ToParenOrBraceRange) = *Imp;
+ Error Err = Error::success();
+ auto ToConstructor = importChecked(Err, E->getConstructor());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToTypeSourceInfo = importChecked(Err, E->getTypeSourceInfo());
+ auto ToParenOrBraceRange = importChecked(Err, E->getParenOrBraceRange());
+ if (Err)
+ return std::move(Err);
SmallVector<Expr *, 8> ToArgs(E->getNumArgs());
if (Error Err = ImportContainerChecked(E->arguments(), ToArgs))
@@ -7189,14 +7169,11 @@ ExpectedDecl ASTNodeImporter::VisitLifetimeExtendedTemporaryDecl(
if (Error Err = ImportDeclContext(D, DC, LexicalDC))
return std::move(Err);
- auto Imp = importSeq(D->getTemporaryExpr(), D->getExtendingDecl());
- // FIXME: the APValue should be imported as well if present.
- if (!Imp)
- return Imp.takeError();
-
- Expr *Temporary;
- ValueDecl *ExtendingDecl;
- std::tie(Temporary, ExtendingDecl) = *Imp;
+ Error Err = Error::success();
+ auto Temporary = importChecked(Err, D->getTemporaryExpr());
+ auto ExtendingDecl = importChecked(Err, D->getExtendingDecl());
+ if (Err)
+ return std::move(Err);
// FIXME: Should ManglingNumber get numbers associated with 'to' context?
LifetimeExtendedTemporaryDecl *To;
@@ -7211,17 +7188,15 @@ ExpectedDecl ASTNodeImporter::VisitLifetimeExtendedTemporaryDecl(
ExpectedStmt
ASTNodeImporter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
- auto Imp = importSeq(E->getType(),
- E->getLifetimeExtendedTemporaryDecl() ? nullptr
- : E->getSubExpr(),
- E->getLifetimeExtendedTemporaryDecl());
- if (!Imp)
- return Imp.takeError();
-
- QualType ToType;
- Expr *ToTemporaryExpr;
- LifetimeExtendedTemporaryDecl *ToMaterializedDecl;
- std::tie(ToType, ToTemporaryExpr, ToMaterializedDecl) = *Imp;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ Expr *ToTemporaryExpr = importChecked(
+ Err, E->getLifetimeExtendedTemporaryDecl() ? nullptr : E->getSubExpr());
+ auto ToMaterializedDecl =
+ importChecked(Err, E->getLifetimeExtendedTemporaryDecl());
+ if (Err)
+ return std::move(Err);
+
if (!ToTemporaryExpr)
ToTemporaryExpr = cast<Expr>(ToMaterializedDecl->getTemporaryExpr());
@@ -7233,29 +7208,25 @@ ASTNodeImporter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitPackExpansionExpr(PackExpansionExpr *E) {
- auto Imp = importSeq(
- E->getType(), E->getPattern(), E->getEllipsisLoc());
- if (!Imp)
- return Imp.takeError();
-
- QualType ToType;
- Expr *ToPattern;
- SourceLocation ToEllipsisLoc;
- std::tie(ToType, ToPattern, ToEllipsisLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToPattern = importChecked(Err, E->getPattern());
+ auto ToEllipsisLoc = importChecked(Err, E->getEllipsisLoc());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) PackExpansionExpr(
ToType, ToPattern, ToEllipsisLoc, E->getNumExpansions());
}
ExpectedStmt ASTNodeImporter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
- auto Imp = importSeq(
- E->getOperatorLoc(), E->getPack(), E->getPackLoc(), E->getRParenLoc());
- if (!Imp)
- return Imp.takeError();
-
- SourceLocation ToOperatorLoc, ToPackLoc, ToRParenLoc;
- NamedDecl *ToPack;
- std::tie(ToOperatorLoc, ToPack, ToPackLoc, ToRParenLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc());
+ auto ToPack = importChecked(Err, E->getPack());
+ auto ToPackLoc = importChecked(Err, E->getPackLoc());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ if (Err)
+ return std::move(Err);
Optional<unsigned> Length;
if (!E->isValueDependent())
@@ -7277,23 +7248,19 @@ ExpectedStmt ASTNodeImporter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
ExpectedStmt ASTNodeImporter::VisitCXXNewExpr(CXXNewExpr *E) {
- auto Imp = importSeq(
- E->getOperatorNew(), E->getOperatorDelete(), E->getTypeIdParens(),
- E->getArraySize(), E->getInitializer(), E->getType(),
- E->getAllocatedTypeSourceInfo(), E->getSourceRange(),
- E->getDirectInitRange());
- if (!Imp)
- return Imp.takeError();
-
- FunctionDecl *ToOperatorNew, *ToOperatorDelete;
- SourceRange ToTypeIdParens, ToSourceRange, ToDirectInitRange;
- Optional<Expr *> ToArraySize;
- Expr *ToInitializer;
- QualType ToType;
- TypeSourceInfo *ToAllocatedTypeSourceInfo;
- std::tie(
- ToOperatorNew, ToOperatorDelete, ToTypeIdParens, ToArraySize, ToInitializer,
- ToType, ToAllocatedTypeSourceInfo, ToSourceRange, ToDirectInitRange) = *Imp;
+ Error Err = Error::success();
+ auto ToOperatorNew = importChecked(Err, E->getOperatorNew());
+ auto ToOperatorDelete = importChecked(Err, E->getOperatorDelete());
+ auto ToTypeIdParens = importChecked(Err, E->getTypeIdParens());
+ auto ToArraySize = importChecked(Err, E->getArraySize());
+ auto ToInitializer = importChecked(Err, E->getInitializer());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToAllocatedTypeSourceInfo =
+ importChecked(Err, E->getAllocatedTypeSourceInfo());
+ auto ToSourceRange = importChecked(Err, E->getSourceRange());
+ auto ToDirectInitRange = importChecked(Err, E->getDirectInitRange());
+ if (Err)
+ return std::move(Err);
SmallVector<Expr *, 4> ToPlacementArgs(E->getNumPlacementArgs());
if (Error Err =
@@ -7309,16 +7276,13 @@ ExpectedStmt ASTNodeImporter::VisitCXXNewExpr(CXXNewExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
- auto Imp = importSeq(
- E->getType(), E->getOperatorDelete(), E->getArgument(), E->getBeginLoc());
- if (!Imp)
- return Imp.takeError();
-
- QualType ToType;
- FunctionDecl *ToOperatorDelete;
- Expr *ToArgument;
- SourceLocation ToBeginLoc;
- std::tie(ToType, ToOperatorDelete, ToArgument, ToBeginLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToOperatorDelete = importChecked(Err, E->getOperatorDelete());
+ auto ToArgument = importChecked(Err, E->getArgument());
+ auto ToBeginLoc = importChecked(Err, E->getBeginLoc());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) CXXDeleteExpr(
ToType, E->isGlobalDelete(), E->isArrayForm(), E->isArrayFormAsWritten(),
@@ -7327,17 +7291,13 @@ ExpectedStmt ASTNodeImporter::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitCXXConstructExpr(CXXConstructExpr *E) {
- auto Imp = importSeq(
- E->getType(), E->getLocation(), E->getConstructor(),
- E->getParenOrBraceRange());
- if (!Imp)
- return Imp.takeError();
-
- QualType ToType;
- SourceLocation ToLocation;
- CXXConstructorDecl *ToConstructor;
- SourceRange ToParenOrBraceRange;
- std::tie(ToType, ToLocation, ToConstructor, ToParenOrBraceRange) = *Imp;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToLocation = importChecked(Err, E->getLocation());
+ auto ToConstructor = importChecked(Err, E->getConstructor());
+ auto ToParenOrBraceRange = importChecked(Err, E->getParenOrBraceRange());
+ if (Err)
+ return std::move(Err);
SmallVector<Expr *, 6> ToArgs(E->getNumArgs());
if (Error Err = ImportContainerChecked(E->arguments(), ToArgs))
@@ -7366,15 +7326,12 @@ ExpectedStmt ASTNodeImporter::VisitExprWithCleanups(ExprWithCleanups *E) {
}
ExpectedStmt ASTNodeImporter::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
- auto Imp = importSeq(
- E->getCallee(), E->getType(), E->getRParenLoc());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToCallee;
- QualType ToType;
- SourceLocation ToRParenLoc;
- std::tie(ToCallee, ToType, ToRParenLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToCallee = importChecked(Err, E->getCallee());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ if (Err)
+ return std::move(Err);
SmallVector<Expr *, 4> ToArgs(E->getNumArgs());
if (Error Err = ImportContainerChecked(E->arguments(), ToArgs))
@@ -7411,30 +7368,18 @@ ExpectedStmt ASTNodeImporter::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitMemberExpr(MemberExpr *E) {
- auto Imp1 = importSeq(
- E->getBase(), E->getOperatorLoc(), E->getQualifierLoc(),
- E->getTemplateKeywordLoc(), E->getMemberDecl(), E->getType());
- if (!Imp1)
- return Imp1.takeError();
-
- Expr *ToBase;
- SourceLocation ToOperatorLoc, ToTemplateKeywordLoc;
- NestedNameSpecifierLoc ToQualifierLoc;
- ValueDecl *ToMemberDecl;
- QualType ToType;
- std::tie(
- ToBase, ToOperatorLoc, ToQualifierLoc, ToTemplateKeywordLoc, ToMemberDecl,
- ToType) = *Imp1;
-
- auto Imp2 = importSeq(
- E->getFoundDecl().getDecl(), E->getMemberNameInfo().getName(),
- E->getMemberNameInfo().getLoc(), E->getLAngleLoc(), E->getRAngleLoc());
- if (!Imp2)
- return Imp2.takeError();
- NamedDecl *ToDecl;
- DeclarationName ToName;
- SourceLocation ToLoc, ToLAngleLoc, ToRAngleLoc;
- std::tie(ToDecl, ToName, ToLoc, ToLAngleLoc, ToRAngleLoc) = *Imp2;
+ Error Err = Error::success();
+ auto ToBase = importChecked(Err, E->getBase());
+ auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc());
+ auto ToQualifierLoc = importChecked(Err, E->getQualifierLoc());
+ auto ToTemplateKeywordLoc = importChecked(Err, E->getTemplateKeywordLoc());
+ auto ToMemberDecl = importChecked(Err, E->getMemberDecl());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToDecl = importChecked(Err, E->getFoundDecl().getDecl());
+ auto ToName = importChecked(Err, E->getMemberNameInfo().getName());
+ auto ToLoc = importChecked(Err, E->getMemberNameInfo().getLoc());
+ if (Err)
+ return std::move(Err);
DeclAccessPair ToFoundDecl =
DeclAccessPair::make(ToDecl, E->getFoundDecl().getAccess());
@@ -7459,19 +7404,15 @@ ExpectedStmt ASTNodeImporter::VisitMemberExpr(MemberExpr *E) {
ExpectedStmt
ASTNodeImporter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
- auto Imp = importSeq(
- E->getBase(), E->getOperatorLoc(), E->getQualifierLoc(),
- E->getScopeTypeInfo(), E->getColonColonLoc(), E->getTildeLoc());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToBase;
- SourceLocation ToOperatorLoc, ToColonColonLoc, ToTildeLoc;
- NestedNameSpecifierLoc ToQualifierLoc;
- TypeSourceInfo *ToScopeTypeInfo;
- std::tie(
- ToBase, ToOperatorLoc, ToQualifierLoc, ToScopeTypeInfo, ToColonColonLoc,
- ToTildeLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToBase = importChecked(Err, E->getBase());
+ auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc());
+ auto ToQualifierLoc = importChecked(Err, E->getQualifierLoc());
+ auto ToScopeTypeInfo = importChecked(Err, E->getScopeTypeInfo());
+ auto ToColonColonLoc = importChecked(Err, E->getColonColonLoc());
+ auto ToTildeLoc = importChecked(Err, E->getTildeLoc());
+ if (Err)
+ return std::move(Err);
PseudoDestructorTypeStorage Storage;
if (IdentifierInfo *FromII = E->getDestroyedTypeIdentifier()) {
@@ -7494,19 +7435,15 @@ ASTNodeImporter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
ExpectedStmt ASTNodeImporter::VisitCXXDependentScopeMemberExpr(
CXXDependentScopeMemberExpr *E) {
- auto Imp = importSeq(
- E->getType(), E->getOperatorLoc(), E->getQualifierLoc(),
- E->getTemplateKeywordLoc(), E->getFirstQualifierFoundInScope());
- if (!Imp)
- return Imp.takeError();
-
- QualType ToType;
- SourceLocation ToOperatorLoc, ToTemplateKeywordLoc;
- NestedNameSpecifierLoc ToQualifierLoc;
- NamedDecl *ToFirstQualifierFoundInScope;
- std::tie(
- ToType, ToOperatorLoc, ToQualifierLoc, ToTemplateKeywordLoc,
- ToFirstQualifierFoundInScope) = *Imp;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc());
+ auto ToQualifierLoc = importChecked(Err, E->getQualifierLoc());
+ auto ToTemplateKeywordLoc = importChecked(Err, E->getTemplateKeywordLoc());
+ auto ToFirstQualifierFoundInScope =
+ importChecked(Err, E->getFirstQualifierFoundInScope());
+ if (Err)
+ return std::move(Err);
Expr *ToBase = nullptr;
if (!E->isImplicitAccess()) {
@@ -7517,22 +7454,23 @@ ExpectedStmt ASTNodeImporter::VisitCXXDependentScopeMemberExpr(
}
TemplateArgumentListInfo ToTAInfo, *ResInfo = nullptr;
+
if (E->hasExplicitTemplateArgs()) {
- if (Error Err = ImportTemplateArgumentListInfo(
- E->getLAngleLoc(), E->getRAngleLoc(), E->template_arguments(),
- ToTAInfo))
+ if (Error Err =
+ ImportTemplateArgumentListInfo(E->getLAngleLoc(), E->getRAngleLoc(),
+ E->template_arguments(), ToTAInfo))
return std::move(Err);
ResInfo = &ToTAInfo;
}
+ auto ToMember = importChecked(Err, E->getMember());
+ auto ToMemberLoc = importChecked(Err, E->getMemberLoc());
+ if (Err)
+ return std::move(Err);
+ DeclarationNameInfo ToMemberNameInfo(ToMember, ToMemberLoc);
- auto ToMemberNameInfoOrErr = importSeq(E->getMember(), E->getMemberLoc());
- if (!ToMemberNameInfoOrErr)
- return ToMemberNameInfoOrErr.takeError();
- DeclarationNameInfo ToMemberNameInfo(
- std::get<0>(*ToMemberNameInfoOrErr), std::get<1>(*ToMemberNameInfoOrErr));
// Import additional name location/type info.
- if (Error Err = ImportDeclarationNameLoc(
- E->getMemberNameInfo(), ToMemberNameInfo))
+ if (Error Err =
+ ImportDeclarationNameLoc(E->getMemberNameInfo(), ToMemberNameInfo))
return std::move(Err);
return CXXDependentScopeMemberExpr::Create(
@@ -7543,17 +7481,15 @@ ExpectedStmt ASTNodeImporter::VisitCXXDependentScopeMemberExpr(
ExpectedStmt
ASTNodeImporter::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
- auto Imp = importSeq(E->getQualifierLoc(), E->getTemplateKeywordLoc(),
- E->getDeclName(), E->getNameInfo().getLoc(),
- E->getLAngleLoc(), E->getRAngleLoc());
- if (!Imp)
- return Imp.takeError();
-
- NestedNameSpecifierLoc ToQualifierLoc;
- SourceLocation ToTemplateKeywordLoc, ToNameLoc, ToLAngleLoc, ToRAngleLoc;
- DeclarationName ToDeclName;
- std::tie(ToQualifierLoc, ToTemplateKeywordLoc, ToDeclName, ToNameLoc,
- ToLAngleLoc, ToRAngleLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToQualifierLoc = importChecked(Err, E->getQualifierLoc());
+ auto ToTemplateKeywordLoc = importChecked(Err, E->getTemplateKeywordLoc());
+ auto ToDeclName = importChecked(Err, E->getDeclName());
+ auto ToNameLoc = importChecked(Err, E->getNameInfo().getLoc());
+ auto ToLAngleLoc = importChecked(Err, E->getLAngleLoc());
+ auto ToRAngleLoc = importChecked(Err, E->getRAngleLoc());
+ if (Err)
+ return std::move(Err);
DeclarationNameInfo ToNameInfo(ToDeclName, ToNameLoc);
if (Error Err = ImportDeclarationNameLoc(E->getNameInfo(), ToNameInfo))
@@ -7575,14 +7511,12 @@ ASTNodeImporter::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
ExpectedStmt ASTNodeImporter::VisitCXXUnresolvedConstructExpr(
CXXUnresolvedConstructExpr *E) {
- auto Imp = importSeq(
- E->getLParenLoc(), E->getRParenLoc(), E->getTypeSourceInfo());
- if (!Imp)
- return Imp.takeError();
-
- SourceLocation ToLParenLoc, ToRParenLoc;
- TypeSourceInfo *ToTypeSourceInfo;
- std::tie(ToLParenLoc, ToRParenLoc, ToTypeSourceInfo) = *Imp;
+ Error Err = Error::success();
+ auto ToLParenLoc = importChecked(Err, E->getLParenLoc());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ auto ToTypeSourceInfo = importChecked(Err, E->getTypeSourceInfo());
+ if (Err)
+ return std::move(Err);
SmallVector<Expr *, 8> ToArgs(E->arg_size());
if (Error Err =
@@ -7604,11 +7538,13 @@ ASTNodeImporter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
if (!ToQualifierLocOrErr)
return ToQualifierLocOrErr.takeError();
- auto ToNameInfoOrErr = importSeq(E->getName(), E->getNameLoc());
- if (!ToNameInfoOrErr)
- return ToNameInfoOrErr.takeError();
- DeclarationNameInfo ToNameInfo(
- std::get<0>(*ToNameInfoOrErr), std::get<1>(*ToNameInfoOrErr));
+ Error Err = Error::success();
+ auto ToName = importChecked(Err, E->getName());
+ auto ToNameLoc = importChecked(Err, E->getNameLoc());
+ if (Err)
+ return std::move(Err);
+ DeclarationNameInfo ToNameInfo(ToName, ToNameLoc);
+
// Import additional name location/type info.
if (Error Err = ImportDeclarationNameLoc(E->getNameInfo(), ToNameInfo))
return std::move(Err);
@@ -7645,21 +7581,17 @@ ASTNodeImporter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
ExpectedStmt
ASTNodeImporter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
- auto Imp1 = importSeq(
- E->getType(), E->getOperatorLoc(), E->getQualifierLoc(),
- E->getTemplateKeywordLoc());
- if (!Imp1)
- return Imp1.takeError();
-
- QualType ToType;
- SourceLocation ToOperatorLoc, ToTemplateKeywordLoc;
- NestedNameSpecifierLoc ToQualifierLoc;
- std::tie(ToType, ToOperatorLoc, ToQualifierLoc, ToTemplateKeywordLoc) = *Imp1;
-
- auto Imp2 = importSeq(E->getName(), E->getNameLoc());
- if (!Imp2)
- return Imp2.takeError();
- DeclarationNameInfo ToNameInfo(std::get<0>(*Imp2), std::get<1>(*Imp2));
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc());
+ auto ToQualifierLoc = importChecked(Err, E->getQualifierLoc());
+ auto ToTemplateKeywordLoc = importChecked(Err, E->getTemplateKeywordLoc());
+ auto ToName = importChecked(Err, E->getName());
+ auto ToNameLoc = importChecked(Err, E->getNameLoc());
+ if (Err)
+ return std::move(Err);
+
+ DeclarationNameInfo ToNameInfo(ToName, ToNameLoc);
// Import additional name location/type info.
if (Error Err = ImportDeclarationNameLoc(E->getNameInfo(), ToNameInfo))
return std::move(Err);
@@ -7696,14 +7628,12 @@ ASTNodeImporter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitCallExpr(CallExpr *E) {
- auto Imp = importSeq(E->getCallee(), E->getType(), E->getRParenLoc());
- if (!Imp)
- return Imp.takeError();
-
- Expr *ToCallee;
- QualType ToType;
- SourceLocation ToRParenLoc;
- std::tie(ToCallee, ToType, ToRParenLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToCallee = importChecked(Err, E->getCallee());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ if (Err)
+ return std::move(Err);
unsigned NumArgs = E->getNumArgs();
llvm::SmallVector<Expr *, 2> ToArgs(NumArgs);
@@ -7733,44 +7663,32 @@ ExpectedStmt ASTNodeImporter::VisitLambdaExpr(LambdaExpr *E) {
if (!ToCallOpOrErr)
return ToCallOpOrErr.takeError();
- SmallVector<LambdaCapture, 8> ToCaptures;
- ToCaptures.reserve(E->capture_size());
- for (const auto &FromCapture : E->captures()) {
- if (auto ToCaptureOrErr = import(FromCapture))
- ToCaptures.push_back(*ToCaptureOrErr);
- else
- return ToCaptureOrErr.takeError();
- }
-
SmallVector<Expr *, 8> ToCaptureInits(E->capture_size());
if (Error Err = ImportContainerChecked(E->capture_inits(), ToCaptureInits))
return std::move(Err);
- auto Imp = importSeq(
- E->getIntroducerRange(), E->getCaptureDefaultLoc(), E->getEndLoc());
- if (!Imp)
- return Imp.takeError();
-
- SourceRange ToIntroducerRange;
- SourceLocation ToCaptureDefaultLoc, ToEndLoc;
- std::tie(ToIntroducerRange, ToCaptureDefaultLoc, ToEndLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToIntroducerRange = importChecked(Err, E->getIntroducerRange());
+ auto ToCaptureDefaultLoc = importChecked(Err, E->getCaptureDefaultLoc());
+ auto ToEndLoc = importChecked(Err, E->getEndLoc());
+ if (Err)
+ return std::move(Err);
- return LambdaExpr::Create(
- Importer.getToContext(), ToClass, ToIntroducerRange,
- E->getCaptureDefault(), ToCaptureDefaultLoc, ToCaptures,
- E->hasExplicitParameters(), E->hasExplicitResultType(), ToCaptureInits,
- ToEndLoc, E->containsUnexpandedParameterPack());
+ return LambdaExpr::Create(Importer.getToContext(), ToClass, ToIntroducerRange,
+ E->getCaptureDefault(), ToCaptureDefaultLoc,
+ E->hasExplicitParameters(),
+ E->hasExplicitResultType(), ToCaptureInits,
+ ToEndLoc, E->containsUnexpandedParameterPack());
}
ExpectedStmt ASTNodeImporter::VisitInitListExpr(InitListExpr *E) {
- auto Imp = importSeq(E->getLBraceLoc(), E->getRBraceLoc(), E->getType());
- if (!Imp)
- return Imp.takeError();
-
- SourceLocation ToLBraceLoc, ToRBraceLoc;
- QualType ToType;
- std::tie(ToLBraceLoc, ToRBraceLoc, ToType) = *Imp;
+ Error Err = Error::success();
+ auto ToLBraceLoc = importChecked(Err, E->getLBraceLoc());
+ auto ToRBraceLoc = importChecked(Err, E->getRBraceLoc());
+ auto ToType = importChecked(Err, E->getType());
+ if (Err)
+ return std::move(Err);
SmallVector<Expr *, 4> ToExprs(E->getNumInits());
if (Error Err = ImportContainerChecked(E->inits(), ToExprs))
@@ -7825,14 +7743,12 @@ ExpectedStmt ASTNodeImporter::VisitCXXStdInitializerListExpr(
ExpectedStmt ASTNodeImporter::VisitCXXInheritedCtorInitExpr(
CXXInheritedCtorInitExpr *E) {
- auto Imp = importSeq(E->getLocation(), E->getType(), E->getConstructor());
- if (!Imp)
- return Imp.takeError();
-
- SourceLocation ToLocation;
- QualType ToType;
- CXXConstructorDecl *ToConstructor;
- std::tie(ToLocation, ToType, ToConstructor) = *Imp;
+ Error Err = Error::success();
+ auto ToLocation = importChecked(Err, E->getLocation());
+ auto ToType = importChecked(Err, E->getType());
+ auto ToConstructor = importChecked(Err, E->getConstructor());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) CXXInheritedCtorInitExpr(
ToLocation, ToType, ToConstructor, E->constructsVBase(),
@@ -7840,13 +7756,12 @@ ExpectedStmt ASTNodeImporter::VisitCXXInheritedCtorInitExpr(
}
ExpectedStmt ASTNodeImporter::VisitArrayInitLoopExpr(ArrayInitLoopExpr *E) {
- auto Imp = importSeq(E->getType(), E->getCommonExpr(), E->getSubExpr());
- if (!Imp)
- return Imp.takeError();
-
- QualType ToType;
- Expr *ToCommonExpr, *ToSubExpr;
- std::tie(ToType, ToCommonExpr, ToSubExpr) = *Imp;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToCommonExpr = importChecked(Err, E->getCommonExpr());
+ auto ToSubExpr = importChecked(Err, E->getSubExpr());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) ArrayInitLoopExpr(
ToType, ToCommonExpr, ToSubExpr);
@@ -7877,20 +7792,15 @@ ExpectedStmt ASTNodeImporter::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E) {
}
ExpectedStmt ASTNodeImporter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
- auto Imp = importSeq(
- E->getType(), E->getSubExpr(), E->getTypeInfoAsWritten(),
- E->getOperatorLoc(), E->getRParenLoc(), E->getAngleBrackets());
- if (!Imp)
- return Imp.takeError();
-
- QualType ToType;
- Expr *ToSubExpr;
- TypeSourceInfo *ToTypeInfoAsWritten;
- SourceLocation ToOperatorLoc, ToRParenLoc;
- SourceRange ToAngleBrackets;
- std::tie(
- ToType, ToSubExpr, ToTypeInfoAsWritten, ToOperatorLoc, ToRParenLoc,
- ToAngleBrackets) = *Imp;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToSubExpr = importChecked(Err, E->getSubExpr());
+ auto ToTypeInfoAsWritten = importChecked(Err, E->getTypeInfoAsWritten());
+ auto ToOperatorLoc = importChecked(Err, E->getOperatorLoc());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ auto ToAngleBrackets = importChecked(Err, E->getAngleBrackets());
+ if (Err)
+ return std::move(Err);
ExprValueKind VK = E->getValueKind();
CastKind CK = E->getCastKind();
@@ -7922,30 +7832,25 @@ ExpectedStmt ASTNodeImporter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
ExpectedStmt ASTNodeImporter::VisitSubstNonTypeTemplateParmExpr(
SubstNonTypeTemplateParmExpr *E) {
- auto Imp = importSeq(
- E->getType(), E->getExprLoc(), E->getParameter(), E->getReplacement());
- if (!Imp)
- return Imp.takeError();
-
- QualType ToType;
- SourceLocation ToExprLoc;
- NonTypeTemplateParmDecl *ToParameter;
- Expr *ToReplacement;
- std::tie(ToType, ToExprLoc, ToParameter, ToReplacement) = *Imp;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToExprLoc = importChecked(Err, E->getExprLoc());
+ auto ToParameter = importChecked(Err, E->getParameter());
+ auto ToReplacement = importChecked(Err, E->getReplacement());
+ if (Err)
+ return std::move(Err);
return new (Importer.getToContext()) SubstNonTypeTemplateParmExpr(
ToType, E->getValueKind(), ToExprLoc, ToParameter, ToReplacement);
}
ExpectedStmt ASTNodeImporter::VisitTypeTraitExpr(TypeTraitExpr *E) {
- auto Imp = importSeq(
- E->getType(), E->getBeginLoc(), E->getEndLoc());
- if (!Imp)
- return Imp.takeError();
-
- QualType ToType;
- SourceLocation ToBeginLoc, ToEndLoc;
- std::tie(ToType, ToBeginLoc, ToEndLoc) = *Imp;
+ Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
+ auto ToBeginLoc = importChecked(Err, E->getBeginLoc());
+ auto ToEndLoc = importChecked(Err, E->getEndLoc());
+ if (Err)
+ return std::move(Err);
SmallVector<TypeSourceInfo *, 4> ToArgs(E->getNumArgs());
if (Error Err = ImportContainerChecked(E->getArgs(), ToArgs))
@@ -8087,6 +7992,18 @@ void ASTImporter::RegisterImportedDecl(Decl *FromD, Decl *ToD) {
MapImported(FromD, ToD);
}
+llvm::Expected<ExprWithCleanups::CleanupObject>
+ASTImporter::Import(ExprWithCleanups::CleanupObject From) {
+ if (auto *CLE = From.dyn_cast<CompoundLiteralExpr *>()) {
+ if (Expected<Expr *> R = Import(CLE))
+ return ExprWithCleanups::CleanupObject(cast<CompoundLiteralExpr>(*R));
+ }
+
+ // FIXME: Handle BlockDecl when we implement importing BlockExpr in
+ // ASTNodeImporter.
+ return make_error<ImportError>(ImportError::UnsupportedConstruct);
+}
+
Expected<QualType> ASTImporter::Import(QualType FromT) {
if (FromT.isNull())
return QualType{};
@@ -8128,12 +8045,47 @@ Expected<TypeSourceInfo *> ASTImporter::Import(TypeSourceInfo *FromTSI) {
}
Expected<Attr *> ASTImporter::Import(const Attr *FromAttr) {
- Attr *ToAttr = FromAttr->clone(ToContext);
- if (auto ToRangeOrErr = Import(FromAttr->getRange()))
- ToAttr->setRange(*ToRangeOrErr);
- else
- return ToRangeOrErr.takeError();
+ Attr *ToAttr = nullptr;
+ SourceRange ToRange;
+ if (Error Err = importInto(ToRange, FromAttr->getRange()))
+ return std::move(Err);
+ // FIXME: Is there some kind of AttrVisitor to use here?
+ switch (FromAttr->getKind()) {
+ case attr::Aligned: {
+ auto *From = cast<AlignedAttr>(FromAttr);
+ AlignedAttr *To;
+ auto CreateAlign = [&](bool IsAlignmentExpr, void *Alignment) {
+ return AlignedAttr::Create(ToContext, IsAlignmentExpr, Alignment, ToRange,
+ From->getSyntax(),
+ From->getSemanticSpelling());
+ };
+ if (From->isAlignmentExpr()) {
+ if (auto ToEOrErr = Import(From->getAlignmentExpr()))
+ To = CreateAlign(true, *ToEOrErr);
+ else
+ return ToEOrErr.takeError();
+ } else {
+ if (auto ToTOrErr = Import(From->getAlignmentType()))
+ To = CreateAlign(false, *ToTOrErr);
+ else
+ return ToTOrErr.takeError();
+ }
+ To->setInherited(From->isInherited());
+ To->setPackExpansion(From->isPackExpansion());
+ To->setImplicit(From->isImplicit());
+ ToAttr = To;
+ break;
+ }
+ default:
+ // FIXME: 'clone' copies every member but some of them should be imported.
+ // Handle other Attrs that have parameters that should be imported.
+ ToAttr = FromAttr->clone(ToContext);
+ ToAttr->setRange(ToRange);
+ break;
+ }
+ assert(ToAttr && "Attribute should be created.");
+
return ToAttr;
}
@@ -8241,7 +8193,7 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) {
// FIXME Should we remove these Decls from the LookupTable,
// and from ImportedFromDecls?
}
- SavedImportPaths[FromD].clear();
+ SavedImportPaths.erase(FromD);
// Do not return ToDOrErr, error was taken out of it.
return make_error<ImportError>(ErrOut);
@@ -8274,7 +8226,7 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) {
Imported(FromD, ToD);
updateFlags(FromD, ToD);
- SavedImportPaths[FromD].clear();
+ SavedImportPaths.erase(FromD);
return ToDOrErr;
}
@@ -8291,15 +8243,22 @@ Expected<DeclContext *> ASTImporter::ImportContext(DeclContext *FromDC) {
// need it to have a definition.
if (auto *ToRecord = dyn_cast<RecordDecl>(ToDC)) {
auto *FromRecord = cast<RecordDecl>(FromDC);
- if (ToRecord->isCompleteDefinition()) {
- // Do nothing.
- } else if (FromRecord->isCompleteDefinition()) {
+ if (ToRecord->isCompleteDefinition())
+ return ToDC;
+
+ // If FromRecord is not defined we need to force it to be.
+ // Simply calling CompleteDecl(...) for a RecordDecl will break some cases
+ // it will start the definition but we never finish it.
+ // If there are base classes they won't be imported and we will
+ // be missing anything that we inherit from those bases.
+ if (FromRecord->getASTContext().getExternalSource() &&
+ !FromRecord->isCompleteDefinition())
+ FromRecord->getASTContext().getExternalSource()->CompleteType(FromRecord);
+
+ if (FromRecord->isCompleteDefinition())
if (Error Err = ASTNodeImporter(*this).ImportDefinition(
FromRecord, ToRecord, ASTNodeImporter::IDK_Basic))
return std::move(Err);
- } else {
- CompleteDecl(ToRecord);
- }
} else if (auto *ToEnum = dyn_cast<EnumDecl>(ToDC)) {
auto *FromEnum = cast<EnumDecl>(FromDC);
if (ToEnum->isCompleteDefinition()) {
@@ -8366,11 +8325,7 @@ Expected<Stmt *> ASTImporter::Import(Stmt *FromS) {
// constructors.
ToE->setValueKind(FromE->getValueKind());
ToE->setObjectKind(FromE->getObjectKind());
- ToE->setTypeDependent(FromE->isTypeDependent());
- ToE->setValueDependent(FromE->isValueDependent());
- ToE->setInstantiationDependent(FromE->isInstantiationDependent());
- ToE->setContainsUnexpandedParameterPack(
- FromE->containsUnexpandedParameterPack());
+ ToE->setDependence(FromE->getDependence());
}
// Record the imported statement object.
@@ -8672,12 +8627,21 @@ Expected<FileID> ASTImporter::Import(FileID FromID, bool IsBuiltin) {
} else {
const SrcMgr::ContentCache *Cache = FromSLoc.getFile().getContentCache();
- if (!IsBuiltin) {
+ if (!IsBuiltin && !Cache->BufferOverridden) {
// Include location of this file.
ExpectedSLoc ToIncludeLoc = Import(FromSLoc.getFile().getIncludeLoc());
if (!ToIncludeLoc)
return ToIncludeLoc.takeError();
+ // Every FileID that is not the main FileID needs to have a valid include
+ // location so that the include chain points to the main FileID. When
+ // importing the main FileID (which has no include location), we need to
+ // create a fake include location in the main file to keep this property
+ // intact.
+ SourceLocation ToIncludeLocOrFakeLoc = *ToIncludeLoc;
+ if (FromID == FromSM.getMainFileID())
+ ToIncludeLocOrFakeLoc = ToSM.getLocForStartOfFile(ToSM.getMainFileID());
+
if (Cache->OrigEntry && Cache->OrigEntry->getDir()) {
// FIXME: We probably want to use getVirtualFile(), so we don't hit the
// disk again
@@ -8689,7 +8653,7 @@ Expected<FileID> ASTImporter::Import(FileID FromID, bool IsBuiltin) {
// point to a valid file and we get no Entry here. In this case try with
// the memory buffer below.
if (Entry)
- ToID = ToSM.createFileID(*Entry, *ToIncludeLoc,
+ ToID = ToSM.createFileID(*Entry, ToIncludeLocOrFakeLoc,
FromSLoc.getFile().getFileCharacteristic());
}
}
diff --git a/clang/lib/AST/ASTImporterLookupTable.cpp b/clang/lib/AST/ASTImporterLookupTable.cpp
index 7390329d4ed8..4d6fff8f3419 100644
--- a/clang/lib/AST/ASTImporterLookupTable.cpp
+++ b/clang/lib/AST/ASTImporterLookupTable.cpp
@@ -45,7 +45,11 @@ struct Builder : RecursiveASTVisitor<Builder> {
LT.add(RTy->getAsCXXRecordDecl());
else if (const auto *SpecTy = dyn_cast<TemplateSpecializationType>(Ty))
LT.add(SpecTy->getAsCXXRecordDecl());
- else if (isa<TypedefType>(Ty)) {
+ else if (const auto *SubstTy =
+ dyn_cast<SubstTemplateTypeParmType>(Ty)) {
+ if (SubstTy->getAsCXXRecordDecl())
+ LT.add(SubstTy->getAsCXXRecordDecl());
+ } else if (isa<TypedefType>(Ty)) {
// We do not put friend typedefs to the lookup table because
// ASTImporter does not organize typedefs into redecl chains.
} else {
diff --git a/clang/lib/AST/ASTStructuralEquivalence.cpp b/clang/lib/AST/ASTStructuralEquivalence.cpp
index db48405055cd..8b5b2444f1e2 100644
--- a/clang/lib/AST/ASTStructuralEquivalence.cpp
+++ b/clang/lib/AST/ASTStructuralEquivalence.cpp
@@ -31,10 +31,9 @@
// }
// ```
// Indeed, it has it's queue, which holds pairs of nodes, one from each graph,
-// this is the `DeclsToCheck` and it's pair is in `TentativeEquivalences`.
-// `TentativeEquivalences` also plays the role of the marking (`marked`)
-// functionality above, we use it to check whether we've already seen a pair of
-// nodes.
+// this is the `DeclsToCheck` member. `VisitedDecls` plays the role of the
+// marking (`marked`) functionality above, we use it to check whether we've
+// already seen a pair of nodes.
//
// We put in the elements into the queue only in the toplevel decl check
// function:
@@ -57,11 +56,6 @@
// doing. Thus, static implementation functions must not call the **member**
// functions.
//
-// So, now `TentativeEquivalences` plays two roles. It is used to store the
-// second half of the decls which we want to compare, plus it plays a role in
-// closing the recursion. On a long term, we could refactor structural
-// equivalency to be more alike to the traditional BFS.
-//
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTStructuralEquivalence.h"
@@ -623,6 +617,34 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
break;
}
+ case Type::DependentSizedMatrix: {
+ const DependentSizedMatrixType *Mat1 = cast<DependentSizedMatrixType>(T1);
+ const DependentSizedMatrixType *Mat2 = cast<DependentSizedMatrixType>(T2);
+ // The element types, row and column expressions must be structurally
+ // equivalent.
+ if (!IsStructurallyEquivalent(Context, Mat1->getRowExpr(),
+ Mat2->getRowExpr()) ||
+ !IsStructurallyEquivalent(Context, Mat1->getColumnExpr(),
+ Mat2->getColumnExpr()) ||
+ !IsStructurallyEquivalent(Context, Mat1->getElementType(),
+ Mat2->getElementType()))
+ return false;
+ break;
+ }
+
+ case Type::ConstantMatrix: {
+ const ConstantMatrixType *Mat1 = cast<ConstantMatrixType>(T1);
+ const ConstantMatrixType *Mat2 = cast<ConstantMatrixType>(T2);
+ // The element types must be structurally equivalent and the number of rows
+ // and columns must match.
+ if (!IsStructurallyEquivalent(Context, Mat1->getElementType(),
+ Mat2->getElementType()) ||
+ Mat1->getNumRows() != Mat2->getNumRows() ||
+ Mat1->getNumColumns() != Mat2->getNumColumns())
+ return false;
+ break;
+ }
+
case Type::FunctionProto: {
const auto *Proto1 = cast<FunctionProtoType>(T1);
const auto *Proto2 = cast<FunctionProtoType>(T2);
@@ -729,11 +751,31 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
break;
- case Type::Auto:
- if (!IsStructurallyEquivalent(Context, cast<AutoType>(T1)->getDeducedType(),
- cast<AutoType>(T2)->getDeducedType()))
+ case Type::Auto: {
+ auto *Auto1 = cast<AutoType>(T1);
+ auto *Auto2 = cast<AutoType>(T2);
+ if (!IsStructurallyEquivalent(Context, Auto1->getDeducedType(),
+ Auto2->getDeducedType()))
return false;
+ if (Auto1->isConstrained() != Auto2->isConstrained())
+ return false;
+ if (Auto1->isConstrained()) {
+ if (Auto1->getTypeConstraintConcept() !=
+ Auto2->getTypeConstraintConcept())
+ return false;
+ ArrayRef<TemplateArgument> Auto1Args =
+ Auto1->getTypeConstraintArguments();
+ ArrayRef<TemplateArgument> Auto2Args =
+ Auto2->getTypeConstraintArguments();
+ if (Auto1Args.size() != Auto2Args.size())
+ return false;
+ for (unsigned I = 0, N = Auto1Args.size(); I != N; ++I) {
+ if (!IsStructurallyEquivalent(Context, Auto1Args[I], Auto2Args[I]))
+ return false;
+ }
+ }
break;
+ }
case Type::DeducedTemplateSpecialization: {
const auto *DT1 = cast<DeducedTemplateSpecializationType>(T1);
@@ -935,6 +977,24 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
cast<PipeType>(T2)->getElementType()))
return false;
break;
+ case Type::ExtInt: {
+ const auto *Int1 = cast<ExtIntType>(T1);
+ const auto *Int2 = cast<ExtIntType>(T2);
+
+ if (Int1->isUnsigned() != Int2->isUnsigned() ||
+ Int1->getNumBits() != Int2->getNumBits())
+ return false;
+ break;
+ }
+ case Type::DependentExtInt: {
+ const auto *Int1 = cast<DependentExtIntType>(T1);
+ const auto *Int2 = cast<DependentExtIntType>(T2);
+
+ if (Int1->isUnsigned() != Int2->isUnsigned() ||
+ !IsStructurallyEquivalent(Context, Int1->getNumBitsExpr(),
+ Int2->getNumBitsExpr()))
+ return false;
+ }
} // end switch
return true;
diff --git a/clang/lib/AST/ASTTypeTraits.cpp b/clang/lib/AST/ASTTypeTraits.cpp
index a5570c329ae8..34fc587694be 100644
--- a/clang/lib/AST/ASTTypeTraits.cpp
+++ b/clang/lib/AST/ASTTypeTraits.cpp
@@ -18,8 +18,7 @@
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/OpenMPClause.h"
-namespace clang {
-namespace ast_type_traits {
+using namespace clang;
const ASTNodeKind::KindInfo ASTNodeKind::AllKindInfo[] = {
{ NKI_None, "<None>" },
@@ -28,6 +27,7 @@ const ASTNodeKind::KindInfo ASTNodeKind::AllKindInfo[] = {
{ NKI_None, "NestedNameSpecifierLoc" },
{ NKI_None, "QualType" },
{ NKI_None, "TypeLoc" },
+ { NKI_None, "CXXBaseSpecifier" },
{ NKI_None, "CXXCtorInitializer" },
{ NKI_None, "NestedNameSpecifier" },
{ NKI_None, "Decl" },
@@ -40,8 +40,8 @@ const ASTNodeKind::KindInfo ASTNodeKind::AllKindInfo[] = {
#define TYPE(DERIVED, BASE) { NKI_##BASE, #DERIVED "Type" },
#include "clang/AST/TypeNodes.inc"
{ NKI_None, "OMPClause" },
-#define OPENMP_CLAUSE(TextualSpelling, Class) {NKI_OMPClause, #Class},
-#include "clang/Basic/OpenMPKinds.def"
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) {NKI_OMPClause, #Class},
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
};
bool ASTNodeKind::isBaseOf(ASTNodeKind Other, unsigned *Distance) const {
@@ -112,15 +112,15 @@ ASTNodeKind ASTNodeKind::getFromNode(const Type &T) {
ASTNodeKind ASTNodeKind::getFromNode(const OMPClause &C) {
switch (C.getClauseKind()) {
-#define OPENMP_CLAUSE(Name, Class) \
- case OMPC_##Name: return ASTNodeKind(NKI_##Class);
-#include "clang/Basic/OpenMPKinds.def"
- case OMPC_threadprivate:
- case OMPC_uniform:
- case OMPC_device_type:
- case OMPC_match:
- case OMPC_unknown:
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
+ case llvm::omp::Clause::Enum: \
+ return ASTNodeKind(NKI_##Class);
+#define OMP_CLAUSE_NO_CLASS(Enum, Str) \
+ case llvm::omp::Clause::Enum: \
llvm_unreachable("unexpected OpenMP clause kind");
+ default:
+ break;
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
}
llvm_unreachable("invalid stmt kind");
}
@@ -152,13 +152,14 @@ void DynTypedNode::print(llvm::raw_ostream &OS,
OS << "Unable to print values of type " << NodeKind.asStringRef() << "\n";
}
-void DynTypedNode::dump(llvm::raw_ostream &OS, SourceManager &SM) const {
+void DynTypedNode::dump(llvm::raw_ostream &OS,
+ const ASTContext &Context) const {
if (const Decl *D = get<Decl>())
D->dump(OS);
else if (const Stmt *S = get<Stmt>())
- S->dump(OS, SM);
+ S->dump(OS, Context);
else if (const Type *T = get<Type>())
- T->dump(OS);
+ T->dump(OS, Context);
else
OS << "Unable to dump values of type " << NodeKind.asStringRef() << "\n";
}
@@ -178,6 +179,3 @@ SourceRange DynTypedNode::getSourceRange() const {
return SourceRange(C->getBeginLoc(), C->getEndLoc());
return SourceRange();
}
-
-} // end namespace ast_type_traits
-} // end namespace clang
diff --git a/clang/lib/AST/AttrImpl.cpp b/clang/lib/AST/AttrImpl.cpp
index 0ef925ec1c90..7818fbb1918b 100644
--- a/clang/lib/AST/AttrImpl.cpp
+++ b/clang/lib/AST/AttrImpl.cpp
@@ -16,4 +16,155 @@
#include "clang/AST/Type.h"
using namespace clang;
+void LoopHintAttr::printPrettyPragma(raw_ostream &OS,
+ const PrintingPolicy &Policy) const {
+ unsigned SpellingIndex = getAttributeSpellingListIndex();
+ // For "#pragma unroll" and "#pragma nounroll" the string "unroll" or
+ // "nounroll" is already emitted as the pragma name.
+ if (SpellingIndex == Pragma_nounroll ||
+ SpellingIndex == Pragma_nounroll_and_jam)
+ return;
+ else if (SpellingIndex == Pragma_unroll ||
+ SpellingIndex == Pragma_unroll_and_jam) {
+ OS << ' ' << getValueString(Policy);
+ return;
+ }
+
+ assert(SpellingIndex == Pragma_clang_loop && "Unexpected spelling");
+ OS << ' ' << getOptionName(option) << getValueString(Policy);
+}
+
+// Return a string containing the loop hint argument including the
+// enclosing parentheses.
+std::string LoopHintAttr::getValueString(const PrintingPolicy &Policy) const {
+ std::string ValueName;
+ llvm::raw_string_ostream OS(ValueName);
+ OS << "(";
+ if (state == Numeric)
+ value->printPretty(OS, nullptr, Policy);
+ else if (state == Enable)
+ OS << "enable";
+ else if (state == Full)
+ OS << "full";
+ else if (state == AssumeSafety)
+ OS << "assume_safety";
+ else
+ OS << "disable";
+ OS << ")";
+ return OS.str();
+}
+
+// Return a string suitable for identifying this attribute in diagnostics.
+std::string
+LoopHintAttr::getDiagnosticName(const PrintingPolicy &Policy) const {
+ unsigned SpellingIndex = getAttributeSpellingListIndex();
+ if (SpellingIndex == Pragma_nounroll)
+ return "#pragma nounroll";
+ else if (SpellingIndex == Pragma_unroll)
+ return "#pragma unroll" +
+ (option == UnrollCount ? getValueString(Policy) : "");
+ else if (SpellingIndex == Pragma_nounroll_and_jam)
+ return "#pragma nounroll_and_jam";
+ else if (SpellingIndex == Pragma_unroll_and_jam)
+ return "#pragma unroll_and_jam" +
+ (option == UnrollAndJamCount ? getValueString(Policy) : "");
+
+ assert(SpellingIndex == Pragma_clang_loop && "Unexpected spelling");
+ return getOptionName(option) + getValueString(Policy);
+}
+
+void OMPDeclareSimdDeclAttr::printPrettyPragma(
+ raw_ostream &OS, const PrintingPolicy &Policy) const {
+ if (getBranchState() != BS_Undefined)
+ OS << ' ' << ConvertBranchStateTyToStr(getBranchState());
+ if (auto *E = getSimdlen()) {
+ OS << " simdlen(";
+ E->printPretty(OS, nullptr, Policy);
+ OS << ")";
+ }
+ if (uniforms_size() > 0) {
+ OS << " uniform";
+ StringRef Sep = "(";
+ for (auto *E : uniforms()) {
+ OS << Sep;
+ E->printPretty(OS, nullptr, Policy);
+ Sep = ", ";
+ }
+ OS << ")";
+ }
+ alignments_iterator NI = alignments_begin();
+ for (auto *E : aligneds()) {
+ OS << " aligned(";
+ E->printPretty(OS, nullptr, Policy);
+ if (*NI) {
+ OS << ": ";
+ (*NI)->printPretty(OS, nullptr, Policy);
+ }
+ OS << ")";
+ ++NI;
+ }
+ steps_iterator I = steps_begin();
+ modifiers_iterator MI = modifiers_begin();
+ for (auto *E : linears()) {
+ OS << " linear(";
+ if (*MI != OMPC_LINEAR_unknown)
+ OS << getOpenMPSimpleClauseTypeName(llvm::omp::Clause::OMPC_linear, *MI)
+ << "(";
+ E->printPretty(OS, nullptr, Policy);
+ if (*MI != OMPC_LINEAR_unknown)
+ OS << ")";
+ if (*I) {
+ OS << ": ";
+ (*I)->printPretty(OS, nullptr, Policy);
+ }
+ OS << ")";
+ ++I;
+ ++MI;
+ }
+}
+
+void OMPDeclareTargetDeclAttr::printPrettyPragma(
+ raw_ostream &OS, const PrintingPolicy &Policy) const {
+ // Use fake syntax because it is for testing and debugging purpose only.
+ if (getDevType() != DT_Any)
+ OS << " device_type(" << ConvertDevTypeTyToStr(getDevType()) << ")";
+ if (getMapType() != MT_To)
+ OS << ' ' << ConvertMapTypeTyToStr(getMapType());
+}
+
+llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy>
+OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(const ValueDecl *VD) {
+ if (!VD->hasAttrs())
+ return llvm::None;
+ if (const auto *Attr = VD->getAttr<OMPDeclareTargetDeclAttr>())
+ return Attr->getMapType();
+
+ return llvm::None;
+}
+
+llvm::Optional<OMPDeclareTargetDeclAttr::DevTypeTy>
+OMPDeclareTargetDeclAttr::getDeviceType(const ValueDecl *VD) {
+ if (!VD->hasAttrs())
+ return llvm::None;
+ if (const auto *Attr = VD->getAttr<OMPDeclareTargetDeclAttr>())
+ return Attr->getDevType();
+
+ return llvm::None;
+}
+
+namespace clang {
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo &TI);
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo *TI);
+}
+
+void OMPDeclareVariantAttr::printPrettyPragma(
+ raw_ostream &OS, const PrintingPolicy &Policy) const {
+ if (const Expr *E = getVariantFuncRef()) {
+ OS << "(";
+ E->printPretty(OS, nullptr, Policy);
+ OS << ")";
+ }
+ OS << " match(" << traitInfos << ")";
+}
+
#include "clang/AST/AttrImpl.inc"
diff --git a/clang/lib/AST/CXXInheritance.cpp b/clang/lib/AST/CXXInheritance.cpp
index a3a3794b2edd..8af97119e3cf 100644
--- a/clang/lib/AST/CXXInheritance.cpp
+++ b/clang/lib/AST/CXXInheritance.cpp
@@ -147,37 +147,27 @@ CXXRecordDecl::isCurrentInstantiation(const DeclContext *CurContext) const {
return false;
}
-bool CXXRecordDecl::forallBases(ForallBasesCallback BaseMatches,
- bool AllowShortCircuit) const {
+bool CXXRecordDecl::forallBases(ForallBasesCallback BaseMatches) const {
SmallVector<const CXXRecordDecl*, 8> Queue;
const CXXRecordDecl *Record = this;
- bool AllMatches = true;
while (true) {
for (const auto &I : Record->bases()) {
const RecordType *Ty = I.getType()->getAs<RecordType>();
- if (!Ty) {
- if (AllowShortCircuit) return false;
- AllMatches = false;
- continue;
- }
+ if (!Ty)
+ return false;
CXXRecordDecl *Base =
cast_or_null<CXXRecordDecl>(Ty->getDecl()->getDefinition());
if (!Base ||
(Base->isDependentContext() &&
!Base->isCurrentInstantiation(Record))) {
- if (AllowShortCircuit) return false;
- AllMatches = false;
- continue;
+ return false;
}
Queue.push_back(Base);
- if (!BaseMatches(Base)) {
- if (AllowShortCircuit) return false;
- AllMatches = false;
- continue;
- }
+ if (!BaseMatches(Base))
+ return false;
}
if (Queue.empty())
@@ -185,7 +175,7 @@ bool CXXRecordDecl::forallBases(ForallBasesCallback BaseMatches,
Record = Queue.pop_back_val(); // not actually a queue.
}
- return AllMatches;
+ return true;
}
bool CXXBasePaths::lookupInBases(ASTContext &Context,
@@ -758,6 +748,8 @@ CXXRecordDecl::getFinalOverriders(CXXFinalOverriderMap &FinalOverriders) const {
return false;
};
+ // FIXME: IsHidden reads from Overriding from the middle of a remove_if
+ // over the same sequence! Is this guaranteed to work?
Overriding.erase(
std::remove_if(Overriding.begin(), Overriding.end(), IsHidden),
Overriding.end());
diff --git a/clang/lib/AST/CommentCommandTraits.cpp b/clang/lib/AST/CommentCommandTraits.cpp
index b306fcbb154f..bdc0dd47fb7d 100644
--- a/clang/lib/AST/CommentCommandTraits.cpp
+++ b/clang/lib/AST/CommentCommandTraits.cpp
@@ -8,6 +8,7 @@
#include "clang/AST/CommentCommandTraits.h"
#include "llvm/ADT/STLExtras.h"
+#include <cassert>
namespace clang {
namespace comments {
diff --git a/clang/lib/AST/CommentSema.cpp b/clang/lib/AST/CommentSema.cpp
index 53c1832d1dd2..7642e73fa171 100644
--- a/clang/lib/AST/CommentSema.cpp
+++ b/clang/lib/AST/CommentSema.cpp
@@ -12,6 +12,7 @@
#include "clang/AST/CommentDiagnostic.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/SmallString.h"
@@ -134,7 +135,9 @@ void Sema::checkContainerDeclVerbatimLine(const BlockCommandComment *Comment) {
unsigned DiagSelect;
switch (Comment->getCommandID()) {
case CommandTraits::KCI_class:
- DiagSelect = (!isClassOrStructDecl() && !isClassTemplateDecl()) ? 1 : 0;
+ DiagSelect =
+ (!isClassOrStructOrTagTypedefDecl() && !isClassTemplateDecl()) ? 1
+ : 0;
// Allow @class command on @interface declarations.
// FIXME. Currently, \class and @class are indistinguishable. So,
// \class is also allowed on an @interface declaration
@@ -148,7 +151,7 @@ void Sema::checkContainerDeclVerbatimLine(const BlockCommandComment *Comment) {
DiagSelect = !isObjCProtocolDecl() ? 3 : 0;
break;
case CommandTraits::KCI_struct:
- DiagSelect = !isClassOrStructDecl() ? 4 : 0;
+ DiagSelect = !isClassOrStructOrTagTypedefDecl() ? 4 : 0;
break;
case CommandTraits::KCI_union:
DiagSelect = !isUnionDecl() ? 5 : 0;
@@ -688,7 +691,7 @@ void Sema::checkDeprecatedCommand(const BlockCommandComment *Command) {
FD->doesThisDeclarationHaveABody())
return;
- const LangOptions &LO = FD->getASTContext().getLangOpts();
+ const LangOptions &LO = FD->getLangOpts();
const bool DoubleSquareBracket = LO.CPlusPlus14 || LO.C2x;
StringRef AttributeSpelling =
DoubleSquareBracket ? "[[deprecated]]" : "__attribute__((deprecated))";
@@ -935,15 +938,50 @@ bool Sema::isUnionDecl() {
return RD->isUnion();
return false;
}
+static bool isClassOrStructDeclImpl(const Decl *D) {
+ if (auto *record = dyn_cast_or_null<RecordDecl>(D))
+ return !record->isUnion();
+
+ return false;
+}
bool Sema::isClassOrStructDecl() {
if (!ThisDeclInfo)
return false;
if (!ThisDeclInfo->IsFilled)
inspectThisDecl();
- return ThisDeclInfo->CurrentDecl &&
- isa<RecordDecl>(ThisDeclInfo->CurrentDecl) &&
- !isUnionDecl();
+
+ if (!ThisDeclInfo->CurrentDecl)
+ return false;
+
+ return isClassOrStructDeclImpl(ThisDeclInfo->CurrentDecl);
+}
+
+bool Sema::isClassOrStructOrTagTypedefDecl() {
+ if (!ThisDeclInfo)
+ return false;
+ if (!ThisDeclInfo->IsFilled)
+ inspectThisDecl();
+
+ if (!ThisDeclInfo->CurrentDecl)
+ return false;
+
+ if (isClassOrStructDeclImpl(ThisDeclInfo->CurrentDecl))
+ return true;
+
+ if (auto *ThisTypedefDecl = dyn_cast<TypedefDecl>(ThisDeclInfo->CurrentDecl)) {
+ auto UnderlyingType = ThisTypedefDecl->getUnderlyingType();
+ if (auto ThisElaboratedType = dyn_cast<ElaboratedType>(UnderlyingType)) {
+ auto DesugaredType = ThisElaboratedType->desugar();
+ if (auto *DesugaredTypePtr = DesugaredType.getTypePtrOrNull()) {
+ if (auto *ThisRecordType = dyn_cast<RecordType>(DesugaredTypePtr)) {
+ return isClassOrStructDeclImpl(ThisRecordType->getAsRecordDecl());
+ }
+ }
+ }
+ }
+
+ return false;
}
bool Sema::isClassTemplateDecl() {
diff --git a/clang/lib/AST/ComparisonCategories.cpp b/clang/lib/AST/ComparisonCategories.cpp
index 07673230357f..6b6826c02a12 100644
--- a/clang/lib/AST/ComparisonCategories.cpp
+++ b/clang/lib/AST/ComparisonCategories.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ComparisonCategories.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Type.h"
diff --git a/clang/lib/AST/ComputeDependence.cpp b/clang/lib/AST/ComputeDependence.cpp
new file mode 100644
index 000000000000..2333993dbeb4
--- /dev/null
+++ b/clang/lib/AST/ComputeDependence.cpp
@@ -0,0 +1,796 @@
+//===- ComputeDependence.cpp ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ComputeDependence.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/DependenceFlags.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprConcepts.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprOpenMP.h"
+#include "clang/Basic/ExceptionSpecificationType.h"
+#include "llvm/ADT/ArrayRef.h"
+
+using namespace clang;
+
+ExprDependence clang::computeDependence(FullExpr *E) {
+ return E->getSubExpr()->getDependence();
+}
+
+ExprDependence clang::computeDependence(OpaqueValueExpr *E) {
+ auto D = toExprDependence(E->getType()->getDependence());
+ if (auto *S = E->getSourceExpr())
+ D |= S->getDependence();
+ assert(!(D & ExprDependence::UnexpandedPack));
+ return D;
+}
+
+ExprDependence clang::computeDependence(ParenExpr *E) {
+ return E->getSubExpr()->getDependence();
+}
+
+ExprDependence clang::computeDependence(UnaryOperator *E) {
+ return toExprDependence(E->getType()->getDependence()) |
+ E->getSubExpr()->getDependence();
+}
+
// Dependence of sizeof/alignof/vec_step and friends.
ExprDependence clang::computeDependence(UnaryExprOrTypeTraitExpr *E) {
  // Never type-dependent (C++ [temp.dep.expr]p3).
  // Value-dependent if the argument is type-dependent.
  if (E->isArgumentType())
    return turnTypeToValueDependence(
        toExprDependence(E->getArgumentType()->getDependence()));

  auto ArgDeps = E->getArgumentExpr()->getDependence();
  auto Deps = ArgDeps & ~ExprDependence::TypeValue;
  // Value-dependent if the argument is type-dependent.
  if (ArgDeps & ExprDependence::Type)
    Deps |= ExprDependence::Value;
  // Check to see if we are in the situation where alignof(decl) should be
  // dependent because decl's alignment is dependent.
  auto ExprKind = E->getKind();
  if (ExprKind != UETT_AlignOf && ExprKind != UETT_PreferredAlignOf)
    return Deps;
  // Already maximally dependent for this path: nothing more to add.
  if ((Deps & ExprDependence::Value) && (Deps & ExprDependence::Instantiation))
    return Deps;

  // Dig out the declaration whose alignment attributes we must inspect:
  // alignof applied (through parens) to a DeclRefExpr or MemberExpr.
  auto *NoParens = E->getArgumentExpr()->IgnoreParens();
  const ValueDecl *D = nullptr;
  if (const auto *DRE = dyn_cast<DeclRefExpr>(NoParens))
    D = DRE->getDecl();
  else if (const auto *ME = dyn_cast<MemberExpr>(NoParens))
    D = ME->getMemberDecl();
  if (!D)
    return Deps;
  // A dependent (or erroneous) aligned attribute on the referenced decl
  // makes the alignof expression itself dependent (or erroneous).
  for (const auto *I : D->specific_attrs<AlignedAttr>()) {
    if (I->isAlignmentErrorDependent())
      Deps |= ExprDependence::Error;
    if (I->isAlignmentDependent())
      Deps |= ExprDependence::ValueInstantiation;
  }
  return Deps;
}
+
+ExprDependence clang::computeDependence(ArraySubscriptExpr *E) {
+ return E->getLHS()->getDependence() | E->getRHS()->getDependence();
+}
+
+ExprDependence clang::computeDependence(MatrixSubscriptExpr *E) {
+ return E->getBase()->getDependence() | E->getRowIdx()->getDependence() |
+ (E->getColumnIdx() ? E->getColumnIdx()->getDependence()
+ : ExprDependence::None);
+}
+
+ExprDependence clang::computeDependence(CompoundLiteralExpr *E) {
+ return toExprDependence(E->getTypeSourceInfo()->getType()->getDependence()) |
+ turnTypeToValueDependence(E->getInitializer()->getDependence());
+}
+
+ExprDependence clang::computeDependence(CastExpr *E) {
+ // Cast expressions are type-dependent if the type is
+ // dependent (C++ [temp.dep.expr]p3).
+ // Cast expressions are value-dependent if the type is
+ // dependent or if the subexpression is value-dependent.
+ auto D = toExprDependence(E->getType()->getDependence());
+ if (E->getStmtClass() == Stmt::ImplicitCastExprClass) {
+ // An implicit cast expression doesn't (lexically) contain an
+ // unexpanded pack, even if its target type does.
+ D &= ~ExprDependence::UnexpandedPack;
+ }
+ if (auto *S = E->getSubExpr())
+ D |= S->getDependence() & ~ExprDependence::Type;
+ return D;
+}
+
+ExprDependence clang::computeDependence(BinaryOperator *E) {
+ return E->getLHS()->getDependence() | E->getRHS()->getDependence();
+}
+
+ExprDependence clang::computeDependence(ConditionalOperator *E) {
+ // The type of the conditional operator depends on the type of the conditional
+ // to support the GCC vector conditional extension. Additionally,
+ // [temp.dep.expr] does specify state that this should be dependent on ALL sub
+ // expressions.
+ return E->getCond()->getDependence() | E->getLHS()->getDependence() |
+ E->getRHS()->getDependence();
+}
+
+ExprDependence clang::computeDependence(BinaryConditionalOperator *E) {
+ return E->getCommon()->getDependence() | E->getFalseExpr()->getDependence();
+}
+
+ExprDependence clang::computeDependence(StmtExpr *E, unsigned TemplateDepth) {
+ auto D = toExprDependence(E->getType()->getDependence());
+ // Propagate dependence of the result.
+ if (const auto *CompoundExprResult =
+ dyn_cast_or_null<ValueStmt>(E->getSubStmt()->getStmtExprResult()))
+ if (const Expr *ResultExpr = CompoundExprResult->getExprStmt())
+ D |= ResultExpr->getDependence();
+ // Note: we treat a statement-expression in a dependent context as always
+ // being value- and instantiation-dependent. This matches the behavior of
+ // lambda-expressions and GCC.
+ if (TemplateDepth)
+ D |= ExprDependence::ValueInstantiation;
+ // A param pack cannot be expanded over stmtexpr boundaries.
+ return D & ~ExprDependence::UnexpandedPack;
+}
+
+ExprDependence clang::computeDependence(ConvertVectorExpr *E) {
+ auto D = toExprDependence(E->getType()->getDependence()) |
+ E->getSrcExpr()->getDependence();
+ if (!E->getType()->isDependentType())
+ D &= ~ExprDependence::Type;
+ return D;
+}
+
+ExprDependence clang::computeDependence(ChooseExpr *E) {
+ if (E->isConditionDependent())
+ return ExprDependence::TypeValueInstantiation |
+ E->getCond()->getDependence() | E->getLHS()->getDependence() |
+ E->getRHS()->getDependence();
+
+ auto Cond = E->getCond()->getDependence();
+ auto Active = E->getLHS()->getDependence();
+ auto Inactive = E->getRHS()->getDependence();
+ if (!E->isConditionTrue())
+ std::swap(Active, Inactive);
+ // Take type- and value- dependency from the active branch. Propagate all
+ // other flags from all branches.
+ return (Active & ExprDependence::TypeValue) |
+ ((Cond | Active | Inactive) & ~ExprDependence::TypeValue);
+}
+
+ExprDependence clang::computeDependence(ParenListExpr *P) {
+ auto D = ExprDependence::None;
+ for (auto *E : P->exprs())
+ D |= E->getDependence();
+ return D;
+}
+
+ExprDependence clang::computeDependence(VAArgExpr *E) {
+ auto D =
+ toExprDependence(E->getWrittenTypeInfo()->getType()->getDependence()) |
+ (E->getSubExpr()->getDependence() & ~ExprDependence::Type);
+ return D & ~ExprDependence::Value;
+}
+
+ExprDependence clang::computeDependence(NoInitExpr *E) {
+ return toExprDependence(E->getType()->getDependence()) &
+ (ExprDependence::Instantiation | ExprDependence::Error);
+}
+
+ExprDependence clang::computeDependence(ArrayInitLoopExpr *E) {
+ auto D = E->getCommonExpr()->getDependence() |
+ E->getSubExpr()->getDependence() | ExprDependence::Instantiation;
+ if (!E->getType()->isInstantiationDependentType())
+ D &= ~ExprDependence::Instantiation;
+ return turnTypeToValueDependence(D);
+}
+
+ExprDependence clang::computeDependence(ImplicitValueInitExpr *E) {
+ return toExprDependence(E->getType()->getDependence()) &
+ ExprDependence::Instantiation;
+}
+
+ExprDependence clang::computeDependence(ExtVectorElementExpr *E) {
+ return E->getBase()->getDependence();
+}
+
+ExprDependence clang::computeDependence(BlockExpr *E) {
+ auto D = toExprDependence(E->getType()->getDependence());
+ if (E->getBlockDecl()->isDependentContext())
+ D |= ExprDependence::Instantiation;
+ return D & ~ExprDependence::UnexpandedPack;
+}
+
+ExprDependence clang::computeDependence(AsTypeExpr *E) {
+ auto D = toExprDependence(E->getType()->getDependence()) |
+ E->getSrcExpr()->getDependence();
+ if (!E->getType()->isDependentType())
+ D &= ~ExprDependence::Type;
+ return D;
+}
+
+ExprDependence clang::computeDependence(CXXRewrittenBinaryOperator *E) {
+ return E->getSemanticForm()->getDependence();
+}
+
+ExprDependence clang::computeDependence(CXXStdInitializerListExpr *E) {
+ auto D = turnTypeToValueDependence(E->getSubExpr()->getDependence());
+ D |= toExprDependence(E->getType()->getDependence()) &
+ (ExprDependence::Type | ExprDependence::Error);
+ return D;
+}
+
+ExprDependence clang::computeDependence(CXXTypeidExpr *E) {
+ auto D = ExprDependence::None;
+ if (E->isTypeOperand())
+ D = toExprDependence(
+ E->getTypeOperandSourceInfo()->getType()->getDependence());
+ else
+ D = turnTypeToValueDependence(E->getExprOperand()->getDependence());
+ // typeid is never type-dependent (C++ [temp.dep.expr]p4)
+ return D & ~ExprDependence::Type;
+}
+
+ExprDependence clang::computeDependence(MSPropertyRefExpr *E) {
+ return E->getBaseExpr()->getDependence() & ~ExprDependence::Type;
+}
+
+ExprDependence clang::computeDependence(MSPropertySubscriptExpr *E) {
+ return E->getIdx()->getDependence();
+}
+
+ExprDependence clang::computeDependence(CXXUuidofExpr *E) {
+ if (E->isTypeOperand())
+ return turnTypeToValueDependence(toExprDependence(
+ E->getTypeOperandSourceInfo()->getType()->getDependence()));
+
+ return turnTypeToValueDependence(E->getExprOperand()->getDependence());
+}
+
+ExprDependence clang::computeDependence(CXXThisExpr *E) {
+ // 'this' is type-dependent if the class type of the enclosing
+ // member function is dependent (C++ [temp.dep.expr]p2)
+ auto D = toExprDependence(E->getType()->getDependence());
+ assert(!(D & ExprDependence::UnexpandedPack));
+ return D;
+}
+
+ExprDependence clang::computeDependence(CXXThrowExpr *E) {
+ auto *Op = E->getSubExpr();
+ if (!Op)
+ return ExprDependence::None;
+ return Op->getDependence() & ~ExprDependence::TypeValue;
+}
+
+ExprDependence clang::computeDependence(CXXBindTemporaryExpr *E) {
+ return E->getSubExpr()->getDependence();
+}
+
+ExprDependence clang::computeDependence(CXXScalarValueInitExpr *E) {
+ return toExprDependence(E->getType()->getDependence()) &
+ ~ExprDependence::TypeValue;
+}
+
+ExprDependence clang::computeDependence(CXXDeleteExpr *E) {
+ return turnTypeToValueDependence(E->getArgument()->getDependence());
+}
+
+ExprDependence clang::computeDependence(ArrayTypeTraitExpr *E) {
+ auto D = toExprDependence(E->getQueriedType()->getDependence());
+ if (auto *Dim = E->getDimensionExpression())
+ D |= Dim->getDependence();
+ return turnTypeToValueDependence(D);
+}
+
+ExprDependence clang::computeDependence(ExpressionTraitExpr *E) {
+ // Never type-dependent.
+ auto D = E->getQueriedExpression()->getDependence() & ~ExprDependence::Type;
+ // Value-dependent if the argument is type-dependent.
+ if (E->getQueriedExpression()->isTypeDependent())
+ D |= ExprDependence::Value;
+ return D;
+}
+
+ExprDependence clang::computeDependence(CXXNoexceptExpr *E, CanThrowResult CT) {
+ auto D = E->getOperand()->getDependence() & ~ExprDependence::TypeValue;
+ if (CT == CT_Dependent)
+ D |= ExprDependence::ValueInstantiation;
+ return D;
+}
+
+ExprDependence clang::computeDependence(PackExpansionExpr *E) {
+ return (E->getPattern()->getDependence() & ~ExprDependence::UnexpandedPack) |
+ ExprDependence::TypeValueInstantiation;
+}
+
+ExprDependence clang::computeDependence(SubstNonTypeTemplateParmExpr *E) {
+ return E->getReplacement()->getDependence();
+}
+
+ExprDependence clang::computeDependence(CoroutineSuspendExpr *E) {
+ if (auto *Resume = E->getResumeExpr())
+ return (Resume->getDependence() &
+ (ExprDependence::TypeValue | ExprDependence::Error)) |
+ (E->getCommonExpr()->getDependence() & ~ExprDependence::TypeValue);
+ return E->getCommonExpr()->getDependence() |
+ ExprDependence::TypeValueInstantiation;
+}
+
+ExprDependence clang::computeDependence(DependentCoawaitExpr *E) {
+ return E->getOperand()->getDependence() |
+ ExprDependence::TypeValueInstantiation;
+}
+
+ExprDependence clang::computeDependence(ObjCBoxedExpr *E) {
+ return E->getSubExpr()->getDependence();
+}
+
+ExprDependence clang::computeDependence(ObjCEncodeExpr *E) {
+ return toExprDependence(E->getEncodedType()->getDependence());
+}
+
+ExprDependence clang::computeDependence(ObjCIvarRefExpr *E) {
+ return turnTypeToValueDependence(E->getBase()->getDependence());
+}
+
+ExprDependence clang::computeDependence(ObjCPropertyRefExpr *E) {
+ if (E->isObjectReceiver())
+ return E->getBase()->getDependence() & ~ExprDependence::Type;
+ if (E->isSuperReceiver())
+ return toExprDependence(E->getSuperReceiverType()->getDependence()) &
+ ~ExprDependence::TypeValue;
+ assert(E->isClassReceiver());
+ return ExprDependence::None;
+}
+
+ExprDependence clang::computeDependence(ObjCSubscriptRefExpr *E) {
+ return E->getBaseExpr()->getDependence() | E->getKeyExpr()->getDependence();
+}
+
+ExprDependence clang::computeDependence(ObjCIsaExpr *E) {
+ return E->getBase()->getDependence() & ~ExprDependence::Type &
+ ~ExprDependence::UnexpandedPack;
+}
+
+ExprDependence clang::computeDependence(ObjCIndirectCopyRestoreExpr *E) {
+ return E->getSubExpr()->getDependence();
+}
+
+ExprDependence clang::computeDependence(OMPArraySectionExpr *E) {
+ auto D = E->getBase()->getDependence();
+ if (auto *LB = E->getLowerBound())
+ D |= LB->getDependence();
+ if (auto *Len = E->getLength())
+ D |= Len->getDependence();
+ return D;
+}
+
+ExprDependence clang::computeDependence(OMPArrayShapingExpr *E) {
+ auto D = E->getBase()->getDependence() |
+ toExprDependence(E->getType()->getDependence());
+ for (Expr *Dim: E->getDimensions())
+ if (Dim)
+ D |= Dim->getDependence();
+ return D;
+}
+
+ExprDependence clang::computeDependence(OMPIteratorExpr *E) {
+ auto D = toExprDependence(E->getType()->getDependence());
+ for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
+ if (auto *VD = cast_or_null<ValueDecl>(E->getIteratorDecl(I)))
+ D |= toExprDependence(VD->getType()->getDependence());
+ OMPIteratorExpr::IteratorRange IR = E->getIteratorRange(I);
+ if (Expr *BE = IR.Begin)
+ D |= BE->getDependence();
+ if (Expr *EE = IR.End)
+ D |= EE->getDependence();
+ if (Expr *SE = IR.Step)
+ D |= SE->getDependence();
+ }
+ return D;
+}
+
/// Compute the type-, value-, and instantiation-dependence of a
/// declaration reference
/// based on the declaration being referenced.
ExprDependence clang::computeDependence(DeclRefExpr *E, const ASTContext &Ctx) {
  auto Deps = ExprDependence::None;

  // The qualifier contributes everything except its own "dependent" bit:
  // the reference has been resolved, so a dependent qualifier alone does
  // not make this expression dependent.
  if (auto *NNS = E->getQualifier())
    Deps |= toExprDependence(NNS->getDependence() &
                             ~NestedNameSpecifierDependence::Dependent);

  // Explicit template arguments contribute their dependence bits.
  if (auto *FirstArg = E->getTemplateArgs()) {
    unsigned NumArgs = E->getNumTemplateArgs();
    for (auto *Arg = FirstArg, *End = FirstArg + NumArgs; Arg < End; ++Arg)
      Deps |= toExprDependence(Arg->getArgument().getDependence());
  }

  auto *Decl = E->getDecl();
  auto Type = E->getType();

  if (Decl->isParameterPack())
    Deps |= ExprDependence::UnexpandedPack;
  // Only error bits survive from the type at this point; the remaining
  // type/value bits are derived from the rules cited below.
  Deps |= toExprDependence(Type->getDependence()) & ExprDependence::Error;

  // (TD) C++ [temp.dep.expr]p3:
  //   An id-expression is type-dependent if it contains:
  //
  // and
  //
  // (VD) C++ [temp.dep.constexpr]p2:
  //  An identifier is value-dependent if it is:

  //  (TD)  - an identifier that was declared with dependent type
  //  (VD)  - a name declared with a dependent type,
  if (Type->isDependentType())
    return Deps | ExprDependence::TypeValueInstantiation;
  else if (Type->isInstantiationDependentType())
    Deps |= ExprDependence::Instantiation;

  //  (TD)  - a conversion-function-id that specifies a dependent type
  if (Decl->getDeclName().getNameKind() ==
      DeclarationName::CXXConversionFunctionName) {
    QualType T = Decl->getDeclName().getCXXNameType();
    if (T->isDependentType())
      return Deps | ExprDependence::TypeValueInstantiation;

    if (T->isInstantiationDependentType())
      Deps |= ExprDependence::Instantiation;
  }

  //  (VD)  - the name of a non-type template parameter,
  if (isa<NonTypeTemplateParmDecl>(Decl))
    return Deps | ExprDependence::ValueInstantiation;

  //  (VD) - a constant with integral or enumeration type and is
  //         initialized with an expression that is value-dependent.
  //  (VD) - a constant with literal type and is initialized with an
  //         expression that is value-dependent [C++11].
  //  (VD) - FIXME: Missing from the standard:
  //       -  an entity with reference type and is initialized with an
  //          expression that is value-dependent [C++11]
  if (VarDecl *Var = dyn_cast<VarDecl>(Decl)) {
    // In C++11 the check widens from integral/enum to any literal type.
    if ((Ctx.getLangOpts().CPlusPlus11
             ? Var->getType()->isLiteralType(Ctx)
             : Var->getType()->isIntegralOrEnumerationType()) &&
        (Var->getType().isConstQualified() ||
         Var->getType()->isReferenceType())) {
      if (const Expr *Init = Var->getAnyInitializer())
        if (Init->isValueDependent()) {
          Deps |= ExprDependence::ValueInstantiation;
        }
    }

    // (VD) - FIXME: Missing from the standard:
    //      -  a member function or a static data member of the current
    //         instantiation
    if (Var->isStaticDataMember() &&
        Var->getDeclContext()->isDependentContext()) {
      Deps |= ExprDependence::ValueInstantiation;
      // An incomplete-array static member's type is completed only at
      // instantiation time, so the reference stays type-dependent.
      TypeSourceInfo *TInfo = Var->getFirstDecl()->getTypeSourceInfo();
      if (TInfo->getType()->isIncompleteArrayType())
        Deps |= ExprDependence::Type;
    }

    return Deps;
  }

  // (VD) - FIXME: Missing from the standard:
  //      -  a member function or a static data member of the current
  //         instantiation
  if (isa<CXXMethodDecl>(Decl) && Decl->getDeclContext()->isDependentContext())
    Deps |= ExprDependence::ValueInstantiation;
  return Deps;
}
+
+ExprDependence clang::computeDependence(RecoveryExpr *E) {
+ // RecoveryExpr is
+ // - always value-dependent, and therefore instantiation dependent
+ // - contains errors (ExprDependence::Error), by definition
+ // - type-dependent if we don't know the type (fallback to an opaque
+ // dependent type), or the type is known and dependent, or it has
+ // type-dependent subexpressions.
+ auto D = toExprDependence(E->getType()->getDependence()) |
+ ExprDependence::ValueInstantiation | ExprDependence::Error;
+ // FIXME: remove the type-dependent bit from subexpressions, if the
+ // RecoveryExpr has a non-dependent type.
+ for (auto *S : E->subExpressions())
+ D |= S->getDependence();
+ return D;
+}
+
+ExprDependence clang::computeDependence(PredefinedExpr *E) {
+ return toExprDependence(E->getType()->getDependence()) &
+ ~ExprDependence::UnexpandedPack;
+}
+
+ExprDependence clang::computeDependence(CallExpr *E,
+ llvm::ArrayRef<Expr *> PreArgs) {
+ auto D = E->getCallee()->getDependence();
+ for (auto *A : llvm::makeArrayRef(E->getArgs(), E->getNumArgs())) {
+ if (A)
+ D |= A->getDependence();
+ }
+ for (auto *A : PreArgs)
+ D |= A->getDependence();
+ return D;
+}
+
+ExprDependence clang::computeDependence(OffsetOfExpr *E) {
+ auto D = turnTypeToValueDependence(
+ toExprDependence(E->getTypeSourceInfo()->getType()->getDependence()));
+ for (unsigned I = 0, N = E->getNumExpressions(); I < N; ++I)
+ D |= turnTypeToValueDependence(E->getIndexExpr(I)->getDependence());
+ return D;
+}
+
+ExprDependence clang::computeDependence(MemberExpr *E) {
+ auto *MemberDecl = E->getMemberDecl();
+ auto D = E->getBase()->getDependence();
+ if (FieldDecl *FD = dyn_cast<FieldDecl>(MemberDecl)) {
+ DeclContext *DC = MemberDecl->getDeclContext();
+ // dyn_cast_or_null is used to handle objC variables which do not
+ // have a declaration context.
+ CXXRecordDecl *RD = dyn_cast_or_null<CXXRecordDecl>(DC);
+ if (RD && RD->isDependentContext() && RD->isCurrentInstantiation(DC)) {
+ if (!E->getType()->isDependentType())
+ D &= ~ExprDependence::Type;
+ }
+
+ // Bitfield with value-dependent width is type-dependent.
+ if (FD && FD->isBitField() && FD->getBitWidth()->isValueDependent()) {
+ D |= ExprDependence::Type;
+ }
+ }
+ // FIXME: move remaining dependence computation from MemberExpr::Create()
+ return D;
+}
+
+ExprDependence clang::computeDependence(InitListExpr *E) {
+ auto D = ExprDependence::None;
+ for (auto *A : E->inits())
+ D |= A->getDependence();
+ return D;
+}
+
+ExprDependence clang::computeDependence(ShuffleVectorExpr *E) {
+ auto D = toExprDependence(E->getType()->getDependence());
+ for (auto *C : llvm::makeArrayRef(E->getSubExprs(), E->getNumSubExprs()))
+ D |= C->getDependence();
+ return D;
+}
+
+ExprDependence clang::computeDependence(GenericSelectionExpr *E,
+ bool ContainsUnexpandedPack) {
+ auto D = ContainsUnexpandedPack ? ExprDependence::UnexpandedPack
+ : ExprDependence::None;
+ for (auto *AE : E->getAssocExprs())
+ D |= AE->getDependence() & ExprDependence::Error;
+ D |= E->getControllingExpr()->getDependence() & ExprDependence::Error;
+
+ if (E->isResultDependent())
+ return D | ExprDependence::TypeValueInstantiation;
+ return D | (E->getResultExpr()->getDependence() &
+ ~ExprDependence::UnexpandedPack);
+}
+
+ExprDependence clang::computeDependence(DesignatedInitExpr *E) {
+ auto Deps = E->getInit()->getDependence();
+ for (auto D : E->designators()) {
+ auto DesignatorDeps = ExprDependence::None;
+ if (D.isArrayDesignator())
+ DesignatorDeps |= E->getArrayIndex(D)->getDependence();
+ else if (D.isArrayRangeDesignator())
+ DesignatorDeps |= E->getArrayRangeStart(D)->getDependence() |
+ E->getArrayRangeEnd(D)->getDependence();
+ Deps |= DesignatorDeps;
+ if (DesignatorDeps & ExprDependence::TypeValue)
+ Deps |= ExprDependence::TypeValueInstantiation;
+ }
+ return Deps;
+}
+
+ExprDependence clang::computeDependence(PseudoObjectExpr *O) {
+ auto D = O->getSyntacticForm()->getDependence();
+ for (auto *E : O->semantics())
+ D |= E->getDependence();
+ return D;
+}
+
+ExprDependence clang::computeDependence(AtomicExpr *A) {
+ auto D = ExprDependence::None;
+ for (auto *E : llvm::makeArrayRef(A->getSubExprs(), A->getNumSubExprs()))
+ D |= E->getDependence();
+ return D;
+}
+
+ExprDependence clang::computeDependence(CXXNewExpr *E) {
+ auto D = toExprDependence(E->getType()->getDependence());
+ auto Size = E->getArraySize();
+ if (Size.hasValue() && *Size)
+ D |= turnTypeToValueDependence((*Size)->getDependence());
+ if (auto *I = E->getInitializer())
+ D |= turnTypeToValueDependence(I->getDependence());
+ for (auto *A : E->placement_arguments())
+ D |= turnTypeToValueDependence(A->getDependence());
+ return D;
+}
+
+ExprDependence clang::computeDependence(CXXPseudoDestructorExpr *E) {
+ auto D = E->getBase()->getDependence();
+ if (!E->getDestroyedType().isNull())
+ D |= toExprDependence(E->getDestroyedType()->getDependence());
+ if (auto *ST = E->getScopeTypeInfo())
+ D |= turnTypeToValueDependence(
+ toExprDependence(ST->getType()->getDependence()));
+ if (auto *Q = E->getQualifier())
+ D |= toExprDependence(Q->getDependence() &
+ ~NestedNameSpecifierDependence::Dependent);
+ return D;
+}
+
+static inline ExprDependence getDependenceInExpr(DeclarationNameInfo Name) {
+ auto D = ExprDependence::None;
+ if (Name.isInstantiationDependent())
+ D |= ExprDependence::Instantiation;
+ if (Name.containsUnexpandedParameterPack())
+ D |= ExprDependence::UnexpandedPack;
+ return D;
+}
+
+ExprDependence
+clang::computeDependence(OverloadExpr *E, bool KnownDependent,
+ bool KnownInstantiationDependent,
+ bool KnownContainsUnexpandedParameterPack) {
+ auto Deps = ExprDependence::None;
+ if (KnownDependent)
+ Deps |= ExprDependence::TypeValue;
+ if (KnownInstantiationDependent)
+ Deps |= ExprDependence::Instantiation;
+ if (KnownContainsUnexpandedParameterPack)
+ Deps |= ExprDependence::UnexpandedPack;
+ Deps |= getDependenceInExpr(E->getNameInfo());
+ if (auto *Q = E->getQualifier())
+ Deps |= toExprDependence(Q->getDependence() &
+ ~NestedNameSpecifierDependence::Dependent);
+ for (auto *D : E->decls()) {
+ if (D->getDeclContext()->isDependentContext() ||
+ isa<UnresolvedUsingValueDecl>(D))
+ Deps |= ExprDependence::TypeValueInstantiation;
+ }
+ // If we have explicit template arguments, check for dependent
+ // template arguments and whether they contain any unexpanded pack
+ // expansions.
+ for (auto A : E->template_arguments())
+ Deps |= toExprDependence(A.getArgument().getDependence());
+ return Deps;
+}
+
+ExprDependence clang::computeDependence(DependentScopeDeclRefExpr *E) {
+ auto D = ExprDependence::TypeValue;
+ D |= getDependenceInExpr(E->getNameInfo());
+ if (auto *Q = E->getQualifier())
+ D |= toExprDependence(Q->getDependence());
+ for (auto A : E->template_arguments())
+ D |= toExprDependence(A.getArgument().getDependence());
+ return D;
+}
+
+ExprDependence clang::computeDependence(CXXConstructExpr *E) {
+ auto D = toExprDependence(E->getType()->getDependence());
+ for (auto *A : E->arguments())
+ D |= A->getDependence() & ~ExprDependence::Type;
+ return D;
+}
+
+ExprDependence clang::computeDependence(LambdaExpr *E,
+ bool ContainsUnexpandedParameterPack) {
+ auto D = toExprDependence(E->getType()->getDependence());
+ if (ContainsUnexpandedParameterPack)
+ D |= ExprDependence::UnexpandedPack;
+ return D;
+}
+
+ExprDependence clang::computeDependence(CXXUnresolvedConstructExpr *E) {
+ auto D = ExprDependence::ValueInstantiation;
+ D |= toExprDependence(E->getType()->getDependence());
+ if (E->getType()->getContainedDeducedType())
+ D |= ExprDependence::Type;
+ for (auto *A : E->arguments())
+ D |= A->getDependence() &
+ (ExprDependence::UnexpandedPack | ExprDependence::Error);
+ return D;
+}
+
+ExprDependence clang::computeDependence(CXXDependentScopeMemberExpr *E) {
+ auto D = ExprDependence::TypeValueInstantiation;
+ if (!E->isImplicitAccess())
+ D |= E->getBase()->getDependence();
+ if (auto *Q = E->getQualifier())
+ D |= toExprDependence(Q->getDependence());
+ D |= getDependenceInExpr(E->getMemberNameInfo());
+ for (auto A : E->template_arguments())
+ D |= toExprDependence(A.getArgument().getDependence());
+ return D;
+}
+
+ExprDependence clang::computeDependence(MaterializeTemporaryExpr *E) {
+ return E->getSubExpr()->getDependence();
+}
+
+ExprDependence clang::computeDependence(CXXFoldExpr *E) {
+ auto D = ExprDependence::TypeValueInstantiation;
+ for (const auto *C : {E->getLHS(), E->getRHS()}) {
+ if (C)
+ D |= C->getDependence() & ~ExprDependence::UnexpandedPack;
+ }
+ return D;
+}
+
+ExprDependence clang::computeDependence(TypeTraitExpr *E) {
+ auto D = ExprDependence::None;
+ for (const auto *A : E->getArgs())
+ D |=
+ toExprDependence(A->getType()->getDependence()) & ~ExprDependence::Type;
+ return D;
+}
+
+ExprDependence clang::computeDependence(ConceptSpecializationExpr *E,
+ bool ValueDependent) {
+ auto TA = TemplateArgumentDependence::None;
+ const auto InterestingDeps = TemplateArgumentDependence::Instantiation |
+ TemplateArgumentDependence::UnexpandedPack;
+ for (const TemplateArgumentLoc &ArgLoc :
+ E->getTemplateArgsAsWritten()->arguments()) {
+ TA |= ArgLoc.getArgument().getDependence() & InterestingDeps;
+ if (TA == InterestingDeps)
+ break;
+ }
+
+ ExprDependence D =
+ ValueDependent ? ExprDependence::Value : ExprDependence::None;
+ return D | toExprDependence(TA);
+}
+
+ExprDependence clang::computeDependence(ObjCArrayLiteral *E) {
+ auto D = ExprDependence::None;
+ Expr **Elements = E->getElements();
+ for (unsigned I = 0, N = E->getNumElements(); I != N; ++I)
+ D |= turnTypeToValueDependence(Elements[I]->getDependence());
+ return D;
+}
+
+ExprDependence clang::computeDependence(ObjCDictionaryLiteral *E) {
+ auto Deps = ExprDependence::None;
+ for (unsigned I = 0, N = E->getNumElements(); I < N; ++I) {
+ auto KV = E->getKeyValueElement(I);
+ auto KVDeps = turnTypeToValueDependence(KV.Key->getDependence() |
+ KV.Value->getDependence());
+ if (KV.EllipsisLoc.isValid())
+ KVDeps &= ~ExprDependence::UnexpandedPack;
+ Deps |= KVDeps;
+ }
+ return Deps;
+}
+
+ExprDependence clang::computeDependence(ObjCMessageExpr *E) {
+ auto D = ExprDependence::None;
+ if (auto *R = E->getInstanceReceiver())
+ D |= R->getDependence();
+ else
+ D |= toExprDependence(E->getType()->getDependence());
+ for (auto *A : E->arguments())
+ D |= A->getDependence();
+ return D;
+}
diff --git a/clang/lib/AST/DataCollection.cpp b/clang/lib/AST/DataCollection.cpp
index 8e67c101dee1..d3f2c22e9cc3 100644
--- a/clang/lib/AST/DataCollection.cpp
+++ b/clang/lib/AST/DataCollection.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/DataCollection.h"
-
+#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
namespace clang {
diff --git a/clang/lib/AST/Decl.cpp b/clang/lib/AST/Decl.cpp
index 0d30f64b992e..5c0a98815dd7 100644
--- a/clang/lib/AST/Decl.cpp
+++ b/clang/lib/AST/Decl.cpp
@@ -892,6 +892,10 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
if (!TD->getAnonDeclWithTypedefName(/*AnyRedecl*/true))
return LinkageInfo::none();
+ } else if (isa<MSGuidDecl>(D)) {
+ // A GUID behaves like an inline variable with external linkage. Fall
+ // through.
+
// Everything not covered here has no linkage.
} else {
return LinkageInfo::none();
@@ -1318,19 +1322,6 @@ LinkageInfo LinkageComputer::getLVForLocalDecl(const NamedDecl *D,
LV.isVisibilityExplicit());
}
-static inline const CXXRecordDecl*
-getOutermostEnclosingLambda(const CXXRecordDecl *Record) {
- const CXXRecordDecl *Ret = Record;
- while (Record && Record->isLambda()) {
- Ret = Record;
- if (!Record->getParent()) break;
- // Get the Containing Class of this Lambda Class
- Record = dyn_cast_or_null<CXXRecordDecl>(
- Record->getParent()->getParent());
- }
- return Ret;
-}
-
LinkageInfo LinkageComputer::computeLVForDecl(const NamedDecl *D,
LVComputationKind computation,
bool IgnoreVarTypeLinkage) {
@@ -1396,25 +1387,9 @@ LinkageInfo LinkageComputer::computeLVForDecl(const NamedDecl *D,
return getInternalLinkageFor(D);
}
- // This lambda has its linkage/visibility determined:
- // - either by the outermost lambda if that lambda has no mangling
- // number.
- // - or by the parent of the outer most lambda
- // This prevents infinite recursion in settings such as nested lambdas
- // used in NSDMI's, for e.g.
- // struct L {
- // int t{};
- // int t2 = ([](int a) { return [](int b) { return b; };})(t)(t);
- // };
- const CXXRecordDecl *OuterMostLambda =
- getOutermostEnclosingLambda(Record);
- if (OuterMostLambda->hasKnownLambdaInternalLinkage() ||
- !OuterMostLambda->getLambdaManglingNumber())
- return getInternalLinkageFor(D);
-
return getLVForClosure(
- OuterMostLambda->getDeclContext()->getRedeclContext(),
- OuterMostLambda->getLambdaContextDecl(), computation);
+ Record->getDeclContext()->getRedeclContext(),
+ Record->getLambdaContextDecl(), computation);
}
break;
@@ -1571,10 +1546,19 @@ void NamedDecl::printQualifiedName(raw_ostream &OS,
return;
}
printNestedNameSpecifier(OS, P);
- if (getDeclName() || isa<DecompositionDecl>(this))
+ if (getDeclName())
OS << *this;
- else
- OS << "(anonymous)";
+ else {
+ // Give the printName override a chance to pick a different name before we
+ // fall back to "(anonymous)".
+ SmallString<64> NameBuffer;
+ llvm::raw_svector_ostream NameOS(NameBuffer);
+ printName(NameOS);
+ if (NameBuffer.empty())
+ OS << "(anonymous)";
+ else
+ OS << NameBuffer;
+ }
}
void NamedDecl::printNestedNameSpecifier(raw_ostream &OS) const {
@@ -1587,13 +1571,16 @@ void NamedDecl::printNestedNameSpecifier(raw_ostream &OS,
// For ObjC methods and properties, look through categories and use the
// interface as context.
- if (auto *MD = dyn_cast<ObjCMethodDecl>(this))
+ if (auto *MD = dyn_cast<ObjCMethodDecl>(this)) {
if (auto *ID = MD->getClassInterface())
Ctx = ID;
- if (auto *PD = dyn_cast<ObjCPropertyDecl>(this)) {
+ } else if (auto *PD = dyn_cast<ObjCPropertyDecl>(this)) {
if (auto *MD = PD->getGetterMethodDecl())
if (auto *ID = MD->getClassInterface())
Ctx = ID;
+ } else if (auto *ID = dyn_cast<ObjCIvarDecl>(this)) {
+ if (auto *CI = ID->getContainingInterface())
+ Ctx = CI;
}
if (Ctx->isFunctionOrMethod())
@@ -2981,7 +2968,8 @@ bool FunctionDecl::isReservedGlobalPlacementOperator() const {
return (proto->getParamType(1).getCanonicalType() == Context.VoidPtrTy);
}
-bool FunctionDecl::isReplaceableGlobalAllocationFunction(bool *IsAligned) const {
+bool FunctionDecl::isReplaceableGlobalAllocationFunction(
+ Optional<unsigned> *AlignmentParam, bool *IsNothrow) const {
if (getDeclName().getNameKind() != DeclarationName::CXXOperatorName)
return false;
if (getDeclName().getCXXOverloadedOperator() != OO_New &&
@@ -3028,9 +3016,9 @@ bool FunctionDecl::isReplaceableGlobalAllocationFunction(bool *IsAligned) const
// In C++17, the next parameter can be a 'std::align_val_t' for aligned
// new/delete.
if (Ctx.getLangOpts().AlignedAllocation && !Ty.isNull() && Ty->isAlignValT()) {
- if (IsAligned)
- *IsAligned = true;
Consume();
+ if (AlignmentParam)
+ *AlignmentParam = Params;
}
// Finally, if this is not a sized delete, the final parameter can
@@ -3039,8 +3027,11 @@ bool FunctionDecl::isReplaceableGlobalAllocationFunction(bool *IsAligned) const
Ty = Ty->getPointeeType();
if (Ty.getCVRQualifiers() != Qualifiers::Const)
return false;
- if (Ty->isNothrowT())
+ if (Ty->isNothrowT()) {
+ if (IsNothrow)
+ *IsNothrow = true;
Consume();
+ }
}
return Params == FPT->getNumParams();
@@ -3173,8 +3164,8 @@ FunctionDecl *FunctionDecl::getCanonicalDecl() { return getFirstDecl(); }
unsigned FunctionDecl::getBuiltinID(bool ConsiderWrapperFunctions) const {
unsigned BuiltinID;
- if (const auto *AMAA = getAttr<ArmMveAliasAttr>()) {
- BuiltinID = AMAA->getBuiltinName()->getBuiltinID();
+ if (const auto *ABAA = getAttr<ArmBuiltinAliasAttr>()) {
+ BuiltinID = ABAA->getBuiltinName()->getBuiltinID();
} else {
if (!getIdentifier())
return 0;
@@ -3206,7 +3197,7 @@ unsigned FunctionDecl::getBuiltinID(bool ConsiderWrapperFunctions) const {
// If the function is marked "overloadable", it has a different mangled name
// and is not the C library function.
if (!ConsiderWrapperFunctions && hasAttr<OverloadableAttr>() &&
- !hasAttr<ArmMveAliasAttr>())
+ !hasAttr<ArmBuiltinAliasAttr>())
return 0;
if (!Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
@@ -3233,6 +3224,15 @@ unsigned FunctionDecl::getBuiltinID(bool ConsiderWrapperFunctions) const {
!(BuiltinID == Builtin::BIprintf || BuiltinID == Builtin::BImalloc))
return 0;
+ // As AMDGCN implementation of OpenMP does not have a device-side standard
+ // library, none of the predefined library functions except printf and malloc
+ // should be treated as a builtin i.e. 0 should be returned for them.
+ if (Context.getTargetInfo().getTriple().isAMDGCN() &&
+ Context.getLangOpts().OpenMPIsDevice &&
+ Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID) &&
+ !(BuiltinID == Builtin::BIprintf || BuiltinID == Builtin::BImalloc))
+ return 0;
+
return BuiltinID;
}
@@ -3264,13 +3264,27 @@ unsigned FunctionDecl::getMinRequiredArguments() const {
if (!getASTContext().getLangOpts().CPlusPlus)
return getNumParams();
+ // Note that it is possible for a parameter with no default argument to
+ // follow a parameter with a default argument.
unsigned NumRequiredArgs = 0;
- for (auto *Param : parameters())
- if (!Param->isParameterPack() && !Param->hasDefaultArg())
- ++NumRequiredArgs;
+ unsigned MinParamsSoFar = 0;
+ for (auto *Param : parameters()) {
+ if (!Param->isParameterPack()) {
+ ++MinParamsSoFar;
+ if (!Param->hasDefaultArg())
+ NumRequiredArgs = MinParamsSoFar;
+ }
+ }
return NumRequiredArgs;
}
+bool FunctionDecl::hasOneParamOrDefaultArgs() const {
+ return getNumParams() == 1 ||
+ (getNumParams() > 1 &&
+ std::all_of(param_begin() + 1, param_end(),
+ [](ParmVarDecl *P) { return P->hasDefaultArg(); }));
+}
+
/// The combination of the extern and inline keywords under MSVC forces
/// the function to be required.
///
@@ -3609,7 +3623,8 @@ bool FunctionDecl::isTemplateInstantiation() const {
return clang::isTemplateInstantiation(getTemplateSpecializationKind());
}
-FunctionDecl *FunctionDecl::getTemplateInstantiationPattern() const {
+FunctionDecl *
+FunctionDecl::getTemplateInstantiationPattern(bool ForDefinition) const {
// If this is a generic lambda call operator specialization, its
// instantiation pattern is always its primary template's pattern
// even if its primary template was instantiated from another
@@ -3626,18 +3641,20 @@ FunctionDecl *FunctionDecl::getTemplateInstantiationPattern() const {
}
if (MemberSpecializationInfo *Info = getMemberSpecializationInfo()) {
- if (!clang::isTemplateInstantiation(Info->getTemplateSpecializationKind()))
+ if (ForDefinition &&
+ !clang::isTemplateInstantiation(Info->getTemplateSpecializationKind()))
return nullptr;
return getDefinitionOrSelf(cast<FunctionDecl>(Info->getInstantiatedFrom()));
}
- if (!clang::isTemplateInstantiation(getTemplateSpecializationKind()))
+ if (ForDefinition &&
+ !clang::isTemplateInstantiation(getTemplateSpecializationKind()))
return nullptr;
if (FunctionTemplateDecl *Primary = getPrimaryTemplate()) {
// If we hit a point where the user provided a specialization of this
// template, we're done looking.
- while (!Primary->isMemberSpecialization()) {
+ while (!ForDefinition || !Primary->isMemberSpecialization()) {
auto *NewPrimary = Primary->getInstantiatedFromMemberTemplate();
if (!NewPrimary)
break;
@@ -4422,6 +4439,21 @@ void RecordDecl::setCapturedRecord() {
addAttr(CapturedRecordAttr::CreateImplicit(getASTContext()));
}
+bool RecordDecl::isOrContainsUnion() const {
+ if (isUnion())
+ return true;
+
+ if (const RecordDecl *Def = getDefinition()) {
+ for (const FieldDecl *FD : Def->fields()) {
+ const RecordType *RT = FD->getType()->getAs<RecordType>();
+ if (RT && RT->getDecl()->isOrContainsUnion())
+ return true;
+ }
+ }
+
+ return false;
+}
+
RecordDecl::field_iterator RecordDecl::field_begin() const {
if (hasExternalLexicalStorage() && !hasLoadedFieldsFromExternalStorage())
LoadFieldsFromExternalStorage();
@@ -4493,11 +4525,11 @@ bool RecordDecl::mayInsertExtraPadding(bool EmitRemark) const {
ReasonToReject = 5; // is standard layout.
else if (Blacklist.isBlacklistedLocation(EnabledAsanMask, getLocation(),
"field-padding"))
- ReasonToReject = 6; // is in a blacklisted file.
+ ReasonToReject = 6; // is in an excluded file.
else if (Blacklist.isBlacklistedType(EnabledAsanMask,
getQualifiedNameAsString(),
"field-padding"))
- ReasonToReject = 7; // is blacklisted.
+ ReasonToReject = 7; // The type is excluded.
if (EmitRemark) {
if (ReasonToReject >= 0)
@@ -4921,7 +4953,8 @@ static unsigned getNumModuleIdentifiers(Module *Mod) {
ImportDecl::ImportDecl(DeclContext *DC, SourceLocation StartLoc,
Module *Imported,
ArrayRef<SourceLocation> IdentifierLocs)
- : Decl(Import, DC, StartLoc), ImportedAndComplete(Imported, true) {
+ : Decl(Import, DC, StartLoc), ImportedModule(Imported),
+ NextLocalImportAndComplete(nullptr, true) {
assert(getNumModuleIdentifiers(Imported) == IdentifierLocs.size());
auto *StoredLocs = getTrailingObjects<SourceLocation>();
std::uninitialized_copy(IdentifierLocs.begin(), IdentifierLocs.end(),
@@ -4930,7 +4963,8 @@ ImportDecl::ImportDecl(DeclContext *DC, SourceLocation StartLoc,
ImportDecl::ImportDecl(DeclContext *DC, SourceLocation StartLoc,
Module *Imported, SourceLocation EndLoc)
- : Decl(Import, DC, StartLoc), ImportedAndComplete(Imported, false) {
+ : Decl(Import, DC, StartLoc), ImportedModule(Imported),
+ NextLocalImportAndComplete(nullptr, false) {
*getTrailingObjects<SourceLocation>() = EndLoc;
}
@@ -4959,7 +4993,7 @@ ImportDecl *ImportDecl::CreateDeserialized(ASTContext &C, unsigned ID,
}
ArrayRef<SourceLocation> ImportDecl::getIdentifierLocs() const {
- if (!ImportedAndComplete.getInt())
+ if (!isImportComplete())
return None;
const auto *StoredLocs = getTrailingObjects<SourceLocation>();
@@ -4968,7 +5002,7 @@ ArrayRef<SourceLocation> ImportDecl::getIdentifierLocs() const {
}
SourceRange ImportDecl::getSourceRange() const {
- if (!ImportedAndComplete.getInt())
+ if (!isImportComplete())
return SourceRange(getLocation(), *getTrailingObjects<SourceLocation>());
return SourceRange(getLocation(), getIdentifierLocs().back());
diff --git a/clang/lib/AST/DeclBase.cpp b/clang/lib/AST/DeclBase.cpp
index 6ee767ccecf7..da1eadd9d931 100644
--- a/clang/lib/AST/DeclBase.cpp
+++ b/clang/lib/AST/DeclBase.cpp
@@ -240,15 +240,47 @@ TemplateDecl *Decl::getDescribedTemplate() const {
return nullptr;
}
+const TemplateParameterList *Decl::getDescribedTemplateParams() const {
+ if (auto *TD = getDescribedTemplate())
+ return TD->getTemplateParameters();
+ if (auto *CTPSD = dyn_cast<ClassTemplatePartialSpecializationDecl>(this))
+ return CTPSD->getTemplateParameters();
+ if (auto *VTPSD = dyn_cast<VarTemplatePartialSpecializationDecl>(this))
+ return VTPSD->getTemplateParameters();
+ return nullptr;
+}
+
bool Decl::isTemplated() const {
- // A declaration is dependent if it is a template or a template pattern, or
+ // A declaration is templated if it is a template or a template pattern, or
// is within (lexcially for a friend, semantically otherwise) a dependent
// context.
// FIXME: Should local extern declarations be treated like friends?
if (auto *AsDC = dyn_cast<DeclContext>(this))
return AsDC->isDependentContext();
auto *DC = getFriendObjectKind() ? getLexicalDeclContext() : getDeclContext();
- return DC->isDependentContext() || isTemplateDecl() || getDescribedTemplate();
+ return DC->isDependentContext() || isTemplateDecl() ||
+ getDescribedTemplateParams();
+}
+
+unsigned Decl::getTemplateDepth() const {
+ if (auto *DC = dyn_cast<DeclContext>(this))
+ if (DC->isFileContext())
+ return 0;
+
+ if (auto *TPL = getDescribedTemplateParams())
+ return TPL->getDepth() + 1;
+
+ // If this is a dependent lambda, there might be an enclosing variable
+ // template. In this case, the next step is not the parent DeclContext (or
+ // even a DeclContext at all).
+ auto *RD = dyn_cast<CXXRecordDecl>(this);
+ if (RD && RD->isDependentLambda())
+ if (Decl *Context = RD->getLambdaContextDecl())
+ return Context->getTemplateDepth();
+
+ const DeclContext *DC =
+ getFriendObjectKind() ? getLexicalDeclContext() : getDeclContext();
+ return cast<Decl>(DC)->getTemplateDepth();
}
const DeclContext *Decl::getParentFunctionOrMethod() const {
@@ -332,13 +364,18 @@ void Decl::setDeclContextsImpl(DeclContext *SemaDC, DeclContext *LexicalDC,
}
}
-bool Decl::isLexicallyWithinFunctionOrMethod() const {
+bool Decl::isInLocalScopeForInstantiation() const {
const DeclContext *LDC = getLexicalDeclContext();
+ if (!LDC->isDependentContext())
+ return false;
while (true) {
if (LDC->isFunctionOrMethod())
return true;
if (!isa<TagDecl>(LDC))
return false;
+ if (const auto *CRD = dyn_cast<CXXRecordDecl>(LDC))
+ if (CRD->isLambda())
+ return true;
LDC = LDC->getLexicalParent();
}
return false;
@@ -378,6 +415,12 @@ ASTContext &Decl::getASTContext() const {
return getTranslationUnitDecl()->getASTContext();
}
+/// Helper to get the language options from the ASTContext.
+/// Defined out of line to avoid depending on ASTContext.h.
+const LangOptions &Decl::getLangOpts() const {
+ return getASTContext().getLangOpts();
+}
+
ASTMutationListener *Decl::getASTMutationListener() const {
return getASTContext().getASTMutationListener();
}
@@ -390,8 +433,10 @@ unsigned Decl::getMaxAlignment() const {
const AttrVec &V = getAttrs();
ASTContext &Ctx = getASTContext();
specific_attr_iterator<AlignedAttr> I(V.begin()), E(V.end());
- for (; I != E; ++I)
- Align = std::max(Align, I->getAlignment(Ctx));
+ for (; I != E; ++I) {
+ if (!I->isAlignmentErrorDependent())
+ Align = std::max(Align, I->getAlignment(Ctx));
+ }
return Align;
}
@@ -454,7 +499,8 @@ ExternalSourceSymbolAttr *Decl::getExternalSourceSymbolAttr() const {
}
bool Decl::hasDefiningAttr() const {
- return hasAttr<AliasAttr>() || hasAttr<IFuncAttr>();
+ return hasAttr<AliasAttr>() || hasAttr<IFuncAttr>() ||
+ hasAttr<LoaderUninitializedAttr>();
}
const Attr *Decl::getDefiningAttr() const {
@@ -462,6 +508,8 @@ const Attr *Decl::getDefiningAttr() const {
return AA;
if (auto *IFA = getAttr<IFuncAttr>())
return IFA;
+ if (auto *NZA = getAttr<LoaderUninitializedAttr>())
+ return NZA;
return nullptr;
}
@@ -587,7 +635,7 @@ AvailabilityResult Decl::getAvailability(std::string *Message,
continue;
if (Message)
- ResultMessage = Deprecated->getMessage();
+ ResultMessage = std::string(Deprecated->getMessage());
Result = AR_Deprecated;
continue;
@@ -595,7 +643,7 @@ AvailabilityResult Decl::getAvailability(std::string *Message,
if (const auto *Unavailable = dyn_cast<UnavailableAttr>(A)) {
if (Message)
- *Message = Unavailable->getMessage();
+ *Message = std::string(Unavailable->getMessage());
return AR_Unavailable;
}
@@ -786,6 +834,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
case TranslationUnit:
case ExternCContext:
case Decomposition:
+ case MSGuid:
case UsingDirective:
case BuiltinTemplate:
@@ -804,6 +853,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
case OMPCapturedExpr:
case Empty:
case LifetimeExtendedTemporary:
+ case RequiresExprBody:
// Never looked up by name.
return 0;
}
@@ -1177,6 +1227,7 @@ DeclContext *DeclContext::getPrimaryContext() {
case Decl::Captured:
case Decl::OMPDeclareReduction:
case Decl::OMPDeclareMapper:
+ case Decl::RequiresExprBody:
// There is only one DeclContext for these entities.
return this;
diff --git a/clang/lib/AST/DeclCXX.cpp b/clang/lib/AST/DeclCXX.cpp
index 2ead1e70ea0d..6f1fd2f14ede 100644
--- a/clang/lib/AST/DeclCXX.cpp
+++ b/clang/lib/AST/DeclCXX.cpp
@@ -42,6 +42,7 @@
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
@@ -83,10 +84,12 @@ CXXRecordDecl::DefinitionData::DefinitionData(CXXRecordDecl *D)
HasInheritedConstructor(false), HasInheritedAssignment(false),
NeedOverloadResolutionForCopyConstructor(false),
NeedOverloadResolutionForMoveConstructor(false),
+ NeedOverloadResolutionForCopyAssignment(false),
NeedOverloadResolutionForMoveAssignment(false),
NeedOverloadResolutionForDestructor(false),
DefaultedCopyConstructorIsDeleted(false),
DefaultedMoveConstructorIsDeleted(false),
+ DefaultedCopyAssignmentIsDeleted(false),
DefaultedMoveAssignmentIsDeleted(false),
DefaultedDestructorIsDeleted(false), HasTrivialSpecialMembers(SMF_All),
HasTrivialSpecialMembersForCall(SMF_All),
@@ -434,10 +437,8 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
setArgPassingRestrictions(RecordDecl::APK_CanNeverPassInRegs);
// Keep track of the presence of mutable fields.
- if (BaseClassDecl->hasMutableFields()) {
+ if (BaseClassDecl->hasMutableFields())
data().HasMutableFields = true;
- data().NeedOverloadResolutionForCopyConstructor = true;
- }
if (BaseClassDecl->hasUninitializedReferenceMember())
data().HasUninitializedReferenceMember = true;
@@ -510,6 +511,8 @@ void CXXRecordDecl::addedClassSubobject(CXXRecordDecl *Subobj) {
// -- a direct or virtual base class B that cannot be copied/moved [...]
// -- a non-static data member of class type M (or array thereof)
// that cannot be copied or moved [...]
+ if (!Subobj->hasSimpleCopyAssignment())
+ data().NeedOverloadResolutionForCopyAssignment = true;
if (!Subobj->hasSimpleMoveAssignment())
data().NeedOverloadResolutionForMoveAssignment = true;
@@ -663,10 +666,9 @@ bool CXXRecordDecl::lambdaIsDefaultConstructibleAndAssignable() const {
// C++17 [expr.prim.lambda]p21:
// The closure type associated with a lambda-expression has no default
// constructor and a deleted copy assignment operator.
- if (getLambdaCaptureDefault() != LCD_None ||
- getLambdaData().NumCaptures != 0)
+ if (getLambdaCaptureDefault() != LCD_None || capture_size() != 0)
return false;
- return getASTContext().getLangOpts().CPlusPlus2a;
+ return getASTContext().getLangOpts().CPlusPlus20;
}
void CXXRecordDecl::addedMember(Decl *D) {
@@ -782,7 +784,7 @@ void CXXRecordDecl::addedMember(Decl *D) {
// C++20 [dcl.init.aggr]p1:
// An aggregate is an array or a class with no user-declared [...]
// constructors
- if (getASTContext().getLangOpts().CPlusPlus2a
+ if (getASTContext().getLangOpts().CPlusPlus20
? !Constructor->isImplicit()
: (Constructor->isUserProvided() || Constructor->isExplicit()))
data().Aggregate = false;
@@ -978,10 +980,8 @@ void CXXRecordDecl::addedMember(Decl *D) {
}
// Keep track of the presence of mutable fields.
- if (Field->isMutable()) {
+ if (Field->isMutable())
data().HasMutableFields = true;
- data().NeedOverloadResolutionForCopyConstructor = true;
- }
// C++11 [class.union]p8, DR1460:
// If X is a union, a non-static data member of X that is not an anonymous
@@ -1025,10 +1025,12 @@ void CXXRecordDecl::addedMember(Decl *D) {
if (isUnion()) {
data().DefaultedCopyConstructorIsDeleted = true;
data().DefaultedMoveConstructorIsDeleted = true;
+ data().DefaultedCopyAssignmentIsDeleted = true;
data().DefaultedMoveAssignmentIsDeleted = true;
data().DefaultedDestructorIsDeleted = true;
data().NeedOverloadResolutionForCopyConstructor = true;
data().NeedOverloadResolutionForMoveConstructor = true;
+ data().NeedOverloadResolutionForCopyAssignment = true;
data().NeedOverloadResolutionForMoveAssignment = true;
data().NeedOverloadResolutionForDestructor = true;
}
@@ -1095,8 +1097,10 @@ void CXXRecordDecl::addedMember(Decl *D) {
// A defaulted copy/move assignment operator for a class X is defined
// as deleted if X has:
// -- a non-static data member of reference type
- if (T->isReferenceType())
+ if (T->isReferenceType()) {
+ data().DefaultedCopyAssignmentIsDeleted = true;
data().DefaultedMoveAssignmentIsDeleted = true;
+ }
// Bitfields of length 0 are also zero-sized, but we already bailed out for
// those because they are always unnamed.
@@ -1115,6 +1119,7 @@ void CXXRecordDecl::addedMember(Decl *D) {
// parameter.
data().NeedOverloadResolutionForCopyConstructor = true;
data().NeedOverloadResolutionForMoveConstructor = true;
+ data().NeedOverloadResolutionForCopyAssignment = true;
data().NeedOverloadResolutionForMoveAssignment = true;
}
@@ -1128,6 +1133,8 @@ void CXXRecordDecl::addedMember(Decl *D) {
data().DefaultedCopyConstructorIsDeleted = true;
if (FieldRec->hasNonTrivialMoveConstructor())
data().DefaultedMoveConstructorIsDeleted = true;
+ if (FieldRec->hasNonTrivialCopyAssignment())
+ data().DefaultedCopyAssignmentIsDeleted = true;
if (FieldRec->hasNonTrivialMoveAssignment())
data().DefaultedMoveAssignmentIsDeleted = true;
if (FieldRec->hasNonTrivialDestructor())
@@ -1141,6 +1148,8 @@ void CXXRecordDecl::addedMember(Decl *D) {
FieldRec->data().NeedOverloadResolutionForCopyConstructor;
data().NeedOverloadResolutionForMoveConstructor |=
FieldRec->data().NeedOverloadResolutionForMoveConstructor;
+ data().NeedOverloadResolutionForCopyAssignment |=
+ FieldRec->data().NeedOverloadResolutionForCopyAssignment;
data().NeedOverloadResolutionForMoveAssignment |=
FieldRec->data().NeedOverloadResolutionForMoveAssignment;
data().NeedOverloadResolutionForDestructor |=
@@ -1238,9 +1247,15 @@ void CXXRecordDecl::addedMember(Decl *D) {
}
// Keep track of the presence of mutable fields.
- if (FieldRec->hasMutableFields()) {
+ if (FieldRec->hasMutableFields())
data().HasMutableFields = true;
+
+ if (Field->isMutable()) {
+ // Our copy constructor/assignment might call something other than
+ // the subobject's copy constructor/assignment if it's mutable and of
+ // class type.
data().NeedOverloadResolutionForCopyConstructor = true;
+ data().NeedOverloadResolutionForCopyAssignment = true;
}
// C++11 [class.copy]p13:
@@ -1288,7 +1303,7 @@ void CXXRecordDecl::addedMember(Decl *D) {
// Base element type of field is a non-class type.
if (!T->isLiteralType(Context) ||
(!Field->hasInClassInitializer() && !isUnion() &&
- !Context.getLangOpts().CPlusPlus2a))
+ !Context.getLangOpts().CPlusPlus20))
data().DefaultedDefaultConstructorIsConstexpr = false;
// C++11 [class.copy]p23:
@@ -1296,8 +1311,10 @@ void CXXRecordDecl::addedMember(Decl *D) {
// as deleted if X has:
// -- a non-static data member of const non-class type (or array
// thereof)
- if (T.isConstQualified())
+ if (T.isConstQualified()) {
+ data().DefaultedCopyAssignmentIsDeleted = true;
data().DefaultedMoveAssignmentIsDeleted = true;
+ }
}
// C++14 [meta.unary.prop]p4:
@@ -1366,6 +1383,27 @@ void CXXRecordDecl::finishedDefaultedOrDeletedMember(CXXMethodDecl *D) {
data().DeclaredNonTrivialSpecialMembers |= SMKind;
}
+void CXXRecordDecl::setCaptures(ArrayRef<LambdaCapture> Captures) {
+ ASTContext &Context = getASTContext();
+ CXXRecordDecl::LambdaDefinitionData &Data = getLambdaData();
+
+ // Copy captures.
+ Data.NumCaptures = Captures.size();
+ Data.NumExplicitCaptures = 0;
+ Data.Captures = (LambdaCapture *)Context.Allocate(sizeof(LambdaCapture) *
+ Captures.size());
+ LambdaCapture *ToCapture = Data.Captures;
+ for (unsigned I = 0, N = Captures.size(); I != N; ++I) {
+ if (Captures[I].isExplicit())
+ ++Data.NumExplicitCaptures;
+
+ *ToCapture++ = Captures[I];
+ }
+
+ if (!lambdaIsDefaultConstructibleAndAssignable())
+ Data.DefaultedCopyAssignmentIsDeleted = true;
+}
+
void CXXRecordDecl::setTrivialForCallFlags(CXXMethodDecl *D) {
unsigned SMKind = 0;
@@ -1923,6 +1961,18 @@ bool CXXRecordDecl::mayBeAbstract() const {
return false;
}
+bool CXXRecordDecl::isEffectivelyFinal() const {
+ auto *Def = getDefinition();
+ if (!Def)
+ return false;
+ if (Def->hasAttr<FinalAttr>())
+ return true;
+ if (const auto *Dtor = Def->getDestructor())
+ if (Dtor->hasAttr<FinalAttr>())
+ return true;
+ return false;
+}
+
void CXXDeductionGuideDecl::anchor() {}
bool ExplicitSpecifier::isEquivalent(const ExplicitSpecifier Other) const {
@@ -1968,6 +2018,16 @@ CXXDeductionGuideDecl *CXXDeductionGuideDecl::CreateDeserialized(ASTContext &C,
QualType(), nullptr, SourceLocation());
}
+RequiresExprBodyDecl *RequiresExprBodyDecl::Create(
+ ASTContext &C, DeclContext *DC, SourceLocation StartLoc) {
+ return new (C, DC) RequiresExprBodyDecl(C, DC, StartLoc);
+}
+
+RequiresExprBodyDecl *RequiresExprBodyDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID) RequiresExprBodyDecl(C, nullptr, SourceLocation());
+}
+
void CXXMethodDecl::anchor() {}
bool CXXMethodDecl::isStatic() const {
@@ -2028,17 +2088,36 @@ CXXMethodDecl::getCorrespondingMethodInClass(const CXXRecordDecl *RD,
if (auto *MD = getCorrespondingMethodDeclaredInClass(RD, MayBeBase))
return MD;
+ llvm::SmallVector<CXXMethodDecl*, 4> FinalOverriders;
+ auto AddFinalOverrider = [&](CXXMethodDecl *D) {
+ // If this function is overridden by a candidate final overrider, it is not
+ // a final overrider.
+ for (CXXMethodDecl *OtherD : FinalOverriders) {
+ if (declaresSameEntity(D, OtherD) || recursivelyOverrides(OtherD, D))
+ return;
+ }
+
+ // Other candidate final overriders might be overridden by this function.
+ FinalOverriders.erase(
+ std::remove_if(FinalOverriders.begin(), FinalOverriders.end(),
+ [&](CXXMethodDecl *OtherD) {
+ return recursivelyOverrides(D, OtherD);
+ }),
+ FinalOverriders.end());
+
+ FinalOverriders.push_back(D);
+ };
+
for (const auto &I : RD->bases()) {
const RecordType *RT = I.getType()->getAs<RecordType>();
if (!RT)
continue;
const auto *Base = cast<CXXRecordDecl>(RT->getDecl());
- CXXMethodDecl *T = this->getCorrespondingMethodInClass(Base);
- if (T)
- return T;
+ if (CXXMethodDecl *D = this->getCorrespondingMethodInClass(Base))
+ AddFinalOverrider(D);
}
- return nullptr;
+ return FinalOverriders.size() == 1 ? FinalOverriders.front() : nullptr;
}
CXXMethodDecl *CXXMethodDecl::Create(ASTContext &C, CXXRecordDecl *RD,
@@ -2095,6 +2174,11 @@ CXXMethodDecl *CXXMethodDecl::getDevirtualizedMethod(const Expr *Base,
CXXMethodDecl *DevirtualizedMethod =
getCorrespondingMethodInClass(BestDynamicDecl);
+ // If the final overrider in the dynamic type is ambiguous, we can't
+ // devirtualize this call.
+ if (!DevirtualizedMethod)
+ return nullptr;
+
// If that method is pure virtual, we can't devirtualize. If this code is
// reached, the result would be UB, not a direct call to the derived class
// function, and we can't assume the derived class function is defined.
@@ -2106,14 +2190,10 @@ CXXMethodDecl *CXXMethodDecl::getDevirtualizedMethod(const Expr *Base,
return DevirtualizedMethod;
// Similarly, if the class itself or its destructor is marked 'final',
- // the class can't be derived from and we can therefore devirtualize the
+ // the class can't be derived from and we can therefore devirtualize the
// member function call.
- if (BestDynamicDecl->hasAttr<FinalAttr>())
+ if (BestDynamicDecl->isEffectivelyFinal())
return DevirtualizedMethod;
- if (const auto *dtor = BestDynamicDecl->getDestructor()) {
- if (dtor->hasAttr<FinalAttr>())
- return DevirtualizedMethod;
- }
if (const auto *DRE = dyn_cast<DeclRefExpr>(Base)) {
if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
@@ -2322,17 +2402,15 @@ QualType CXXMethodDecl::getThisType() const {
// volatile X*, and if the member function is declared const volatile,
// the type of this is const volatile X*.
assert(isInstance() && "No 'this' for static methods!");
-
- return CXXMethodDecl::getThisType(getType()->getAs<FunctionProtoType>(),
+ return CXXMethodDecl::getThisType(getType()->castAs<FunctionProtoType>(),
getParent());
}
QualType CXXMethodDecl::getThisObjectType() const {
// Ditto getThisType.
assert(isInstance() && "No 'this' for static methods!");
-
- return CXXMethodDecl::getThisObjectType(getType()->getAs<FunctionProtoType>(),
- getParent());
+ return CXXMethodDecl::getThisObjectType(
+ getType()->castAs<FunctionProtoType>(), getParent());
}
bool CXXMethodDecl::hasInlineBody() const {
@@ -2508,11 +2586,11 @@ CXXConstructorDecl *CXXConstructorDecl::getTargetConstructor() const {
}
bool CXXConstructorDecl::isDefaultConstructor() const {
- // C++ [class.ctor]p5:
- // A default constructor for a class X is a constructor of class
- // X that can be called without an argument.
- return (getNumParams() == 0) ||
- (getNumParams() > 0 && getParamDecl(0)->hasDefaultArg());
+ // C++ [class.default.ctor]p1:
+ // A default constructor for a class X is a constructor of class X for
+ // which each parameter that is not a function parameter pack has a default
+ // argument (including the case of a constructor with no parameters)
+ return getMinRequiredArguments() == 0;
}
bool
@@ -2523,7 +2601,7 @@ CXXConstructorDecl::isCopyConstructor(unsigned &TypeQuals) const {
bool CXXConstructorDecl::isMoveConstructor(unsigned &TypeQuals) const {
return isCopyOrMoveConstructor(TypeQuals) &&
- getParamDecl(0)->getType()->isRValueReferenceType();
+ getParamDecl(0)->getType()->isRValueReferenceType();
}
/// Determine whether this is a copy or move constructor.
@@ -2538,10 +2616,8 @@ bool CXXConstructorDecl::isCopyOrMoveConstructor(unsigned &TypeQuals) const {
// first parameter is of type X&&, const X&&, volatile X&&, or
// const volatile X&&, and either there are no other parameters or else
// all other parameters have default arguments.
- if ((getNumParams() < 1) ||
- (getNumParams() > 1 && !getParamDecl(1)->hasDefaultArg()) ||
- (getPrimaryTemplate() != nullptr) ||
- (getDescribedFunctionTemplate() != nullptr))
+ if (!hasOneParamOrDefaultArgs() || getPrimaryTemplate() != nullptr ||
+ getDescribedFunctionTemplate() != nullptr)
return false;
const ParmVarDecl *Param = getParamDecl(0);
@@ -2578,18 +2654,16 @@ bool CXXConstructorDecl::isConvertingConstructor(bool AllowExplicit) const {
if (isExplicit() && !AllowExplicit)
return false;
- return (getNumParams() == 0 &&
- getType()->castAs<FunctionProtoType>()->isVariadic()) ||
- (getNumParams() == 1) ||
- (getNumParams() > 1 &&
- (getParamDecl(1)->hasDefaultArg() ||
- getParamDecl(1)->isParameterPack()));
+ // FIXME: This has nothing to do with the definition of converting
+ // constructor, but is convenient for how we use this function in overload
+ // resolution.
+ return getNumParams() == 0
+ ? getType()->castAs<FunctionProtoType>()->isVariadic()
+ : getMinRequiredArguments() <= 1;
}
bool CXXConstructorDecl::isSpecializationCopyingObject() const {
- if ((getNumParams() < 1) ||
- (getNumParams() > 1 && !getParamDecl(1)->hasDefaultArg()) ||
- (getDescribedFunctionTemplate() != nullptr))
+ if (!hasOneParamOrDefaultArgs() || getDescribedFunctionTemplate() != nullptr)
return false;
const ParmVarDecl *Param = getParamDecl(0);
@@ -3054,7 +3128,7 @@ VarDecl *BindingDecl::getHoldingVar() const {
if (!DRE)
return nullptr;
- auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
+ auto *VD = cast<VarDecl>(DRE->getDecl());
assert(VD->isImplicit() && "holding var for binding decl not implicit");
return VD;
}
@@ -3117,6 +3191,102 @@ MSPropertyDecl *MSPropertyDecl::CreateDeserialized(ASTContext &C,
SourceLocation(), nullptr, nullptr);
}
+void MSGuidDecl::anchor() {}
+
+MSGuidDecl::MSGuidDecl(DeclContext *DC, QualType T, Parts P)
+ : ValueDecl(Decl::MSGuid, DC, SourceLocation(), DeclarationName(), T),
+ PartVal(P), APVal() {}
+
+MSGuidDecl *MSGuidDecl::Create(const ASTContext &C, QualType T, Parts P) {
+ DeclContext *DC = C.getTranslationUnitDecl();
+ return new (C, DC) MSGuidDecl(DC, T, P);
+}
+
+MSGuidDecl *MSGuidDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) MSGuidDecl(nullptr, QualType(), Parts());
+}
+
+void MSGuidDecl::printName(llvm::raw_ostream &OS) const {
+ OS << llvm::format("GUID{%08" PRIx32 "-%04" PRIx16 "-%04" PRIx16 "-",
+ PartVal.Part1, PartVal.Part2, PartVal.Part3);
+ unsigned I = 0;
+ for (uint8_t Byte : PartVal.Part4And5) {
+ OS << llvm::format("%02" PRIx8, Byte);
+ if (++I == 2)
+ OS << '-';
+ }
+ OS << '}';
+}
+
+/// Determine if T is a valid 'struct _GUID' of the shape that we expect.
+static bool isValidStructGUID(ASTContext &Ctx, QualType T) {
+ // FIXME: We only need to check this once, not once each time we compute a
+ // GUID APValue.
+ using MatcherRef = llvm::function_ref<bool(QualType)>;
+
+ auto IsInt = [&Ctx](unsigned N) {
+ return [&Ctx, N](QualType T) {
+ return T->isUnsignedIntegerOrEnumerationType() &&
+ Ctx.getIntWidth(T) == N;
+ };
+ };
+
+ auto IsArray = [&Ctx](MatcherRef Elem, unsigned N) {
+ return [&Ctx, Elem, N](QualType T) {
+ const ConstantArrayType *CAT = Ctx.getAsConstantArrayType(T);
+ return CAT && CAT->getSize() == N && Elem(CAT->getElementType());
+ };
+ };
+
+ auto IsStruct = [](std::initializer_list<MatcherRef> Fields) {
+ return [Fields](QualType T) {
+ const RecordDecl *RD = T->getAsRecordDecl();
+ if (!RD || RD->isUnion())
+ return false;
+ RD = RD->getDefinition();
+ if (!RD)
+ return false;
+ if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ if (CXXRD->getNumBases())
+ return false;
+ auto MatcherIt = Fields.begin();
+ for (const FieldDecl *FD : RD->fields()) {
+ if (FD->isUnnamedBitfield()) continue;
+ if (FD->isBitField() || MatcherIt == Fields.end() ||
+ !(*MatcherIt)(FD->getType()))
+ return false;
+ ++MatcherIt;
+ }
+ return MatcherIt == Fields.end();
+ };
+ };
+
+ // We expect an {i32, i16, i16, [8 x i8]}.
+ return IsStruct({IsInt(32), IsInt(16), IsInt(16), IsArray(IsInt(8), 8)})(T);
+}
+
+APValue &MSGuidDecl::getAsAPValue() const {
+ if (APVal.isAbsent() && isValidStructGUID(getASTContext(), getType())) {
+ using llvm::APInt;
+ using llvm::APSInt;
+ APVal = APValue(APValue::UninitStruct(), 0, 4);
+ APVal.getStructField(0) = APValue(APSInt(APInt(32, PartVal.Part1), true));
+ APVal.getStructField(1) = APValue(APSInt(APInt(16, PartVal.Part2), true));
+ APVal.getStructField(2) = APValue(APSInt(APInt(16, PartVal.Part3), true));
+ APValue &Arr = APVal.getStructField(3) =
+ APValue(APValue::UninitArray(), 8, 8);
+ for (unsigned I = 0; I != 8; ++I) {
+ Arr.getArrayInitializedElt(I) =
+ APValue(APSInt(APInt(8, PartVal.Part4And5[I]), true));
+ }
+ // Register this APValue to be destroyed if necessary. (Note that the
+ // MSGuidDecl destructor is never run.)
+ getASTContext().addDestruction(&APVal);
+ }
+
+ return APVal;
+}
+
static const char *getAccessName(AccessSpecifier AS) {
switch (AS) {
case AS_none:
diff --git a/clang/lib/AST/DeclObjC.cpp b/clang/lib/AST/DeclObjC.cpp
index 9a84e3c4a510..5c8b34731f36 100644
--- a/clang/lib/AST/DeclObjC.cpp
+++ b/clang/lib/AST/DeclObjC.cpp
@@ -94,7 +94,7 @@ ObjCContainerDecl::getMethod(Selector Sel, bool isInstance,
// methods there.
if (const auto *Proto = dyn_cast<ObjCProtocolDecl>(this)) {
if (const ObjCProtocolDecl *Def = Proto->getDefinition())
- if (Def->isHidden() && !AllowHidden)
+ if (!Def->isUnconditionallyVisible() && !AllowHidden)
return nullptr;
}
@@ -146,7 +146,8 @@ bool ObjCContainerDecl::HasUserDeclaredSetterMethod(
// auto-synthesized).
for (const auto *P : Cat->properties())
if (P->getIdentifier() == Property->getIdentifier()) {
- if (P->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readwrite)
+ if (P->getPropertyAttributes() &
+ ObjCPropertyAttribute::kind_readwrite)
return true;
break;
}
@@ -180,7 +181,7 @@ ObjCPropertyDecl::findPropertyDecl(const DeclContext *DC,
// property.
if (const auto *Proto = dyn_cast<ObjCProtocolDecl>(DC)) {
if (const ObjCProtocolDecl *Def = Proto->getDefinition())
- if (Def->isHidden())
+ if (!Def->isUnconditionallyVisible())
return nullptr;
}
@@ -238,7 +239,7 @@ ObjCPropertyDecl *ObjCContainerDecl::FindPropertyDeclaration(
// Don't find properties within hidden protocol definitions.
if (const auto *Proto = dyn_cast<ObjCProtocolDecl>(this)) {
if (const ObjCProtocolDecl *Def = Proto->getDefinition())
- if (Def->isHidden())
+ if (!Def->isUnconditionallyVisible())
return nullptr;
}
@@ -1361,25 +1362,23 @@ ObjCMethodDecl::findPropertyDecl(bool CheckOverrides) const {
return Found;
} else {
// Determine whether the container is a class.
- ClassDecl = dyn_cast<ObjCInterfaceDecl>(Container);
+ ClassDecl = cast<ObjCInterfaceDecl>(Container);
}
+ assert(ClassDecl && "Failed to find main class");
// If we have a class, check its visible extensions.
- if (ClassDecl) {
- for (const auto *Ext : ClassDecl->visible_extensions()) {
- if (Ext == Container)
- continue;
-
- if (const auto *Found = findMatchingProperty(Ext))
- return Found;
- }
+ for (const auto *Ext : ClassDecl->visible_extensions()) {
+ if (Ext == Container)
+ continue;
+ if (const auto *Found = findMatchingProperty(Ext))
+ return Found;
}
assert(isSynthesizedAccessorStub() && "expected an accessor stub");
+
for (const auto *Cat : ClassDecl->known_categories()) {
if (Cat == Container)
continue;
-
if (const auto *Found = findMatchingProperty(Cat))
return Found;
}
@@ -1920,7 +1919,7 @@ ObjCMethodDecl *ObjCProtocolDecl::lookupMethod(Selector Sel,
// If there is no definition or the definition is hidden, we don't find
// anything.
const ObjCProtocolDecl *Def = getDefinition();
- if (!Def || Def->isHidden())
+ if (!Def || !Def->isUnconditionallyVisible())
return nullptr;
if ((MethodDecl = getMethod(Sel, isInstance)))
diff --git a/clang/lib/AST/DeclPrinter.cpp b/clang/lib/AST/DeclPrinter.cpp
index 4cedcbed0644..2e48b2b46c4d 100644
--- a/clang/lib/AST/DeclPrinter.cpp
+++ b/clang/lib/AST/DeclPrinter.cpp
@@ -105,6 +105,8 @@ namespace {
void VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D);
void VisitOMPDeclareMapperDecl(OMPDeclareMapperDecl *D);
void VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D);
+ void VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *TTP);
+ void VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *NTTP);
void printTemplateParameters(const TemplateParameterList *Params,
bool OmitTemplateKW = false);
@@ -287,12 +289,10 @@ void DeclPrinter::ProcessDeclGroup(SmallVectorImpl<Decl*>& Decls) {
}
void DeclPrinter::Print(AccessSpecifier AS) {
- switch(AS) {
- case AS_none: llvm_unreachable("No access specifier!");
- case AS_public: Out << "public"; break;
- case AS_protected: Out << "protected"; break;
- case AS_private: Out << "private"; break;
- }
+ const auto AccessSpelling = getAccessSpelling(AS);
+ if (AccessSpelling.empty())
+ llvm_unreachable("No access specifier!");
+ Out << AccessSpelling;
}
void DeclPrinter::PrintConstructorInitializers(CXXConstructorDecl *CDecl,
@@ -530,7 +530,7 @@ void DeclPrinter::VisitEnumDecl(EnumDecl *D) {
Out << ' ' << *D;
- if (D->isFixed() && D->getASTContext().getLangOpts().CPlusPlus11)
+ if (D->isFixed())
Out << " : " << D->getIntegerType().stream(Policy);
if (D->isCompleteDefinition()) {
@@ -1051,37 +1051,10 @@ void DeclPrinter::printTemplateParameters(const TemplateParameterList *Params,
else
NeedComma = true;
- if (auto TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
-
- if (const TypeConstraint *TC = TTP->getTypeConstraint())
- TC->print(Out, Policy);
- else if (TTP->wasDeclaredWithTypename())
- Out << "typename";
- else
- Out << "class";
-
- if (TTP->isParameterPack())
- Out << " ...";
- else if (!TTP->getName().empty())
- Out << ' ';
-
- Out << *TTP;
-
- if (TTP->hasDefaultArgument()) {
- Out << " = ";
- Out << TTP->getDefaultArgument().getAsString(Policy);
- };
+ if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
+ VisitTemplateTypeParmDecl(TTP);
} else if (auto NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
- StringRef Name;
- if (IdentifierInfo *II = NTTP->getIdentifier())
- Name = II->getName();
- printDeclType(NTTP->getType(), Name, NTTP->isParameterPack());
-
- if (NTTP->hasDefaultArgument()) {
- Out << " = ";
- NTTP->getDefaultArgument()->printPretty(Out, nullptr, Policy,
- Indentation);
- }
+ VisitNonTypeTemplateParmDecl(NTTP);
} else if (auto TTPD = dyn_cast<TemplateTemplateParmDecl>(Param)) {
VisitTemplateDecl(TTPD);
// FIXME: print the default argument, if present.
@@ -1401,7 +1374,12 @@ void DeclPrinter::VisitObjCProtocolDecl(ObjCProtocolDecl *PID) {
}
void DeclPrinter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *PID) {
- Out << "@implementation " << *PID->getClassInterface() << '(' << *PID <<")\n";
+ Out << "@implementation ";
+ if (const auto *CID = PID->getClassInterface())
+ Out << *CID;
+ else
+ Out << "<<error-type>>";
+ Out << '(' << *PID << ")\n";
VisitDeclContext(PID, false);
Out << "@end";
@@ -1409,7 +1387,11 @@ void DeclPrinter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *PID) {
}
void DeclPrinter::VisitObjCCategoryDecl(ObjCCategoryDecl *PID) {
- Out << "@interface " << *PID->getClassInterface();
+ Out << "@interface ";
+ if (const auto *CID = PID->getClassInterface())
+ Out << *CID;
+ else
+ Out << "<<error-type>>";
if (auto TypeParams = PID->getTypeParamList()) {
PrintObjCTypeParams(TypeParams);
}
@@ -1453,85 +1435,83 @@ void DeclPrinter::VisitObjCPropertyDecl(ObjCPropertyDecl *PDecl) {
QualType T = PDecl->getType();
Out << "@property";
- if (PDecl->getPropertyAttributes() != ObjCPropertyDecl::OBJC_PR_noattr) {
+ if (PDecl->getPropertyAttributes() != ObjCPropertyAttribute::kind_noattr) {
bool first = true;
Out << "(";
- if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_class) {
+ if (PDecl->getPropertyAttributes() & ObjCPropertyAttribute::kind_class) {
Out << (first ? "" : ", ") << "class";
first = false;
}
- if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_direct) {
+ if (PDecl->getPropertyAttributes() & ObjCPropertyAttribute::kind_direct) {
Out << (first ? "" : ", ") << "direct";
first = false;
}
if (PDecl->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_nonatomic) {
+ ObjCPropertyAttribute::kind_nonatomic) {
Out << (first ? "" : ", ") << "nonatomic";
first = false;
}
- if (PDecl->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_atomic) {
+ if (PDecl->getPropertyAttributes() & ObjCPropertyAttribute::kind_atomic) {
Out << (first ? "" : ", ") << "atomic";
first = false;
}
- if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_assign) {
+ if (PDecl->getPropertyAttributes() & ObjCPropertyAttribute::kind_assign) {
Out << (first ? "" : ", ") << "assign";
first = false;
}
- if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_retain) {
+ if (PDecl->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain) {
Out << (first ? "" : ", ") << "retain";
first = false;
}
- if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_strong) {
+ if (PDecl->getPropertyAttributes() & ObjCPropertyAttribute::kind_strong) {
Out << (first ? "" : ", ") << "strong";
first = false;
}
- if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_copy) {
+ if (PDecl->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy) {
Out << (first ? "" : ", ") << "copy";
first = false;
}
- if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak) {
+ if (PDecl->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak) {
Out << (first ? "" : ", ") << "weak";
first = false;
}
- if (PDecl->getPropertyAttributes()
- & ObjCPropertyDecl::OBJC_PR_unsafe_unretained) {
+ if (PDecl->getPropertyAttributes() &
+ ObjCPropertyAttribute::kind_unsafe_unretained) {
Out << (first ? "" : ", ") << "unsafe_unretained";
first = false;
}
if (PDecl->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_readwrite) {
+ ObjCPropertyAttribute::kind_readwrite) {
Out << (first ? "" : ", ") << "readwrite";
first = false;
}
- if (PDecl->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_readonly) {
+ if (PDecl->getPropertyAttributes() & ObjCPropertyAttribute::kind_readonly) {
Out << (first ? "" : ", ") << "readonly";
first = false;
}
- if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_getter) {
+ if (PDecl->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) {
Out << (first ? "" : ", ") << "getter = ";
PDecl->getGetterName().print(Out);
first = false;
}
- if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter) {
+ if (PDecl->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) {
Out << (first ? "" : ", ") << "setter = ";
PDecl->getSetterName().print(Out);
first = false;
}
if (PDecl->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_nullability) {
+ ObjCPropertyAttribute::kind_nullability) {
if (auto nullability = AttributedType::stripOuterNullability(T)) {
if (*nullability == NullabilityKind::Unspecified &&
(PDecl->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_null_resettable)) {
+ ObjCPropertyAttribute::kind_null_resettable)) {
Out << (first ? "" : ", ") << "null_resettable";
} else {
Out << (first ? "" : ", ")
@@ -1705,3 +1685,36 @@ void DeclPrinter::VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D) {
D->getInit()->printPretty(Out, nullptr, Policy, Indentation);
}
+void DeclPrinter::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *TTP) {
+ if (const TypeConstraint *TC = TTP->getTypeConstraint())
+ TC->print(Out, Policy);
+ else if (TTP->wasDeclaredWithTypename())
+ Out << "typename";
+ else
+ Out << "class";
+
+ if (TTP->isParameterPack())
+ Out << " ...";
+ else if (!TTP->getName().empty())
+ Out << ' ';
+
+ Out << *TTP;
+
+ if (TTP->hasDefaultArgument()) {
+ Out << " = ";
+ Out << TTP->getDefaultArgument().getAsString(Policy);
+ }
+}
+
+void DeclPrinter::VisitNonTypeTemplateParmDecl(
+ const NonTypeTemplateParmDecl *NTTP) {
+ StringRef Name;
+ if (IdentifierInfo *II = NTTP->getIdentifier())
+ Name = II->getName();
+ printDeclType(NTTP->getType(), Name, NTTP->isParameterPack());
+
+ if (NTTP->hasDefaultArgument()) {
+ Out << " = ";
+ NTTP->getDefaultArgument()->printPretty(Out, nullptr, Policy, Indentation);
+ }
+}
diff --git a/clang/lib/AST/DeclTemplate.cpp b/clang/lib/AST/DeclTemplate.cpp
index 95a2e26e0df8..7857e75f57a1 100644
--- a/clang/lib/AST/DeclTemplate.cpp
+++ b/clang/lib/AST/DeclTemplate.cpp
@@ -164,10 +164,15 @@ static void AdoptTemplateParameterList(TemplateParameterList *Params,
void TemplateParameterList::
getAssociatedConstraints(llvm::SmallVectorImpl<const Expr *> &AC) const {
if (HasConstrainedParameters)
- for (const NamedDecl *Param : *this)
- if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param))
+ for (const NamedDecl *Param : *this) {
+ if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
if (const auto *TC = TTP->getTypeConstraint())
AC.push_back(TC->getImmediatelyDeclaredConstraint());
+ } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ if (const Expr *E = NTTP->getPlaceholderTypeConstraint())
+ AC.push_back(E);
+ }
+ }
if (HasRequiresClause)
AC.push_back(getRequiresClause());
}
@@ -483,7 +488,10 @@ static void ProfileTemplateParameterList(ASTContext &C,
if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(D)) {
ID.AddInteger(1);
ID.AddBoolean(TTP->isParameterPack());
- // TODO: Concepts: profile type-constraints.
+ ID.AddBoolean(TTP->hasTypeConstraint());
+ if (const TypeConstraint *TC = TTP->getTypeConstraint())
+ TC->getImmediatelyDeclaredConstraint()->Profile(ID, C,
+ /*Canonical=*/true);
continue;
}
const auto *TTP = cast<TemplateTemplateParmDecl>(D);
@@ -684,8 +692,14 @@ NonTypeTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
unsigned D, unsigned P, IdentifierInfo *Id,
QualType T, bool ParameterPack,
TypeSourceInfo *TInfo) {
- return new (C, DC) NonTypeTemplateParmDecl(DC, StartLoc, IdLoc, D, P, Id,
- T, ParameterPack, TInfo);
+ AutoType *AT =
+ C.getLangOpts().CPlusPlus20 ? T->getContainedAutoType() : nullptr;
+ return new (C, DC,
+ additionalSizeToAlloc<std::pair<QualType, TypeSourceInfo *>,
+ Expr *>(0,
+ AT && AT->isConstrained() ? 1 : 0))
+ NonTypeTemplateParmDecl(DC, StartLoc, IdLoc, D, P, Id, T, ParameterPack,
+ TInfo);
}
NonTypeTemplateParmDecl *NonTypeTemplateParmDecl::Create(
@@ -693,26 +707,34 @@ NonTypeTemplateParmDecl *NonTypeTemplateParmDecl::Create(
SourceLocation IdLoc, unsigned D, unsigned P, IdentifierInfo *Id,
QualType T, TypeSourceInfo *TInfo, ArrayRef<QualType> ExpandedTypes,
ArrayRef<TypeSourceInfo *> ExpandedTInfos) {
+ AutoType *AT = TInfo->getType()->getContainedAutoType();
return new (C, DC,
- additionalSizeToAlloc<std::pair<QualType, TypeSourceInfo *>>(
- ExpandedTypes.size()))
+ additionalSizeToAlloc<std::pair<QualType, TypeSourceInfo *>,
+ Expr *>(
+ ExpandedTypes.size(), AT && AT->isConstrained() ? 1 : 0))
NonTypeTemplateParmDecl(DC, StartLoc, IdLoc, D, P, Id, T, TInfo,
ExpandedTypes, ExpandedTInfos);
}
NonTypeTemplateParmDecl *
-NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- return new (C, ID) NonTypeTemplateParmDecl(nullptr, SourceLocation(),
- SourceLocation(), 0, 0, nullptr,
- QualType(), false, nullptr);
+NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+ bool HasTypeConstraint) {
+ return new (C, ID, additionalSizeToAlloc<std::pair<QualType,
+ TypeSourceInfo *>,
+ Expr *>(0,
+ HasTypeConstraint ? 1 : 0))
+ NonTypeTemplateParmDecl(nullptr, SourceLocation(), SourceLocation(),
+ 0, 0, nullptr, QualType(), false, nullptr);
}
NonTypeTemplateParmDecl *
NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID,
- unsigned NumExpandedTypes) {
+ unsigned NumExpandedTypes,
+ bool HasTypeConstraint) {
auto *NTTP =
- new (C, ID, additionalSizeToAlloc<std::pair<QualType, TypeSourceInfo *>>(
- NumExpandedTypes))
+ new (C, ID, additionalSizeToAlloc<std::pair<QualType, TypeSourceInfo *>,
+ Expr *>(
+ NumExpandedTypes, HasTypeConstraint ? 1 : 0))
NonTypeTemplateParmDecl(nullptr, SourceLocation(), SourceLocation(),
0, 0, nullptr, QualType(), nullptr, None,
None);
@@ -1408,4 +1430,4 @@ void TypeConstraint::print(llvm::raw_ostream &OS, PrintingPolicy Policy) const {
ArgLoc.getArgument().print(Policy, OS);
OS << ">";
}
-} \ No newline at end of file
+}
diff --git a/clang/lib/AST/DeclarationName.cpp b/clang/lib/AST/DeclarationName.cpp
index 4eb11bc57e52..ecf676c9936d 100644
--- a/clang/lib/AST/DeclarationName.cpp
+++ b/clang/lib/AST/DeclarationName.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/OpenMPClause.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
@@ -138,8 +139,19 @@ void DeclarationName::print(raw_ostream &OS,
const PrintingPolicy &Policy) const {
switch (getNameKind()) {
case DeclarationName::Identifier:
- if (const IdentifierInfo *II = getAsIdentifierInfo())
- OS << II->getName();
+ if (const IdentifierInfo *II = getAsIdentifierInfo()) {
+ StringRef Name = II->getName();
+ // If this is a mangled OpenMP variant name we strip off the mangling for
+ // printing. It should not be visible to the user at all.
+ if (II->isMangledOpenMPVariantName()) {
+ std::pair<StringRef, StringRef> NameContextPair =
+ Name.split(getOpenMPVariantManglingSeparatorStr());
+ OS << NameContextPair.first << "["
+ << OMPTraitInfo(NameContextPair.second) << "]";
+ } else {
+ OS << Name;
+ }
+ }
return;
case DeclarationName::ObjCZeroArgSelector:
diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp
index 73ddbc62482d..399e7e13c445 100644
--- a/clang/lib/AST/Expr.cpp
+++ b/clang/lib/AST/Expr.cpp
@@ -14,9 +14,11 @@
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/ComputeDependence.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/Mangle.h"
@@ -242,6 +244,7 @@ static void AssertResultStorageKind(ConstantExpr::ResultStorageKind Kind) {
assert((Kind == ConstantExpr::RSK_APValue ||
Kind == ConstantExpr::RSK_Int64 || Kind == ConstantExpr::RSK_None) &&
"Invalid StorageKind Value");
+ (void)Kind;
}
ConstantExpr::ResultStorageKind
@@ -266,29 +269,31 @@ ConstantExpr::getStorageKind(const Type *T, const ASTContext &Context) {
return ConstantExpr::RSK_APValue;
}
-void ConstantExpr::DefaultInit(ResultStorageKind StorageKind) {
+ConstantExpr::ConstantExpr(Expr *SubExpr, ResultStorageKind StorageKind,
+ bool IsImmediateInvocation)
+ : FullExpr(ConstantExprClass, SubExpr) {
ConstantExprBits.ResultKind = StorageKind;
ConstantExprBits.APValueKind = APValue::None;
+ ConstantExprBits.IsUnsigned = false;
+ ConstantExprBits.BitWidth = 0;
ConstantExprBits.HasCleanup = false;
+ ConstantExprBits.IsImmediateInvocation = IsImmediateInvocation;
+
if (StorageKind == ConstantExpr::RSK_APValue)
::new (getTrailingObjects<APValue>()) APValue();
}
-ConstantExpr::ConstantExpr(Expr *subexpr, ResultStorageKind StorageKind)
- : FullExpr(ConstantExprClass, subexpr) {
- DefaultInit(StorageKind);
-}
-
ConstantExpr *ConstantExpr::Create(const ASTContext &Context, Expr *E,
- ResultStorageKind StorageKind) {
+ ResultStorageKind StorageKind,
+ bool IsImmediateInvocation) {
assert(!isa<ConstantExpr>(E));
AssertResultStorageKind(StorageKind);
+
unsigned Size = totalSizeToAlloc<APValue, uint64_t>(
StorageKind == ConstantExpr::RSK_APValue,
StorageKind == ConstantExpr::RSK_Int64);
void *Mem = Context.Allocate(Size, alignof(ConstantExpr));
- ConstantExpr *Self = new (Mem) ConstantExpr(E, StorageKind);
- return Self;
+ return new (Mem) ConstantExpr(E, StorageKind, IsImmediateInvocation);
}
ConstantExpr *ConstantExpr::Create(const ASTContext &Context, Expr *E,
@@ -299,25 +304,27 @@ ConstantExpr *ConstantExpr::Create(const ASTContext &Context, Expr *E,
return Self;
}
-ConstantExpr::ConstantExpr(ResultStorageKind StorageKind, EmptyShell Empty)
+ConstantExpr::ConstantExpr(EmptyShell Empty, ResultStorageKind StorageKind)
: FullExpr(ConstantExprClass, Empty) {
- DefaultInit(StorageKind);
+ ConstantExprBits.ResultKind = StorageKind;
+
+ if (StorageKind == ConstantExpr::RSK_APValue)
+ ::new (getTrailingObjects<APValue>()) APValue();
}
ConstantExpr *ConstantExpr::CreateEmpty(const ASTContext &Context,
- ResultStorageKind StorageKind,
- EmptyShell Empty) {
+ ResultStorageKind StorageKind) {
AssertResultStorageKind(StorageKind);
+
unsigned Size = totalSizeToAlloc<APValue, uint64_t>(
StorageKind == ConstantExpr::RSK_APValue,
StorageKind == ConstantExpr::RSK_Int64);
void *Mem = Context.Allocate(Size, alignof(ConstantExpr));
- ConstantExpr *Self = new (Mem) ConstantExpr(StorageKind, Empty);
- return Self;
+ return new (Mem) ConstantExpr(EmptyShell(), StorageKind);
}
void ConstantExpr::MoveIntoResult(APValue &Value, const ASTContext &Context) {
- assert(getStorageKind(Value) == ConstantExprBits.ResultKind &&
+ assert((unsigned)getStorageKind(Value) <= ConstantExprBits.ResultKind &&
"Invalid storage for this value kind");
ConstantExprBits.APValueKind = Value.getKind();
switch (ConstantExprBits.ResultKind) {
@@ -352,6 +359,8 @@ llvm::APSInt ConstantExpr::getResultAsAPSInt() const {
}
APValue ConstantExpr::getAPValueResult() const {
+ assert(hasAPValueResult());
+
switch (ConstantExprBits.ResultKind) {
case ConstantExpr::RSK_APValue:
return APValueResult();
@@ -365,125 +374,12 @@ APValue ConstantExpr::getAPValueResult() const {
llvm_unreachable("invalid ResultKind");
}
-/// Compute the type-, value-, and instantiation-dependence of a
-/// declaration reference
-/// based on the declaration being referenced.
-static void computeDeclRefDependence(const ASTContext &Ctx, NamedDecl *D,
- QualType T, bool &TypeDependent,
- bool &ValueDependent,
- bool &InstantiationDependent) {
- TypeDependent = false;
- ValueDependent = false;
- InstantiationDependent = false;
-
- // (TD) C++ [temp.dep.expr]p3:
- // An id-expression is type-dependent if it contains:
- //
- // and
- //
- // (VD) C++ [temp.dep.constexpr]p2:
- // An identifier is value-dependent if it is:
-
- // (TD) - an identifier that was declared with dependent type
- // (VD) - a name declared with a dependent type,
- if (T->isDependentType()) {
- TypeDependent = true;
- ValueDependent = true;
- InstantiationDependent = true;
- return;
- } else if (T->isInstantiationDependentType()) {
- InstantiationDependent = true;
- }
-
- // (TD) - a conversion-function-id that specifies a dependent type
- if (D->getDeclName().getNameKind()
- == DeclarationName::CXXConversionFunctionName) {
- QualType T = D->getDeclName().getCXXNameType();
- if (T->isDependentType()) {
- TypeDependent = true;
- ValueDependent = true;
- InstantiationDependent = true;
- return;
- }
-
- if (T->isInstantiationDependentType())
- InstantiationDependent = true;
- }
-
- // (VD) - the name of a non-type template parameter,
- if (isa<NonTypeTemplateParmDecl>(D)) {
- ValueDependent = true;
- InstantiationDependent = true;
- return;
- }
-
- // (VD) - a constant with integral or enumeration type and is
- // initialized with an expression that is value-dependent.
- // (VD) - a constant with literal type and is initialized with an
- // expression that is value-dependent [C++11].
- // (VD) - FIXME: Missing from the standard:
- // - an entity with reference type and is initialized with an
- // expression that is value-dependent [C++11]
- if (VarDecl *Var = dyn_cast<VarDecl>(D)) {
- if ((Ctx.getLangOpts().CPlusPlus11 ?
- Var->getType()->isLiteralType(Ctx) :
- Var->getType()->isIntegralOrEnumerationType()) &&
- (Var->getType().isConstQualified() ||
- Var->getType()->isReferenceType())) {
- if (const Expr *Init = Var->getAnyInitializer())
- if (Init->isValueDependent()) {
- ValueDependent = true;
- InstantiationDependent = true;
- }
- }
-
- // (VD) - FIXME: Missing from the standard:
- // - a member function or a static data member of the current
- // instantiation
- if (Var->isStaticDataMember() &&
- Var->getDeclContext()->isDependentContext()) {
- ValueDependent = true;
- InstantiationDependent = true;
- TypeSourceInfo *TInfo = Var->getFirstDecl()->getTypeSourceInfo();
- if (TInfo->getType()->isIncompleteArrayType())
- TypeDependent = true;
- }
-
- return;
- }
-
- // (VD) - FIXME: Missing from the standard:
- // - a member function or a static data member of the current
- // instantiation
- if (isa<CXXMethodDecl>(D) && D->getDeclContext()->isDependentContext()) {
- ValueDependent = true;
- InstantiationDependent = true;
- }
-}
-
-void DeclRefExpr::computeDependence(const ASTContext &Ctx) {
- bool TypeDependent = false;
- bool ValueDependent = false;
- bool InstantiationDependent = false;
- computeDeclRefDependence(Ctx, getDecl(), getType(), TypeDependent,
- ValueDependent, InstantiationDependent);
-
- ExprBits.TypeDependent |= TypeDependent;
- ExprBits.ValueDependent |= ValueDependent;
- ExprBits.InstantiationDependent |= InstantiationDependent;
-
- // Is the declaration a parameter pack?
- if (getDecl()->isParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-}
-
DeclRefExpr::DeclRefExpr(const ASTContext &Ctx, ValueDecl *D,
bool RefersToEnclosingVariableOrCapture, QualType T,
ExprValueKind VK, SourceLocation L,
const DeclarationNameLoc &LocInfo,
NonOdrUseReason NOUR)
- : Expr(DeclRefExprClass, T, VK, OK_Ordinary, false, false, false, false),
- D(D), DNLoc(LocInfo) {
+ : Expr(DeclRefExprClass, T, VK, OK_Ordinary), D(D), DNLoc(LocInfo) {
DeclRefExprBits.HasQualifier = false;
DeclRefExprBits.HasTemplateKWAndArgsInfo = false;
DeclRefExprBits.HasFoundDecl = false;
@@ -492,7 +388,7 @@ DeclRefExpr::DeclRefExpr(const ASTContext &Ctx, ValueDecl *D,
RefersToEnclosingVariableOrCapture;
DeclRefExprBits.NonOdrUseReason = NOUR;
DeclRefExprBits.Loc = L;
- computeDependence(Ctx);
+ setDependence(computeDependence(this, Ctx));
}
DeclRefExpr::DeclRefExpr(const ASTContext &Ctx,
@@ -502,19 +398,13 @@ DeclRefExpr::DeclRefExpr(const ASTContext &Ctx,
const DeclarationNameInfo &NameInfo, NamedDecl *FoundD,
const TemplateArgumentListInfo *TemplateArgs,
QualType T, ExprValueKind VK, NonOdrUseReason NOUR)
- : Expr(DeclRefExprClass, T, VK, OK_Ordinary, false, false, false, false),
- D(D), DNLoc(NameInfo.getInfo()) {
+ : Expr(DeclRefExprClass, T, VK, OK_Ordinary), D(D),
+ DNLoc(NameInfo.getInfo()) {
DeclRefExprBits.Loc = NameInfo.getLoc();
DeclRefExprBits.HasQualifier = QualifierLoc ? 1 : 0;
- if (QualifierLoc) {
+ if (QualifierLoc)
new (getTrailingObjects<NestedNameSpecifierLoc>())
NestedNameSpecifierLoc(QualifierLoc);
- auto *NNS = QualifierLoc.getNestedNameSpecifier();
- if (NNS->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (NNS->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
- }
DeclRefExprBits.HasFoundDecl = FoundD ? 1 : 0;
if (FoundD)
*getTrailingObjects<NamedDecl *>() = FoundD;
@@ -524,22 +414,18 @@ DeclRefExpr::DeclRefExpr(const ASTContext &Ctx,
RefersToEnclosingVariableOrCapture;
DeclRefExprBits.NonOdrUseReason = NOUR;
if (TemplateArgs) {
- bool Dependent = false;
- bool InstantiationDependent = false;
- bool ContainsUnexpandedParameterPack = false;
+ auto Deps = TemplateArgumentDependence::None;
getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
TemplateKWLoc, *TemplateArgs, getTrailingObjects<TemplateArgumentLoc>(),
- Dependent, InstantiationDependent, ContainsUnexpandedParameterPack);
- assert(!Dependent && "built a DeclRefExpr with dependent template args");
- ExprBits.InstantiationDependent |= InstantiationDependent;
- ExprBits.ContainsUnexpandedParameterPack |= ContainsUnexpandedParameterPack;
+ Deps);
+ assert(!(Deps & TemplateArgumentDependence::Dependent) &&
+ "built a DeclRefExpr with dependent template args");
} else if (TemplateKWLoc.isValid()) {
getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
TemplateKWLoc);
}
DeclRefExprBits.HadMultipleCandidates = 0;
-
- computeDependence(Ctx);
+ setDependence(computeDependence(this, Ctx));
}
DeclRefExpr *DeclRefExpr::Create(const ASTContext &Context,
@@ -611,10 +497,7 @@ SourceLocation DeclRefExpr::getEndLoc() const {
PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FNTy, IdentKind IK,
StringLiteral *SL)
- : Expr(PredefinedExprClass, FNTy, VK_LValue, OK_Ordinary,
- FNTy->isDependentType(), FNTy->isDependentType(),
- FNTy->isInstantiationDependentType(),
- /*ContainsUnexpandedParameterPack=*/false) {
+ : Expr(PredefinedExprClass, FNTy, VK_LValue, OK_Ordinary) {
PredefinedExprBits.Kind = IK;
assert((getIdentKind() == IK) &&
"IdentKind do not fit in PredefinedExprBitfields!");
@@ -623,6 +506,35 @@ PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FNTy, IdentKind IK,
PredefinedExprBits.Loc = L;
if (HasFunctionName)
setFunctionName(SL);
+ setDependence(computeDependence(this));
+}
+
+PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FnTy, IdentKind IK,
+ TypeSourceInfo *Info)
+ : Expr(PredefinedExprClass, FnTy, VK_LValue, OK_Ordinary) {
+ PredefinedExprBits.Kind = IK;
+ assert((getIdentKind() == IK) &&
+ "IdentKind do not fit in PredefinedExprBitFields!");
+ assert(IK == UniqueStableNameType &&
+ "Constructor only valid with UniqueStableNameType");
+ PredefinedExprBits.HasFunctionName = false;
+ PredefinedExprBits.Loc = L;
+ setTypeSourceInfo(Info);
+ setDependence(computeDependence(this));
+}
+
+PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FnTy, IdentKind IK,
+ Expr *E)
+ : Expr(PredefinedExprClass, FnTy, VK_LValue, OK_Ordinary) {
+ PredefinedExprBits.Kind = IK;
+ assert((getIdentKind() == IK) &&
+ "IdentKind do not fit in PredefinedExprBitFields!");
+ assert(IK == UniqueStableNameExpr &&
+ "Constructor only valid with UniqueStableNameExpr");
+ PredefinedExprBits.HasFunctionName = false;
+ PredefinedExprBits.Loc = L;
+ setExpr(E);
+ setDependence(computeDependence(this));
}
PredefinedExpr::PredefinedExpr(EmptyShell Empty, bool HasFunctionName)
@@ -634,15 +546,44 @@ PredefinedExpr *PredefinedExpr::Create(const ASTContext &Ctx, SourceLocation L,
QualType FNTy, IdentKind IK,
StringLiteral *SL) {
bool HasFunctionName = SL != nullptr;
- void *Mem = Ctx.Allocate(totalSizeToAlloc<Stmt *>(HasFunctionName),
- alignof(PredefinedExpr));
+ void *Mem = Ctx.Allocate(
+ totalSizeToAlloc<Stmt *, Expr *, TypeSourceInfo *>(HasFunctionName, 0, 0),
+ alignof(PredefinedExpr));
return new (Mem) PredefinedExpr(L, FNTy, IK, SL);
}
+PredefinedExpr *PredefinedExpr::Create(const ASTContext &Ctx, SourceLocation L,
+ QualType FNTy, IdentKind IK,
+ StringLiteral *SL,
+ TypeSourceInfo *Info) {
+ assert(IK == UniqueStableNameType && "Only valid with UniqueStableNameType");
+ bool HasFunctionName = SL != nullptr;
+ void *Mem = Ctx.Allocate(totalSizeToAlloc<Stmt *, Expr *, TypeSourceInfo *>(
+ HasFunctionName, 0, !HasFunctionName),
+ alignof(PredefinedExpr));
+ if (HasFunctionName)
+ return new (Mem) PredefinedExpr(L, FNTy, IK, SL);
+ return new (Mem) PredefinedExpr(L, FNTy, IK, Info);
+}
+
+PredefinedExpr *PredefinedExpr::Create(const ASTContext &Ctx, SourceLocation L,
+ QualType FNTy, IdentKind IK,
+ StringLiteral *SL, Expr *E) {
+ assert(IK == UniqueStableNameExpr && "Only valid with UniqueStableNameExpr");
+ bool HasFunctionName = SL != nullptr;
+ void *Mem = Ctx.Allocate(totalSizeToAlloc<Stmt *, Expr *, TypeSourceInfo *>(
+ HasFunctionName, !HasFunctionName, 0),
+ alignof(PredefinedExpr));
+ if (HasFunctionName)
+ return new (Mem) PredefinedExpr(L, FNTy, IK, SL);
+ return new (Mem) PredefinedExpr(L, FNTy, IK, E);
+}
+
PredefinedExpr *PredefinedExpr::CreateEmpty(const ASTContext &Ctx,
bool HasFunctionName) {
- void *Mem = Ctx.Allocate(totalSizeToAlloc<Stmt *>(HasFunctionName),
- alignof(PredefinedExpr));
+ void *Mem = Ctx.Allocate(
+ totalSizeToAlloc<Stmt *, Expr *, TypeSourceInfo *>(HasFunctionName, 0, 0),
+ alignof(PredefinedExpr));
return new (Mem) PredefinedExpr(EmptyShell(), HasFunctionName);
}
@@ -662,12 +603,28 @@ StringRef PredefinedExpr::getIdentKindName(PredefinedExpr::IdentKind IK) {
return "__FUNCSIG__";
case LFuncSig:
return "L__FUNCSIG__";
+ case UniqueStableNameType:
+ case UniqueStableNameExpr:
+ return "__builtin_unique_stable_name";
case PrettyFunctionNoVirtual:
break;
}
llvm_unreachable("Unknown ident kind for PredefinedExpr");
}
+std::string PredefinedExpr::ComputeName(ASTContext &Context, IdentKind IK,
+ QualType Ty) {
+ std::unique_ptr<MangleContext> Ctx{ItaniumMangleContext::create(
+ Context, Context.getDiagnostics(), /*IsUniqueNameMangler*/ true)};
+
+ Ty = Ty.getCanonicalType();
+
+ SmallString<256> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ Ctx->mangleTypeName(Ty, Out);
+ return std::string(Buffer.str());
+}
+
// FIXME: Maybe this should use DeclPrinter with a special "print predefined
// expr" policy instead.
std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) {
@@ -681,18 +638,22 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) {
if (MC->shouldMangleDeclName(ND)) {
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
+ GlobalDecl GD;
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(ND))
- MC->mangleCXXCtor(CD, Ctor_Base, Out);
+ GD = GlobalDecl(CD, Ctor_Base);
else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(ND))
- MC->mangleCXXDtor(DD, Dtor_Base, Out);
+ GD = GlobalDecl(DD, Dtor_Base);
+ else if (ND->hasAttr<CUDAGlobalAttr>())
+ GD = GlobalDecl(cast<FunctionDecl>(ND));
else
- MC->mangleName(ND, Out);
+ GD = GlobalDecl(ND);
+ MC->mangleName(GD, Out);
if (!Buffer.empty() && Buffer.front() == '\01')
- return Buffer.substr(1);
- return Buffer.str();
+ return std::string(Buffer.substr(1));
+ return std::string(Buffer.str());
} else
- return ND->getIdentifier()->getName();
+ return std::string(ND->getIdentifier()->getName());
}
return "";
}
@@ -711,7 +672,7 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) {
Out << ComputeName(IK, DCBlock);
else if (auto *DCDecl = dyn_cast<Decl>(DC))
Out << ComputeName(IK, DCDecl) << "_block_invoke";
- return Out.str();
+ return std::string(Out.str());
}
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CurrentDecl)) {
if (IK != PrettyFunction && IK != PrettyFunctionNoVirtual &&
@@ -856,7 +817,7 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) {
Out << Proto;
- return Name.str().str();
+ return std::string(Name);
}
if (const CapturedDecl *CD = dyn_cast<CapturedDecl>(CurrentDecl)) {
for (const DeclContext *DC = CD->getParent(); DC; DC = DC->getParent())
@@ -887,7 +848,7 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) {
MD->getSelector().print(Out);
Out << ']';
- return Name.str().str();
+ return std::string(Name);
}
if (isa<TranslationUnitDecl>(CurrentDecl) && IK == PrettyFunction) {
// __PRETTY_FUNCTION__ -> "top level", the others produce an empty string.
@@ -915,13 +876,12 @@ void APNumericStorage::setIntValue(const ASTContext &C,
IntegerLiteral::IntegerLiteral(const ASTContext &C, const llvm::APInt &V,
QualType type, SourceLocation l)
- : Expr(IntegerLiteralClass, type, VK_RValue, OK_Ordinary, false, false,
- false, false),
- Loc(l) {
+ : Expr(IntegerLiteralClass, type, VK_RValue, OK_Ordinary), Loc(l) {
assert(type->isIntegerType() && "Illegal type in IntegerLiteral");
assert(V.getBitWidth() == C.getIntWidth(type) &&
"Integer type is not the correct size for constant.");
setValue(C, V);
+ setDependence(ExprDependence::None);
}
IntegerLiteral *
@@ -938,13 +898,13 @@ IntegerLiteral::Create(const ASTContext &C, EmptyShell Empty) {
FixedPointLiteral::FixedPointLiteral(const ASTContext &C, const llvm::APInt &V,
QualType type, SourceLocation l,
unsigned Scale)
- : Expr(FixedPointLiteralClass, type, VK_RValue, OK_Ordinary, false, false,
- false, false),
- Loc(l), Scale(Scale) {
+ : Expr(FixedPointLiteralClass, type, VK_RValue, OK_Ordinary), Loc(l),
+ Scale(Scale) {
assert(type->isFixedPointType() && "Illegal type in FixedPointLiteral");
assert(V.getBitWidth() == C.getTypeInfo(type).Width &&
"Fixed point type is not the correct size for constant.");
setValue(C, V);
+ setDependence(ExprDependence::None);
}
FixedPointLiteral *FixedPointLiteral::CreateFromRawInt(const ASTContext &C,
@@ -955,6 +915,11 @@ FixedPointLiteral *FixedPointLiteral::CreateFromRawInt(const ASTContext &C,
return new (C) FixedPointLiteral(C, V, type, l, Scale);
}
+FixedPointLiteral *FixedPointLiteral::Create(const ASTContext &C,
+ EmptyShell Empty) {
+ return new (C) FixedPointLiteral(Empty);
+}
+
std::string FixedPointLiteral::getValueAsString(unsigned Radix) const {
// Currently the longest decimal number that can be printed is the max for an
// unsigned long _Accum: 4294967295.99999999976716935634613037109375
@@ -962,16 +927,16 @@ std::string FixedPointLiteral::getValueAsString(unsigned Radix) const {
SmallString<64> S;
FixedPointValueToString(
S, llvm::APSInt::getUnsigned(getValue().getZExtValue()), Scale);
- return S.str();
+ return std::string(S.str());
}
FloatingLiteral::FloatingLiteral(const ASTContext &C, const llvm::APFloat &V,
bool isexact, QualType Type, SourceLocation L)
- : Expr(FloatingLiteralClass, Type, VK_RValue, OK_Ordinary, false, false,
- false, false), Loc(L) {
+ : Expr(FloatingLiteralClass, Type, VK_RValue, OK_Ordinary), Loc(L) {
setSemantics(V.getSemantics());
FloatingLiteralBits.IsExact = isexact;
setValue(C, V);
+ setDependence(ExprDependence::None);
}
FloatingLiteral::FloatingLiteral(const ASTContext &C, EmptyShell Empty)
@@ -1031,8 +996,7 @@ StringLiteral::StringLiteral(const ASTContext &Ctx, StringRef Str,
StringKind Kind, bool Pascal, QualType Ty,
const SourceLocation *Loc,
unsigned NumConcatenated)
- : Expr(StringLiteralClass, Ty, VK_LValue, OK_Ordinary, false, false, false,
- false) {
+ : Expr(StringLiteralClass, Ty, VK_LValue, OK_Ordinary) {
assert(Ctx.getAsConstantArrayType(Ty) &&
"StringLiteral must be of constant array type!");
unsigned CharByteWidth = mapCharByteWidth(Ctx.getTargetInfo(), Kind);
@@ -1071,6 +1035,8 @@ StringLiteral::StringLiteral(const ASTContext &Ctx, StringRef Str,
// Initialize the trailing array of char holding the string data.
std::memcpy(getTrailingObjects<char>(), Str.data(), ByteLength);
+
+ setDependence(ExprDependence::None);
}
StringLiteral::StringLiteral(EmptyShell Empty, unsigned NumConcatenated,
@@ -1339,10 +1305,7 @@ CallExpr::CallExpr(StmtClass SC, Expr *Fn, ArrayRef<Expr *> PreArgs,
ArrayRef<Expr *> Args, QualType Ty, ExprValueKind VK,
SourceLocation RParenLoc, unsigned MinNumArgs,
ADLCallKind UsesADL)
- : Expr(SC, Ty, VK, OK_Ordinary, Fn->isTypeDependent(),
- Fn->isValueDependent(), Fn->isInstantiationDependent(),
- Fn->containsUnexpandedParameterPack()),
- RParenLoc(RParenLoc) {
+ : Expr(SC, Ty, VK, OK_Ordinary), RParenLoc(RParenLoc) {
NumArgs = std::max<unsigned>(Args.size(), MinNumArgs);
unsigned NumPreArgs = PreArgs.size();
CallExprBits.NumPreArgs = NumPreArgs;
@@ -1356,17 +1319,14 @@ CallExpr::CallExpr(StmtClass SC, Expr *Fn, ArrayRef<Expr *> PreArgs,
CallExprBits.UsesADL = static_cast<bool>(UsesADL);
setCallee(Fn);
- for (unsigned I = 0; I != NumPreArgs; ++I) {
- updateDependenciesFromArg(PreArgs[I]);
+ for (unsigned I = 0; I != NumPreArgs; ++I)
setPreArg(I, PreArgs[I]);
- }
- for (unsigned I = 0; I != Args.size(); ++I) {
- updateDependenciesFromArg(Args[I]);
+ for (unsigned I = 0; I != Args.size(); ++I)
setArg(I, Args[I]);
- }
- for (unsigned I = Args.size(); I != NumArgs; ++I) {
+ for (unsigned I = Args.size(); I != NumArgs; ++I)
setArg(I, nullptr);
- }
+
+ setDependence(computeDependence(this, PreArgs));
}
CallExpr::CallExpr(StmtClass SC, unsigned NumPreArgs, unsigned NumArgs,
@@ -1400,7 +1360,8 @@ CallExpr *CallExpr::CreateTemporary(void *Mem, Expr *Fn, QualType Ty,
assert(!(reinterpret_cast<uintptr_t>(Mem) % alignof(CallExpr)) &&
"Misaligned memory in CallExpr::CreateTemporary!");
return new (Mem) CallExpr(CallExprClass, Fn, /*PreArgs=*/{}, /*Args=*/{}, Ty,
- VK, RParenLoc, /*MinNumArgs=*/0, UsesADL);
+ VK, RParenLoc,
+ /*MinNumArgs=*/0, UsesADL);
}
CallExpr *CallExpr::CreateEmpty(const ASTContext &Ctx, unsigned NumArgs,
@@ -1429,33 +1390,31 @@ unsigned CallExpr::offsetToTrailingObjects(StmtClass SC) {
}
}
-void CallExpr::updateDependenciesFromArg(Expr *Arg) {
- if (Arg->isTypeDependent())
- ExprBits.TypeDependent = true;
- if (Arg->isValueDependent())
- ExprBits.ValueDependent = true;
- if (Arg->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (Arg->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-}
-
Decl *Expr::getReferencedDeclOfCallee() {
Expr *CEE = IgnoreParenImpCasts();
- while (SubstNonTypeTemplateParmExpr *NTTP
- = dyn_cast<SubstNonTypeTemplateParmExpr>(CEE)) {
- CEE = NTTP->getReplacement()->IgnoreParenCasts();
+ while (SubstNonTypeTemplateParmExpr *NTTP =
+ dyn_cast<SubstNonTypeTemplateParmExpr>(CEE)) {
+ CEE = NTTP->getReplacement()->IgnoreParenImpCasts();
}
// If we're calling a dereference, look at the pointer instead.
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CEE)) {
- if (BO->isPtrMemOp())
- CEE = BO->getRHS()->IgnoreParenCasts();
- } else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(CEE)) {
- if (UO->getOpcode() == UO_Deref)
- CEE = UO->getSubExpr()->IgnoreParenCasts();
+ while (true) {
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CEE)) {
+ if (BO->isPtrMemOp()) {
+ CEE = BO->getRHS()->IgnoreParenImpCasts();
+ continue;
+ }
+ } else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(CEE)) {
+ if (UO->getOpcode() == UO_Deref || UO->getOpcode() == UO_AddrOf ||
+ UO->getOpcode() == UO_Plus) {
+ CEE = UO->getSubExpr()->IgnoreParenImpCasts();
+ continue;
+ }
+ }
+ break;
}
+
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE))
return DRE->getDecl();
if (MemberExpr *ME = dyn_cast<MemberExpr>(CEE))
@@ -1466,28 +1425,11 @@ Decl *Expr::getReferencedDeclOfCallee() {
return nullptr;
}
-/// getBuiltinCallee - If this is a call to a builtin, return the builtin ID. If
-/// not, return 0.
+/// If this is a call to a builtin, return the builtin ID. If not, return 0.
unsigned CallExpr::getBuiltinCallee() const {
- // All simple function calls (e.g. func()) are implicitly cast to pointer to
- // function. As a result, we try and obtain the DeclRefExpr from the
- // ImplicitCastExpr.
- const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(getCallee());
- if (!ICE) // FIXME: deal with more complex calls (e.g. (func)(), (*func)()).
- return 0;
-
- const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr());
- if (!DRE)
- return 0;
-
- const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(DRE->getDecl());
- if (!FDecl)
- return 0;
-
- if (!FDecl->getIdentifier())
- return 0;
-
- return FDecl->getBuiltinID();
+ auto *FDecl =
+ dyn_cast_or_null<FunctionDecl>(getCallee()->getReferencedDeclOfCallee());
+ return FDecl ? FDecl->getBuiltinID() : 0;
}
bool CallExpr::isUnevaluatedBuiltinCall(const ASTContext &Ctx) const {
@@ -1569,28 +1511,17 @@ OffsetOfExpr *OffsetOfExpr::CreateEmpty(const ASTContext &C,
OffsetOfExpr::OffsetOfExpr(const ASTContext &C, QualType type,
SourceLocation OperatorLoc, TypeSourceInfo *tsi,
- ArrayRef<OffsetOfNode> comps, ArrayRef<Expr*> exprs,
+ ArrayRef<OffsetOfNode> comps, ArrayRef<Expr *> exprs,
SourceLocation RParenLoc)
- : Expr(OffsetOfExprClass, type, VK_RValue, OK_Ordinary,
- /*TypeDependent=*/false,
- /*ValueDependent=*/tsi->getType()->isDependentType(),
- tsi->getType()->isInstantiationDependentType(),
- tsi->getType()->containsUnexpandedParameterPack()),
- OperatorLoc(OperatorLoc), RParenLoc(RParenLoc), TSInfo(tsi),
- NumComps(comps.size()), NumExprs(exprs.size())
-{
- for (unsigned i = 0; i != comps.size(); ++i) {
+ : Expr(OffsetOfExprClass, type, VK_RValue, OK_Ordinary),
+ OperatorLoc(OperatorLoc), RParenLoc(RParenLoc), TSInfo(tsi),
+ NumComps(comps.size()), NumExprs(exprs.size()) {
+ for (unsigned i = 0; i != comps.size(); ++i)
setComponent(i, comps[i]);
- }
-
- for (unsigned i = 0; i != exprs.size(); ++i) {
- if (exprs[i]->isTypeDependent() || exprs[i]->isValueDependent())
- ExprBits.ValueDependent = true;
- if (exprs[i]->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
+ for (unsigned i = 0; i != exprs.size(); ++i)
setIndexExpr(i, exprs[i]);
- }
+
+ setDependence(computeDependence(this));
}
IdentifierInfo *OffsetOfNode::getFieldName() const {
@@ -1604,39 +1535,15 @@ IdentifierInfo *OffsetOfNode::getFieldName() const {
UnaryExprOrTypeTraitExpr::UnaryExprOrTypeTraitExpr(
UnaryExprOrTypeTrait ExprKind, Expr *E, QualType resultType,
SourceLocation op, SourceLocation rp)
- : Expr(UnaryExprOrTypeTraitExprClass, resultType, VK_RValue, OK_Ordinary,
- false, // Never type-dependent (C++ [temp.dep.expr]p3).
- // Value-dependent if the argument is type-dependent.
- E->isTypeDependent(), E->isInstantiationDependent(),
- E->containsUnexpandedParameterPack()),
+ : Expr(UnaryExprOrTypeTraitExprClass, resultType, VK_RValue, OK_Ordinary),
OpLoc(op), RParenLoc(rp) {
+ assert(ExprKind <= UETT_Last && "invalid enum value!");
UnaryExprOrTypeTraitExprBits.Kind = ExprKind;
+ assert(static_cast<unsigned>(ExprKind) == UnaryExprOrTypeTraitExprBits.Kind &&
+ "UnaryExprOrTypeTraitExprBits.Kind overflow!");
UnaryExprOrTypeTraitExprBits.IsType = false;
Argument.Ex = E;
-
- // Check to see if we are in the situation where alignof(decl) should be
- // dependent because decl's alignment is dependent.
- if (ExprKind == UETT_AlignOf || ExprKind == UETT_PreferredAlignOf) {
- if (!isValueDependent() || !isInstantiationDependent()) {
- E = E->IgnoreParens();
-
- const ValueDecl *D = nullptr;
- if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
- D = DRE->getDecl();
- else if (const auto *ME = dyn_cast<MemberExpr>(E))
- D = ME->getMemberDecl();
-
- if (D) {
- for (const auto *I : D->specific_attrs<AlignedAttr>()) {
- if (I->isAlignmentDependent()) {
- setValueDependent(true);
- setInstantiationDependent(true);
- break;
- }
- }
- }
- }
- }
+ setDependence(computeDependence(this));
}
MemberExpr::MemberExpr(Expr *Base, bool IsArrow, SourceLocation OperatorLoc,
@@ -1644,11 +1551,8 @@ MemberExpr::MemberExpr(Expr *Base, bool IsArrow, SourceLocation OperatorLoc,
const DeclarationNameInfo &NameInfo, QualType T,
ExprValueKind VK, ExprObjectKind OK,
NonOdrUseReason NOUR)
- : Expr(MemberExprClass, T, VK, OK, Base->isTypeDependent(),
- Base->isValueDependent(), Base->isInstantiationDependent(),
- Base->containsUnexpandedParameterPack()),
- Base(Base), MemberDecl(MemberDecl), MemberDNLoc(NameInfo.getInfo()),
- MemberLoc(NameInfo.getLoc()) {
+ : Expr(MemberExprClass, T, VK, OK), Base(Base), MemberDecl(MemberDecl),
+ MemberDNLoc(NameInfo.getInfo()), MemberLoc(NameInfo.getLoc()) {
assert(!NameInfo.getName() ||
MemberDecl->getDeclName() == NameInfo.getName());
MemberExprBits.IsArrow = IsArrow;
@@ -1657,6 +1561,7 @@ MemberExpr::MemberExpr(Expr *Base, bool IsArrow, SourceLocation OperatorLoc,
MemberExprBits.HadMultipleCandidates = false;
MemberExprBits.NonOdrUseReason = NOUR;
MemberExprBits.OperatorLoc = OperatorLoc;
+ setDependence(computeDependence(this));
}
MemberExpr *MemberExpr::Create(
@@ -1678,25 +1583,15 @@ MemberExpr *MemberExpr::Create(
MemberExpr *E = new (Mem) MemberExpr(Base, IsArrow, OperatorLoc, MemberDecl,
NameInfo, T, VK, OK, NOUR);
- if (isa<FieldDecl>(MemberDecl)) {
- DeclContext *DC = MemberDecl->getDeclContext();
- // dyn_cast_or_null is used to handle objC variables which do not
- // have a declaration context.
- CXXRecordDecl *RD = dyn_cast_or_null<CXXRecordDecl>(DC);
- if (RD && RD->isDependentContext() && RD->isCurrentInstantiation(DC))
- E->setTypeDependent(T->isDependentType());
- }
-
+ // FIXME: remove remaining dependence computation to computeDependence().
+ auto Deps = E->getDependence();
if (HasQualOrFound) {
// FIXME: Wrong. We should be looking at the member declaration we found.
- if (QualifierLoc && QualifierLoc.getNestedNameSpecifier()->isDependent()) {
- E->setValueDependent(true);
- E->setTypeDependent(true);
- E->setInstantiationDependent(true);
- }
+ if (QualifierLoc && QualifierLoc.getNestedNameSpecifier()->isDependent())
+ Deps |= ExprDependence::TypeValueInstantiation;
else if (QualifierLoc &&
QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())
- E->setInstantiationDependent(true);
+ Deps |= ExprDependence::Instantiation;
E->MemberExprBits.HasQualifierOrFoundDecl = true;
@@ -1710,19 +1605,17 @@ MemberExpr *MemberExpr::Create(
TemplateArgs || TemplateKWLoc.isValid();
if (TemplateArgs) {
- bool Dependent = false;
- bool InstantiationDependent = false;
- bool ContainsUnexpandedParameterPack = false;
+ auto TemplateArgDeps = TemplateArgumentDependence::None;
E->getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
TemplateKWLoc, *TemplateArgs,
- E->getTrailingObjects<TemplateArgumentLoc>(), Dependent,
- InstantiationDependent, ContainsUnexpandedParameterPack);
- if (InstantiationDependent)
- E->setInstantiationDependent(true);
+ E->getTrailingObjects<TemplateArgumentLoc>(), TemplateArgDeps);
+ if (TemplateArgDeps & TemplateArgumentDependence::Instantiation)
+ Deps |= ExprDependence::Instantiation;
} else if (TemplateKWLoc.isValid()) {
E->getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
TemplateKWLoc);
}
+ E->setDependence(Deps);
return E;
}
@@ -1823,12 +1716,13 @@ bool CastExpr::CastConsistency() const {
auto Ty = getType();
auto SETy = getSubExpr()->getType();
assert(getValueKindForType(Ty) == Expr::getValueKindForType(SETy));
- if (isRValue()) {
+ if (isRValue() && !Ty->isDependentType() && !SETy->isDependentType()) {
Ty = Ty->getPointeeType();
SETy = SETy->getPointeeType();
}
- assert(!Ty.isNull() && !SETy.isNull() &&
- Ty.getAddressSpace() != SETy.getAddressSpace());
+ assert((Ty->isDependentType() || SETy->isDependentType()) ||
+ (!Ty.isNull() && !SETy.isNull() &&
+ Ty.getAddressSpace() != SETy.getAddressSpace()));
goto CheckNoBasePath;
}
// These should not have an inheritance path.
@@ -2163,9 +2057,10 @@ SourceLocExpr::SourceLocExpr(const ASTContext &Ctx, IdentKind Kind,
SourceLocation BLoc, SourceLocation RParenLoc,
DeclContext *ParentContext)
: Expr(SourceLocExprClass, getDecayedSourceLocExprType(Ctx, Kind),
- VK_RValue, OK_Ordinary, false, false, false, false),
+ VK_RValue, OK_Ordinary),
BuiltinLoc(BLoc), RParenLoc(RParenLoc), ParentContext(ParentContext) {
SourceLocExprBits.Kind = Kind;
+ setDependence(ExprDependence::None);
}
StringRef SourceLocExpr::getBuiltinStr() const {
@@ -2229,25 +2124,14 @@ APValue SourceLocExpr::EvaluateInContext(const ASTContext &Ctx,
}
InitListExpr::InitListExpr(const ASTContext &C, SourceLocation lbraceloc,
- ArrayRef<Expr*> initExprs, SourceLocation rbraceloc)
- : Expr(InitListExprClass, QualType(), VK_RValue, OK_Ordinary, false, false,
- false, false),
- InitExprs(C, initExprs.size()),
- LBraceLoc(lbraceloc), RBraceLoc(rbraceloc), AltForm(nullptr, true)
-{
+ ArrayRef<Expr *> initExprs, SourceLocation rbraceloc)
+ : Expr(InitListExprClass, QualType(), VK_RValue, OK_Ordinary),
+ InitExprs(C, initExprs.size()), LBraceLoc(lbraceloc),
+ RBraceLoc(rbraceloc), AltForm(nullptr, true) {
sawArrayRangeDesignator(false);
- for (unsigned I = 0; I != initExprs.size(); ++I) {
- if (initExprs[I]->isTypeDependent())
- ExprBits.TypeDependent = true;
- if (initExprs[I]->isValueDependent())
- ExprBits.ValueDependent = true;
- if (initExprs[I]->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (initExprs[I]->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
- }
-
InitExprs.insert(C, InitExprs.end(), initExprs.begin(), initExprs.end());
+
+ setDependence(computeDependence(this));
}
void InitListExpr::reserveInits(const ASTContext &C, unsigned NumInits) {
@@ -2388,6 +2272,64 @@ Stmt *BlockExpr::getBody() {
// Generic Expression Routines
//===----------------------------------------------------------------------===//
+bool Expr::isReadIfDiscardedInCPlusPlus11() const {
+ // In C++11, discarded-value expressions of a certain form are special,
+ // according to [expr]p10:
+ // The lvalue-to-rvalue conversion (4.1) is applied only if the
+ // expression is an lvalue of volatile-qualified type and it has
+ // one of the following forms:
+ if (!isGLValue() || !getType().isVolatileQualified())
+ return false;
+
+ const Expr *E = IgnoreParens();
+
+ // - id-expression (5.1.1),
+ if (isa<DeclRefExpr>(E))
+ return true;
+
+ // - subscripting (5.2.1),
+ if (isa<ArraySubscriptExpr>(E))
+ return true;
+
+ // - class member access (5.2.5),
+ if (isa<MemberExpr>(E))
+ return true;
+
+ // - indirection (5.3.1),
+ if (auto *UO = dyn_cast<UnaryOperator>(E))
+ if (UO->getOpcode() == UO_Deref)
+ return true;
+
+ if (auto *BO = dyn_cast<BinaryOperator>(E)) {
+ // - pointer-to-member operation (5.5),
+ if (BO->isPtrMemOp())
+ return true;
+
+ // - comma expression (5.18) where the right operand is one of the above.
+ if (BO->getOpcode() == BO_Comma)
+ return BO->getRHS()->isReadIfDiscardedInCPlusPlus11();
+ }
+
+ // - conditional expression (5.16) where both the second and the third
+ // operands are one of the above, or
+ if (auto *CO = dyn_cast<ConditionalOperator>(E))
+ return CO->getTrueExpr()->isReadIfDiscardedInCPlusPlus11() &&
+ CO->getFalseExpr()->isReadIfDiscardedInCPlusPlus11();
+ // The related edge case of "*x ?: *x".
+ if (auto *BCO =
+ dyn_cast<BinaryConditionalOperator>(E)) {
+ if (auto *OVE = dyn_cast<OpaqueValueExpr>(BCO->getTrueExpr()))
+ return OVE->getSourceExpr()->isReadIfDiscardedInCPlusPlus11() &&
+ BCO->getFalseExpr()->isReadIfDiscardedInCPlusPlus11();
+ }
+
+ // Objective-C++ extensions to the rule.
+ if (isa<PseudoObjectExpr>(E) || isa<ObjCIvarRefExpr>(E))
+ return true;
+
+ return false;
+}
+
/// isUnusedResultAWarning - Return true if this immediate expression should
/// be warned about if the result is unused. If so, fill in Loc and Ranges
/// with location to warn on and the source range[s] to report with the
@@ -2576,6 +2518,7 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
// If we don't know precisely what we're looking at, let's not warn.
case UnresolvedLookupExprClass:
case CXXUnresolvedConstructExprClass:
+ case RecoveryExprClass:
return false;
case CXXTemporaryObjectExprClass:
@@ -2675,20 +2618,31 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
}
case CXXFunctionalCastExprClass:
case CStyleCastExprClass: {
- // Ignore an explicit cast to void unless the operand is a non-trivial
- // volatile lvalue.
+ // Ignore an explicit cast to void, except in C++98 if the operand is a
+ // volatile glvalue for which we would trigger an implicit read in any
+ // other language mode. (Such an implicit read always happens as part of
+ // the lvalue conversion in C, and happens in C++ for expressions of all
+ // forms where it seems likely the user intended to trigger a volatile
+ // load.)
const CastExpr *CE = cast<CastExpr>(this);
+ const Expr *SubE = CE->getSubExpr()->IgnoreParens();
if (CE->getCastKind() == CK_ToVoid) {
- if (CE->getSubExpr()->isGLValue() &&
- CE->getSubExpr()->getType().isVolatileQualified()) {
- const DeclRefExpr *DRE =
- dyn_cast<DeclRefExpr>(CE->getSubExpr()->IgnoreParens());
- if (!(DRE && isa<VarDecl>(DRE->getDecl()) &&
- cast<VarDecl>(DRE->getDecl())->hasLocalStorage()) &&
- !isa<CallExpr>(CE->getSubExpr()->IgnoreParens())) {
- return CE->getSubExpr()->isUnusedResultAWarning(WarnE, Loc,
- R1, R2, Ctx);
- }
+ if (Ctx.getLangOpts().CPlusPlus && !Ctx.getLangOpts().CPlusPlus11 &&
+ SubE->isReadIfDiscardedInCPlusPlus11()) {
+ // Suppress the "unused value" warning for idiomatic usage of
+ // '(void)var;' used to suppress "unused variable" warnings.
+ if (auto *DRE = dyn_cast<DeclRefExpr>(SubE))
+ if (auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (!VD->isExternallyVisible())
+ return false;
+
+ // The lvalue-to-rvalue conversion would have no effect for an array.
+ // It's implausible that the programmer expected this to result in a
+ // volatile array load, so don't warn.
+ if (SubE->getType()->isArrayType())
+ return false;
+
+ return SubE->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
}
return false;
}
@@ -2900,6 +2854,12 @@ static Expr *IgnoreImplicitAsWrittenSingleStep(Expr *E) {
return IgnoreImplicitSingleStep(E);
}
+static Expr *IgnoreParensOnlySingleStep(Expr *E) {
+ if (auto *PE = dyn_cast<ParenExpr>(E))
+ return PE->getSubExpr();
+ return E;
+}
+
static Expr *IgnoreParensSingleStep(Expr *E) {
if (auto *PE = dyn_cast<ParenExpr>(E))
return PE->getSubExpr();
@@ -2919,9 +2879,6 @@ static Expr *IgnoreParensSingleStep(Expr *E) {
return CE->getChosenSubExpr();
}
- else if (auto *CE = dyn_cast<ConstantExpr>(E))
- return CE->getSubExpr();
-
return E;
}
@@ -3026,12 +2983,16 @@ Expr *Expr::IgnoreUnlessSpelledInSource() {
Expr *LastE = nullptr;
while (E != LastE) {
LastE = E;
- E = E->IgnoreParenImpCasts();
+ E = IgnoreExprNodes(E, IgnoreImplicitSingleStep,
+ IgnoreImpCastsExtraSingleStep,
+ IgnoreParensOnlySingleStep);
auto SR = E->getSourceRange();
if (auto *C = dyn_cast<CXXConstructExpr>(E)) {
- if (C->getNumArgs() == 1) {
+ auto NumArgs = C->getNumArgs();
+ if (NumArgs == 1 ||
+ (NumArgs > 1 && isa<CXXDefaultArgExpr>(C->getArg(1)))) {
Expr *A = C->getArg(0);
if (A->getSourceRange() == SR || !isa<CXXTemporaryObjectExpr>(C))
E = A;
@@ -3039,7 +3000,18 @@ Expr *Expr::IgnoreUnlessSpelledInSource() {
}
if (auto *C = dyn_cast<CXXMemberCallExpr>(E)) {
- Expr *ExprNode = C->getImplicitObjectArgument()->IgnoreParenImpCasts();
+ Expr *ExprNode = C->getImplicitObjectArgument();
+ if (ExprNode->getSourceRange() == SR) {
+ E = ExprNode;
+ continue;
+ }
+ if (auto *PE = dyn_cast<ParenExpr>(ExprNode)) {
+ if (PE->getSourceRange() == C->getSourceRange()) {
+ E = PE;
+ continue;
+ }
+ }
+ ExprNode = ExprNode->IgnoreParenImpCasts();
if (ExprNode->getSourceRange() == SR)
E = ExprNode;
}
@@ -3206,6 +3178,9 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
switch (getStmtClass()) {
default: break;
+ case Stmt::ExprWithCleanupsClass:
+ return cast<ExprWithCleanups>(this)->getSubExpr()->isConstantInitializer(
+ Ctx, IsForRef, Culprit);
case StringLiteralClass:
case ObjCEncodeExprClass:
return true;
@@ -3319,6 +3294,7 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
case ObjCBridgedCastExprClass:
case CXXDynamicCastExprClass:
case CXXReinterpretCastExprClass:
+ case CXXAddrspaceCastExprClass:
case CXXConstCastExprClass: {
const CastExpr *CE = cast<CastExpr>(this);
@@ -3385,6 +3361,26 @@ namespace {
bool hasSideEffects() const { return HasSideEffects; }
+ void VisitDecl(const Decl *D) {
+ if (!D)
+ return;
+
+ // We assume the caller checks subexpressions (eg, the initializer, VLA
+ // bounds) for side-effects on our behalf.
+ if (auto *VD = dyn_cast<VarDecl>(D)) {
+ // Registering a destructor is a side-effect.
+ if (IncludePossibleEffects && VD->isThisDeclarationADefinition() &&
+ VD->needsDestruction(Context))
+ HasSideEffects = true;
+ }
+ }
+
+ void VisitDeclStmt(const DeclStmt *DS) {
+ for (auto *D : DS->decls())
+ VisitDecl(D);
+ Inherited::VisitDeclStmt(DS);
+ }
+
void VisitExpr(const Expr *E) {
if (!HasSideEffects &&
E->HasSideEffects(Context, IncludePossibleEffects))
@@ -3421,6 +3417,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case SubstNonTypeTemplateParmPackExprClass:
case FunctionParmPackExprClass:
case TypoExprClass:
+ case RecoveryExprClass:
case CXXFoldExprClass:
llvm_unreachable("shouldn't see dependent / unresolved nodes here");
@@ -3457,6 +3454,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case OpaqueValueExprClass:
case SourceLocExprClass:
case ConceptSpecializationExprClass:
+ case RequiresExprClass:
// These never have a side-effect.
return false;
@@ -3515,7 +3513,10 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case ParenExprClass:
case ArraySubscriptExprClass:
+ case MatrixSubscriptExprClass:
case OMPArraySectionExprClass:
+ case OMPArrayShapingExprClass:
+ case OMPIteratorExprClass:
case MemberExprClass:
case ConditionalOperatorClass:
case BinaryConditionalOperatorClass:
@@ -3586,6 +3587,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case CXXStaticCastExprClass:
case CXXReinterpretCastExprClass:
case CXXConstCastExprClass:
+ case CXXAddrspaceCastExprClass:
case CXXFunctionalCastExprClass:
case BuiltinBitCastExprClass: {
// While volatile reads are side-effecting in both C and C++, we treat them
@@ -3627,7 +3629,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case LambdaExprClass: {
const LambdaExpr *LE = cast<LambdaExpr>(this);
for (Expr *E : LE->capture_inits())
- if (E->HasSideEffects(Ctx, IncludePossibleEffects))
+ if (E && E->HasSideEffects(Ctx, IncludePossibleEffects))
return true;
return false;
}
@@ -3810,6 +3812,11 @@ Expr::isNullPointerConstant(ASTContext &Ctx,
return Source->isNullPointerConstant(Ctx, NPC);
}
+ // If the expression has no type information, it cannot be a null pointer
+ // constant.
+ if (getType().isNull())
+ return NPCK_NotNull;
+
// C++11 nullptr_t is always a null pointer constant.
if (getType()->isNullPtrType())
return NPCK_CXX11_nullptr;
@@ -4148,28 +4155,16 @@ void ExtVectorElementExpr::getEncodedElementAccess(
}
}
-ShuffleVectorExpr::ShuffleVectorExpr(const ASTContext &C, ArrayRef<Expr*> args,
+ShuffleVectorExpr::ShuffleVectorExpr(const ASTContext &C, ArrayRef<Expr *> args,
QualType Type, SourceLocation BLoc,
SourceLocation RP)
- : Expr(ShuffleVectorExprClass, Type, VK_RValue, OK_Ordinary,
- Type->isDependentType(), Type->isDependentType(),
- Type->isInstantiationDependentType(),
- Type->containsUnexpandedParameterPack()),
- BuiltinLoc(BLoc), RParenLoc(RP), NumExprs(args.size())
-{
+ : Expr(ShuffleVectorExprClass, Type, VK_RValue, OK_Ordinary),
+ BuiltinLoc(BLoc), RParenLoc(RP), NumExprs(args.size()) {
SubExprs = new (C) Stmt*[args.size()];
- for (unsigned i = 0; i != args.size(); i++) {
- if (args[i]->isTypeDependent())
- ExprBits.TypeDependent = true;
- if (args[i]->isValueDependent())
- ExprBits.ValueDependent = true;
- if (args[i]->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (args[i]->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
+ for (unsigned i = 0; i != args.size(); i++)
SubExprs[i] = args[i];
- }
+
+ setDependence(computeDependence(this));
}
void ShuffleVectorExpr::setExprs(const ASTContext &C, ArrayRef<Expr *> Exprs) {
@@ -4187,11 +4182,7 @@ GenericSelectionExpr::GenericSelectionExpr(
bool ContainsUnexpandedParameterPack, unsigned ResultIndex)
: Expr(GenericSelectionExprClass, AssocExprs[ResultIndex]->getType(),
AssocExprs[ResultIndex]->getValueKind(),
- AssocExprs[ResultIndex]->getObjectKind(),
- AssocExprs[ResultIndex]->isTypeDependent(),
- AssocExprs[ResultIndex]->isValueDependent(),
- AssocExprs[ResultIndex]->isInstantiationDependent(),
- ContainsUnexpandedParameterPack),
+ AssocExprs[ResultIndex]->getObjectKind()),
NumAssocs(AssocExprs.size()), ResultIndex(ResultIndex),
DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
assert(AssocTypes.size() == AssocExprs.size() &&
@@ -4205,6 +4196,8 @@ GenericSelectionExpr::GenericSelectionExpr(
getTrailingObjects<Stmt *>() + AssocExprStartIndex);
std::copy(AssocTypes.begin(), AssocTypes.end(),
getTrailingObjects<TypeSourceInfo *>());
+
+ setDependence(computeDependence(this, ContainsUnexpandedParameterPack));
}
GenericSelectionExpr::GenericSelectionExpr(
@@ -4213,10 +4206,7 @@ GenericSelectionExpr::GenericSelectionExpr(
SourceLocation DefaultLoc, SourceLocation RParenLoc,
bool ContainsUnexpandedParameterPack)
: Expr(GenericSelectionExprClass, Context.DependentTy, VK_RValue,
- OK_Ordinary,
- /*isTypeDependent=*/true,
- /*isValueDependent=*/true,
- /*isInstantiationDependent=*/true, ContainsUnexpandedParameterPack),
+ OK_Ordinary),
NumAssocs(AssocExprs.size()), ResultIndex(ResultDependentIndex),
DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
assert(AssocTypes.size() == AssocExprs.size() &&
@@ -4229,6 +4219,8 @@ GenericSelectionExpr::GenericSelectionExpr(
getTrailingObjects<Stmt *>() + AssocExprStartIndex);
std::copy(AssocTypes.begin(), AssocTypes.end(),
getTrailingObjects<TypeSourceInfo *>());
+
+ setDependence(computeDependence(this, ContainsUnexpandedParameterPack));
}
GenericSelectionExpr::GenericSelectionExpr(EmptyShell Empty, unsigned NumAssocs)
@@ -4287,15 +4279,11 @@ DesignatedInitExpr::DesignatedInitExpr(const ASTContext &C, QualType Ty,
llvm::ArrayRef<Designator> Designators,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
- ArrayRef<Expr*> IndexExprs,
- Expr *Init)
- : Expr(DesignatedInitExprClass, Ty,
- Init->getValueKind(), Init->getObjectKind(),
- Init->isTypeDependent(), Init->isValueDependent(),
- Init->isInstantiationDependent(),
- Init->containsUnexpandedParameterPack()),
- EqualOrColonLoc(EqualOrColonLoc), GNUSyntax(GNUSyntax),
- NumDesignators(Designators.size()), NumSubExprs(IndexExprs.size() + 1) {
+ ArrayRef<Expr *> IndexExprs, Expr *Init)
+ : Expr(DesignatedInitExprClass, Ty, Init->getValueKind(),
+ Init->getObjectKind()),
+ EqualOrColonLoc(EqualOrColonLoc), GNUSyntax(GNUSyntax),
+ NumDesignators(Designators.size()), NumSubExprs(IndexExprs.size() + 1) {
this->Designators = new (C) Designator[NumDesignators];
// Record the initializer itself.
@@ -4307,38 +4295,10 @@ DesignatedInitExpr::DesignatedInitExpr(const ASTContext &C, QualType Ty,
unsigned IndexIdx = 0;
for (unsigned I = 0; I != NumDesignators; ++I) {
this->Designators[I] = Designators[I];
-
if (this->Designators[I].isArrayDesignator()) {
- // Compute type- and value-dependence.
- Expr *Index = IndexExprs[IndexIdx];
- if (Index->isTypeDependent() || Index->isValueDependent())
- ExprBits.TypeDependent = ExprBits.ValueDependent = true;
- if (Index->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- // Propagate unexpanded parameter packs.
- if (Index->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
// Copy the index expressions into permanent storage.
*Child++ = IndexExprs[IndexIdx++];
} else if (this->Designators[I].isArrayRangeDesignator()) {
- // Compute type- and value-dependence.
- Expr *Start = IndexExprs[IndexIdx];
- Expr *End = IndexExprs[IndexIdx + 1];
- if (Start->isTypeDependent() || Start->isValueDependent() ||
- End->isTypeDependent() || End->isValueDependent()) {
- ExprBits.TypeDependent = ExprBits.ValueDependent = true;
- ExprBits.InstantiationDependent = true;
- } else if (Start->isInstantiationDependent() ||
- End->isInstantiationDependent()) {
- ExprBits.InstantiationDependent = true;
- }
-
- // Propagate unexpanded parameter packs.
- if (Start->containsUnexpandedParameterPack() ||
- End->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
// Copy the start/end expressions into permanent storage.
*Child++ = IndexExprs[IndexIdx++];
*Child++ = IndexExprs[IndexIdx++];
@@ -4346,6 +4306,7 @@ DesignatedInitExpr::DesignatedInitExpr(const ASTContext &C, QualType Ty,
}
assert(IndexIdx == IndexExprs.size() && "Wrong number of index expressions");
+ setDependence(computeDependence(this));
}
DesignatedInitExpr *
@@ -4449,14 +4410,19 @@ void DesignatedInitExpr::ExpandDesignator(const ASTContext &C, unsigned Idx,
}
DesignatedInitUpdateExpr::DesignatedInitUpdateExpr(const ASTContext &C,
- SourceLocation lBraceLoc, Expr *baseExpr, SourceLocation rBraceLoc)
- : Expr(DesignatedInitUpdateExprClass, baseExpr->getType(), VK_RValue,
- OK_Ordinary, false, false, false, false) {
+ SourceLocation lBraceLoc,
+ Expr *baseExpr,
+ SourceLocation rBraceLoc)
+ : Expr(DesignatedInitUpdateExprClass, baseExpr->getType(), VK_RValue,
+ OK_Ordinary) {
BaseAndUpdaterExprs[0] = baseExpr;
InitListExpr *ILE = new (C) InitListExpr(C, lBraceLoc, None, rBraceLoc);
ILE->setType(baseExpr->getType());
BaseAndUpdaterExprs[1] = ILE;
+
+ // FIXME: this is wrong, set it correctly.
+ setDependence(ExprDependence::None);
}
SourceLocation DesignatedInitUpdateExpr::getBeginLoc() const {
@@ -4469,23 +4435,13 @@ SourceLocation DesignatedInitUpdateExpr::getEndLoc() const {
ParenListExpr::ParenListExpr(SourceLocation LParenLoc, ArrayRef<Expr *> Exprs,
SourceLocation RParenLoc)
- : Expr(ParenListExprClass, QualType(), VK_RValue, OK_Ordinary, false, false,
- false, false),
+ : Expr(ParenListExprClass, QualType(), VK_RValue, OK_Ordinary),
LParenLoc(LParenLoc), RParenLoc(RParenLoc) {
ParenListExprBits.NumExprs = Exprs.size();
- for (unsigned I = 0, N = Exprs.size(); I != N; ++I) {
- if (Exprs[I]->isTypeDependent())
- ExprBits.TypeDependent = true;
- if (Exprs[I]->isValueDependent())
- ExprBits.ValueDependent = true;
- if (Exprs[I]->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (Exprs[I]->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
+ for (unsigned I = 0, N = Exprs.size(); I != N; ++I)
getTrailingObjects<Stmt *>()[I] = Exprs[I];
- }
+ setDependence(computeDependence(this));
}
ParenListExpr::ParenListExpr(EmptyShell Empty, unsigned NumExprs)
@@ -4509,6 +4465,115 @@ ParenListExpr *ParenListExpr::CreateEmpty(const ASTContext &Ctx,
return new (Mem) ParenListExpr(EmptyShell(), NumExprs);
}
+BinaryOperator::BinaryOperator(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
+ Opcode opc, QualType ResTy, ExprValueKind VK,
+ ExprObjectKind OK, SourceLocation opLoc,
+ FPOptionsOverride FPFeatures)
+ : Expr(BinaryOperatorClass, ResTy, VK, OK) {
+ BinaryOperatorBits.Opc = opc;
+ assert(!isCompoundAssignmentOp() &&
+ "Use CompoundAssignOperator for compound assignments");
+ BinaryOperatorBits.OpLoc = opLoc;
+ SubExprs[LHS] = lhs;
+ SubExprs[RHS] = rhs;
+ BinaryOperatorBits.HasFPFeatures = FPFeatures.requiresTrailingStorage();
+ if (BinaryOperatorBits.HasFPFeatures)
+ *getTrailingFPFeatures() = FPFeatures;
+ setDependence(computeDependence(this));
+}
+
+BinaryOperator::BinaryOperator(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
+ Opcode opc, QualType ResTy, ExprValueKind VK,
+ ExprObjectKind OK, SourceLocation opLoc,
+ FPOptionsOverride FPFeatures, bool dead2)
+ : Expr(CompoundAssignOperatorClass, ResTy, VK, OK) {
+ BinaryOperatorBits.Opc = opc;
+ assert(isCompoundAssignmentOp() &&
+ "Use CompoundAssignOperator for compound assignments");
+ BinaryOperatorBits.OpLoc = opLoc;
+ SubExprs[LHS] = lhs;
+ SubExprs[RHS] = rhs;
+ BinaryOperatorBits.HasFPFeatures = FPFeatures.requiresTrailingStorage();
+ if (BinaryOperatorBits.HasFPFeatures)
+ *getTrailingFPFeatures() = FPFeatures;
+ setDependence(computeDependence(this));
+}
+
+BinaryOperator *BinaryOperator::CreateEmpty(const ASTContext &C,
+ bool HasFPFeatures) {
+ unsigned Extra = sizeOfTrailingObjects(HasFPFeatures);
+ void *Mem =
+ C.Allocate(sizeof(BinaryOperator) + Extra, alignof(BinaryOperator));
+ return new (Mem) BinaryOperator(EmptyShell());
+}
+
+BinaryOperator *BinaryOperator::Create(const ASTContext &C, Expr *lhs,
+ Expr *rhs, Opcode opc, QualType ResTy,
+ ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation opLoc,
+ FPOptionsOverride FPFeatures) {
+ bool HasFPFeatures = FPFeatures.requiresTrailingStorage();
+ unsigned Extra = sizeOfTrailingObjects(HasFPFeatures);
+ void *Mem =
+ C.Allocate(sizeof(BinaryOperator) + Extra, alignof(BinaryOperator));
+ return new (Mem)
+ BinaryOperator(C, lhs, rhs, opc, ResTy, VK, OK, opLoc, FPFeatures);
+}
+
+CompoundAssignOperator *
+CompoundAssignOperator::CreateEmpty(const ASTContext &C, bool HasFPFeatures) {
+ unsigned Extra = sizeOfTrailingObjects(HasFPFeatures);
+ void *Mem = C.Allocate(sizeof(CompoundAssignOperator) + Extra,
+ alignof(CompoundAssignOperator));
+ return new (Mem) CompoundAssignOperator(C, EmptyShell(), HasFPFeatures);
+}
+
+CompoundAssignOperator *
+CompoundAssignOperator::Create(const ASTContext &C, Expr *lhs, Expr *rhs,
+ Opcode opc, QualType ResTy, ExprValueKind VK,
+ ExprObjectKind OK, SourceLocation opLoc,
+ FPOptionsOverride FPFeatures,
+ QualType CompLHSType, QualType CompResultType) {
+ bool HasFPFeatures = FPFeatures.requiresTrailingStorage();
+ unsigned Extra = sizeOfTrailingObjects(HasFPFeatures);
+ void *Mem = C.Allocate(sizeof(CompoundAssignOperator) + Extra,
+ alignof(CompoundAssignOperator));
+ return new (Mem)
+ CompoundAssignOperator(C, lhs, rhs, opc, ResTy, VK, OK, opLoc, FPFeatures,
+ CompLHSType, CompResultType);
+}
+
+UnaryOperator *UnaryOperator::CreateEmpty(const ASTContext &C,
+ bool hasFPFeatures) {
+ void *Mem = C.Allocate(totalSizeToAlloc<FPOptionsOverride>(hasFPFeatures),
+ alignof(UnaryOperator));
+ return new (Mem) UnaryOperator(hasFPFeatures, EmptyShell());
+}
+
+UnaryOperator::UnaryOperator(const ASTContext &Ctx, Expr *input, Opcode opc,
+ QualType type, ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation l, bool CanOverflow,
+ FPOptionsOverride FPFeatures)
+ : Expr(UnaryOperatorClass, type, VK, OK), Val(input) {
+ UnaryOperatorBits.Opc = opc;
+ UnaryOperatorBits.CanOverflow = CanOverflow;
+ UnaryOperatorBits.Loc = l;
+ UnaryOperatorBits.HasFPFeatures = FPFeatures.requiresTrailingStorage();
+ setDependence(computeDependence(this));
+}
+
+UnaryOperator *UnaryOperator::Create(const ASTContext &C, Expr *input,
+ Opcode opc, QualType type,
+ ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation l, bool CanOverflow,
+ FPOptionsOverride FPFeatures) {
+ bool HasFPFeatures = FPFeatures.requiresTrailingStorage();
+ unsigned Size = totalSizeToAlloc<FPOptionsOverride>(HasFPFeatures);
+ void *Mem = C.Allocate(Size, alignof(UnaryOperator));
+ return new (Mem)
+ UnaryOperator(C, input, opc, type, VK, OK, l, CanOverflow, FPFeatures);
+}
+
const OpaqueValueExpr *OpaqueValueExpr::findInCopyConstruct(const Expr *e) {
if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(e))
e = ewc->getSubExpr();
@@ -4559,10 +4624,9 @@ PseudoObjectExpr *PseudoObjectExpr::Create(const ASTContext &C, Expr *syntax,
}
PseudoObjectExpr::PseudoObjectExpr(QualType type, ExprValueKind VK,
- Expr *syntax, ArrayRef<Expr*> semantics,
+ Expr *syntax, ArrayRef<Expr *> semantics,
unsigned resultIndex)
- : Expr(PseudoObjectExprClass, type, VK, OK_Ordinary,
- /*filled in at end of ctor*/ false, false, false, false) {
+ : Expr(PseudoObjectExprClass, type, VK, OK_Ordinary) {
PseudoObjectExprBits.NumSubExprs = semantics.size() + 1;
PseudoObjectExprBits.ResultIndex = resultIndex + 1;
@@ -4570,20 +4634,13 @@ PseudoObjectExpr::PseudoObjectExpr(QualType type, ExprValueKind VK,
Expr *E = (i == 0 ? syntax : semantics[i-1]);
getSubExprsBuffer()[i] = E;
- if (E->isTypeDependent())
- ExprBits.TypeDependent = true;
- if (E->isValueDependent())
- ExprBits.ValueDependent = true;
- if (E->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (E->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
if (isa<OpaqueValueExpr>(E))
assert(cast<OpaqueValueExpr>(E)->getSourceExpr() != nullptr &&
"opaque-value semantic expressions for pseudo-object "
"operations must have sources");
}
+
+ setDependence(computeDependence(this));
}
//===----------------------------------------------------------------------===//
@@ -4610,25 +4667,14 @@ Stmt::const_child_range UnaryExprOrTypeTraitExpr::children() const {
return const_child_range(&Argument.Ex, &Argument.Ex + 1);
}
-AtomicExpr::AtomicExpr(SourceLocation BLoc, ArrayRef<Expr*> args,
- QualType t, AtomicOp op, SourceLocation RP)
- : Expr(AtomicExprClass, t, VK_RValue, OK_Ordinary,
- false, false, false, false),
- NumSubExprs(args.size()), BuiltinLoc(BLoc), RParenLoc(RP), Op(op)
-{
+AtomicExpr::AtomicExpr(SourceLocation BLoc, ArrayRef<Expr *> args, QualType t,
+ AtomicOp op, SourceLocation RP)
+ : Expr(AtomicExprClass, t, VK_RValue, OK_Ordinary),
+ NumSubExprs(args.size()), BuiltinLoc(BLoc), RParenLoc(RP), Op(op) {
assert(args.size() == getNumSubExprs(op) && "wrong number of subexpressions");
- for (unsigned i = 0; i != args.size(); i++) {
- if (args[i]->isTypeDependent())
- ExprBits.TypeDependent = true;
- if (args[i]->isValueDependent())
- ExprBits.ValueDependent = true;
- if (args[i]->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (args[i]->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
+ for (unsigned i = 0; i != args.size(); i++)
SubExprs[i] = args[i];
- }
+ setDependence(computeDependence(this));
}
unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
@@ -4730,3 +4776,211 @@ QualType OMPArraySectionExpr::getBaseOriginalType(const Expr *Base) {
}
return OriginalTy;
}
+
+RecoveryExpr::RecoveryExpr(ASTContext &Ctx, QualType T, SourceLocation BeginLoc,
+ SourceLocation EndLoc, ArrayRef<Expr *> SubExprs)
+ : Expr(RecoveryExprClass, T.getNonReferenceType(),
+ T->isDependentType() ? VK_LValue : getValueKindForType(T),
+ OK_Ordinary),
+ BeginLoc(BeginLoc), EndLoc(EndLoc), NumExprs(SubExprs.size()) {
+ assert(!T.isNull());
+ assert(llvm::all_of(SubExprs, [](Expr* E) { return E != nullptr; }));
+
+ llvm::copy(SubExprs, getTrailingObjects<Expr *>());
+ setDependence(computeDependence(this));
+}
+
+RecoveryExpr *RecoveryExpr::Create(ASTContext &Ctx, QualType T,
+ SourceLocation BeginLoc,
+ SourceLocation EndLoc,
+ ArrayRef<Expr *> SubExprs) {
+ void *Mem = Ctx.Allocate(totalSizeToAlloc<Expr *>(SubExprs.size()),
+ alignof(RecoveryExpr));
+ return new (Mem) RecoveryExpr(Ctx, T, BeginLoc, EndLoc, SubExprs);
+}
+
+RecoveryExpr *RecoveryExpr::CreateEmpty(ASTContext &Ctx, unsigned NumSubExprs) {
+ void *Mem = Ctx.Allocate(totalSizeToAlloc<Expr *>(NumSubExprs),
+ alignof(RecoveryExpr));
+ return new (Mem) RecoveryExpr(EmptyShell(), NumSubExprs);
+}
+
+void OMPArrayShapingExpr::setDimensions(ArrayRef<Expr *> Dims) {
+ assert(
+ NumDims == Dims.size() &&
+ "Preallocated number of dimensions is different from the provided one.");
+ llvm::copy(Dims, getTrailingObjects<Expr *>());
+}
+
+void OMPArrayShapingExpr::setBracketsRanges(ArrayRef<SourceRange> BR) {
+ assert(
+ NumDims == BR.size() &&
+ "Preallocated number of dimensions is different from the provided one.");
+ llvm::copy(BR, getTrailingObjects<SourceRange>());
+}
+
+OMPArrayShapingExpr::OMPArrayShapingExpr(QualType ExprTy, Expr *Op,
+ SourceLocation L, SourceLocation R,
+ ArrayRef<Expr *> Dims)
+ : Expr(OMPArrayShapingExprClass, ExprTy, VK_LValue, OK_Ordinary), LPLoc(L),
+ RPLoc(R), NumDims(Dims.size()) {
+ setBase(Op);
+ setDimensions(Dims);
+ setDependence(computeDependence(this));
+}
+
+OMPArrayShapingExpr *
+OMPArrayShapingExpr::Create(const ASTContext &Context, QualType T, Expr *Op,
+ SourceLocation L, SourceLocation R,
+ ArrayRef<Expr *> Dims,
+ ArrayRef<SourceRange> BracketRanges) {
+ assert(Dims.size() == BracketRanges.size() &&
+ "Different number of dimensions and brackets ranges.");
+ void *Mem = Context.Allocate(
+ totalSizeToAlloc<Expr *, SourceRange>(Dims.size() + 1, Dims.size()),
+ alignof(OMPArrayShapingExpr));
+ auto *E = new (Mem) OMPArrayShapingExpr(T, Op, L, R, Dims);
+ E->setBracketsRanges(BracketRanges);
+ return E;
+}
+
+OMPArrayShapingExpr *OMPArrayShapingExpr::CreateEmpty(const ASTContext &Context,
+ unsigned NumDims) {
+ void *Mem = Context.Allocate(
+ totalSizeToAlloc<Expr *, SourceRange>(NumDims + 1, NumDims),
+ alignof(OMPArrayShapingExpr));
+ return new (Mem) OMPArrayShapingExpr(EmptyShell(), NumDims);
+}
+
+void OMPIteratorExpr::setIteratorDeclaration(unsigned I, Decl *D) {
+ assert(I < NumIterators &&
+ "Idx is greater or equal the number of iterators definitions.");
+ getTrailingObjects<Decl *>()[I] = D;
+}
+
+void OMPIteratorExpr::setAssignmentLoc(unsigned I, SourceLocation Loc) {
+ assert(I < NumIterators &&
+ "Idx is greater or equal the number of iterators definitions.");
+ getTrailingObjects<
+ SourceLocation>()[I * static_cast<int>(RangeLocOffset::Total) +
+ static_cast<int>(RangeLocOffset::AssignLoc)] = Loc;
+}
+
+void OMPIteratorExpr::setIteratorRange(unsigned I, Expr *Begin,
+ SourceLocation ColonLoc, Expr *End,
+ SourceLocation SecondColonLoc,
+ Expr *Step) {
+ assert(I < NumIterators &&
+ "Idx is greater or equal the number of iterators definitions.");
+ getTrailingObjects<Expr *>()[I * static_cast<int>(RangeExprOffset::Total) +
+ static_cast<int>(RangeExprOffset::Begin)] =
+ Begin;
+ getTrailingObjects<Expr *>()[I * static_cast<int>(RangeExprOffset::Total) +
+ static_cast<int>(RangeExprOffset::End)] = End;
+ getTrailingObjects<Expr *>()[I * static_cast<int>(RangeExprOffset::Total) +
+ static_cast<int>(RangeExprOffset::Step)] = Step;
+ getTrailingObjects<
+ SourceLocation>()[I * static_cast<int>(RangeLocOffset::Total) +
+ static_cast<int>(RangeLocOffset::FirstColonLoc)] =
+ ColonLoc;
+ getTrailingObjects<
+ SourceLocation>()[I * static_cast<int>(RangeLocOffset::Total) +
+ static_cast<int>(RangeLocOffset::SecondColonLoc)] =
+ SecondColonLoc;
+}
+
+Decl *OMPIteratorExpr::getIteratorDecl(unsigned I) {
+ return getTrailingObjects<Decl *>()[I];
+}
+
+OMPIteratorExpr::IteratorRange OMPIteratorExpr::getIteratorRange(unsigned I) {
+ IteratorRange Res;
+ Res.Begin =
+ getTrailingObjects<Expr *>()[I * static_cast<int>(
+ RangeExprOffset::Total) +
+ static_cast<int>(RangeExprOffset::Begin)];
+ Res.End =
+ getTrailingObjects<Expr *>()[I * static_cast<int>(
+ RangeExprOffset::Total) +
+ static_cast<int>(RangeExprOffset::End)];
+ Res.Step =
+ getTrailingObjects<Expr *>()[I * static_cast<int>(
+ RangeExprOffset::Total) +
+ static_cast<int>(RangeExprOffset::Step)];
+ return Res;
+}
+
+SourceLocation OMPIteratorExpr::getAssignLoc(unsigned I) const {
+ return getTrailingObjects<
+ SourceLocation>()[I * static_cast<int>(RangeLocOffset::Total) +
+ static_cast<int>(RangeLocOffset::AssignLoc)];
+}
+
+SourceLocation OMPIteratorExpr::getColonLoc(unsigned I) const {
+ return getTrailingObjects<
+ SourceLocation>()[I * static_cast<int>(RangeLocOffset::Total) +
+ static_cast<int>(RangeLocOffset::FirstColonLoc)];
+}
+
+SourceLocation OMPIteratorExpr::getSecondColonLoc(unsigned I) const {
+ return getTrailingObjects<
+ SourceLocation>()[I * static_cast<int>(RangeLocOffset::Total) +
+ static_cast<int>(RangeLocOffset::SecondColonLoc)];
+}
+
+void OMPIteratorExpr::setHelper(unsigned I, const OMPIteratorHelperData &D) {
+ getTrailingObjects<OMPIteratorHelperData>()[I] = D;
+}
+
+OMPIteratorHelperData &OMPIteratorExpr::getHelper(unsigned I) {
+ return getTrailingObjects<OMPIteratorHelperData>()[I];
+}
+
+const OMPIteratorHelperData &OMPIteratorExpr::getHelper(unsigned I) const {
+ return getTrailingObjects<OMPIteratorHelperData>()[I];
+}
+
+OMPIteratorExpr::OMPIteratorExpr(
+ QualType ExprTy, SourceLocation IteratorKwLoc, SourceLocation L,
+ SourceLocation R, ArrayRef<OMPIteratorExpr::IteratorDefinition> Data,
+ ArrayRef<OMPIteratorHelperData> Helpers)
+ : Expr(OMPIteratorExprClass, ExprTy, VK_LValue, OK_Ordinary),
+ IteratorKwLoc(IteratorKwLoc), LPLoc(L), RPLoc(R),
+ NumIterators(Data.size()) {
+ for (unsigned I = 0, E = Data.size(); I < E; ++I) {
+ const IteratorDefinition &D = Data[I];
+ setIteratorDeclaration(I, D.IteratorDecl);
+ setAssignmentLoc(I, D.AssignmentLoc);
+ setIteratorRange(I, D.Range.Begin, D.ColonLoc, D.Range.End,
+ D.SecondColonLoc, D.Range.Step);
+ setHelper(I, Helpers[I]);
+ }
+ setDependence(computeDependence(this));
+}
+
+OMPIteratorExpr *
+OMPIteratorExpr::Create(const ASTContext &Context, QualType T,
+ SourceLocation IteratorKwLoc, SourceLocation L,
+ SourceLocation R,
+ ArrayRef<OMPIteratorExpr::IteratorDefinition> Data,
+ ArrayRef<OMPIteratorHelperData> Helpers) {
+ assert(Data.size() == Helpers.size() &&
+ "Data and helpers must have the same size.");
+ void *Mem = Context.Allocate(
+ totalSizeToAlloc<Decl *, Expr *, SourceLocation, OMPIteratorHelperData>(
+ Data.size(), Data.size() * static_cast<int>(RangeExprOffset::Total),
+ Data.size() * static_cast<int>(RangeLocOffset::Total),
+ Helpers.size()),
+ alignof(OMPIteratorExpr));
+ return new (Mem) OMPIteratorExpr(T, IteratorKwLoc, L, R, Data, Helpers);
+}
+
+OMPIteratorExpr *OMPIteratorExpr::CreateEmpty(const ASTContext &Context,
+ unsigned NumIterators) {
+ void *Mem = Context.Allocate(
+ totalSizeToAlloc<Decl *, Expr *, SourceLocation, OMPIteratorHelperData>(
+ NumIterators, NumIterators * static_cast<int>(RangeExprOffset::Total),
+ NumIterators * static_cast<int>(RangeLocOffset::Total), NumIterators),
+ alignof(OMPIteratorExpr));
+ return new (Mem) OMPIteratorExpr(EmptyShell(), NumIterators);
+}
diff --git a/clang/lib/AST/ExprCXX.cpp b/clang/lib/AST/ExprCXX.cpp
index 422227d787b1..5d99f61c579f 100644
--- a/clang/lib/AST/ExprCXX.cpp
+++ b/clang/lib/AST/ExprCXX.cpp
@@ -13,11 +13,14 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/ComputeDependence.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclAccessPair.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/LambdaCapture.h"
#include "clang/AST/NestedNameSpecifier.h"
@@ -172,9 +175,7 @@ CXXNewExpr::CXXNewExpr(bool IsGlobalNew, FunctionDecl *OperatorNew,
Expr *Initializer, QualType Ty,
TypeSourceInfo *AllocatedTypeInfo, SourceRange Range,
SourceRange DirectInitRange)
- : Expr(CXXNewExprClass, Ty, VK_RValue, OK_Ordinary, Ty->isDependentType(),
- Ty->isDependentType(), Ty->isInstantiationDependentType(),
- Ty->containsUnexpandedParameterPack()),
+ : Expr(CXXNewExprClass, Ty, VK_RValue, OK_Ordinary),
OperatorNew(OperatorNew), OperatorDelete(OperatorDelete),
AllocatedTypeInfo(AllocatedTypeInfo), Range(Range),
DirectInitRange(DirectInitRange) {
@@ -192,42 +193,13 @@ CXXNewExpr::CXXNewExpr(bool IsGlobalNew, FunctionDecl *OperatorNew,
CXXNewExprBits.IsParenTypeId = IsParenTypeId;
CXXNewExprBits.NumPlacementArgs = PlacementArgs.size();
- if (ArraySize) {
- if (Expr *SizeExpr = *ArraySize) {
- if (SizeExpr->isValueDependent())
- ExprBits.ValueDependent = true;
- if (SizeExpr->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (SizeExpr->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
- }
-
+ if (ArraySize)
getTrailingObjects<Stmt *>()[arraySizeOffset()] = *ArraySize;
- }
-
- if (Initializer) {
- if (Initializer->isValueDependent())
- ExprBits.ValueDependent = true;
- if (Initializer->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (Initializer->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
+ if (Initializer)
getTrailingObjects<Stmt *>()[initExprOffset()] = Initializer;
- }
-
- for (unsigned I = 0; I != PlacementArgs.size(); ++I) {
- if (PlacementArgs[I]->isValueDependent())
- ExprBits.ValueDependent = true;
- if (PlacementArgs[I]->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (PlacementArgs[I]->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
+ for (unsigned I = 0; I != PlacementArgs.size(); ++I)
getTrailingObjects<Stmt *>()[placementNewArgsOffset() + I] =
PlacementArgs[I];
- }
-
if (IsParenTypeId)
getTrailingObjects<SourceRange>()[0] = TypeIdParens;
@@ -243,6 +215,8 @@ CXXNewExpr::CXXNewExpr(bool IsGlobalNew, FunctionDecl *OperatorNew,
this->Range.setEnd(TypeIdParens.getEnd());
break;
}
+
+ setDependence(computeDependence(this));
}
CXXNewExpr::CXXNewExpr(EmptyShell Empty, bool IsArray,
@@ -330,40 +304,19 @@ PseudoDestructorTypeStorage::PseudoDestructorTypeStorage(TypeSourceInfo *Info)
Location = Info->getTypeLoc().getLocalSourceRange().getBegin();
}
-CXXPseudoDestructorExpr::CXXPseudoDestructorExpr(const ASTContext &Context,
- Expr *Base, bool isArrow, SourceLocation OperatorLoc,
- NestedNameSpecifierLoc QualifierLoc, TypeSourceInfo *ScopeType,
- SourceLocation ColonColonLoc, SourceLocation TildeLoc,
- PseudoDestructorTypeStorage DestroyedType)
- : Expr(CXXPseudoDestructorExprClass,
- Context.BoundMemberTy,
- VK_RValue, OK_Ordinary,
- /*isTypeDependent=*/(Base->isTypeDependent() ||
- (DestroyedType.getTypeSourceInfo() &&
- DestroyedType.getTypeSourceInfo()->getType()->isDependentType())),
- /*isValueDependent=*/Base->isValueDependent(),
- (Base->isInstantiationDependent() ||
- (QualifierLoc &&
- QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent()) ||
- (ScopeType &&
- ScopeType->getType()->isInstantiationDependentType()) ||
- (DestroyedType.getTypeSourceInfo() &&
- DestroyedType.getTypeSourceInfo()->getType()
- ->isInstantiationDependentType())),
- // ContainsUnexpandedParameterPack
- (Base->containsUnexpandedParameterPack() ||
- (QualifierLoc &&
- QualifierLoc.getNestedNameSpecifier()
- ->containsUnexpandedParameterPack()) ||
- (ScopeType &&
- ScopeType->getType()->containsUnexpandedParameterPack()) ||
- (DestroyedType.getTypeSourceInfo() &&
- DestroyedType.getTypeSourceInfo()->getType()
- ->containsUnexpandedParameterPack()))),
- Base(static_cast<Stmt *>(Base)), IsArrow(isArrow),
- OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc),
- ScopeType(ScopeType), ColonColonLoc(ColonColonLoc), TildeLoc(TildeLoc),
- DestroyedType(DestroyedType) {}
+CXXPseudoDestructorExpr::CXXPseudoDestructorExpr(
+ const ASTContext &Context, Expr *Base, bool isArrow,
+ SourceLocation OperatorLoc, NestedNameSpecifierLoc QualifierLoc,
+ TypeSourceInfo *ScopeType, SourceLocation ColonColonLoc,
+ SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType)
+ : Expr(CXXPseudoDestructorExprClass, Context.BoundMemberTy, VK_RValue,
+ OK_Ordinary),
+ Base(static_cast<Stmt *>(Base)), IsArrow(isArrow),
+ OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc),
+ ScopeType(ScopeType), ColonColonLoc(ColonColonLoc), TildeLoc(TildeLoc),
+ DestroyedType(DestroyedType) {
+ setDependence(computeDependence(this));
+}
QualType CXXPseudoDestructorExpr::getDestroyedType() const {
if (TypeSourceInfo *TInfo = DestroyedType.getTypeSourceInfo())
@@ -453,62 +406,31 @@ OverloadExpr::OverloadExpr(StmtClass SC, const ASTContext &Context,
UnresolvedSetIterator End, bool KnownDependent,
bool KnownInstantiationDependent,
bool KnownContainsUnexpandedParameterPack)
- : Expr(
- SC, Context.OverloadTy, VK_LValue, OK_Ordinary, KnownDependent,
- KnownDependent,
- (KnownInstantiationDependent || NameInfo.isInstantiationDependent() ||
- (QualifierLoc &&
- QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())),
- (KnownContainsUnexpandedParameterPack ||
- NameInfo.containsUnexpandedParameterPack() ||
- (QualifierLoc && QualifierLoc.getNestedNameSpecifier()
- ->containsUnexpandedParameterPack()))),
- NameInfo(NameInfo), QualifierLoc(QualifierLoc) {
+ : Expr(SC, Context.OverloadTy, VK_LValue, OK_Ordinary), NameInfo(NameInfo),
+ QualifierLoc(QualifierLoc) {
unsigned NumResults = End - Begin;
OverloadExprBits.NumResults = NumResults;
OverloadExprBits.HasTemplateKWAndArgsInfo =
(TemplateArgs != nullptr ) || TemplateKWLoc.isValid();
if (NumResults) {
- // Determine whether this expression is type-dependent.
- for (UnresolvedSetImpl::const_iterator I = Begin; I != End; ++I) {
- if ((*I)->getDeclContext()->isDependentContext() ||
- isa<UnresolvedUsingValueDecl>(*I)) {
- ExprBits.TypeDependent = true;
- ExprBits.ValueDependent = true;
- ExprBits.InstantiationDependent = true;
- }
- }
-
// Copy the results to the trailing array past UnresolvedLookupExpr
// or UnresolvedMemberExpr.
DeclAccessPair *Results = getTrailingResults();
memcpy(Results, Begin.I, NumResults * sizeof(DeclAccessPair));
}
- // If we have explicit template arguments, check for dependent
- // template arguments and whether they contain any unexpanded pack
- // expansions.
if (TemplateArgs) {
- bool Dependent = false;
- bool InstantiationDependent = false;
- bool ContainsUnexpandedParameterPack = false;
+ auto Deps = TemplateArgumentDependence::None;
getTrailingASTTemplateKWAndArgsInfo()->initializeFrom(
- TemplateKWLoc, *TemplateArgs, getTrailingTemplateArgumentLoc(),
- Dependent, InstantiationDependent, ContainsUnexpandedParameterPack);
-
- if (Dependent) {
- ExprBits.TypeDependent = true;
- ExprBits.ValueDependent = true;
- }
- if (InstantiationDependent)
- ExprBits.InstantiationDependent = true;
- if (ContainsUnexpandedParameterPack)
- ExprBits.ContainsUnexpandedParameterPack = true;
+ TemplateKWLoc, *TemplateArgs, getTrailingTemplateArgumentLoc(), Deps);
} else if (TemplateKWLoc.isValid()) {
getTrailingASTTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
}
+ setDependence(computeDependence(this, KnownDependent,
+ KnownInstantiationDependent,
+ KnownContainsUnexpandedParameterPack));
if (isTypeDependent())
setType(Context.DependentTy);
}
@@ -525,31 +447,19 @@ DependentScopeDeclRefExpr::DependentScopeDeclRefExpr(
QualType Ty, NestedNameSpecifierLoc QualifierLoc,
SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *Args)
- : Expr(
- DependentScopeDeclRefExprClass, Ty, VK_LValue, OK_Ordinary, true,
- true,
- (NameInfo.isInstantiationDependent() ||
- (QualifierLoc &&
- QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())),
- (NameInfo.containsUnexpandedParameterPack() ||
- (QualifierLoc && QualifierLoc.getNestedNameSpecifier()
- ->containsUnexpandedParameterPack()))),
+ : Expr(DependentScopeDeclRefExprClass, Ty, VK_LValue, OK_Ordinary),
QualifierLoc(QualifierLoc), NameInfo(NameInfo) {
DependentScopeDeclRefExprBits.HasTemplateKWAndArgsInfo =
(Args != nullptr) || TemplateKWLoc.isValid();
if (Args) {
- bool Dependent = true;
- bool InstantiationDependent = true;
- bool ContainsUnexpandedParameterPack
- = ExprBits.ContainsUnexpandedParameterPack;
+ auto Deps = TemplateArgumentDependence::None;
getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
- TemplateKWLoc, *Args, getTrailingObjects<TemplateArgumentLoc>(),
- Dependent, InstantiationDependent, ContainsUnexpandedParameterPack);
- ExprBits.ContainsUnexpandedParameterPack = ContainsUnexpandedParameterPack;
+ TemplateKWLoc, *Args, getTrailingObjects<TemplateArgumentLoc>(), Deps);
} else if (TemplateKWLoc.isValid()) {
getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
TemplateKWLoc);
}
+ setDependence(computeDependence(this));
}
DependentScopeDeclRefExpr *DependentScopeDeclRefExpr::Create(
@@ -615,27 +525,27 @@ CXXOperatorCallExpr::CXXOperatorCallExpr(OverloadedOperatorKind OpKind,
Expr *Fn, ArrayRef<Expr *> Args,
QualType Ty, ExprValueKind VK,
SourceLocation OperatorLoc,
- FPOptions FPFeatures,
+ FPOptionsOverride FPFeatures,
ADLCallKind UsesADL)
: CallExpr(CXXOperatorCallExprClass, Fn, /*PreArgs=*/{}, Args, Ty, VK,
OperatorLoc, /*MinNumArgs=*/0, UsesADL) {
CXXOperatorCallExprBits.OperatorKind = OpKind;
- CXXOperatorCallExprBits.FPFeatures = FPFeatures.getInt();
assert(
(CXXOperatorCallExprBits.OperatorKind == static_cast<unsigned>(OpKind)) &&
"OperatorKind overflow!");
- assert((CXXOperatorCallExprBits.FPFeatures == FPFeatures.getInt()) &&
- "FPFeatures overflow!");
Range = getSourceRangeImpl();
+ Overrides = FPFeatures;
}
CXXOperatorCallExpr::CXXOperatorCallExpr(unsigned NumArgs, EmptyShell Empty)
: CallExpr(CXXOperatorCallExprClass, /*NumPreArgs=*/0, NumArgs, Empty) {}
-CXXOperatorCallExpr *CXXOperatorCallExpr::Create(
- const ASTContext &Ctx, OverloadedOperatorKind OpKind, Expr *Fn,
- ArrayRef<Expr *> Args, QualType Ty, ExprValueKind VK,
- SourceLocation OperatorLoc, FPOptions FPFeatures, ADLCallKind UsesADL) {
+CXXOperatorCallExpr *
+CXXOperatorCallExpr::Create(const ASTContext &Ctx,
+ OverloadedOperatorKind OpKind, Expr *Fn,
+ ArrayRef<Expr *> Args, QualType Ty,
+ ExprValueKind VK, SourceLocation OperatorLoc,
+ FPOptionsOverride FPFeatures, ADLCallKind UsesADL) {
// Allocate storage for the trailing objects of CallExpr.
unsigned NumArgs = Args.size();
unsigned SizeOfTrailingObjects =
@@ -667,7 +577,7 @@ SourceRange CXXOperatorCallExpr::getSourceRangeImpl() const {
// Postfix operator
return SourceRange(getArg(0)->getBeginLoc(), getOperatorLoc());
} else if (Kind == OO_Arrow) {
- return getArg(0)->getSourceRange();
+ return SourceRange(getArg(0)->getBeginLoc(), getOperatorLoc());
} else if (Kind == OO_Call) {
return SourceRange(getArg(0)->getBeginLoc(), getRParenLoc());
} else if (Kind == OO_Subscript) {
@@ -766,6 +676,7 @@ const char *CXXNamedCastExpr::getCastName() const {
case CXXDynamicCastExprClass: return "dynamic_cast";
case CXXReinterpretCastExprClass: return "reinterpret_cast";
case CXXConstCastExprClass: return "const_cast";
+ case CXXAddrspaceCastExprClass: return "addrspace_cast";
default: return "<invalid cast>";
}
}
@@ -890,6 +801,19 @@ CXXConstCastExpr *CXXConstCastExpr::CreateEmpty(const ASTContext &C) {
return new (C) CXXConstCastExpr(EmptyShell());
}
+CXXAddrspaceCastExpr *
+CXXAddrspaceCastExpr::Create(const ASTContext &C, QualType T, ExprValueKind VK,
+ CastKind K, Expr *Op, TypeSourceInfo *WrittenTy,
+ SourceLocation L, SourceLocation RParenLoc,
+ SourceRange AngleBrackets) {
+ return new (C) CXXAddrspaceCastExpr(T, VK, K, Op, WrittenTy, L, RParenLoc,
+ AngleBrackets);
+}
+
+CXXAddrspaceCastExpr *CXXAddrspaceCastExpr::CreateEmpty(const ASTContext &C) {
+ return new (C) CXXAddrspaceCastExpr(EmptyShell());
+}
+
CXXFunctionalCastExpr *
CXXFunctionalCastExpr::Create(const ASTContext &C, QualType T, ExprValueKind VK,
TypeSourceInfo *Written, CastKind K, Expr *Op,
@@ -989,17 +913,19 @@ const IdentifierInfo *UserDefinedLiteral::getUDSuffix() const {
return cast<FunctionDecl>(getCalleeDecl())->getLiteralIdentifier();
}
-CXXDefaultInitExpr::CXXDefaultInitExpr(const ASTContext &Ctx, SourceLocation Loc,
- FieldDecl *Field, QualType Ty,
- DeclContext *UsedContext)
+CXXDefaultInitExpr::CXXDefaultInitExpr(const ASTContext &Ctx,
+ SourceLocation Loc, FieldDecl *Field,
+ QualType Ty, DeclContext *UsedContext)
: Expr(CXXDefaultInitExprClass, Ty.getNonLValueExprType(Ctx),
- Ty->isLValueReferenceType() ? VK_LValue : Ty->isRValueReferenceType()
- ? VK_XValue
- : VK_RValue,
- /*FIXME*/ OK_Ordinary, false, false, false, false),
+ Ty->isLValueReferenceType()
+ ? VK_LValue
+ : Ty->isRValueReferenceType() ? VK_XValue : VK_RValue,
+ /*FIXME*/ OK_Ordinary),
Field(Field), UsedContext(UsedContext) {
CXXDefaultInitExprBits.Loc = Loc;
assert(Field->hasInClassInitializer());
+
+ setDependence(ExprDependence::None);
}
CXXTemporary *CXXTemporary::Create(const ASTContext &C,
@@ -1097,11 +1023,8 @@ CXXConstructExpr::CXXConstructExpr(
bool ListInitialization, bool StdInitListInitialization,
bool ZeroInitialization, ConstructionKind ConstructKind,
SourceRange ParenOrBraceRange)
- : Expr(SC, Ty, VK_RValue, OK_Ordinary, Ty->isDependentType(),
- Ty->isDependentType(), Ty->isInstantiationDependentType(),
- Ty->containsUnexpandedParameterPack()),
- Constructor(Ctor), ParenOrBraceRange(ParenOrBraceRange),
- NumArgs(Args.size()) {
+ : Expr(SC, Ty, VK_RValue, OK_Ordinary), Constructor(Ctor),
+ ParenOrBraceRange(ParenOrBraceRange), NumArgs(Args.size()) {
CXXConstructExprBits.Elidable = Elidable;
CXXConstructExprBits.HadMultipleCandidates = HadMultipleCandidates;
CXXConstructExprBits.ListInitialization = ListInitialization;
@@ -1113,16 +1036,10 @@ CXXConstructExpr::CXXConstructExpr(
Stmt **TrailingArgs = getTrailingArgs();
for (unsigned I = 0, N = Args.size(); I != N; ++I) {
assert(Args[I] && "NULL argument in CXXConstructExpr!");
-
- if (Args[I]->isValueDependent())
- ExprBits.ValueDependent = true;
- if (Args[I]->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (Args[I]->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
TrailingArgs[I] = Args[I];
}
+
+ setDependence(computeDependence(this));
}
CXXConstructExpr::CXXConstructExpr(StmtClass SC, EmptyShell Empty,
@@ -1170,37 +1087,22 @@ LambdaCaptureKind LambdaCapture::getCaptureKind() const {
LambdaExpr::LambdaExpr(QualType T, SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
- SourceLocation CaptureDefaultLoc,
- ArrayRef<LambdaCapture> Captures, bool ExplicitParams,
+ SourceLocation CaptureDefaultLoc, bool ExplicitParams,
bool ExplicitResultType, ArrayRef<Expr *> CaptureInits,
SourceLocation ClosingBrace,
bool ContainsUnexpandedParameterPack)
- : Expr(LambdaExprClass, T, VK_RValue, OK_Ordinary, T->isDependentType(),
- T->isDependentType(), T->isDependentType(),
- ContainsUnexpandedParameterPack),
+ : Expr(LambdaExprClass, T, VK_RValue, OK_Ordinary),
IntroducerRange(IntroducerRange), CaptureDefaultLoc(CaptureDefaultLoc),
- NumCaptures(Captures.size()), CaptureDefault(CaptureDefault),
- ExplicitParams(ExplicitParams), ExplicitResultType(ExplicitResultType),
ClosingBrace(ClosingBrace) {
- assert(CaptureInits.size() == Captures.size() && "Wrong number of arguments");
+ LambdaExprBits.NumCaptures = CaptureInits.size();
+ LambdaExprBits.CaptureDefault = CaptureDefault;
+ LambdaExprBits.ExplicitParams = ExplicitParams;
+ LambdaExprBits.ExplicitResultType = ExplicitResultType;
+
CXXRecordDecl *Class = getLambdaClass();
- CXXRecordDecl::LambdaDefinitionData &Data = Class->getLambdaData();
-
- // FIXME: Propagate "has unexpanded parameter pack" bit.
-
- // Copy captures.
- const ASTContext &Context = Class->getASTContext();
- Data.NumCaptures = NumCaptures;
- Data.NumExplicitCaptures = 0;
- Data.Captures =
- (LambdaCapture *)Context.Allocate(sizeof(LambdaCapture) * NumCaptures);
- LambdaCapture *ToCapture = Data.Captures;
- for (unsigned I = 0, N = Captures.size(); I != N; ++I) {
- if (Captures[I].isExplicit())
- ++Data.NumExplicitCaptures;
-
- *ToCapture++ = Captures[I];
- }
+ (void)Class;
+ assert(capture_size() == Class->capture_size() && "Wrong number of captures");
+ assert(getCaptureDefault() == Class->getLambdaCaptureDefault());
// Copy initialization expressions for the non-static data members.
Stmt **Stored = getStoredStmts();
@@ -1209,24 +1111,37 @@ LambdaExpr::LambdaExpr(QualType T, SourceRange IntroducerRange,
// Copy the body of the lambda.
*Stored++ = getCallOperator()->getBody();
+
+ setDependence(computeDependence(this, ContainsUnexpandedParameterPack));
}
-LambdaExpr *LambdaExpr::Create(
- const ASTContext &Context, CXXRecordDecl *Class,
- SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault,
- SourceLocation CaptureDefaultLoc, ArrayRef<LambdaCapture> Captures,
- bool ExplicitParams, bool ExplicitResultType, ArrayRef<Expr *> CaptureInits,
- SourceLocation ClosingBrace, bool ContainsUnexpandedParameterPack) {
+LambdaExpr::LambdaExpr(EmptyShell Empty, unsigned NumCaptures)
+ : Expr(LambdaExprClass, Empty) {
+ LambdaExprBits.NumCaptures = NumCaptures;
+
+ // Initially don't initialize the body of the LambdaExpr. The body will
+ // be lazily deserialized when needed.
+ getStoredStmts()[NumCaptures] = nullptr; // Not one past the end.
+}
+
+LambdaExpr *LambdaExpr::Create(const ASTContext &Context, CXXRecordDecl *Class,
+ SourceRange IntroducerRange,
+ LambdaCaptureDefault CaptureDefault,
+ SourceLocation CaptureDefaultLoc,
+ bool ExplicitParams, bool ExplicitResultType,
+ ArrayRef<Expr *> CaptureInits,
+ SourceLocation ClosingBrace,
+ bool ContainsUnexpandedParameterPack) {
// Determine the type of the expression (i.e., the type of the
// function object we're creating).
QualType T = Context.getTypeDeclType(Class);
- unsigned Size = totalSizeToAlloc<Stmt *>(Captures.size() + 1);
+ unsigned Size = totalSizeToAlloc<Stmt *>(CaptureInits.size() + 1);
void *Mem = Context.Allocate(Size);
return new (Mem)
LambdaExpr(T, IntroducerRange, CaptureDefault, CaptureDefaultLoc,
- Captures, ExplicitParams, ExplicitResultType, CaptureInits,
- ClosingBrace, ContainsUnexpandedParameterPack);
+ ExplicitParams, ExplicitResultType, CaptureInits, ClosingBrace,
+ ContainsUnexpandedParameterPack);
}
LambdaExpr *LambdaExpr::CreateDeserialized(const ASTContext &C,
@@ -1236,6 +1151,25 @@ LambdaExpr *LambdaExpr::CreateDeserialized(const ASTContext &C,
return new (Mem) LambdaExpr(EmptyShell(), NumCaptures);
}
+void LambdaExpr::initBodyIfNeeded() const {
+ if (!getStoredStmts()[capture_size()]) {
+ auto *This = const_cast<LambdaExpr *>(this);
+ This->getStoredStmts()[capture_size()] = getCallOperator()->getBody();
+ }
+}
+
+Stmt *LambdaExpr::getBody() const {
+ initBodyIfNeeded();
+ return getStoredStmts()[capture_size()];
+}
+
+const CompoundStmt *LambdaExpr::getCompoundStmtBody() const {
+ Stmt *Body = getBody();
+ if (const auto *CoroBody = dyn_cast<CoroutineBodyStmt>(Body))
+ return cast<CompoundStmt>(CoroBody->getBody());
+ return cast<CompoundStmt>(Body);
+}
+
bool LambdaExpr::isInitCapture(const LambdaCapture *C) const {
return (C->capturesVariable() && C->getCapturedVar()->isInitCapture() &&
(getCallOperator() == C->getCapturedVar()->getDeclContext()));
@@ -1246,7 +1180,7 @@ LambdaExpr::capture_iterator LambdaExpr::capture_begin() const {
}
LambdaExpr::capture_iterator LambdaExpr::capture_end() const {
- return capture_begin() + NumCaptures;
+ return capture_begin() + capture_size();
}
LambdaExpr::capture_range LambdaExpr::captures() const {
@@ -1303,19 +1237,17 @@ ArrayRef<NamedDecl *> LambdaExpr::getExplicitTemplateParameters() const {
return Record->getLambdaExplicitTemplateParameters();
}
-CompoundStmt *LambdaExpr::getBody() const {
- // FIXME: this mutation in getBody is bogus. It should be
- // initialized in ASTStmtReader::VisitLambdaExpr, but for reasons I
- // don't understand, that doesn't work.
- if (!getStoredStmts()[NumCaptures])
- *const_cast<Stmt **>(&getStoredStmts()[NumCaptures]) =
- getCallOperator()->getBody();
+bool LambdaExpr::isMutable() const { return !getCallOperator()->isConst(); }
- return static_cast<CompoundStmt *>(getStoredStmts()[NumCaptures]);
+LambdaExpr::child_range LambdaExpr::children() {
+ initBodyIfNeeded();
+ return child_range(getStoredStmts(), getStoredStmts() + capture_size() + 1);
}
-bool LambdaExpr::isMutable() const {
- return !getCallOperator()->isConst();
+LambdaExpr::const_child_range LambdaExpr::children() const {
+ initBodyIfNeeded();
+ return const_child_range(getStoredStmts(),
+ getStoredStmts() + capture_size() + 1);
}
ExprWithCleanups::ExprWithCleanups(Expr *subexpr,
@@ -1360,19 +1292,13 @@ CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(TypeSourceInfo *TSI,
? VK_LValue
: TSI->getType()->isRValueReferenceType() ? VK_XValue
: VK_RValue),
- OK_Ordinary,
- TSI->getType()->isDependentType() ||
- TSI->getType()->getContainedDeducedType(),
- true, true, TSI->getType()->containsUnexpandedParameterPack()),
+ OK_Ordinary),
TSI(TSI), LParenLoc(LParenLoc), RParenLoc(RParenLoc) {
CXXUnresolvedConstructExprBits.NumArgs = Args.size();
auto **StoredArgs = getTrailingObjects<Expr *>();
- for (unsigned I = 0; I != Args.size(); ++I) {
- if (Args[I]->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
+ for (unsigned I = 0; I != Args.size(); ++I)
StoredArgs[I] = Args[I];
- }
+ setDependence(computeDependence(this));
}
CXXUnresolvedConstructExpr *CXXUnresolvedConstructExpr::Create(
@@ -1400,11 +1326,7 @@ CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(
DeclarationNameInfo MemberNameInfo,
const TemplateArgumentListInfo *TemplateArgs)
: Expr(CXXDependentScopeMemberExprClass, Ctx.DependentTy, VK_LValue,
- OK_Ordinary, true, true, true,
- ((Base && Base->containsUnexpandedParameterPack()) ||
- (QualifierLoc && QualifierLoc.getNestedNameSpecifier()
- ->containsUnexpandedParameterPack()) ||
- MemberNameInfo.containsUnexpandedParameterPack())),
+ OK_Ordinary),
Base(Base), BaseType(BaseType), QualifierLoc(QualifierLoc),
MemberNameInfo(MemberNameInfo) {
CXXDependentScopeMemberExprBits.IsArrow = IsArrow;
@@ -1415,14 +1337,10 @@ CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(
CXXDependentScopeMemberExprBits.OperatorLoc = OperatorLoc;
if (TemplateArgs) {
- bool Dependent = true;
- bool InstantiationDependent = true;
- bool ContainsUnexpandedParameterPack = false;
+ auto Deps = TemplateArgumentDependence::None;
getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
TemplateKWLoc, *TemplateArgs, getTrailingObjects<TemplateArgumentLoc>(),
- Dependent, InstantiationDependent, ContainsUnexpandedParameterPack);
- if (ContainsUnexpandedParameterPack)
- ExprBits.ContainsUnexpandedParameterPack = true;
+ Deps);
} else if (TemplateKWLoc.isValid()) {
getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
TemplateKWLoc);
@@ -1430,6 +1348,7 @@ CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(
if (hasFirstQualifierFoundInScope())
*getTrailingObjects<NamedDecl *>() = FirstQualifierFoundInScope;
+ setDependence(computeDependence(this));
}
CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(
@@ -1611,16 +1530,15 @@ SizeOfPackExpr *SizeOfPackExpr::CreateDeserialized(ASTContext &Context,
return new (Storage) SizeOfPackExpr(EmptyShell(), NumPartialArgs);
}
-SubstNonTypeTemplateParmPackExpr::
-SubstNonTypeTemplateParmPackExpr(QualType T,
- ExprValueKind ValueKind,
- NonTypeTemplateParmDecl *Param,
- SourceLocation NameLoc,
- const TemplateArgument &ArgPack)
- : Expr(SubstNonTypeTemplateParmPackExprClass, T, ValueKind, OK_Ordinary,
- true, true, true, true),
+SubstNonTypeTemplateParmPackExpr::SubstNonTypeTemplateParmPackExpr(
+ QualType T, ExprValueKind ValueKind, NonTypeTemplateParmDecl *Param,
+ SourceLocation NameLoc, const TemplateArgument &ArgPack)
+ : Expr(SubstNonTypeTemplateParmPackExprClass, T, ValueKind, OK_Ordinary),
Param(Param), Arguments(ArgPack.pack_begin()),
- NumArguments(ArgPack.pack_size()), NameLoc(NameLoc) {}
+ NumArguments(ArgPack.pack_size()), NameLoc(NameLoc) {
+ setDependence(ExprDependence::TypeValueInstantiation |
+ ExprDependence::UnexpandedPack);
+}
TemplateArgument SubstNonTypeTemplateParmPackExpr::getArgumentPack() const {
return TemplateArgument(llvm::makeArrayRef(Arguments, NumArguments));
@@ -1630,12 +1548,13 @@ FunctionParmPackExpr::FunctionParmPackExpr(QualType T, VarDecl *ParamPack,
SourceLocation NameLoc,
unsigned NumParams,
VarDecl *const *Params)
- : Expr(FunctionParmPackExprClass, T, VK_LValue, OK_Ordinary, true, true,
- true, true),
+ : Expr(FunctionParmPackExprClass, T, VK_LValue, OK_Ordinary),
ParamPack(ParamPack), NameLoc(NameLoc), NumParameters(NumParams) {
if (Params)
std::uninitialized_copy(Params, Params + NumParams,
getTrailingObjects<VarDecl *>());
+ setDependence(ExprDependence::TypeValueInstantiation |
+ ExprDependence::UnexpandedPack);
}
FunctionParmPackExpr *
@@ -1657,16 +1576,14 @@ MaterializeTemporaryExpr::MaterializeTemporaryExpr(
QualType T, Expr *Temporary, bool BoundToLvalueReference,
LifetimeExtendedTemporaryDecl *MTD)
: Expr(MaterializeTemporaryExprClass, T,
- BoundToLvalueReference ? VK_LValue : VK_XValue, OK_Ordinary,
- Temporary->isTypeDependent(), Temporary->isValueDependent(),
- Temporary->isInstantiationDependent(),
- Temporary->containsUnexpandedParameterPack()) {
+ BoundToLvalueReference ? VK_LValue : VK_XValue, OK_Ordinary) {
if (MTD) {
State = MTD;
MTD->ExprWithTemporary = Temporary;
return;
}
State = Temporary;
+ setDependence(computeDependence(this));
}
void MaterializeTemporaryExpr::setExtendingDecl(ValueDecl *ExtendedBy,
@@ -1688,30 +1605,23 @@ void MaterializeTemporaryExpr::setExtendingDecl(ValueDecl *ExtendedBy,
TypeTraitExpr::TypeTraitExpr(QualType T, SourceLocation Loc, TypeTrait Kind,
ArrayRef<TypeSourceInfo *> Args,
- SourceLocation RParenLoc,
- bool Value)
- : Expr(TypeTraitExprClass, T, VK_RValue, OK_Ordinary,
- /*TypeDependent=*/false,
- /*ValueDependent=*/false,
- /*InstantiationDependent=*/false,
- /*ContainsUnexpandedParameterPack=*/false),
- Loc(Loc), RParenLoc(RParenLoc) {
+ SourceLocation RParenLoc, bool Value)
+ : Expr(TypeTraitExprClass, T, VK_RValue, OK_Ordinary), Loc(Loc),
+ RParenLoc(RParenLoc) {
+ assert(Kind <= TT_Last && "invalid enum value!");
TypeTraitExprBits.Kind = Kind;
+ assert(static_cast<unsigned>(Kind) == TypeTraitExprBits.Kind &&
+ "TypeTraitExprBits.Kind overflow!");
TypeTraitExprBits.Value = Value;
TypeTraitExprBits.NumArgs = Args.size();
+ assert(Args.size() == TypeTraitExprBits.NumArgs &&
+ "TypeTraitExprBits.NumArgs overflow!");
auto **ToArgs = getTrailingObjects<TypeSourceInfo *>();
-
- for (unsigned I = 0, N = Args.size(); I != N; ++I) {
- if (Args[I]->getType()->isDependentType())
- setValueDependent(true);
- if (Args[I]->getType()->isInstantiationDependentType())
- setInstantiationDependent(true);
- if (Args[I]->getType()->containsUnexpandedParameterPack())
- setContainsUnexpandedParameterPack(true);
-
+ for (unsigned I = 0, N = Args.size(); I != N; ++I)
ToArgs[I] = Args[I];
- }
+
+ setDependence(computeDependence(this));
}
TypeTraitExpr *TypeTraitExpr::Create(const ASTContext &C, QualType T,
@@ -1764,81 +1674,3 @@ CUDAKernelCallExpr *CUDAKernelCallExpr::CreateEmpty(const ASTContext &Ctx,
alignof(CUDAKernelCallExpr));
return new (Mem) CUDAKernelCallExpr(NumArgs, Empty);
}
-
-ConceptSpecializationExpr::ConceptSpecializationExpr(const ASTContext &C,
- NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
- DeclarationNameInfo ConceptNameInfo, NamedDecl *FoundDecl,
- ConceptDecl *NamedConcept, const ASTTemplateArgumentListInfo *ArgsAsWritten,
- ArrayRef<TemplateArgument> ConvertedArgs,
- const ConstraintSatisfaction *Satisfaction)
- : Expr(ConceptSpecializationExprClass, C.BoolTy, VK_RValue, OK_Ordinary,
- /*TypeDependent=*/false,
- // All the flags below are set in setTemplateArguments.
- /*ValueDependent=*/!Satisfaction, /*InstantiationDependent=*/false,
- /*ContainsUnexpandedParameterPacks=*/false),
- ConceptReference(NNS, TemplateKWLoc, ConceptNameInfo, FoundDecl,
- NamedConcept, ArgsAsWritten),
- NumTemplateArgs(ConvertedArgs.size()),
- Satisfaction(Satisfaction ?
- ASTConstraintSatisfaction::Create(C, *Satisfaction) :
- nullptr) {
- setTemplateArguments(ConvertedArgs);
-}
-
-ConceptSpecializationExpr::ConceptSpecializationExpr(EmptyShell Empty,
- unsigned NumTemplateArgs)
- : Expr(ConceptSpecializationExprClass, Empty), ConceptReference(),
- NumTemplateArgs(NumTemplateArgs) { }
-
-void ConceptSpecializationExpr::setTemplateArguments(
- ArrayRef<TemplateArgument> Converted) {
- assert(Converted.size() == NumTemplateArgs);
- std::uninitialized_copy(Converted.begin(), Converted.end(),
- getTrailingObjects<TemplateArgument>());
- bool IsInstantiationDependent = false;
- bool ContainsUnexpandedParameterPack = false;
- for (const TemplateArgument& Arg : Converted) {
- if (Arg.isInstantiationDependent())
- IsInstantiationDependent = true;
- if (Arg.containsUnexpandedParameterPack())
- ContainsUnexpandedParameterPack = true;
- if (ContainsUnexpandedParameterPack && IsInstantiationDependent)
- break;
- }
-
- // Currently guaranteed by the fact concepts can only be at namespace-scope.
- assert(!NestedNameSpec ||
- (!NestedNameSpec.getNestedNameSpecifier()->isInstantiationDependent() &&
- !NestedNameSpec.getNestedNameSpecifier()
- ->containsUnexpandedParameterPack()));
- setInstantiationDependent(IsInstantiationDependent);
- setContainsUnexpandedParameterPack(ContainsUnexpandedParameterPack);
- assert((!isValueDependent() || isInstantiationDependent()) &&
- "should not be value-dependent");
-}
-
-ConceptSpecializationExpr *
-ConceptSpecializationExpr::Create(const ASTContext &C,
- NestedNameSpecifierLoc NNS,
- SourceLocation TemplateKWLoc,
- DeclarationNameInfo ConceptNameInfo,
- NamedDecl *FoundDecl,
- ConceptDecl *NamedConcept,
- const ASTTemplateArgumentListInfo *ArgsAsWritten,
- ArrayRef<TemplateArgument> ConvertedArgs,
- const ConstraintSatisfaction *Satisfaction) {
- void *Buffer = C.Allocate(totalSizeToAlloc<TemplateArgument>(
- ConvertedArgs.size()));
- return new (Buffer) ConceptSpecializationExpr(C, NNS, TemplateKWLoc,
- ConceptNameInfo, FoundDecl,
- NamedConcept, ArgsAsWritten,
- ConvertedArgs, Satisfaction);
-}
-
-ConceptSpecializationExpr *
-ConceptSpecializationExpr::Create(ASTContext &C, EmptyShell Empty,
- unsigned NumTemplateArgs) {
- void *Buffer = C.Allocate(totalSizeToAlloc<TemplateArgument>(
- NumTemplateArgs));
- return new (Buffer) ConceptSpecializationExpr(Empty, NumTemplateArgs);
-}
diff --git a/clang/lib/AST/ExprClassification.cpp b/clang/lib/AST/ExprClassification.cpp
index 9dbf6fe9e0f0..31aa734ffedb 100644
--- a/clang/lib/AST/ExprClassification.cpp
+++ b/clang/lib/AST/ExprClassification.cpp
@@ -124,6 +124,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::ObjCPropertyRefExprClass:
// C++ [expr.typeid]p1: The result of a typeid expression is an lvalue of...
case Expr::CXXTypeidExprClass:
+ case Expr::CXXUuidofExprClass:
// Unresolved lookups and uncorrected typos get classified as lvalues.
// FIXME: Is this wise? Should they get their own kind?
case Expr::UnresolvedLookupExprClass:
@@ -139,6 +140,8 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::MSPropertyRefExprClass:
case Expr::MSPropertySubscriptExprClass:
case Expr::OMPArraySectionExprClass:
+ case Expr::OMPArrayShapingExprClass:
+ case Expr::OMPIteratorExprClass:
return Cl::CL_LValue;
// C99 6.5.2.5p5 says that compound literals are lvalues.
@@ -193,6 +196,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::DesignatedInitUpdateExprClass:
case Expr::SourceLocExprClass:
case Expr::ConceptSpecializationExprClass:
+ case Expr::RequiresExprClass:
return Cl::CL_PRValue;
case Expr::ConstantExprClass:
@@ -219,6 +223,10 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
}
return Cl::CL_LValue;
+ // Subscripting matrix types behaves like member accesses.
+ case Expr::MatrixSubscriptExprClass:
+ return ClassifyInternal(Ctx, cast<MatrixSubscriptExpr>(E)->getBase());
+
// C++ [expr.prim.general]p3: The result is an lvalue if the entity is a
// function or variable and a prvalue otherwise.
case Expr::DeclRefExprClass:
@@ -267,6 +275,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
return Cl::CL_PRValue;
}
+ case Expr::RecoveryExprClass:
case Expr::OpaqueValueExprClass:
return ClassifyExprValueKind(Lang, E, E->getValueKind());
@@ -347,6 +356,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::CXXDynamicCastExprClass:
case Expr::CXXReinterpretCastExprClass:
case Expr::CXXConstCastExprClass:
+ case Expr::CXXAddrspaceCastExprClass:
case Expr::ObjCBridgedCastExprClass:
case Expr::BuiltinBitCastExprClass:
// Only in C++ can casts be interesting at all.
@@ -401,9 +411,6 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
return Cl::CL_PRValue;
}
- case Expr::CXXUuidofExprClass:
- return Cl::CL_LValue;
-
case Expr::PackExpansionExprClass:
return ClassifyInternal(Ctx, cast<PackExpansionExpr>(E)->getPattern());
@@ -451,6 +458,7 @@ static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D) {
islvalue = isa<VarDecl>(D) || isa<FieldDecl>(D) ||
isa<IndirectFieldDecl>(D) ||
isa<BindingDecl>(D) ||
+ isa<MSGuidDecl>(D) ||
(Ctx.getLangOpts().CPlusPlus &&
(isa<FunctionDecl>(D) || isa<MSPropertyDecl>(D) ||
isa<FunctionTemplateDecl>(D)));
diff --git a/clang/lib/AST/ExprConcepts.cpp b/clang/lib/AST/ExprConcepts.cpp
new file mode 100644
index 000000000000..d00d8329095c
--- /dev/null
+++ b/clang/lib/AST/ExprConcepts.cpp
@@ -0,0 +1,215 @@
+//===- ExprConcepts.cpp - C++ Concepts Expression AST Node Implementation ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the subclasses of Expr class declared in ExprConcepts.h
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ExprConcepts.h"
+#include "clang/AST/ASTConcept.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ComputeDependence.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/DependenceFlags.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/Support/TrailingObjects.h"
+#include <algorithm>
+#include <string>
+#include <utility>
+
+using namespace clang;
+
+ConceptSpecializationExpr::ConceptSpecializationExpr(
+ const ASTContext &C, NestedNameSpecifierLoc NNS,
+ SourceLocation TemplateKWLoc, DeclarationNameInfo ConceptNameInfo,
+ NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
+ const ASTTemplateArgumentListInfo *ArgsAsWritten,
+ ArrayRef<TemplateArgument> ConvertedArgs,
+ const ConstraintSatisfaction *Satisfaction)
+ : Expr(ConceptSpecializationExprClass, C.BoolTy, VK_RValue, OK_Ordinary),
+ ConceptReference(NNS, TemplateKWLoc, ConceptNameInfo, FoundDecl,
+ NamedConcept, ArgsAsWritten),
+ NumTemplateArgs(ConvertedArgs.size()),
+ Satisfaction(Satisfaction
+ ? ASTConstraintSatisfaction::Create(C, *Satisfaction)
+ : nullptr) {
+ setTemplateArguments(ConvertedArgs);
+ setDependence(computeDependence(this, /*ValueDependent=*/!Satisfaction));
+
+ // Currently guaranteed by the fact concepts can only be at namespace-scope.
+ assert(!NestedNameSpec ||
+ (!NestedNameSpec.getNestedNameSpecifier()->isInstantiationDependent() &&
+ !NestedNameSpec.getNestedNameSpecifier()
+ ->containsUnexpandedParameterPack()));
+ assert((!isValueDependent() || isInstantiationDependent()) &&
+ "should not be value-dependent");
+}
+
+ConceptSpecializationExpr::ConceptSpecializationExpr(EmptyShell Empty,
+ unsigned NumTemplateArgs)
+ : Expr(ConceptSpecializationExprClass, Empty), ConceptReference(),
+ NumTemplateArgs(NumTemplateArgs) { }
+
+void ConceptSpecializationExpr::setTemplateArguments(
+ ArrayRef<TemplateArgument> Converted) {
+ assert(Converted.size() == NumTemplateArgs);
+ std::uninitialized_copy(Converted.begin(), Converted.end(),
+ getTrailingObjects<TemplateArgument>());
+}
+
+ConceptSpecializationExpr *
+ConceptSpecializationExpr::Create(const ASTContext &C,
+ NestedNameSpecifierLoc NNS,
+ SourceLocation TemplateKWLoc,
+ DeclarationNameInfo ConceptNameInfo,
+ NamedDecl *FoundDecl,
+ ConceptDecl *NamedConcept,
+ const ASTTemplateArgumentListInfo *ArgsAsWritten,
+ ArrayRef<TemplateArgument> ConvertedArgs,
+ const ConstraintSatisfaction *Satisfaction) {
+ void *Buffer = C.Allocate(totalSizeToAlloc<TemplateArgument>(
+ ConvertedArgs.size()));
+ return new (Buffer) ConceptSpecializationExpr(C, NNS, TemplateKWLoc,
+ ConceptNameInfo, FoundDecl,
+ NamedConcept, ArgsAsWritten,
+ ConvertedArgs, Satisfaction);
+}
+
+ConceptSpecializationExpr::ConceptSpecializationExpr(
+ const ASTContext &C, ConceptDecl *NamedConcept,
+ ArrayRef<TemplateArgument> ConvertedArgs,
+ const ConstraintSatisfaction *Satisfaction, bool Dependent,
+ bool ContainsUnexpandedParameterPack)
+ : Expr(ConceptSpecializationExprClass, C.BoolTy, VK_RValue, OK_Ordinary),
+ ConceptReference(NestedNameSpecifierLoc(), SourceLocation(),
+ DeclarationNameInfo(), NamedConcept, NamedConcept,
+ nullptr),
+ NumTemplateArgs(ConvertedArgs.size()),
+ Satisfaction(Satisfaction
+ ? ASTConstraintSatisfaction::Create(C, *Satisfaction)
+ : nullptr) {
+ setTemplateArguments(ConvertedArgs);
+ ExprDependence D = ExprDependence::None;
+ if (!Satisfaction)
+ D |= ExprDependence::Value;
+ if (Dependent)
+ D |= ExprDependence::Instantiation;
+ if (ContainsUnexpandedParameterPack)
+ D |= ExprDependence::UnexpandedPack;
+ setDependence(D);
+}
+
+ConceptSpecializationExpr *
+ConceptSpecializationExpr::Create(const ASTContext &C,
+ ConceptDecl *NamedConcept,
+ ArrayRef<TemplateArgument> ConvertedArgs,
+ const ConstraintSatisfaction *Satisfaction,
+ bool Dependent,
+ bool ContainsUnexpandedParameterPack) {
+ void *Buffer = C.Allocate(totalSizeToAlloc<TemplateArgument>(
+ ConvertedArgs.size()));
+ return new (Buffer) ConceptSpecializationExpr(
+ C, NamedConcept, ConvertedArgs, Satisfaction, Dependent,
+ ContainsUnexpandedParameterPack);
+}
+
+ConceptSpecializationExpr *
+ConceptSpecializationExpr::Create(ASTContext &C, EmptyShell Empty,
+ unsigned NumTemplateArgs) {
+ void *Buffer = C.Allocate(totalSizeToAlloc<TemplateArgument>(
+ NumTemplateArgs));
+ return new (Buffer) ConceptSpecializationExpr(Empty, NumTemplateArgs);
+}
+
+const TypeConstraint *
+concepts::ExprRequirement::ReturnTypeRequirement::getTypeConstraint() const {
+ assert(isTypeConstraint());
+ auto TPL =
+ TypeConstraintInfo.getPointer().get<TemplateParameterList *>();
+ return cast<TemplateTypeParmDecl>(TPL->getParam(0))
+ ->getTypeConstraint();
+}
+
+RequiresExpr::RequiresExpr(ASTContext &C, SourceLocation RequiresKWLoc,
+ RequiresExprBodyDecl *Body,
+ ArrayRef<ParmVarDecl *> LocalParameters,
+ ArrayRef<concepts::Requirement *> Requirements,
+ SourceLocation RBraceLoc)
+ : Expr(RequiresExprClass, C.BoolTy, VK_RValue, OK_Ordinary),
+ NumLocalParameters(LocalParameters.size()),
+ NumRequirements(Requirements.size()), Body(Body), RBraceLoc(RBraceLoc) {
+ RequiresExprBits.IsSatisfied = false;
+ RequiresExprBits.RequiresKWLoc = RequiresKWLoc;
+ bool Dependent = false;
+ bool ContainsUnexpandedParameterPack = false;
+ for (ParmVarDecl *P : LocalParameters) {
+ Dependent |= P->getType()->isInstantiationDependentType();
+ ContainsUnexpandedParameterPack |=
+ P->getType()->containsUnexpandedParameterPack();
+ }
+ RequiresExprBits.IsSatisfied = true;
+ for (concepts::Requirement *R : Requirements) {
+ Dependent |= R->isDependent();
+ ContainsUnexpandedParameterPack |= R->containsUnexpandedParameterPack();
+ if (!Dependent) {
+ RequiresExprBits.IsSatisfied = R->isSatisfied();
+ if (!RequiresExprBits.IsSatisfied)
+ break;
+ }
+ }
+ std::copy(LocalParameters.begin(), LocalParameters.end(),
+ getTrailingObjects<ParmVarDecl *>());
+ std::copy(Requirements.begin(), Requirements.end(),
+ getTrailingObjects<concepts::Requirement *>());
+ RequiresExprBits.IsSatisfied |= Dependent;
+ // FIXME: move the computing dependency logic to ComputeDependence.h
+ if (ContainsUnexpandedParameterPack)
+ setDependence(getDependence() | ExprDependence::UnexpandedPack);
+ // FIXME: this is incorrect for cases where we have a non-dependent
+ // requirement, but its parameters are instantiation-dependent. RequiresExpr
+ // should be instantiation-dependent if it has instantiation-dependent
+ // parameters.
+ if (Dependent)
+ setDependence(getDependence() | ExprDependence::ValueInstantiation);
+}
+
+RequiresExpr::RequiresExpr(ASTContext &C, EmptyShell Empty,
+ unsigned NumLocalParameters,
+ unsigned NumRequirements)
+ : Expr(RequiresExprClass, Empty), NumLocalParameters(NumLocalParameters),
+ NumRequirements(NumRequirements) { }
+
+RequiresExpr *
+RequiresExpr::Create(ASTContext &C, SourceLocation RequiresKWLoc,
+ RequiresExprBodyDecl *Body,
+ ArrayRef<ParmVarDecl *> LocalParameters,
+ ArrayRef<concepts::Requirement *> Requirements,
+ SourceLocation RBraceLoc) {
+ void *Mem =
+ C.Allocate(totalSizeToAlloc<ParmVarDecl *, concepts::Requirement *>(
+ LocalParameters.size(), Requirements.size()),
+ alignof(RequiresExpr));
+ return new (Mem) RequiresExpr(C, RequiresKWLoc, Body, LocalParameters,
+ Requirements, RBraceLoc);
+}
+
+RequiresExpr *
+RequiresExpr::Create(ASTContext &C, EmptyShell Empty,
+ unsigned NumLocalParameters, unsigned NumRequirements) {
+ void *Mem =
+ C.Allocate(totalSizeToAlloc<ParmVarDecl *, concepts::Requirement *>(
+ NumLocalParameters, NumRequirements),
+ alignof(RequiresExpr));
+ return new (Mem) RequiresExpr(C, Empty, NumLocalParameters, NumRequirements);
+}
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index c4b27b5d1daa..d20c2382b6ac 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -54,6 +54,7 @@
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <cstring>
@@ -674,6 +675,7 @@ namespace {
None,
Bases,
AfterBases,
+ AfterFields,
Destroying,
DestroyingBases
};
@@ -821,6 +823,9 @@ namespace {
void finishedConstructingBases() {
EI.ObjectsUnderConstruction[Object] = ConstructionPhase::AfterBases;
}
+ void finishedConstructingFields() {
+ EI.ObjectsUnderConstruction[Object] = ConstructionPhase::AfterFields;
+ }
~EvaluatingConstructorRAII() {
if (DidInsert) EI.ObjectsUnderConstruction.erase(Object);
}
@@ -1417,6 +1422,31 @@ static bool isFormalAccess(AccessKinds AK) {
return isAnyAccess(AK) && AK != AK_Construct && AK != AK_Destroy;
}
+/// Is this kind of access valid on an indeterminate object value?
+static bool isValidIndeterminateAccess(AccessKinds AK) {
+ switch (AK) {
+ case AK_Read:
+ case AK_Increment:
+ case AK_Decrement:
+ // These need the object's value.
+ return false;
+
+ case AK_ReadObjectRepresentation:
+ case AK_Assign:
+ case AK_Construct:
+ case AK_Destroy:
+ // Construction and destruction don't need the value.
+ return true;
+
+ case AK_MemberCall:
+ case AK_DynamicCast:
+ case AK_TypeId:
+ // These aren't really meaningful on scalars.
+ return true;
+ }
+ llvm_unreachable("unknown access kind");
+}
+
namespace {
struct ComplexValue {
private:
@@ -1865,7 +1895,8 @@ static bool IsGlobalLValue(APValue::LValueBase B) {
if (const VarDecl *VD = dyn_cast<VarDecl>(D))
return VD->hasGlobalStorage();
// ... the address of a function,
- return isa<FunctionDecl>(D);
+ // ... the address of a GUID [MS extension],
+ return isa<FunctionDecl>(D) || isa<MSGuidDecl>(D);
}
if (B.is<TypeInfoLValue>() || B.is<DynamicAllocLValue>())
@@ -1888,7 +1919,6 @@ static bool IsGlobalLValue(APValue::LValueBase B) {
case Expr::PredefinedExprClass:
case Expr::ObjCStringLiteralClass:
case Expr::ObjCEncodeExprClass:
- case Expr::CXXUuidofExprClass:
return true;
case Expr::ObjCBoxedExprClass:
return cast<ObjCBoxedExpr>(E)->isExpressibleAsConstantInitializer();
@@ -2005,6 +2035,17 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
APValue::LValueBase Base = LVal.getLValueBase();
const SubobjectDesignator &Designator = LVal.getLValueDesignator();
+ if (auto *VD = LVal.getLValueBase().dyn_cast<const ValueDecl *>()) {
+ if (auto *FD = dyn_cast<FunctionDecl>(VD)) {
+ if (FD->isConsteval()) {
+ Info.FFDiag(Loc, diag::note_consteval_address_accessible)
+ << !Type->isAnyPointerType();
+ Info.Note(FD->getLocation(), diag::note_declared_at);
+ return false;
+ }
+ }
+ }
+
// Check that the object is a global. Note that the fake 'this' object we
// manufacture when checking potential constant expressions is conservatively
// assumed to be global here.
@@ -2114,6 +2155,11 @@ static bool CheckMemberPointerConstantExpression(EvalInfo &Info,
const auto *FD = dyn_cast_or_null<CXXMethodDecl>(Member);
if (!FD)
return true;
+ if (FD->isConsteval()) {
+ Info.FFDiag(Loc, diag::note_consteval_address_accessible) << /*pointer*/ 0;
+ Info.Note(FD->getLocation(), diag::note_declared_at);
+ return false;
+ }
return Usage == Expr::EvaluateForMangling || FD->isVirtual() ||
!FD->hasAttr<DLLImportAttr>();
}
@@ -2533,7 +2579,7 @@ static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS,
if (SA != RHS) {
Info.CCEDiag(E, diag::note_constexpr_large_shift)
<< RHS << E->getType() << LHS.getBitWidth();
- } else if (LHS.isSigned() && !Info.getLangOpts().CPlusPlus2a) {
+ } else if (LHS.isSigned() && !Info.getLangOpts().CPlusPlus20) {
// C++11 [expr.shift]p2: A signed left shift must have a non-negative
// operand, and must not overflow the corresponding unsigned type.
// C++2a [expr.shift]p2: E1 << E2 is the unique value congruent to
@@ -2618,6 +2664,155 @@ static bool handleFloatFloatBinOp(EvalInfo &Info, const Expr *E,
return true;
}
+static bool handleLogicalOpForVector(const APInt &LHSValue,
+ BinaryOperatorKind Opcode,
+ const APInt &RHSValue, APInt &Result) {
+ bool LHS = (LHSValue != 0);
+ bool RHS = (RHSValue != 0);
+
+ if (Opcode == BO_LAnd)
+ Result = LHS && RHS;
+ else
+ Result = LHS || RHS;
+ return true;
+}
+static bool handleLogicalOpForVector(const APFloat &LHSValue,
+ BinaryOperatorKind Opcode,
+ const APFloat &RHSValue, APInt &Result) {
+ bool LHS = !LHSValue.isZero();
+ bool RHS = !RHSValue.isZero();
+
+ if (Opcode == BO_LAnd)
+ Result = LHS && RHS;
+ else
+ Result = LHS || RHS;
+ return true;
+}
+
+static bool handleLogicalOpForVector(const APValue &LHSValue,
+ BinaryOperatorKind Opcode,
+ const APValue &RHSValue, APInt &Result) {
+ // The result is always an int type, however operands match the first.
+ if (LHSValue.getKind() == APValue::Int)
+ return handleLogicalOpForVector(LHSValue.getInt(), Opcode,
+ RHSValue.getInt(), Result);
+ assert(LHSValue.getKind() == APValue::Float && "Should be no other options");
+ return handleLogicalOpForVector(LHSValue.getFloat(), Opcode,
+ RHSValue.getFloat(), Result);
+}
+
+template <typename APTy>
+static bool
+handleCompareOpForVectorHelper(const APTy &LHSValue, BinaryOperatorKind Opcode,
+ const APTy &RHSValue, APInt &Result) {
+ switch (Opcode) {
+ default:
+ llvm_unreachable("unsupported binary operator");
+ case BO_EQ:
+ Result = (LHSValue == RHSValue);
+ break;
+ case BO_NE:
+ Result = (LHSValue != RHSValue);
+ break;
+ case BO_LT:
+ Result = (LHSValue < RHSValue);
+ break;
+ case BO_GT:
+ Result = (LHSValue > RHSValue);
+ break;
+ case BO_LE:
+ Result = (LHSValue <= RHSValue);
+ break;
+ case BO_GE:
+ Result = (LHSValue >= RHSValue);
+ break;
+ }
+
+ return true;
+}
+
+static bool handleCompareOpForVector(const APValue &LHSValue,
+ BinaryOperatorKind Opcode,
+ const APValue &RHSValue, APInt &Result) {
+ // The result is always an int type, however operands match the first.
+ if (LHSValue.getKind() == APValue::Int)
+ return handleCompareOpForVectorHelper(LHSValue.getInt(), Opcode,
+ RHSValue.getInt(), Result);
+ assert(LHSValue.getKind() == APValue::Float && "Should be no other options");
+ return handleCompareOpForVectorHelper(LHSValue.getFloat(), Opcode,
+ RHSValue.getFloat(), Result);
+}
+
+// Perform binary operations for vector types, in place on the LHS.
+static bool handleVectorVectorBinOp(EvalInfo &Info, const Expr *E,
+ BinaryOperatorKind Opcode,
+ APValue &LHSValue,
+ const APValue &RHSValue) {
+ assert(Opcode != BO_PtrMemD && Opcode != BO_PtrMemI &&
+ "Operation not supported on vector types");
+
+ const auto *VT = E->getType()->castAs<VectorType>();
+ unsigned NumElements = VT->getNumElements();
+ QualType EltTy = VT->getElementType();
+
+ // In the cases (typically C as I've observed) where we aren't evaluating
+ // constexpr but are checking for cases where the LHS isn't yet evaluatable,
+ // just give up.
+ if (!LHSValue.isVector()) {
+ assert(LHSValue.isLValue() &&
+ "A vector result that isn't a vector OR uncalculated LValue");
+ Info.FFDiag(E);
+ return false;
+ }
+
+ assert(LHSValue.getVectorLength() == NumElements &&
+ RHSValue.getVectorLength() == NumElements && "Different vector sizes");
+
+ SmallVector<APValue, 4> ResultElements;
+
+ for (unsigned EltNum = 0; EltNum < NumElements; ++EltNum) {
+ APValue LHSElt = LHSValue.getVectorElt(EltNum);
+ APValue RHSElt = RHSValue.getVectorElt(EltNum);
+
+ if (EltTy->isIntegerType()) {
+ APSInt EltResult{Info.Ctx.getIntWidth(EltTy),
+ EltTy->isUnsignedIntegerType()};
+ bool Success = true;
+
+ if (BinaryOperator::isLogicalOp(Opcode))
+ Success = handleLogicalOpForVector(LHSElt, Opcode, RHSElt, EltResult);
+ else if (BinaryOperator::isComparisonOp(Opcode))
+ Success = handleCompareOpForVector(LHSElt, Opcode, RHSElt, EltResult);
+ else
+ Success = handleIntIntBinOp(Info, E, LHSElt.getInt(), Opcode,
+ RHSElt.getInt(), EltResult);
+
+ if (!Success) {
+ Info.FFDiag(E);
+ return false;
+ }
+ ResultElements.emplace_back(EltResult);
+
+ } else if (EltTy->isFloatingType()) {
+ assert(LHSElt.getKind() == APValue::Float &&
+ RHSElt.getKind() == APValue::Float &&
+ "Mismatched LHS/RHS/Result Type");
+ APFloat LHSFloat = LHSElt.getFloat();
+
+ if (!handleFloatFloatBinOp(Info, E, LHSFloat, Opcode,
+ RHSElt.getFloat())) {
+ Info.FFDiag(E);
+ return false;
+ }
+
+ ResultElements.emplace_back(LHSFloat);
+ }
+ }
+
+ LHSValue = APValue(ResultElements.data(), ResultElements.size());
+ return true;
+}
+
/// Cast an lvalue referring to a base subobject to a derived class, by
/// truncating the lvalue's path to the given length.
static bool CastToDerivedClass(EvalInfo &Info, const Expr *E, LValue &Result,
@@ -2830,7 +3025,7 @@ static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
if (Info.checkingPotentialConstantExpression())
return false;
if (!Frame || !Frame->Arguments) {
- Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
+ Info.FFDiag(E, diag::note_constexpr_function_param_value_unknown) << VD;
return false;
}
Result = &Frame->Arguments[PVD->getFunctionScopeIndex()];
@@ -2861,12 +3056,34 @@ static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
}
// Dig out the initializer, and use the declaration which it's attached to.
+ // FIXME: We should eventually check whether the variable has a reachable
+ // initializing declaration.
const Expr *Init = VD->getAnyInitializer(VD);
- if (!Init || Init->isValueDependent()) {
- // If we're checking a potential constant expression, the variable could be
- // initialized later.
- if (!Info.checkingPotentialConstantExpression())
- Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
+ if (!Init) {
+ // Don't diagnose during potential constant expression checking; an
+ // initializer might be added later.
+ if (!Info.checkingPotentialConstantExpression()) {
+ Info.FFDiag(E, diag::note_constexpr_var_init_unknown, 1)
+ << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ }
+ return false;
+ }
+
+ if (Init->isValueDependent()) {
+ // The DeclRefExpr is not value-dependent, but the variable it refers to
+ // has a value-dependent initializer. This should only happen in
+ // constant-folding cases, where the variable is not actually of a suitable
+ // type for use in a constant expression (otherwise the DeclRefExpr would
+ // have been value-dependent too), so diagnose that.
+ assert(!VD->mightBeUsableInConstantExpressions(Info.Ctx));
+ if (!Info.checkingPotentialConstantExpression()) {
+ Info.FFDiag(E, Info.getLangOpts().CPlusPlus11
+ ? diag::note_constexpr_ltor_non_constexpr
+ : diag::note_constexpr_ltor_non_integral, 1)
+ << VD << VD->getType();
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ }
return false;
}
@@ -2877,13 +3094,6 @@ static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
return true;
}
- // Never evaluate the initializer of a weak variable. We can't be sure that
- // this is the definition which will be used.
- if (VD->isWeak()) {
- Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
- return false;
- }
-
// Check that we can fold the initializer. In C++, we will have already done
// this in the cases where it matters for conformance.
SmallVector<PartialDiagnosticAt, 8> Notes;
@@ -2893,13 +3103,24 @@ static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
Info.Note(VD->getLocation(), diag::note_declared_at);
Info.addNotes(Notes);
return false;
- } else if (!VD->checkInitIsICE()) {
+ }
+
+ // Check that the variable is actually usable in constant expressions.
+ if (!VD->checkInitIsICE()) {
Info.CCEDiag(E, diag::note_constexpr_var_init_non_constant,
Notes.size() + 1) << VD;
Info.Note(VD->getLocation(), diag::note_declared_at);
Info.addNotes(Notes);
}
+ // Never use the initializer of a weak variable, not even for constant
+ // folding. We can't be sure that this is the definition that will be used.
+ if (VD->isWeak()) {
+ Info.FFDiag(E, diag::note_constexpr_var_init_weak) << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ return false;
+ }
+
Result = VD->getEvaluatedValue();
return true;
}
@@ -3006,15 +3227,22 @@ static void expandArray(APValue &Array, unsigned Index) {
/// is trivial. Note that this is never true for a union type with fields
/// (because the copy always "reads" the active member) and always true for
/// a non-class type.
+static bool isReadByLvalueToRvalueConversion(const CXXRecordDecl *RD);
static bool isReadByLvalueToRvalueConversion(QualType T) {
CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
- if (!RD || (RD->isUnion() && !RD->field_empty()))
- return true;
+ return !RD || isReadByLvalueToRvalueConversion(RD);
+}
+static bool isReadByLvalueToRvalueConversion(const CXXRecordDecl *RD) {
+ // FIXME: A trivial copy of a union copies the object representation, even if
+ // the union is empty.
+ if (RD->isUnion())
+ return !RD->field_empty();
if (RD->isEmpty())
return false;
for (auto *Field : RD->fields())
- if (isReadByLvalueToRvalueConversion(Field->getType()))
+ if (!Field->isUnnamedBitfield() &&
+ isReadByLvalueToRvalueConversion(Field->getType()))
return true;
for (auto &BaseSpec : RD->bases())
@@ -3124,6 +3352,13 @@ struct CompleteObject {
: Base(Base), Value(Value), Type(Type) {}
bool mayAccessMutableMembers(EvalInfo &Info, AccessKinds AK) const {
+ // If this isn't a "real" access (eg, if it's just accessing the type
+ // info), allow it. We assume the type doesn't change dynamically for
+ // subobjects of constexpr objects (even though we'd hit UB here if it
+ // did). FIXME: Is this right?
+ if (!isAnyAccess(AK))
+ return true;
+
// In C++14 onwards, it is permitted to read a mutable member whose
// lifetime began within the evaluation.
// FIXME: Should we also allow this in C++11?
@@ -3178,9 +3413,8 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
for (unsigned I = 0, N = Sub.Entries.size(); /**/; ++I) {
// Reading an indeterminate value is undefined, but assigning over one is OK.
if ((O->isAbsent() && !(handler.AccessKind == AK_Construct && I == N)) ||
- (O->isIndeterminate() && handler.AccessKind != AK_Construct &&
- handler.AccessKind != AK_Assign &&
- handler.AccessKind != AK_ReadObjectRepresentation)) {
+ (O->isIndeterminate() &&
+ !isValidIndeterminateAccess(handler.AccessKind))) {
if (!Info.checkingPotentialConstantExpression())
Info.FFDiag(E, diag::note_constexpr_access_uninit)
<< handler.AccessKind << O->isIndeterminate();
@@ -3548,7 +3782,30 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
APValue *BaseVal = nullptr;
QualType BaseType = getType(LVal.Base);
- if (const ValueDecl *D = LVal.Base.dyn_cast<const ValueDecl*>()) {
+ if (const ConstantExpr *CE =
+ dyn_cast_or_null<ConstantExpr>(LVal.Base.dyn_cast<const Expr *>())) {
+ /// Nested immediate invocation have been previously removed so if we found
+ /// a ConstantExpr it can only be the EvaluatingDecl.
+ assert(CE->isImmediateInvocation() && CE == Info.EvaluatingDecl);
+ (void)CE;
+ BaseVal = Info.EvaluatingDeclValue;
+ } else if (const ValueDecl *D = LVal.Base.dyn_cast<const ValueDecl *>()) {
+ // Allow reading from a GUID declaration.
+ if (auto *GD = dyn_cast<MSGuidDecl>(D)) {
+ if (isModification(AK)) {
+ // All the remaining cases do not permit modification of the object.
+ Info.FFDiag(E, diag::note_constexpr_modify_global);
+ return CompleteObject();
+ }
+ APValue &V = GD->getAsAPValue();
+ if (V.isAbsent()) {
+ Info.FFDiag(E, diag::note_constexpr_unsupported_layout)
+ << GD->getType();
+ return CompleteObject();
+ }
+ return CompleteObject(LVal.Base, &V, GD->getType());
+ }
+
// In C++98, const, non-volatile integers initialized with ICEs are ICEs.
// In C++11, constexpr, non-volatile variables initialized with constant
// expressions are constant expressions too. Inside constexpr functions,
@@ -3566,6 +3823,11 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
return CompleteObject();
}
+ // In OpenCL if a variable is in constant address space it is a const value.
+ bool IsConstant = BaseType.isConstQualified() ||
+ (Info.getLangOpts().OpenCL &&
+ BaseType.getAddressSpace() == LangAS::opencl_constant);
+
// Unless we're looking at a local variable or argument in a constexpr call,
// the variable we're reading must be const.
if (!Frame) {
@@ -3583,9 +3845,7 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
} else if (BaseType->isIntegralOrEnumerationType()) {
// In OpenCL if a variable is in constant address space it is a const
// value.
- if (!(BaseType.isConstQualified() ||
- (Info.getLangOpts().OpenCL &&
- BaseType.getAddressSpace() == LangAS::opencl_constant))) {
+ if (!IsConstant) {
if (!IsAccess)
return CompleteObject(LVal.getLValueBase(), nullptr, BaseType);
if (Info.getLangOpts().CPlusPlus) {
@@ -3598,27 +3858,29 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
}
} else if (!IsAccess) {
return CompleteObject(LVal.getLValueBase(), nullptr, BaseType);
- } else if (BaseType->isFloatingType() && BaseType.isConstQualified()) {
- // We support folding of const floating-point types, in order to make
- // static const data members of such types (supported as an extension)
- // more useful.
- if (Info.getLangOpts().CPlusPlus11) {
- Info.CCEDiag(E, diag::note_constexpr_ltor_non_constexpr, 1) << VD;
+ } else if (IsConstant && Info.checkingPotentialConstantExpression() &&
+ BaseType->isLiteralType(Info.Ctx) && !VD->hasDefinition()) {
+ // This variable might end up being constexpr. Don't diagnose it yet.
+ } else if (IsConstant) {
+ // Keep evaluating to see what we can do. In particular, we support
+ // folding of const floating-point types, in order to make static const
+ // data members of such types (supported as an extension) more useful.
+ if (Info.getLangOpts().CPlusPlus) {
+ Info.CCEDiag(E, Info.getLangOpts().CPlusPlus11
+ ? diag::note_constexpr_ltor_non_constexpr
+ : diag::note_constexpr_ltor_non_integral, 1)
+ << VD << BaseType;
Info.Note(VD->getLocation(), diag::note_declared_at);
} else {
Info.CCEDiag(E);
}
- } else if (BaseType.isConstQualified() && VD->hasDefinition(Info.Ctx)) {
- Info.CCEDiag(E, diag::note_constexpr_ltor_non_constexpr) << VD;
- // Keep evaluating to see what we can do.
} else {
- // FIXME: Allow folding of values of any literal type in all languages.
- if (Info.checkingPotentialConstantExpression() &&
- VD->getType().isConstQualified() && !VD->hasDefinition(Info.Ctx)) {
- // The definition of this variable could be constexpr. We can't
- // access it right now, but may be able to in future.
- } else if (Info.getLangOpts().CPlusPlus11) {
- Info.FFDiag(E, diag::note_constexpr_ltor_non_constexpr, 1) << VD;
+ // Never allow reading a non-const value.
+ if (Info.getLangOpts().CPlusPlus) {
+ Info.FFDiag(E, Info.getLangOpts().CPlusPlus11
+ ? diag::note_constexpr_ltor_non_constexpr
+ : diag::note_constexpr_ltor_non_integral, 1)
+ << VD << BaseType;
Info.Note(VD->getLocation(), diag::note_declared_at);
} else {
Info.FFDiag(E);
@@ -3828,12 +4090,26 @@ struct CompoundAssignSubobjectHandler {
return false;
case APValue::LValue:
return foundPointer(Subobj, SubobjType);
+ case APValue::Vector:
+ return foundVector(Subobj, SubobjType);
default:
// FIXME: can this happen?
Info.FFDiag(E);
return false;
}
}
+
+ bool foundVector(APValue &Value, QualType SubobjType) {
+ if (!checkConst(SubobjType))
+ return false;
+
+ if (!SubobjType->isVectorType()) {
+ Info.FFDiag(E);
+ return false;
+ }
+ return handleVectorVectorBinOp(Info, E, Opcode, Value, RHS);
+ }
+
bool found(APSInt &Value, QualType SubobjType) {
if (!checkConst(SubobjType))
return false;
@@ -4230,37 +4506,48 @@ static bool HandleBaseToDerivedCast(EvalInfo &Info, const CastExpr *E,
}
/// Get the value to use for a default-initialized object of type T.
-static APValue getDefaultInitValue(QualType T) {
+/// Return false if it encounters something invalid.
+static bool getDefaultInitValue(QualType T, APValue &Result) {
+ bool Success = true;
if (auto *RD = T->getAsCXXRecordDecl()) {
- if (RD->isUnion())
- return APValue((const FieldDecl*)nullptr);
-
- APValue Struct(APValue::UninitStruct(), RD->getNumBases(),
- std::distance(RD->field_begin(), RD->field_end()));
+ if (RD->isInvalidDecl()) {
+ Result = APValue();
+ return false;
+ }
+ if (RD->isUnion()) {
+ Result = APValue((const FieldDecl *)nullptr);
+ return true;
+ }
+ Result = APValue(APValue::UninitStruct(), RD->getNumBases(),
+ std::distance(RD->field_begin(), RD->field_end()));
unsigned Index = 0;
for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- End = RD->bases_end(); I != End; ++I, ++Index)
- Struct.getStructBase(Index) = getDefaultInitValue(I->getType());
+ End = RD->bases_end();
+ I != End; ++I, ++Index)
+ Success &= getDefaultInitValue(I->getType(), Result.getStructBase(Index));
for (const auto *I : RD->fields()) {
if (I->isUnnamedBitfield())
continue;
- Struct.getStructField(I->getFieldIndex()) =
- getDefaultInitValue(I->getType());
+ Success &= getDefaultInitValue(I->getType(),
+ Result.getStructField(I->getFieldIndex()));
}
- return Struct;
+ return Success;
}
if (auto *AT =
dyn_cast_or_null<ConstantArrayType>(T->getAsArrayTypeUnsafe())) {
- APValue Array(APValue::UninitArray(), 0, AT->getSize().getZExtValue());
- if (Array.hasArrayFiller())
- Array.getArrayFiller() = getDefaultInitValue(AT->getElementType());
- return Array;
+ Result = APValue(APValue::UninitArray(), 0, AT->getSize().getZExtValue());
+ if (Result.hasArrayFiller())
+ Success &=
+ getDefaultInitValue(AT->getElementType(), Result.getArrayFiller());
+
+ return Success;
}
- return APValue::IndeterminateValue();
+ Result = APValue::IndeterminateValue();
+ return true;
}
namespace {
@@ -4290,10 +4577,8 @@ static bool EvaluateVarDecl(EvalInfo &Info, const VarDecl *VD) {
Info.CurrentCall->createTemporary(VD, VD->getType(), true, Result);
const Expr *InitE = VD->getInit();
- if (!InitE) {
- Val = getDefaultInitValue(VD->getType());
- return true;
- }
+ if (!InitE)
+ return getDefaultInitValue(VD->getType(), Val);
if (InitE->isValueDependent())
return false;
@@ -4901,7 +5186,7 @@ static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc,
// DR1872: An instantiated virtual constexpr function can't be called in a
// constant expression (prior to C++20). We can still constant-fold such a
// call.
- if (!Info.Ctx.getLangOpts().CPlusPlus2a && isa<CXXMethodDecl>(Declaration) &&
+ if (!Info.Ctx.getLangOpts().CPlusPlus20 && isa<CXXMethodDecl>(Declaration) &&
cast<CXXMethodDecl>(Declaration)->isVirtual())
Info.CCEDiag(CallLoc, diag::note_constexpr_virtual_call);
@@ -4910,6 +5195,13 @@ static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc,
return false;
}
+ if (const auto *CtorDecl = dyn_cast_or_null<CXXConstructorDecl>(Definition)) {
+ for (const auto *InitExpr : CtorDecl->inits()) {
+ if (InitExpr->getInit() && InitExpr->getInit()->containsErrors())
+ return false;
+ }
+ }
+
// Can we evaluate this function call?
if (Definition && Definition->isConstexpr() && Body)
return true;
@@ -5060,6 +5352,7 @@ static Optional<DynamicType> ComputeDynamicType(EvalInfo &Info, const Expr *E,
case ConstructionPhase::None:
case ConstructionPhase::AfterBases:
+ case ConstructionPhase::AfterFields:
case ConstructionPhase::Destroying:
// We've finished constructing the base classes and not yet started
// destroying them again, so this is the dynamic type.
@@ -5278,12 +5571,15 @@ static bool HandleDynamicCast(EvalInfo &Info, const ExplicitCastExpr *E,
namespace {
struct StartLifetimeOfUnionMemberHandler {
+ EvalInfo &Info;
+ const Expr *LHSExpr;
const FieldDecl *Field;
-
+ bool DuringInit;
+ bool Failed = false;
static const AccessKinds AccessKind = AK_Assign;
typedef bool result_type;
- bool failed() { return false; }
+ bool failed() { return Failed; }
bool found(APValue &Subobj, QualType SubobjType) {
// We are supposed to perform no initialization but begin the lifetime of
// the object. We interpret that as meaning to do what default
@@ -5294,9 +5590,22 @@ struct StartLifetimeOfUnionMemberHandler {
// * No variant members' lifetimes begin
// * All scalar subobjects whose lifetimes begin have indeterminate values
assert(SubobjType->isUnionType());
- if (!declaresSameEntity(Subobj.getUnionField(), Field) ||
- !Subobj.getUnionValue().hasValue())
- Subobj.setUnion(Field, getDefaultInitValue(Field->getType()));
+ if (declaresSameEntity(Subobj.getUnionField(), Field)) {
+ // This union member is already active. If it's also in-lifetime, there's
+ // nothing to do.
+ if (Subobj.getUnionValue().hasValue())
+ return true;
+ } else if (DuringInit) {
+ // We're currently in the process of initializing a different union
+ // member. If we carried on, that initialization would attempt to
+ // store to an inactive union member, resulting in undefined behavior.
+ Info.FFDiag(LHSExpr,
+ diag::note_constexpr_union_member_change_during_init);
+ return false;
+ }
+ APValue Result;
+ Failed = !getDefaultInitValue(Field->getType(), Result);
+ Subobj.setUnion(Field, Result);
return true;
}
bool found(APSInt &Value, QualType SubobjType) {
@@ -5399,7 +5708,10 @@ static bool HandleUnionActiveMemberChange(EvalInfo &Info, const Expr *LHSExpr,
SubobjectDesignator D = LHS.Designator;
D.truncate(Info.Ctx, LHS.Base, LengthAndField.first);
- StartLifetimeOfUnionMemberHandler StartLifetime{LengthAndField.second};
+ bool DuringInit = Info.isEvaluatingCtorDtor(LHS.Base, D.Entries) ==
+ ConstructionPhase::AfterBases;
+ StartLifetimeOfUnionMemberHandler StartLifetime{
+ Info, LHSExpr, LengthAndField.second, DuringInit};
if (!findSubobject(Info, LHSExpr, Obj, D, StartLifetime))
return false;
}
@@ -5407,22 +5719,6 @@ static bool HandleUnionActiveMemberChange(EvalInfo &Info, const Expr *LHSExpr,
return true;
}
-/// Determine if a class has any fields that might need to be copied by a
-/// trivial copy or move operation.
-static bool hasFields(const CXXRecordDecl *RD) {
- if (!RD || RD->isEmpty())
- return false;
- for (auto *FD : RD->fields()) {
- if (FD->isUnnamedBitfield())
- continue;
- return true;
- }
- for (auto &Base : RD->bases())
- if (hasFields(Base.getType()->getAsCXXRecordDecl()))
- return true;
- return false;
-}
-
namespace {
typedef SmallVector<APValue, 8> ArgVector;
}
@@ -5447,6 +5743,8 @@ static bool EvaluateArgs(ArrayRef<const Expr *> Args, ArgVector &ArgValues,
}
}
}
+ // FIXME: This is the wrong evaluation order for an assignment operator
+ // called via operator syntax.
for (unsigned Idx = 0; Idx < Args.size(); Idx++) {
if (!Evaluate(ArgValues[Idx], Info, Args[Idx])) {
// If we're checking for a potential constant expression, evaluate all
@@ -5491,7 +5789,8 @@ static bool HandleFunctionCall(SourceLocation CallLoc,
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Callee);
if (MD && MD->isDefaulted() &&
(MD->getParent()->isUnion() ||
- (MD->isTrivial() && hasFields(MD->getParent())))) {
+ (MD->isTrivial() &&
+ isReadByLvalueToRvalueConversion(MD->getParent())))) {
assert(This &&
(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()));
LValue RHS;
@@ -5500,7 +5799,7 @@ static bool HandleFunctionCall(SourceLocation CallLoc,
if (!handleLValueToRValueConversion(Info, Args[0], Args[0]->getType(), RHS,
RHSValue, MD->getParent()->isUnion()))
return false;
- if (Info.getLangOpts().CPlusPlus2a && MD->isTrivial() &&
+ if (Info.getLangOpts().CPlusPlus20 && MD->isTrivial() &&
!HandleUnionActiveMemberChange(Info, Args[0], *This))
return false;
if (!handleAssignment(Info, Args[0], *This, MD->getThisType(),
@@ -5578,7 +5877,8 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
// actually read them.
if (Definition->isDefaulted() && Definition->isCopyOrMoveConstructor() &&
(Definition->getParent()->isUnion() ||
- (Definition->isTrivial() && hasFields(Definition->getParent())))) {
+ (Definition->isTrivial() &&
+ isReadByLvalueToRvalueConversion(Definition->getParent())))) {
LValue RHS;
RHS.setFrom(Info.Ctx, ArgValues[0]);
return handleLValueToRValueConversion(
@@ -5587,9 +5887,14 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
}
// Reserve space for the struct members.
- if (!RD->isUnion() && !Result.hasValue())
- Result = APValue(APValue::UninitStruct(), RD->getNumBases(),
- std::distance(RD->field_begin(), RD->field_end()));
+ if (!Result.hasValue()) {
+ if (!RD->isUnion())
+ Result = APValue(APValue::UninitStruct(), RD->getNumBases(),
+ std::distance(RD->field_begin(), RD->field_end()));
+ else
+ // A union starts with no active member.
+ Result = APValue((const FieldDecl*)nullptr);
+ }
if (RD->isInvalidDecl()) return false;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
@@ -5616,8 +5921,9 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
for (; !declaresSameEntity(*FieldIt, FD); ++FieldIt) {
assert(FieldIt != RD->field_end() && "missing field?");
if (!FieldIt->isUnnamedBitfield())
- Result.getStructField(FieldIt->getFieldIndex()) =
- getDefaultInitValue(FieldIt->getType());
+ Success &= getDefaultInitValue(
+ FieldIt->getType(),
+ Result.getStructField(FieldIt->getFieldIndex()));
}
++FieldIt;
};
@@ -5669,10 +5975,10 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
if (CD->isUnion())
*Value = APValue(FD);
else
- // FIXME: This immediately starts the lifetime of all members of an
- // anonymous struct. It would be preferable to strictly start member
- // lifetime in initialization order.
- *Value = getDefaultInitValue(Info.Ctx.getRecordType(CD));
+ // FIXME: This immediately starts the lifetime of all members of
+ // an anonymous struct. It would be preferable to strictly start
+ // member lifetime in initialization order.
+ Success &= getDefaultInitValue(Info.Ctx.getRecordType(CD), *Value);
}
// Store Subobject as its parent before updating it for the last element
// in the chain.
@@ -5719,11 +6025,14 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
if (!RD->isUnion()) {
for (; FieldIt != RD->field_end(); ++FieldIt) {
if (!FieldIt->isUnnamedBitfield())
- Result.getStructField(FieldIt->getFieldIndex()) =
- getDefaultInitValue(FieldIt->getType());
+ Success &= getDefaultInitValue(
+ FieldIt->getType(),
+ Result.getStructField(FieldIt->getFieldIndex()));
}
}
+ EvalObj.finishedConstructingFields();
+
return Success &&
EvaluateStmt(Ret, Info, Definition->getBody()) != ESR_Failed &&
LifetimeExtendedScope.destroy();
@@ -5964,7 +6273,7 @@ static bool HandleOperatorNewCall(EvalInfo &Info, const CallExpr *E,
// This is permitted only within a call to std::allocator<T>::allocate.
auto Caller = Info.getStdAllocatorCaller("allocate");
if (!Caller) {
- Info.FFDiag(E->getExprLoc(), Info.getLangOpts().CPlusPlus2a
+ Info.FFDiag(E->getExprLoc(), Info.getLangOpts().CPlusPlus20
? diag::note_constexpr_new_untyped
: diag::note_constexpr_new);
return false;
@@ -6697,8 +7006,13 @@ public:
return Error(E);
}
- bool VisitConstantExpr(const ConstantExpr *E)
- { return StmtVisitorTy::Visit(E->getSubExpr()); }
+ bool VisitConstantExpr(const ConstantExpr *E) {
+ if (E->hasAPValueResult())
+ return DerivedSuccess(E->getAPValueResult(), E);
+
+ return StmtVisitorTy::Visit(E->getSubExpr());
+ }
+
bool VisitParenExpr(const ParenExpr *E)
{ return StmtVisitorTy::Visit(E->getSubExpr()); }
bool VisitUnaryExtension(const UnaryOperator *E)
@@ -6741,7 +7055,7 @@ public:
return static_cast<Derived*>(this)->VisitCastExpr(E);
}
bool VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *E) {
- if (!Info.Ctx.getLangOpts().CPlusPlus2a)
+ if (!Info.Ctx.getLangOpts().CPlusPlus20)
CCEDiag(E, diag::note_constexpr_invalid_cast) << 1;
return static_cast<Derived*>(this)->VisitCastExpr(E);
}
@@ -6900,12 +7214,10 @@ public:
return Error(Callee);
This = &ThisVal;
} else if (const auto *PDE = dyn_cast<CXXPseudoDestructorExpr>(Callee)) {
- if (!Info.getLangOpts().CPlusPlus2a)
+ if (!Info.getLangOpts().CPlusPlus20)
Info.CCEDiag(PDE, diag::note_constexpr_pseudo_destructor);
- // FIXME: If pseudo-destructor calls ever start ending the lifetime of
- // their callee, we should start calling HandleDestruction here.
- // For now, we just evaluate the object argument and discard it.
- return EvaluateObjectArgument(Info, PDE->getBase(), ThisVal);
+ return EvaluateObjectArgument(Info, PDE->getBase(), ThisVal) &&
+ HandleDestruction(Info, PDE, ThisVal, PDE->getDestroyedType());
} else
return Error(Callee);
FD = Member;
@@ -7369,6 +7681,8 @@ public:
// from the AST (FIXME).
// * A MaterializeTemporaryExpr that has static storage duration, with no
// CallIndex, for a lifetime-extended temporary.
+// * The ConstantExpr that is currently being evaluated during evaluation of an
+// immediate invocation.
// plus an offset in bytes.
//===----------------------------------------------------------------------===//
namespace {
@@ -7448,6 +7762,8 @@ bool LValueExprEvaluator::VisitDeclRefExpr(const DeclRefExpr *E) {
return VisitVarDecl(E, VD);
if (const BindingDecl *BD = dyn_cast<BindingDecl>(E->getDecl()))
return Visit(BD->getBinding());
+ if (const MSGuidDecl *GD = dyn_cast<MSGuidDecl>(E->getDecl()))
+ return Success(GD);
return Error(E);
}
@@ -7604,7 +7920,7 @@ bool LValueExprEvaluator::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
else
TypeInfo = TypeInfoLValue(E->getExprOperand()->getType().getTypePtr());
} else {
- if (!Info.Ctx.getLangOpts().CPlusPlus2a) {
+ if (!Info.Ctx.getLangOpts().CPlusPlus20) {
Info.CCEDiag(E, diag::note_constexpr_typeid_polymorphic)
<< E->getExprOperand()->getType()
<< E->getExprOperand()->getSourceRange();
@@ -7626,7 +7942,7 @@ bool LValueExprEvaluator::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
}
bool LValueExprEvaluator::VisitCXXUuidofExpr(const CXXUuidofExpr *E) {
- return Success(E);
+ return Success(E->getGuidDecl());
}
bool LValueExprEvaluator::VisitMemberExpr(const MemberExpr *E) {
@@ -7740,7 +8056,7 @@ bool LValueExprEvaluator::VisitBinAssign(const BinaryOperator *E) {
if (!Evaluate(NewVal, this->Info, E->getRHS()))
return false;
- if (Info.getLangOpts().CPlusPlus2a &&
+ if (Info.getLangOpts().CPlusPlus20 &&
!HandleUnionActiveMemberChange(Info, E->getLHS(), Result))
return false;
@@ -8235,6 +8551,12 @@ bool PointerExprEvaluator::VisitCallExpr(const CallExpr *E) {
return visitNonBuiltinCallExpr(E);
}
+// Determine if T is a character type for which we guarantee that
+// sizeof(T) == 1.
+static bool isOneByteCharacterType(QualType T) {
+ return T->isCharType() || T->isChar8Type();
+}
+
bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
unsigned BuiltinOp) {
switch (BuiltinOp) {
@@ -8385,8 +8707,12 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
}
// Give up on byte-oriented matching against multibyte elements.
// FIXME: We can compare the bytes in the correct order.
- if (IsRawByte && Info.Ctx.getTypeSizeInChars(CharTy) != CharUnits::One())
+ if (IsRawByte && !isOneByteCharacterType(CharTy)) {
+ Info.FFDiag(E, diag::note_constexpr_memchr_unsupported)
+ << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'")
+ << CharTy;
return false;
+ }
// Figure out what value we're actually looking for (after converting to
// the corresponding unsigned type if necessary).
uint64_t DesiredVal;
@@ -8502,6 +8828,7 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
QualType T = Dest.Designator.getType(Info.Ctx);
QualType SrcT = Src.Designator.getType(Info.Ctx);
if (!Info.Ctx.hasSameUnqualifiedType(T, SrcT)) {
+ // FIXME: Consider using our bit_cast implementation to support this.
Info.FFDiag(E, diag::note_constexpr_memcpy_type_pun) << Move << SrcT << T;
return false;
}
@@ -8593,9 +8920,13 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
static bool EvaluateArrayNewInitList(EvalInfo &Info, LValue &This,
APValue &Result, const InitListExpr *ILE,
QualType AllocType);
+static bool EvaluateArrayNewConstructExpr(EvalInfo &Info, LValue &This,
+ APValue &Result,
+ const CXXConstructExpr *CCE,
+ QualType AllocType);
bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
- if (!Info.getLangOpts().CPlusPlus2a)
+ if (!Info.getLangOpts().CPlusPlus20)
Info.CCEDiag(E, diag::note_constexpr_new);
// We cannot speculatively evaluate a delete expression.
@@ -8642,6 +8973,7 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
const Expr *Init = E->getInitializer();
const InitListExpr *ResizedArrayILE = nullptr;
+ const CXXConstructExpr *ResizedArrayCCE = nullptr;
QualType AllocType = E->getAllocatedType();
if (Optional<const Expr*> ArraySize = E->getArraySize()) {
@@ -8685,7 +9017,7 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
// -- the new-initializer is a braced-init-list and the number of
// array elements for which initializers are provided [...]
// exceeds the number of elements to initialize
- if (Init) {
+ if (Init && !isa<CXXConstructExpr>(Init)) {
auto *CAT = Info.Ctx.getAsConstantArrayType(Init->getType());
assert(CAT && "unexpected type for array initializer");
@@ -8708,6 +9040,8 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
// special handling for this case when we initialize.
if (InitBound != AllocBound)
ResizedArrayILE = cast<InitListExpr>(Init);
+ } else if (Init) {
+ ResizedArrayCCE = cast<CXXConstructExpr>(Init);
}
AllocType = Info.Ctx.getConstantArrayType(AllocType, ArrayBound, nullptr,
@@ -8772,11 +9106,15 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
if (!EvaluateArrayNewInitList(Info, Result, *Val, ResizedArrayILE,
AllocType))
return false;
+ } else if (ResizedArrayCCE) {
+ if (!EvaluateArrayNewConstructExpr(Info, Result, *Val, ResizedArrayCCE,
+ AllocType))
+ return false;
} else if (Init) {
if (!EvaluateInPlace(*Val, Info, Result, Init))
return false;
- } else {
- *Val = getDefaultInitValue(AllocType);
+ } else if (!getDefaultInitValue(AllocType, *Val)) {
+ return false;
}
// Array new returns a pointer to the first element, not a pointer to the
@@ -9126,6 +9464,8 @@ bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
}
}
+ EvalObj.finishedConstructingFields();
+
return Success;
}
@@ -9145,8 +9485,7 @@ bool RecordExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
if (ZeroInit)
return ZeroInitialization(E, T);
- Result = getDefaultInitValue(T);
- return true;
+ return getDefaultInitValue(T, Result);
}
const FunctionDecl *Definition = nullptr;
@@ -9204,24 +9543,30 @@ bool RecordExprEvaluator::VisitCXXStdInitializerListExpr(
// Get a pointer to the first element of the array.
Array.addArray(Info, E, ArrayType);
+ auto InvalidType = [&] {
+ Info.FFDiag(E, diag::note_constexpr_unsupported_layout)
+ << E->getType();
+ return false;
+ };
+
// FIXME: Perform the checks on the field types in SemaInit.
RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
RecordDecl::field_iterator Field = Record->field_begin();
if (Field == Record->field_end())
- return Error(E);
+ return InvalidType();
// Start pointer.
if (!Field->getType()->isPointerType() ||
!Info.Ctx.hasSameType(Field->getType()->getPointeeType(),
ArrayType->getElementType()))
- return Error(E);
+ return InvalidType();
// FIXME: What if the initializer_list type has base classes, etc?
Result = APValue(APValue::UninitStruct(), 0, 2);
Array.moveInto(Result.getStructField(0));
if (++Field == Record->field_end())
- return Error(E);
+ return InvalidType();
if (Field->getType()->isPointerType() &&
Info.Ctx.hasSameType(Field->getType()->getPointeeType(),
@@ -9236,10 +9581,10 @@ bool RecordExprEvaluator::VisitCXXStdInitializerListExpr(
// Length.
Result.getStructField(1) = APValue(APSInt(ArrayType->getSize()));
else
- return Error(E);
+ return InvalidType();
if (++Field != Record->field_end())
- return Error(E);
+ return InvalidType();
return true;
}
@@ -9376,10 +9721,9 @@ namespace {
bool VisitCastExpr(const CastExpr* E);
bool VisitInitListExpr(const InitListExpr *E);
bool VisitUnaryImag(const UnaryOperator *E);
- // FIXME: Missing: unary -, unary ~, binary add/sub/mul/div,
- // binary comparisons, binary and/or/xor,
- // conditional operator (for GNU conditional select),
- // shufflevector, ExtVectorElementExpr
+ bool VisitBinaryOperator(const BinaryOperator *E);
+ // FIXME: Missing: unary -, unary ~, conditional operator (for GNU
+ // conditional select), shufflevector, ExtVectorElementExpr
};
} // end anonymous namespace
@@ -9527,6 +9871,41 @@ bool VectorExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
return ZeroInitialization(E);
}
+bool VectorExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+ BinaryOperatorKind Op = E->getOpcode();
+ assert(Op != BO_PtrMemD && Op != BO_PtrMemI && Op != BO_Cmp &&
+ "Operation not supported on vector types");
+
+ if (Op == BO_Comma)
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
+
+ Expr *LHS = E->getLHS();
+ Expr *RHS = E->getRHS();
+
+ assert(LHS->getType()->isVectorType() && RHS->getType()->isVectorType() &&
+ "Must both be vector types");
+ // Checking JUST the types are the same would be fine, except shifts don't
+ // need to have their types be the same (since you always shift by an int).
+ assert(LHS->getType()->getAs<VectorType>()->getNumElements() ==
+ E->getType()->getAs<VectorType>()->getNumElements() &&
+ RHS->getType()->getAs<VectorType>()->getNumElements() ==
+ E->getType()->getAs<VectorType>()->getNumElements() &&
+ "All operands must be the same size.");
+
+ APValue LHSValue;
+ APValue RHSValue;
+ bool LHSOK = Evaluate(LHSValue, Info, LHS);
+ if (!LHSOK && !Info.noteFailure())
+ return false;
+ if (!Evaluate(RHSValue, Info, RHS) || !LHSOK)
+ return false;
+
+ if (!handleVectorVectorBinOp(Info, E, Op, LHSValue, RHSValue))
+ return false;
+
+ return Success(LHSValue, E);
+}
+
//===----------------------------------------------------------------------===//
// Array Evaluation
//===----------------------------------------------------------------------===//
@@ -9550,8 +9929,18 @@ namespace {
bool ZeroInitialization(const Expr *E) {
const ConstantArrayType *CAT =
Info.Ctx.getAsConstantArrayType(E->getType());
- if (!CAT)
+ if (!CAT) {
+ if (const IncompleteArrayType *IAT =
+ Info.Ctx.getAsIncompleteArrayType(E->getType())) {
+ // We can be asked to zero-initialize a flexible array member; this
+ // is represented as an ImplicitValueInitExpr of incomplete array
+ // type. In this case, the array has zero elements.
+ Result = APValue(APValue::UninitArray(), 0, 0);
+ return true;
+ }
+ // FIXME: We could handle VLAs here.
return Error(E);
+ }
Result = APValue(APValue::UninitArray(), 0,
CAT->getSize().getZExtValue());
@@ -9597,9 +9986,19 @@ static bool EvaluateArrayNewInitList(EvalInfo &Info, LValue &This,
.VisitInitListExpr(ILE, AllocType);
}
+static bool EvaluateArrayNewConstructExpr(EvalInfo &Info, LValue &This,
+ APValue &Result,
+ const CXXConstructExpr *CCE,
+ QualType AllocType) {
+ assert(CCE->isRValue() && CCE->getType()->isArrayType() &&
+ "not an array rvalue");
+ return ArrayExprEvaluator(Info, This, Result)
+ .VisitCXXConstructExpr(CCE, This, &Result, AllocType);
+}
+
// Return true iff the given array filler may depend on the element index.
static bool MaybeElementDependentArrayFiller(const Expr *FillerExpr) {
- // For now, just whitelist non-class value-initialization and initialization
+ // For now, just allow non-class value-initialization and initialization
// lists comprised of them.
if (isa<ImplicitValueInitExpr>(FillerExpr))
return false;
@@ -9836,8 +10235,6 @@ public:
// Visitor Methods
//===--------------------------------------------------------------------===//
- bool VisitConstantExpr(const ConstantExpr *E);
-
bool VisitIntegerLiteral(const IntegerLiteral *E) {
return Success(E->getValue(), E);
}
@@ -9912,6 +10309,7 @@ public:
bool VisitSizeOfPackExpr(const SizeOfPackExpr *E);
bool VisitSourceLocExpr(const SourceLocExpr *E);
bool VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E);
+ bool VisitRequiresExpr(const RequiresExpr *E);
// FIXME: Missing: array subscript of vector, member of vector
};
@@ -10199,10 +10597,12 @@ EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts) {
case Type::BlockPointer:
case Type::Vector:
case Type::ExtVector:
+ case Type::ConstantMatrix:
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
case Type::Pipe:
+ case Type::ExtInt:
// GCC classifies vectors as None. We follow its lead and classify all
// other types that don't fit into the regular classification the same way.
return GCCTypeClass::None;
@@ -10286,7 +10686,7 @@ static bool EvaluateBuiltinConstantP(EvalInfo &Info, const Expr *Arg) {
ArgType->isAnyComplexType() || ArgType->isPointerType() ||
ArgType->isNullPtrType()) {
APValue V;
- if (!::EvaluateAsRValue(Info, Arg, V)) {
+ if (!::EvaluateAsRValue(Info, Arg, V) || Info.EvalStatus.HasSideEffects) {
Fold.keepDiagnostics();
return false;
}
@@ -10464,9 +10864,9 @@ static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const LValue &LVal) {
// the array at the end was flexible, or if it had 0 or 1 elements. This
// broke some common standard library extensions (PR30346), but was
// otherwise seemingly fine. It may be useful to reintroduce this behavior
- // with some sort of whitelist. OTOH, it seems that GCC is always
+ // with some sort of list. OTOH, it seems that GCC is always
// conservative with the last element in structs (if it's an array), so our
- // current behavior is more compatible than a whitelisting approach would
+ // current behavior is more compatible than an explicit list approach would
// be.
return LVal.InvalidBase &&
Designator.Entries.size() == Designator.MostDerivedPathLength &&
@@ -10616,13 +11016,6 @@ static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type,
return true;
}
-bool IntExprEvaluator::VisitConstantExpr(const ConstantExpr *E) {
- llvm::SaveAndRestore<bool> InConstantContext(Info.InConstantContext, true);
- if (E->getResultAPValueKind() != APValue::None)
- return Success(E->getAPValueResult(), E);
- return ExprEvaluatorBaseTy::VisitConstantExpr(E);
-}
-
bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
if (unsigned BuiltinOp = E->getBuiltinCallee())
return VisitBuiltinCallExpr(E, BuiltinOp);
@@ -10659,7 +11052,7 @@ static bool getBuiltinAlignArguments(const CallExpr *E, EvalInfo &Info,
bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
unsigned BuiltinOp) {
- switch (unsigned BuiltinOp = E->getBuiltinCallee()) {
+ switch (BuiltinOp) {
default:
return ExprEvaluatorBaseTy::VisitCallExpr(E);
@@ -10848,6 +11241,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
}
case Builtin::BI__builtin_expect:
+ case Builtin::BI__builtin_expect_with_probability:
return Visit(E->getArg(0));
case Builtin::BI__builtin_ffs:
@@ -11041,6 +11435,17 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
CharTy1, E->getArg(0)->getType()->getPointeeType()) &&
Info.Ctx.hasSameUnqualifiedType(CharTy1, CharTy2)));
+ // For memcmp, allow comparing any arrays of '[[un]signed] char' or
+ // 'char8_t', but no other types.
+ if (IsRawByte &&
+ !(isOneByteCharacterType(CharTy1) && isOneByteCharacterType(CharTy2))) {
+ // FIXME: Consider using our bit_cast implementation to support this.
+ Info.FFDiag(E, diag::note_constexpr_memcmp_unsupported)
+ << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'")
+ << CharTy1 << CharTy2;
+ return false;
+ }
+
const auto &ReadCurElems = [&](APValue &Char1, APValue &Char2) {
return handleLValueToRValueConversion(Info, E, CharTy1, String1, Char1) &&
handleLValueToRValueConversion(Info, E, CharTy2, String2, Char2) &&
@@ -11051,57 +11456,6 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
HandleLValueArrayAdjustment(Info, E, String2, CharTy2, 1);
};
- if (IsRawByte) {
- uint64_t BytesRemaining = MaxLength;
- // Pointers to const void may point to objects of incomplete type.
- if (CharTy1->isIncompleteType()) {
- Info.FFDiag(E, diag::note_constexpr_ltor_incomplete_type) << CharTy1;
- return false;
- }
- if (CharTy2->isIncompleteType()) {
- Info.FFDiag(E, diag::note_constexpr_ltor_incomplete_type) << CharTy2;
- return false;
- }
- uint64_t CharTy1Width{Info.Ctx.getTypeSize(CharTy1)};
- CharUnits CharTy1Size = Info.Ctx.toCharUnitsFromBits(CharTy1Width);
- // Give up on comparing between elements with disparate widths.
- if (CharTy1Size != Info.Ctx.getTypeSizeInChars(CharTy2))
- return false;
- uint64_t BytesPerElement = CharTy1Size.getQuantity();
- assert(BytesRemaining && "BytesRemaining should not be zero: the "
- "following loop considers at least one element");
- while (true) {
- APValue Char1, Char2;
- if (!ReadCurElems(Char1, Char2))
- return false;
- // We have compatible in-memory widths, but a possible type and
- // (for `bool`) internal representation mismatch.
- // Assuming two's complement representation, including 0 for `false` and
- // 1 for `true`, we can check an appropriate number of elements for
- // equality even if they are not byte-sized.
- APSInt Char1InMem = Char1.getInt().extOrTrunc(CharTy1Width);
- APSInt Char2InMem = Char2.getInt().extOrTrunc(CharTy1Width);
- if (Char1InMem.ne(Char2InMem)) {
- // If the elements are byte-sized, then we can produce a three-way
- // comparison result in a straightforward manner.
- if (BytesPerElement == 1u) {
- // memcmp always compares unsigned chars.
- return Success(Char1InMem.ult(Char2InMem) ? -1 : 1, E);
- }
- // The result is byte-order sensitive, and we have multibyte elements.
- // FIXME: We can compare the remaining bytes in the correct order.
- return false;
- }
- if (!AdvanceElems())
- return false;
- if (BytesRemaining <= BytesPerElement)
- break;
- BytesRemaining -= BytesPerElement;
- }
- // Enough elements are equal to account for the memcmp limit.
- return Success(0, E);
- }
-
bool StopAtNull =
(BuiltinOp != Builtin::BImemcmp && BuiltinOp != Builtin::BIbcmp &&
BuiltinOp != Builtin::BIwmemcmp &&
@@ -11119,7 +11473,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
APValue Char1, Char2;
if (!ReadCurElems(Char1, Char2))
return false;
- if (Char1.getInt() != Char2.getInt()) {
+ if (Char1.getInt().ne(Char2.getInt())) {
if (IsWide) // wmemcmp compares with wchar_t signedness.
return Success(Char1.getInt() < Char2.getInt() ? -1 : 1, E);
// memcmp always compares unsigned chars.
@@ -12524,6 +12878,9 @@ bool IntExprEvaluator::VisitConceptSpecializationExpr(
return Success(E->isSatisfied(), E);
}
+bool IntExprEvaluator::VisitRequiresExpr(const RequiresExpr *E) {
+ return Success(E->isSatisfied(), E);
+}
bool FixedPointExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
switch (E->getOpcode()) {
@@ -12566,8 +12923,14 @@ bool FixedPointExprEvaluator::VisitCastExpr(const CastExpr *E) {
return false;
bool Overflowed;
APFixedPoint Result = Src.convert(DestFXSema, &Overflowed);
- if (Overflowed && !HandleOverflow(Info, E, Result, DestType))
- return false;
+ if (Overflowed) {
+ if (Info.checkingForUndefinedBehavior())
+ Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
+ diag::warn_fixedpoint_constant_overflow)
+ << Result.toString() << E->getType();
+ else if (!HandleOverflow(Info, E, Result, E->getType()))
+ return false;
+ }
return Success(Result, E);
}
case CK_IntegralToFixedPoint: {
@@ -12579,8 +12942,14 @@ bool FixedPointExprEvaluator::VisitCastExpr(const CastExpr *E) {
APFixedPoint IntResult = APFixedPoint::getFromIntValue(
Src, Info.Ctx.getFixedPointSemantics(DestType), &Overflowed);
- if (Overflowed && !HandleOverflow(Info, E, IntResult, DestType))
- return false;
+ if (Overflowed) {
+ if (Info.checkingForUndefinedBehavior())
+ Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
+ diag::warn_fixedpoint_constant_overflow)
+ << IntResult.toString() << E->getType();
+ else if (!HandleOverflow(Info, E, IntResult, E->getType()))
+ return false;
+ }
return Success(IntResult, E);
}
@@ -12593,6 +12962,9 @@ bool FixedPointExprEvaluator::VisitCastExpr(const CastExpr *E) {
}
bool FixedPointExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+ if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma)
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
+
const Expr *LHS = E->getLHS();
const Expr *RHS = E->getRHS();
FixedPointSemantics ResultFXSema =
@@ -12605,20 +12977,45 @@ bool FixedPointExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
if (!EvaluateFixedPointOrInteger(RHS, RHSFX, Info))
return false;
+ bool OpOverflow = false, ConversionOverflow = false;
+ APFixedPoint Result(LHSFX.getSemantics());
switch (E->getOpcode()) {
case BO_Add: {
- bool AddOverflow, ConversionOverflow;
- APFixedPoint Result = LHSFX.add(RHSFX, &AddOverflow)
- .convert(ResultFXSema, &ConversionOverflow);
- if ((AddOverflow || ConversionOverflow) &&
- !HandleOverflow(Info, E, Result, E->getType()))
+ Result = LHSFX.add(RHSFX, &OpOverflow)
+ .convert(ResultFXSema, &ConversionOverflow);
+ break;
+ }
+ case BO_Sub: {
+ Result = LHSFX.sub(RHSFX, &OpOverflow)
+ .convert(ResultFXSema, &ConversionOverflow);
+ break;
+ }
+ case BO_Mul: {
+ Result = LHSFX.mul(RHSFX, &OpOverflow)
+ .convert(ResultFXSema, &ConversionOverflow);
+ break;
+ }
+ case BO_Div: {
+ if (RHSFX.getValue() == 0) {
+ Info.FFDiag(E, diag::note_expr_divide_by_zero);
return false;
- return Success(Result, E);
+ }
+ Result = LHSFX.div(RHSFX, &OpOverflow)
+ .convert(ResultFXSema, &ConversionOverflow);
+ break;
}
default:
return false;
}
- llvm_unreachable("Should've exited before this");
+ if (OpOverflow || ConversionOverflow) {
+ if (Info.checkingForUndefinedBehavior())
+ Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
+ diag::warn_fixedpoint_constant_overflow)
+ << Result.toString() << E->getType();
+ else if (!HandleOverflow(Info, E, Result, E->getType()))
+ return false;
+ }
+ return Success(Result, E);
}
//===----------------------------------------------------------------------===//
@@ -13470,7 +13867,7 @@ bool VoidExprEvaluator::VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
// This is the only case where we need to produce an extension warning:
// the only other way we can succeed is if we find a dynamic allocation,
// and we will have warned when we allocated it in that case.
- if (!Info.getLangOpts().CPlusPlus2a)
+ if (!Info.getLangOpts().CPlusPlus20)
Info.CCEDiag(E, diag::note_constexpr_new);
return true;
}
@@ -13823,7 +14220,7 @@ bool Expr::EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx,
}
bool Expr::EvaluateAsConstantExpr(EvalResult &Result, ConstExprUsage Usage,
- const ASTContext &Ctx) const {
+ const ASTContext &Ctx, bool InPlace) const {
assert(!isValueDependent() &&
"Expression evaluator can't be called on a dependent expression.");
@@ -13831,7 +14228,14 @@ bool Expr::EvaluateAsConstantExpr(EvalResult &Result, ConstExprUsage Usage,
EvalInfo Info(Ctx, Result, EM);
Info.InConstantContext = true;
- if (!::Evaluate(Result.Val, Info, this) || Result.HasSideEffects)
+ if (InPlace) {
+ Info.setEvaluatingDecl(this, Result.Val);
+ LValue LVal;
+ LVal.set(this);
+ if (!::EvaluateInPlace(Result.Val, Info, LVal, this) ||
+ Result.HasSideEffects)
+ return false;
+ } else if (!::Evaluate(Result.Val, Info, this) || Result.HasSideEffects)
return false;
if (!Info.discardCleanups())
@@ -13874,18 +14278,6 @@ bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
LValue LVal;
LVal.set(VD);
- // C++11 [basic.start.init]p2:
- // Variables with static storage duration or thread storage duration shall
- // be zero-initialized before any other initialization takes place.
- // This behavior is not present in C.
- if (Ctx.getLangOpts().CPlusPlus && !VD->hasLocalStorage() &&
- !DeclTy->isReferenceType()) {
- ImplicitValueInitExpr VIE(DeclTy);
- if (!EvaluateInPlace(Value, Info, LVal, &VIE,
- /*AllowNonLiteralTypes=*/true))
- return false;
- }
-
if (!EvaluateInPlace(Value, Info, LVal, this,
/*AllowNonLiteralTypes=*/true) ||
EStatus.HasSideEffects)
@@ -13904,14 +14296,17 @@ bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
bool VarDecl::evaluateDestruction(
SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
- assert(getEvaluatedValue() && !getEvaluatedValue()->isAbsent() &&
- "cannot evaluate destruction of non-constant-initialized variable");
-
Expr::EvalStatus EStatus;
EStatus.Diag = &Notes;
- // Make a copy of the value for the destructor to mutate.
- APValue DestroyedValue = *getEvaluatedValue();
+ // Make a copy of the value for the destructor to mutate, if we know it.
+ // Otherwise, treat the value as default-initialized; if the destructor works
+ // anyway, then the destruction is constant (and must be essentially empty).
+ APValue DestroyedValue;
+ if (getEvaluatedValue() && !getEvaluatedValue()->isAbsent())
+ DestroyedValue = *getEvaluatedValue();
+ else if (!getDefaultInitValue(getType(), DestroyedValue))
+ return false;
EvalInfo Info(getASTContext(), EStatus, EvalInfo::EM_ConstantExpression);
Info.setEvaluatingDecl(this, DestroyedValue,
@@ -13924,8 +14319,6 @@ bool VarDecl::evaluateDestruction(
LValue LVal;
LVal.set(this);
- // FIXME: Consider storing whether this variable has constant destruction in
- // the EvaluatedStmt so that CodeGen can query it.
if (!HandleDestruction(Info, DeclLoc, LVal.Base, DestroyedValue, DeclTy) ||
EStatus.HasSideEffects)
return false;
@@ -14073,7 +14466,10 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::ImaginaryLiteralClass:
case Expr::StringLiteralClass:
case Expr::ArraySubscriptExprClass:
+ case Expr::MatrixSubscriptExprClass:
case Expr::OMPArraySectionExprClass:
+ case Expr::OMPArrayShapingExprClass:
+ case Expr::OMPIteratorExprClass:
case Expr::MemberExprClass:
case Expr::CompoundAssignOperatorClass:
case Expr::CompoundLiteralExprClass:
@@ -14090,6 +14486,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::StmtExprClass:
case Expr::CXXMemberCallExprClass:
case Expr::CUDAKernelCallExprClass:
+ case Expr::CXXAddrspaceCastExprClass:
case Expr::CXXDynamicCastExprClass:
case Expr::CXXTypeidExprClass:
case Expr::CXXUuidofExprClass:
@@ -14104,6 +14501,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::CXXPseudoDestructorExprClass:
case Expr::UnresolvedLookupExprClass:
case Expr::TypoExprClass:
+ case Expr::RecoveryExprClass:
case Expr::DependentScopeDeclRefExprClass:
case Expr::CXXConstructExprClass:
case Expr::CXXInheritedCtorInitExprClass:
@@ -14182,6 +14580,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::CXXScalarValueInitExprClass:
case Expr::TypeTraitExprClass:
case Expr::ConceptSpecializationExprClass:
+ case Expr::RequiresExprClass:
case Expr::ArrayTypeTraitExprClass:
case Expr::ExpressionTraitExprClass:
case Expr::CXXNoexceptExprClass:
@@ -14627,6 +15026,15 @@ bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
if (FD->isDependentContext())
return true;
+ // Bail out if a constexpr constructor has an initializer that contains an
+ // error. We deliberately don't produce a diagnostic, as we have produced a
+ // relevant diagnostic when parsing the error initializer.
+ if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(FD)) {
+ for (const auto *InitExpr : Ctor->inits()) {
+ if (InitExpr->getInit() && InitExpr->getInit()->containsErrors())
+ return false;
+ }
+ }
Expr::EvalStatus Status;
Status.Diag = &Diags;
diff --git a/clang/lib/AST/ExprObjC.cpp b/clang/lib/AST/ExprObjC.cpp
index 53d0e873f8c9..662bc325f12c 100644
--- a/clang/lib/AST/ExprObjC.cpp
+++ b/clang/lib/AST/ExprObjC.cpp
@@ -12,6 +12,8 @@
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ComputeDependence.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/SelectorLocationsKind.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
@@ -25,20 +27,13 @@ using namespace clang;
ObjCArrayLiteral::ObjCArrayLiteral(ArrayRef<Expr *> Elements, QualType T,
ObjCMethodDecl *Method, SourceRange SR)
- : Expr(ObjCArrayLiteralClass, T, VK_RValue, OK_Ordinary, false, false,
- false, false),
+ : Expr(ObjCArrayLiteralClass, T, VK_RValue, OK_Ordinary),
NumElements(Elements.size()), Range(SR), ArrayWithObjectsMethod(Method) {
Expr **SaveElements = getElements();
- for (unsigned I = 0, N = Elements.size(); I != N; ++I) {
- if (Elements[I]->isTypeDependent() || Elements[I]->isValueDependent())
- ExprBits.ValueDependent = true;
- if (Elements[I]->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (Elements[I]->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
+ for (unsigned I = 0, N = Elements.size(); I != N; ++I)
SaveElements[I] = Elements[I];
- }
+
+ setDependence(computeDependence(this));
}
ObjCArrayLiteral *ObjCArrayLiteral::Create(const ASTContext &C,
@@ -59,25 +54,13 @@ ObjCDictionaryLiteral::ObjCDictionaryLiteral(ArrayRef<ObjCDictionaryElement> VK,
bool HasPackExpansions, QualType T,
ObjCMethodDecl *method,
SourceRange SR)
- : Expr(ObjCDictionaryLiteralClass, T, VK_RValue, OK_Ordinary, false, false,
- false, false),
+ : Expr(ObjCDictionaryLiteralClass, T, VK_RValue, OK_Ordinary),
NumElements(VK.size()), HasPackExpansions(HasPackExpansions), Range(SR),
DictWithObjectsMethod(method) {
KeyValuePair *KeyValues = getTrailingObjects<KeyValuePair>();
ExpansionData *Expansions =
HasPackExpansions ? getTrailingObjects<ExpansionData>() : nullptr;
for (unsigned I = 0; I < NumElements; I++) {
- if (VK[I].Key->isTypeDependent() || VK[I].Key->isValueDependent() ||
- VK[I].Value->isTypeDependent() || VK[I].Value->isValueDependent())
- ExprBits.ValueDependent = true;
- if (VK[I].Key->isInstantiationDependent() ||
- VK[I].Value->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (VK[I].EllipsisLoc.isInvalid() &&
- (VK[I].Key->containsUnexpandedParameterPack() ||
- VK[I].Value->containsUnexpandedParameterPack()))
- ExprBits.ContainsUnexpandedParameterPack = true;
-
KeyValues[I].Key = VK[I].Key;
KeyValues[I].Value = VK[I].Value;
if (Expansions) {
@@ -88,6 +71,7 @@ ObjCDictionaryLiteral::ObjCDictionaryLiteral(ArrayRef<ObjCDictionaryElement> VK,
Expansions[I].NumExpansionsPlusOne = 0;
}
}
+ setDependence(computeDependence(this));
}
ObjCDictionaryLiteral *
@@ -127,10 +111,7 @@ ObjCMessageExpr::ObjCMessageExpr(QualType T, ExprValueKind VK,
SelectorLocationsKind SelLocsK,
ObjCMethodDecl *Method, ArrayRef<Expr *> Args,
SourceLocation RBracLoc, bool isImplicit)
- : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary,
- /*TypeDependent=*/false, /*ValueDependent=*/false,
- /*InstantiationDependent=*/false,
- /*ContainsUnexpandedParameterPack=*/false),
+ : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary),
SelectorOrMethod(
reinterpret_cast<uintptr_t>(Method ? Method : Sel.getAsOpaquePtr())),
Kind(IsInstanceSuper ? SuperInstance : SuperClass),
@@ -139,6 +120,7 @@ ObjCMessageExpr::ObjCMessageExpr(QualType T, ExprValueKind VK,
RBracLoc(RBracLoc) {
initArgsAndSelLocs(Args, SelLocs, SelLocsK);
setReceiverPointer(SuperType.getAsOpaquePtr());
+ setDependence(computeDependence(this));
}
ObjCMessageExpr::ObjCMessageExpr(QualType T, ExprValueKind VK,
@@ -148,15 +130,14 @@ ObjCMessageExpr::ObjCMessageExpr(QualType T, ExprValueKind VK,
SelectorLocationsKind SelLocsK,
ObjCMethodDecl *Method, ArrayRef<Expr *> Args,
SourceLocation RBracLoc, bool isImplicit)
- : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary, T->isDependentType(),
- T->isDependentType(), T->isInstantiationDependentType(),
- T->containsUnexpandedParameterPack()),
+ : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary),
SelectorOrMethod(
reinterpret_cast<uintptr_t>(Method ? Method : Sel.getAsOpaquePtr())),
Kind(Class), HasMethod(Method != nullptr), IsDelegateInitCall(false),
IsImplicit(isImplicit), LBracLoc(LBracLoc), RBracLoc(RBracLoc) {
initArgsAndSelLocs(Args, SelLocs, SelLocsK);
setReceiverPointer(Receiver);
+ setDependence(computeDependence(this));
}
ObjCMessageExpr::ObjCMessageExpr(QualType T, ExprValueKind VK,
@@ -165,16 +146,14 @@ ObjCMessageExpr::ObjCMessageExpr(QualType T, ExprValueKind VK,
SelectorLocationsKind SelLocsK,
ObjCMethodDecl *Method, ArrayRef<Expr *> Args,
SourceLocation RBracLoc, bool isImplicit)
- : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary,
- Receiver->isTypeDependent(), Receiver->isTypeDependent(),
- Receiver->isInstantiationDependent(),
- Receiver->containsUnexpandedParameterPack()),
+ : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary),
SelectorOrMethod(
reinterpret_cast<uintptr_t>(Method ? Method : Sel.getAsOpaquePtr())),
Kind(Instance), HasMethod(Method != nullptr), IsDelegateInitCall(false),
IsImplicit(isImplicit), LBracLoc(LBracLoc), RBracLoc(RBracLoc) {
initArgsAndSelLocs(Args, SelLocs, SelLocsK);
setReceiverPointer(Receiver);
+ setDependence(computeDependence(this));
}
void ObjCMessageExpr::initArgsAndSelLocs(ArrayRef<Expr *> Args,
@@ -182,18 +161,8 @@ void ObjCMessageExpr::initArgsAndSelLocs(ArrayRef<Expr *> Args,
SelectorLocationsKind SelLocsK) {
setNumArgs(Args.size());
Expr **MyArgs = getArgs();
- for (unsigned I = 0; I != Args.size(); ++I) {
- if (Args[I]->isTypeDependent())
- ExprBits.TypeDependent = true;
- if (Args[I]->isValueDependent())
- ExprBits.ValueDependent = true;
- if (Args[I]->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (Args[I]->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
-
+ for (unsigned I = 0; I != Args.size(); ++I)
MyArgs[I] = Args[I];
- }
SelLocsKind = SelLocsK;
if (!isImplicit()) {
diff --git a/clang/lib/AST/ExternalASTSource.cpp b/clang/lib/AST/ExternalASTSource.cpp
index 837be5527fce..257833182621 100644
--- a/clang/lib/AST/ExternalASTSource.cpp
+++ b/clang/lib/AST/ExternalASTSource.cpp
@@ -15,9 +15,11 @@
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclarationName.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/Module.h"
+#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/None.h"
#include "llvm/Support/ErrorHandling.h"
#include <cstdint>
@@ -28,7 +30,7 @@ char ExternalASTSource::ID;
ExternalASTSource::~ExternalASTSource() = default;
-llvm::Optional<ExternalASTSource::ASTSourceDescriptor>
+llvm::Optional<ASTSourceDescriptor>
ExternalASTSource::getSourceDescriptor(unsigned ID) {
return None;
}
@@ -38,21 +40,6 @@ ExternalASTSource::hasExternalDefinitions(const Decl *D) {
return EK_ReplyHazy;
}
-ExternalASTSource::ASTSourceDescriptor::ASTSourceDescriptor(const Module &M)
- : Signature(M.Signature), ClangModule(&M) {
- if (M.Directory)
- Path = M.Directory->getName();
- if (auto *File = M.getASTFile())
- ASTFile = File->getName();
-}
-
-std::string ExternalASTSource::ASTSourceDescriptor::getModuleName() const {
- if (ClangModule)
- return ClangModule->Name;
- else
- return PCHModuleName;
-}
-
void ExternalASTSource::FindFileRegionDecls(FileID File, unsigned Offset,
unsigned Length,
SmallVectorImpl<Decl *> &Decls) {}
diff --git a/clang/lib/AST/FormatString.cpp b/clang/lib/AST/FormatString.cpp
index fcc0b3b11e25..83b952116a5e 100644
--- a/clang/lib/AST/FormatString.cpp
+++ b/clang/lib/AST/FormatString.cpp
@@ -419,7 +419,6 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const {
QualType pointeeTy = PT->getPointeeType();
if (const BuiltinType *BT = pointeeTy->getAs<BuiltinType>())
switch (BT->getKind()) {
- case BuiltinType::Void:
case BuiltinType::Char_U:
case BuiltinType::UChar:
case BuiltinType::Char_S:
@@ -539,7 +538,7 @@ QualType ArgType::getRepresentativeType(ASTContext &C) const {
}
std::string ArgType::getRepresentativeTypeName(ASTContext &C) const {
- std::string S = getRepresentativeType(C).getAsString();
+ std::string S = getRepresentativeType(C).getAsString(C.getPrintingPolicy());
std::string Alias;
if (Name) {
diff --git a/clang/lib/AST/Interp/Boolean.h b/clang/lib/AST/Interp/Boolean.h
index 3e6c8b5da9f0..2baa717311bc 100644
--- a/clang/lib/AST/Interp/Boolean.h
+++ b/clang/lib/AST/Interp/Boolean.h
@@ -85,14 +85,13 @@ class Boolean {
static Boolean max(unsigned NumBits) { return Boolean(true); }
template <typename T>
- static typename std::enable_if<std::is_integral<T>::value, Boolean>::type
- from(T Value) {
+ static std::enable_if_t<std::is_integral<T>::value, Boolean> from(T Value) {
return Boolean(Value != 0);
}
template <unsigned SrcBits, bool SrcSign>
- static typename std::enable_if<SrcBits != 0, Boolean>::type from(
- Integral<SrcBits, SrcSign> Value) {
+ static std::enable_if_t<SrcBits != 0, Boolean>
+ from(Integral<SrcBits, SrcSign> Value) {
return Boolean(!Value.isZero());
}
diff --git a/clang/lib/AST/Interp/ByteCodeExprGen.h b/clang/lib/AST/Interp/ByteCodeExprGen.h
index 1d0e34fc991f..716f28551e58 100644
--- a/clang/lib/AST/Interp/ByteCodeExprGen.h
+++ b/clang/lib/AST/Interp/ByteCodeExprGen.h
@@ -21,6 +21,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/Optional.h"
namespace clang {
diff --git a/clang/lib/AST/Interp/Context.cpp b/clang/lib/AST/Interp/Context.cpp
index e7f9ba0f010a..3bfcdfcd4c58 100644
--- a/clang/lib/AST/Interp/Context.cpp
+++ b/clang/lib/AST/Interp/Context.cpp
@@ -17,6 +17,7 @@
#include "PrimType.h"
#include "Program.h"
#include "clang/AST/Expr.h"
+#include "clang/Basic/TargetInfo.h"
using namespace clang;
using namespace clang::interp;
diff --git a/clang/lib/AST/Interp/Disasm.cpp b/clang/lib/AST/Interp/Disasm.cpp
index e77a825eb1f2..293fdd4b3256 100644
--- a/clang/lib/AST/Interp/Disasm.cpp
+++ b/clang/lib/AST/Interp/Disasm.cpp
@@ -16,6 +16,7 @@
#include "Program.h"
#include "clang/AST/DeclCXX.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Format.h"
using namespace clang;
using namespace clang::interp;
diff --git a/clang/lib/AST/Interp/Integral.h b/clang/lib/AST/Interp/Integral.h
index 7cc788070de8..46cd611ee389 100644
--- a/clang/lib/AST/Interp/Integral.h
+++ b/clang/lib/AST/Interp/Integral.h
@@ -156,13 +156,12 @@ public:
}
template <typename T>
- static typename std::enable_if<std::is_integral<T>::value, Integral>::type
- from(T Value) {
+ static std::enable_if_t<std::is_integral<T>::value, Integral> from(T Value) {
return Integral(Value);
}
template <unsigned SrcBits, bool SrcSign>
- static typename std::enable_if<SrcBits != 0, Integral>::type
+ static std::enable_if_t<SrcBits != 0, Integral>
from(Integral<SrcBits, SrcSign> Value) {
return Integral(Value.V);
}
@@ -206,52 +205,52 @@ public:
private:
template <typename T>
- static typename std::enable_if<std::is_signed<T>::value, bool>::type
- CheckAddUB(T A, T B, T &R) {
+ static std::enable_if_t<std::is_signed<T>::value, bool> CheckAddUB(T A, T B,
+ T &R) {
return llvm::AddOverflow<T>(A, B, R);
}
template <typename T>
- static typename std::enable_if<std::is_unsigned<T>::value, bool>::type
- CheckAddUB(T A, T B, T &R) {
+ static std::enable_if_t<std::is_unsigned<T>::value, bool> CheckAddUB(T A, T B,
+ T &R) {
R = A + B;
return false;
}
template <typename T>
- static typename std::enable_if<std::is_signed<T>::value, bool>::type
- CheckSubUB(T A, T B, T &R) {
+ static std::enable_if_t<std::is_signed<T>::value, bool> CheckSubUB(T A, T B,
+ T &R) {
return llvm::SubOverflow<T>(A, B, R);
}
template <typename T>
- static typename std::enable_if<std::is_unsigned<T>::value, bool>::type
- CheckSubUB(T A, T B, T &R) {
+ static std::enable_if_t<std::is_unsigned<T>::value, bool> CheckSubUB(T A, T B,
+ T &R) {
R = A - B;
return false;
}
template <typename T>
- static typename std::enable_if<std::is_signed<T>::value, bool>::type
- CheckMulUB(T A, T B, T &R) {
+ static std::enable_if_t<std::is_signed<T>::value, bool> CheckMulUB(T A, T B,
+ T &R) {
return llvm::MulOverflow<T>(A, B, R);
}
template <typename T>
- static typename std::enable_if<std::is_unsigned<T>::value, bool>::type
- CheckMulUB(T A, T B, T &R) {
+ static std::enable_if_t<std::is_unsigned<T>::value, bool> CheckMulUB(T A, T B,
+ T &R) {
R = A * B;
return false;
}
template <typename T, T Min, T Max>
- static typename std::enable_if<std::is_signed<T>::value, bool>::type
+ static std::enable_if_t<std::is_signed<T>::value, bool>
CheckRange(int64_t V) {
return Min <= V && V <= Max;
}
template <typename T, T Min, T Max>
- static typename std::enable_if<std::is_unsigned<T>::value, bool>::type
+ static std::enable_if_t<std::is_unsigned<T>::value, bool>
CheckRange(int64_t V) {
return V >= 0 && static_cast<uint64_t>(V) <= Max;
}
diff --git a/clang/lib/AST/Interp/Interp.cpp b/clang/lib/AST/Interp/Interp.cpp
index 1a8109cedf76..cec3f6d6160e 100644
--- a/clang/lib/AST/Interp/Interp.cpp
+++ b/clang/lib/AST/Interp/Interp.cpp
@@ -334,7 +334,7 @@ bool CheckCallable(InterpState &S, CodePtr OpPC, Function *F) {
const SourceLocation &Loc = S.Current->getLocation(OpPC);
if (F->isVirtual()) {
- if (!S.getLangOpts().CPlusPlus2a) {
+ if (!S.getLangOpts().CPlusPlus20) {
S.CCEDiag(Loc, diag::note_constexpr_virtual_call);
return false;
}
diff --git a/clang/lib/AST/Interp/Interp.h b/clang/lib/AST/Interp/Interp.h
index c12caa639da7..a63c5a871ba3 100644
--- a/clang/lib/AST/Interp/Interp.h
+++ b/clang/lib/AST/Interp/Interp.h
@@ -869,7 +869,7 @@ inline bool ShiftRight(InterpState &S, CodePtr OpPC, const T &V, unsigned RHS) {
template <PrimType TL, PrimType TR, typename T = typename PrimConv<TL>::T>
inline bool ShiftLeft(InterpState &S, CodePtr OpPC, const T &V, unsigned RHS) {
- if (V.isSigned() && !S.getLangOpts().CPlusPlus2a) {
+ if (V.isSigned() && !S.getLangOpts().CPlusPlus20) {
// C++11 [expr.shift]p2: A signed left shift must have a non-negative
// operand, and must not overflow the corresponding unsigned type.
// C++2a [expr.shift]p2: E1 << E2 is the unique value congruent to
diff --git a/clang/lib/AST/Interp/Block.cpp b/clang/lib/AST/Interp/InterpBlock.cpp
index 5fc93eb39f4e..ed6e8910194d 100644
--- a/clang/lib/AST/Interp/Block.cpp
+++ b/clang/lib/AST/Interp/InterpBlock.cpp
@@ -10,7 +10,7 @@
//
//===----------------------------------------------------------------------===//
-#include "Block.h"
+#include "InterpBlock.h"
#include "Pointer.h"
using namespace clang;
diff --git a/clang/lib/AST/Interp/Block.h b/clang/lib/AST/Interp/InterpBlock.h
index 97fb9a3ca096..0ccdef221c83 100644
--- a/clang/lib/AST/Interp/Block.h
+++ b/clang/lib/AST/Interp/InterpBlock.h
@@ -1,4 +1,4 @@
-//===--- Block.h - Allocated blocks for the interpreter ---------*- C++ -*-===//
+//===-- InterpBlock.h - Allocated blocks for the interpreter -*- C++ ----*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/clang/lib/AST/Interp/InterpFrame.h b/clang/lib/AST/Interp/InterpFrame.h
index b8391b0bcf92..304e2ad66537 100644
--- a/clang/lib/AST/Interp/InterpFrame.h
+++ b/clang/lib/AST/Interp/InterpFrame.h
@@ -45,16 +45,16 @@ public:
void popArgs();
/// Describes the frame with arguments for diagnostic purposes.
- void describe(llvm::raw_ostream &OS);
+ void describe(llvm::raw_ostream &OS) override;
/// Returns the parent frame object.
- Frame *getCaller() const;
+ Frame *getCaller() const override;
/// Returns the location of the call to the frame.
- SourceLocation getCallLocation() const;
+ SourceLocation getCallLocation() const override;
/// Returns the caller.
- const FunctionDecl *getCallee() const;
+ const FunctionDecl *getCallee() const override;
/// Returns the current function.
Function *getFunction() const { return Func; }
diff --git a/clang/lib/AST/Interp/Pointer.cpp b/clang/lib/AST/Interp/Pointer.cpp
index 1a10723aaca5..ef2638e2a36b 100644
--- a/clang/lib/AST/Interp/Pointer.cpp
+++ b/clang/lib/AST/Interp/Pointer.cpp
@@ -7,8 +7,8 @@
//===----------------------------------------------------------------------===//
#include "Pointer.h"
-#include "Block.h"
#include "Function.h"
+#include "InterpBlock.h"
#include "PrimType.h"
using namespace clang;
diff --git a/clang/lib/AST/Interp/Pointer.h b/clang/lib/AST/Interp/Pointer.h
index b8fa98e24faa..f2f6e0e76018 100644
--- a/clang/lib/AST/Interp/Pointer.h
+++ b/clang/lib/AST/Interp/Pointer.h
@@ -13,12 +13,12 @@
#ifndef LLVM_CLANG_AST_INTERP_POINTER_H
#define LLVM_CLANG_AST_INTERP_POINTER_H
-#include "Block.h"
#include "Descriptor.h"
+#include "InterpBlock.h"
+#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
-#include "clang/AST/ComparisonCategories.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/Support/raw_ostream.h"
diff --git a/clang/lib/AST/Interp/Source.h b/clang/lib/AST/Interp/Source.h
index e591c3399d7c..19c652b7331a 100644
--- a/clang/lib/AST/Interp/Source.h
+++ b/clang/lib/AST/Interp/Source.h
@@ -56,14 +56,14 @@ private:
/// Helper to decode a value or a pointer.
template <typename T>
- static typename std::enable_if<!std::is_pointer<T>::value, T>::type
+ static std::enable_if_t<!std::is_pointer<T>::value, T>
ReadHelper(const char *Ptr) {
using namespace llvm::support;
return endian::read<T, endianness::native, 1>(Ptr);
}
template <typename T>
- static typename std::enable_if<std::is_pointer<T>::value, T>::type
+ static std::enable_if_t<std::is_pointer<T>::value, T>
ReadHelper(const char *Ptr) {
using namespace llvm::support;
auto Punned = endian::read<uintptr_t, endianness::native, 1>(Ptr);
diff --git a/clang/lib/AST/ItaniumMangle.cpp b/clang/lib/AST/ItaniumMangle.cpp
index 0d567edac521..ddfbe9f86499 100644
--- a/clang/lib/AST/ItaniumMangle.cpp
+++ b/clang/lib/AST/ItaniumMangle.cpp
@@ -13,6 +13,7 @@
// http://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling
//
//===----------------------------------------------------------------------===//
+
#include "clang/AST/Mangle.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
@@ -22,10 +23,12 @@
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/ABI.h"
+#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
@@ -124,8 +127,9 @@ class ItaniumMangleContextImpl : public ItaniumMangleContext {
public:
explicit ItaniumMangleContextImpl(ASTContext &Context,
- DiagnosticsEngine &Diags)
- : ItaniumMangleContext(Context, Diags) {}
+ DiagnosticsEngine &Diags,
+ bool IsUniqueNameMangler)
+ : ItaniumMangleContext(Context, Diags, IsUniqueNameMangler) {}
/// @name Mangler Entry Points
/// @{
@@ -134,7 +138,7 @@ public:
bool shouldMangleStringLiteral(const StringLiteral *) override {
return false;
}
- void mangleCXXName(const NamedDecl *D, raw_ostream &) override;
+ void mangleCXXName(GlobalDecl GD, raw_ostream &) override;
void mangleThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk,
raw_ostream &) override;
void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
@@ -149,10 +153,6 @@ public:
void mangleCXXRTTI(QualType T, raw_ostream &) override;
void mangleCXXRTTIName(QualType T, raw_ostream &) override;
void mangleTypeName(QualType T, raw_ostream &) override;
- void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
- raw_ostream &) override;
- void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
- raw_ostream &) override;
void mangleCXXCtorComdat(const CXXConstructorDecl *D, raw_ostream &) override;
void mangleCXXDtorComdat(const CXXDestructorDecl *D, raw_ostream &) override;
@@ -160,6 +160,7 @@ public:
void mangleDynamicInitializer(const VarDecl *D, raw_ostream &Out) override;
void mangleDynamicAtExitDestructor(const VarDecl *D,
raw_ostream &Out) override;
+ void mangleDynamicStermFinalizer(const VarDecl *D, raw_ostream &Out) override;
void mangleSEHFilterExpression(const NamedDecl *EnclosingDecl,
raw_ostream &Out) override;
void mangleSEHFinallyBlock(const NamedDecl *EnclosingDecl,
@@ -416,14 +417,14 @@ public:
void disableDerivedAbiTags() { DisableDerivedAbiTags = true; }
static bool shouldHaveAbiTags(ItaniumMangleContextImpl &C, const VarDecl *VD);
- void mangle(const NamedDecl *D);
+ void mangle(GlobalDecl GD);
void mangleCallOffset(int64_t NonVirtual, int64_t Virtual);
void mangleNumber(const llvm::APSInt &I);
void mangleNumber(int64_t Number);
void mangleFloat(const llvm::APFloat &F);
- void mangleFunctionEncoding(const FunctionDecl *FD);
+ void mangleFunctionEncoding(GlobalDecl GD);
void mangleSeqID(unsigned SeqID);
- void mangleName(const NamedDecl *ND);
+ void mangleName(GlobalDecl GD);
void mangleType(QualType T);
void mangleNameOrStandardSubstitution(const NamedDecl *ND);
void mangleLambdaSig(const CXXRecordDecl *Lambda);
@@ -460,38 +461,39 @@ private:
void mangleFunctionEncodingBareType(const FunctionDecl *FD);
- void mangleNameWithAbiTags(const NamedDecl *ND,
+ void mangleNameWithAbiTags(GlobalDecl GD,
const AbiTagList *AdditionalAbiTags);
void mangleModuleName(const Module *M);
void mangleModuleNamePrefix(StringRef Name);
void mangleTemplateName(const TemplateDecl *TD,
const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs);
- void mangleUnqualifiedName(const NamedDecl *ND,
+ void mangleUnqualifiedName(GlobalDecl GD,
const AbiTagList *AdditionalAbiTags) {
- mangleUnqualifiedName(ND, ND->getDeclName(), UnknownArity,
+ mangleUnqualifiedName(GD, cast<NamedDecl>(GD.getDecl())->getDeclName(), UnknownArity,
AdditionalAbiTags);
}
- void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name,
+ void mangleUnqualifiedName(GlobalDecl GD, DeclarationName Name,
unsigned KnownArity,
const AbiTagList *AdditionalAbiTags);
- void mangleUnscopedName(const NamedDecl *ND,
+ void mangleUnscopedName(GlobalDecl GD,
const AbiTagList *AdditionalAbiTags);
- void mangleUnscopedTemplateName(const TemplateDecl *ND,
+ void mangleUnscopedTemplateName(GlobalDecl GD,
const AbiTagList *AdditionalAbiTags);
void mangleUnscopedTemplateName(TemplateName,
const AbiTagList *AdditionalAbiTags);
void mangleSourceName(const IdentifierInfo *II);
void mangleRegCallName(const IdentifierInfo *II);
+ void mangleDeviceStubName(const IdentifierInfo *II);
void mangleSourceNameWithAbiTags(
const NamedDecl *ND, const AbiTagList *AdditionalAbiTags = nullptr);
- void mangleLocalName(const Decl *D,
+ void mangleLocalName(GlobalDecl GD,
const AbiTagList *AdditionalAbiTags);
void mangleBlockForPrefix(const BlockDecl *Block);
void mangleUnqualifiedBlock(const BlockDecl *Block);
void mangleTemplateParamDecl(const NamedDecl *Decl);
void mangleLambda(const CXXRecordDecl *Lambda);
- void mangleNestedName(const NamedDecl *ND, const DeclContext *DC,
+ void mangleNestedName(GlobalDecl GD, const DeclContext *DC,
const AbiTagList *AdditionalAbiTags,
bool NoFunction=false);
void mangleNestedName(const TemplateDecl *TD,
@@ -500,7 +502,7 @@ private:
void manglePrefix(NestedNameSpecifier *qualifier);
void manglePrefix(const DeclContext *DC, bool NoFunction=false);
void manglePrefix(QualType type);
- void mangleTemplatePrefix(const TemplateDecl *ND, bool NoFunction=false);
+ void mangleTemplatePrefix(GlobalDecl GD, bool NoFunction=false);
void mangleTemplatePrefix(TemplateName Template);
bool mangleUnresolvedTypeOrSimpleId(QualType DestroyedType,
StringRef Prefix = "");
@@ -639,34 +641,40 @@ void CXXNameMangler::mangleSourceNameWithAbiTags(
writeAbiTags(ND, AdditionalAbiTags);
}
-void CXXNameMangler::mangle(const NamedDecl *D) {
+void CXXNameMangler::mangle(GlobalDecl GD) {
// <mangled-name> ::= _Z <encoding>
// ::= <data name>
// ::= <special-name>
Out << "_Z";
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
- mangleFunctionEncoding(FD);
- else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ if (isa<FunctionDecl>(GD.getDecl()))
+ mangleFunctionEncoding(GD);
+ else if (const VarDecl *VD = dyn_cast<VarDecl>(GD.getDecl()))
mangleName(VD);
- else if (const IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(D))
+ else if (const IndirectFieldDecl *IFD =
+ dyn_cast<IndirectFieldDecl>(GD.getDecl()))
mangleName(IFD->getAnonField());
+ else if (const FieldDecl *FD = dyn_cast<FieldDecl>(GD.getDecl()))
+ mangleName(FD);
+ else if (const MSGuidDecl *GuidD = dyn_cast<MSGuidDecl>(GD.getDecl()))
+ mangleName(GuidD);
else
- mangleName(cast<FieldDecl>(D));
+ llvm_unreachable("unexpected kind of global decl");
}
-void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
+void CXXNameMangler::mangleFunctionEncoding(GlobalDecl GD) {
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
// <encoding> ::= <function name> <bare-function-type>
// Don't mangle in the type if this isn't a decl we should typically mangle.
if (!Context.shouldMangleDeclName(FD)) {
- mangleName(FD);
+ mangleName(GD);
return;
}
AbiTagList ReturnTypeAbiTags = makeFunctionReturnTypeTags(FD);
if (ReturnTypeAbiTags.empty()) {
// There are no tags for return type, the simplest case.
- mangleName(FD);
+ mangleName(GD);
mangleFunctionEncodingBareType(FD);
return;
}
@@ -786,13 +794,14 @@ static bool isStdNamespace(const DeclContext *DC) {
return isStd(cast<NamespaceDecl>(DC));
}
-static const TemplateDecl *
-isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) {
+static const GlobalDecl
+isTemplate(GlobalDecl GD, const TemplateArgumentList *&TemplateArgs) {
+ const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
// Check if we have a function template.
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
if (const TemplateDecl *TD = FD->getPrimaryTemplate()) {
TemplateArgs = FD->getTemplateSpecializationArgs();
- return TD;
+ return GD.getWithDecl(TD);
}
}
@@ -800,20 +809,21 @@ isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) {
if (const ClassTemplateSpecializationDecl *Spec =
dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
TemplateArgs = &Spec->getTemplateArgs();
- return Spec->getSpecializedTemplate();
+ return GD.getWithDecl(Spec->getSpecializedTemplate());
}
// Check if we have a variable template.
if (const VarTemplateSpecializationDecl *Spec =
dyn_cast<VarTemplateSpecializationDecl>(ND)) {
TemplateArgs = &Spec->getTemplateArgs();
- return Spec->getSpecializedTemplate();
+ return GD.getWithDecl(Spec->getSpecializedTemplate());
}
- return nullptr;
+ return GlobalDecl();
}
-void CXXNameMangler::mangleName(const NamedDecl *ND) {
+void CXXNameMangler::mangleName(GlobalDecl GD) {
+ const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
// Variables should have implicit tags from its type.
AbiTagList VariableTypeAbiTags = makeVariableTypeTags(VD);
@@ -842,12 +852,13 @@ void CXXNameMangler::mangleName(const NamedDecl *ND) {
// Output name with implicit tags.
mangleNameWithAbiTags(VD, &AdditionalAbiTags);
} else {
- mangleNameWithAbiTags(ND, nullptr);
+ mangleNameWithAbiTags(GD, nullptr);
}
}
-void CXXNameMangler::mangleNameWithAbiTags(const NamedDecl *ND,
+void CXXNameMangler::mangleNameWithAbiTags(GlobalDecl GD,
const AbiTagList *AdditionalAbiTags) {
+ const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
// <name> ::= [<module-name>] <nested-name>
// ::= [<module-name>] <unscoped-name>
// ::= [<module-name>] <unscoped-template-name> <template-args>
@@ -863,14 +874,14 @@ void CXXNameMangler::mangleNameWithAbiTags(const NamedDecl *ND,
while (!DC->isNamespace() && !DC->isTranslationUnit())
DC = getEffectiveParentContext(DC);
else if (GetLocalClassDecl(ND)) {
- mangleLocalName(ND, AdditionalAbiTags);
+ mangleLocalName(GD, AdditionalAbiTags);
return;
}
DC = IgnoreLinkageSpecDecls(DC);
if (isLocalContainerContext(DC)) {
- mangleLocalName(ND, AdditionalAbiTags);
+ mangleLocalName(GD, AdditionalAbiTags);
return;
}
@@ -885,17 +896,17 @@ void CXXNameMangler::mangleNameWithAbiTags(const NamedDecl *ND,
if (DC->isTranslationUnit() || isStdNamespace(DC)) {
// Check if we have a template.
const TemplateArgumentList *TemplateArgs = nullptr;
- if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
+ if (GlobalDecl TD = isTemplate(GD, TemplateArgs)) {
mangleUnscopedTemplateName(TD, AdditionalAbiTags);
mangleTemplateArgs(*TemplateArgs);
return;
}
- mangleUnscopedName(ND, AdditionalAbiTags);
+ mangleUnscopedName(GD, AdditionalAbiTags);
return;
}
- mangleNestedName(ND, DC, AdditionalAbiTags);
+ mangleNestedName(GD, DC, AdditionalAbiTags);
}
void CXXNameMangler::mangleModuleName(const Module *M) {
@@ -946,19 +957,21 @@ void CXXNameMangler::mangleTemplateName(const TemplateDecl *TD,
}
}
-void CXXNameMangler::mangleUnscopedName(const NamedDecl *ND,
+void CXXNameMangler::mangleUnscopedName(GlobalDecl GD,
const AbiTagList *AdditionalAbiTags) {
+ const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
// <unscoped-name> ::= <unqualified-name>
// ::= St <unqualified-name> # ::std::
if (isStdNamespace(IgnoreLinkageSpecDecls(getEffectiveDeclContext(ND))))
Out << "St";
- mangleUnqualifiedName(ND, AdditionalAbiTags);
+ mangleUnqualifiedName(GD, AdditionalAbiTags);
}
void CXXNameMangler::mangleUnscopedTemplateName(
- const TemplateDecl *ND, const AbiTagList *AdditionalAbiTags) {
+ GlobalDecl GD, const AbiTagList *AdditionalAbiTags) {
+ const TemplateDecl *ND = cast<TemplateDecl>(GD.getDecl());
// <unscoped-template-name> ::= <unscoped-name>
// ::= <substitution>
if (mangleSubstitution(ND))
@@ -970,9 +983,9 @@ void CXXNameMangler::mangleUnscopedTemplateName(
"template template param cannot have abi tags");
mangleTemplateParameter(TTP->getDepth(), TTP->getIndex());
} else if (isa<BuiltinTemplateDecl>(ND) || isa<ConceptDecl>(ND)) {
- mangleUnscopedName(ND, AdditionalAbiTags);
+ mangleUnscopedName(GD, AdditionalAbiTags);
} else {
- mangleUnscopedName(ND->getTemplatedDecl(), AdditionalAbiTags);
+ mangleUnscopedName(GD.getWithDecl(ND->getTemplatedDecl()), AdditionalAbiTags);
}
addSubstitution(ND);
@@ -1249,10 +1262,11 @@ void CXXNameMangler::mangleUnresolvedName(
mangleTemplateArgs(TemplateArgs, NumTemplateArgs);
}
-void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
+void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD,
DeclarationName Name,
unsigned KnownArity,
const AbiTagList *AdditionalAbiTags) {
+ const NamedDecl *ND = cast_or_null<NamedDecl>(GD.getDecl());
unsigned Arity = KnownArity;
// <unqualified-name> ::= <operator-name>
// ::= <ctor-dtor-name>
@@ -1278,6 +1292,16 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
break;
}
+ if (auto *GD = dyn_cast<MSGuidDecl>(ND)) {
+ // We follow MSVC in mangling GUID declarations as if they were variables
+ // with a particular reserved name. Continue the pretense here.
+ SmallString<sizeof("_GUID_12345678_1234_1234_1234_1234567890ab")> GUID;
+ llvm::raw_svector_ostream GUIDOS(GUID);
+ Context.mangleMSGuidDecl(GD, GUIDOS);
+ Out << GUID.size() << GUID;
+ break;
+ }
+
if (II) {
// Match GCC's naming convention for internal linkage symbols, for
// symbols that are not actually visible outside of this TU. GCC
@@ -1302,7 +1326,12 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
bool IsRegCall = FD &&
FD->getType()->castAs<FunctionType>()->getCallConv() ==
clang::CC_X86RegCall;
- if (IsRegCall)
+ bool IsDeviceStub =
+ FD && FD->hasAttr<CUDAGlobalAttr>() &&
+ GD.getKernelReferenceKind() == KernelReferenceKind::Stub;
+ if (IsDeviceStub)
+ mangleDeviceStubName(II);
+ else if (IsRegCall)
mangleRegCallName(II);
else
mangleSourceName(II);
@@ -1380,7 +1409,8 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
// <lambda-sig> ::= <template-param-decl>* <parameter-type>+
// # Parameter types or 'v' for 'void'.
if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(TD)) {
- if (Record->isLambda() && Record->getLambdaManglingNumber()) {
+ if (Record->isLambda() && (Record->getLambdaManglingNumber() ||
+ Context.isUniqueNameMangler())) {
assert(!AdditionalAbiTags &&
"Lambda type cannot have additional abi tags");
mangleLambda(Record);
@@ -1491,6 +1521,14 @@ void CXXNameMangler::mangleRegCallName(const IdentifierInfo *II) {
<< II->getName();
}
+void CXXNameMangler::mangleDeviceStubName(const IdentifierInfo *II) {
+ // <source-name> ::= <positive length number> __device_stub__ <identifier>
+ // <number> ::= [n] <non-negative decimal integer>
+ // <identifier> ::= <unqualified source code identifier>
+ Out << II->getLength() + sizeof("__device_stub__") - 1 << "__device_stub__"
+ << II->getName();
+}
+
void CXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
// <source-name> ::= <positive length number> <identifier>
// <number> ::= [n] <non-negative decimal integer>
@@ -1498,10 +1536,11 @@ void CXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
Out << II->getLength() << II->getName();
}
-void CXXNameMangler::mangleNestedName(const NamedDecl *ND,
+void CXXNameMangler::mangleNestedName(GlobalDecl GD,
const DeclContext *DC,
const AbiTagList *AdditionalAbiTags,
bool NoFunction) {
+ const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
// <nested-name>
// ::= N [<CV-qualifiers>] [<ref-qualifier>] <prefix> <unqualified-name> E
// ::= N [<CV-qualifiers>] [<ref-qualifier>] <template-prefix>
@@ -1519,13 +1558,13 @@ void CXXNameMangler::mangleNestedName(const NamedDecl *ND,
// Check if we have a template.
const TemplateArgumentList *TemplateArgs = nullptr;
- if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
+ if (GlobalDecl TD = isTemplate(GD, TemplateArgs)) {
mangleTemplatePrefix(TD, NoFunction);
mangleTemplateArgs(*TemplateArgs);
}
else {
manglePrefix(DC, NoFunction);
- mangleUnqualifiedName(ND, AdditionalAbiTags);
+ mangleUnqualifiedName(GD, AdditionalAbiTags);
}
Out << 'E';
@@ -1543,8 +1582,24 @@ void CXXNameMangler::mangleNestedName(const TemplateDecl *TD,
Out << 'E';
}
-void CXXNameMangler::mangleLocalName(const Decl *D,
+static GlobalDecl getParentOfLocalEntity(const DeclContext *DC) {
+ GlobalDecl GD;
+ // The Itanium spec says:
+ // For entities in constructors and destructors, the mangling of the
+ // complete object constructor or destructor is used as the base function
+ // name, i.e. the C1 or D1 version.
+ if (auto *CD = dyn_cast<CXXConstructorDecl>(DC))
+ GD = GlobalDecl(CD, Ctor_Complete);
+ else if (auto *DD = dyn_cast<CXXDestructorDecl>(DC))
+ GD = GlobalDecl(DD, Dtor_Complete);
+ else
+ GD = GlobalDecl(cast<FunctionDecl>(DC));
+ return GD;
+}
+
+void CXXNameMangler::mangleLocalName(GlobalDecl GD,
const AbiTagList *AdditionalAbiTags) {
+ const Decl *D = GD.getDecl();
// <local-name> := Z <function encoding> E <entity name> [<discriminator>]
// := Z <function encoding> E s [<discriminator>]
// <local-name> := Z <function encoding> E d [ <parameter number> ]
@@ -1564,7 +1619,7 @@ void CXXNameMangler::mangleLocalName(const Decl *D,
else if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC))
mangleBlockForPrefix(BD);
else
- mangleFunctionEncoding(cast<FunctionDecl>(DC));
+ mangleFunctionEncoding(getParentOfLocalEntity(DC));
// Implicit ABI tags (from namespace) are not available in the following
// entity; reset to actually emitted tags, which are available.
@@ -1607,7 +1662,7 @@ void CXXNameMangler::mangleLocalName(const Decl *D,
mangleUnqualifiedBlock(BD);
} else {
const NamedDecl *ND = cast<NamedDecl>(D);
- mangleNestedName(ND, getEffectiveDeclContext(ND), AdditionalAbiTags,
+ mangleNestedName(GD, getEffectiveDeclContext(ND), AdditionalAbiTags,
true /*NoFunction*/);
}
} else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
@@ -1628,7 +1683,7 @@ void CXXNameMangler::mangleLocalName(const Decl *D,
assert(!AdditionalAbiTags && "Block cannot have additional abi tags");
mangleUnqualifiedBlock(BD);
} else {
- mangleUnqualifiedName(cast<NamedDecl>(D), AdditionalAbiTags);
+ mangleUnqualifiedName(GD, AdditionalAbiTags);
}
if (const NamedDecl *ND = dyn_cast<NamedDecl>(RD ? RD : D)) {
@@ -1730,6 +1785,37 @@ void CXXNameMangler::mangleTemplateParamDecl(const NamedDecl *Decl) {
}
}
+// Handles the __builtin_unique_stable_name feature for lambdas. Instead of the
+// ordinal of the lambda in its mangling, this does line/column to uniquely and
+// reliably identify the lambda. Additionally, macro expansions are expressed
+// as well to prevent macros causing duplicates.
+static void mangleUniqueNameLambda(CXXNameMangler &Mangler, SourceManager &SM,
+ raw_ostream &Out,
+ const CXXRecordDecl *Lambda) {
+ SourceLocation Loc = Lambda->getLocation();
+
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+ Mangler.mangleNumber(PLoc.getLine());
+ Out << "_";
+ Mangler.mangleNumber(PLoc.getColumn());
+
+ while(Loc.isMacroID()) {
+ SourceLocation SLToPrint = Loc;
+ if (SM.isMacroArgExpansion(Loc))
+ SLToPrint = SM.getImmediateExpansionRange(Loc).getBegin();
+
+ PLoc = SM.getPresumedLoc(SM.getSpellingLoc(SLToPrint));
+ Out << "m";
+ Mangler.mangleNumber(PLoc.getLine());
+ Out << "_";
+ Mangler.mangleNumber(PLoc.getColumn());
+
+ Loc = SM.getImmediateMacroCallerLoc(Loc);
+ if (Loc.isFileID())
+ Loc = SM.getImmediateMacroCallerLoc(SLToPrint);
+ }
+}
+
void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) {
// If the context of a closure type is an initializer for a class member
// (static or nonstatic), it is encoded in a qualified name with a final
@@ -1760,6 +1846,12 @@ void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) {
mangleLambdaSig(Lambda);
Out << "E";
+ if (Context.isUniqueNameMangler()) {
+ mangleUniqueNameLambda(
+ *this, Context.getASTContext().getSourceManager(), Out, Lambda);
+ return;
+ }
+
// The number is omitted for the first closure type with a given
// <lambda-sig> in a given context; it is n-2 for the nth closure type
// (in lexical order) with that same <lambda-sig> and context.
@@ -1775,8 +1867,8 @@ void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) {
void CXXNameMangler::mangleLambdaSig(const CXXRecordDecl *Lambda) {
for (auto *D : Lambda->getLambdaExplicitTemplateParameters())
mangleTemplateParamDecl(D);
- const FunctionProtoType *Proto = Lambda->getLambdaTypeInfo()->getType()->
- getAs<FunctionProtoType>();
+ auto *Proto =
+ Lambda->getLambdaTypeInfo()->getType()->castAs<FunctionProtoType>();
mangleBareFunctionType(Proto, /*MangleReturnType=*/false,
Lambda->getLambdaStaticInvoker());
}
@@ -1839,7 +1931,7 @@ void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
// Check if we have a template.
const TemplateArgumentList *TemplateArgs = nullptr;
- if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
+ if (GlobalDecl TD = isTemplate(ND, TemplateArgs)) {
mangleTemplatePrefix(TD);
mangleTemplateArgs(*TemplateArgs);
} else {
@@ -1862,7 +1954,7 @@ void CXXNameMangler::mangleTemplatePrefix(TemplateName Template) {
if (OverloadedTemplateStorage *Overloaded
= Template.getAsOverloadedTemplate()) {
- mangleUnqualifiedName(nullptr, (*Overloaded->begin())->getDeclName(),
+ mangleUnqualifiedName(GlobalDecl(), (*Overloaded->begin())->getDeclName(),
UnknownArity, nullptr);
return;
}
@@ -1874,8 +1966,9 @@ void CXXNameMangler::mangleTemplatePrefix(TemplateName Template) {
mangleUnscopedTemplateName(Template, /* AdditionalAbiTags */ nullptr);
}
-void CXXNameMangler::mangleTemplatePrefix(const TemplateDecl *ND,
+void CXXNameMangler::mangleTemplatePrefix(GlobalDecl GD,
bool NoFunction) {
+ const TemplateDecl *ND = cast<TemplateDecl>(GD.getDecl());
// <template-prefix> ::= <prefix> <template unqualified-name>
// ::= <template-param>
// ::= <substitution>
@@ -1891,9 +1984,9 @@ void CXXNameMangler::mangleTemplatePrefix(const TemplateDecl *ND,
} else {
manglePrefix(getEffectiveDeclContext(ND), NoFunction);
if (isa<BuiltinTemplateDecl>(ND) || isa<ConceptDecl>(ND))
- mangleUnqualifiedName(ND, nullptr);
+ mangleUnqualifiedName(GD, nullptr);
else
- mangleUnqualifiedName(ND->getTemplatedDecl(), nullptr);
+ mangleUnqualifiedName(GD.getWithDecl(ND->getTemplatedDecl()), nullptr);
}
addSubstitution(ND);
@@ -1987,6 +2080,8 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
case Type::DependentSizedExtVector:
case Type::Vector:
case Type::ExtVector:
+ case Type::ConstantMatrix:
+ case Type::DependentSizedMatrix:
case Type::FunctionProto:
case Type::FunctionNoProto:
case Type::Paren:
@@ -2001,6 +2096,8 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
case Type::Atomic:
case Type::Pipe:
case Type::MacroQualified:
+ case Type::ExtInt:
+ case Type::DependentExtInt:
llvm_unreachable("type is illegal as a nested name specifier");
case Type::SubstTemplateTypeParmPack:
@@ -2668,6 +2765,11 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
Out << TI->getFloat128Mangling();
break;
}
+ case BuiltinType::BFloat16: {
+ const TargetInfo *TI = &getASTContext().getTargetInfo();
+ Out << TI->getBFloat16Mangling();
+ break;
+ }
case BuiltinType::NullPtr:
Out << "Dn";
break;
@@ -2719,10 +2821,18 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
// The SVE types are effectively target-specific. The mangling scheme
// is defined in the appendices to the Procedure Call Standard for the
// Arm Architecture.
-#define SVE_TYPE(Name, Id, SingletonId) \
- case BuiltinType::Id: \
- type_name = Name; \
- Out << 'u' << type_name.size() << type_name; \
+#define SVE_VECTOR_TYPE(InternalName, MangledName, Id, SingletonId, NumEls, \
+ ElBits, IsSigned, IsFP, IsBF) \
+ case BuiltinType::Id: \
+ type_name = MangledName; \
+ Out << (type_name == InternalName ? "u" : "") << type_name.size() \
+ << type_name; \
+ break;
+#define SVE_PREDICATE_TYPE(InternalName, MangledName, Id, SingletonId, NumEls) \
+ case BuiltinType::Id: \
+ type_name = MangledName; \
+ Out << (type_name == InternalName ? "u" : "") << type_name.size() \
+ << type_name; \
break;
#include "clang/Basic/AArch64SVEACLETypes.def"
}
@@ -3066,6 +3176,7 @@ void CXXNameMangler::mangleNeonVectorType(const VectorType *T) {
case BuiltinType::UShort:
EltName = "poly16_t";
break;
+ case BuiltinType::LongLong:
case BuiltinType::ULongLong:
EltName = "poly64_t";
break;
@@ -3083,7 +3194,8 @@ void CXXNameMangler::mangleNeonVectorType(const VectorType *T) {
case BuiltinType::ULongLong: EltName = "uint64_t"; break;
case BuiltinType::Double: EltName = "float64_t"; break;
case BuiltinType::Float: EltName = "float32_t"; break;
- case BuiltinType::Half: EltName = "float16_t";break;
+ case BuiltinType::Half: EltName = "float16_t"; break;
+ case BuiltinType::BFloat16: EltName = "bfloat16_t"; break;
default:
llvm_unreachable("unexpected Neon vector element type");
}
@@ -3135,6 +3247,8 @@ static StringRef mangleAArch64VectorBase(const BuiltinType *EltType) {
return "Float32";
case BuiltinType::Double:
return "Float64";
+ case BuiltinType::BFloat16:
+ return "BFloat16";
default:
llvm_unreachable("Unexpected vector element base type");
}
@@ -3249,6 +3363,31 @@ void CXXNameMangler::mangleType(const DependentSizedExtVectorType *T) {
mangleType(T->getElementType());
}
+void CXXNameMangler::mangleType(const ConstantMatrixType *T) {
+ // Mangle matrix types using a vendor extended type qualifier:
+ // U<Len>matrix_type<Rows><Columns><element type>
+ StringRef VendorQualifier = "matrix_type";
+ Out << "U" << VendorQualifier.size() << VendorQualifier;
+ auto &ASTCtx = getASTContext();
+ unsigned BitWidth = ASTCtx.getTypeSize(ASTCtx.getSizeType());
+ llvm::APSInt Rows(BitWidth);
+ Rows = T->getNumRows();
+ mangleIntegerLiteral(ASTCtx.getSizeType(), Rows);
+ llvm::APSInt Columns(BitWidth);
+ Columns = T->getNumColumns();
+ mangleIntegerLiteral(ASTCtx.getSizeType(), Columns);
+ mangleType(T->getElementType());
+}
+
+void CXXNameMangler::mangleType(const DependentSizedMatrixType *T) {
+ // U<Len>matrix_type<row expr><column expr><element type>
+ StringRef VendorQualifier = "matrix_type";
+ Out << "U" << VendorQualifier.size() << VendorQualifier;
+ mangleTemplateArg(T->getRowExpr());
+ mangleTemplateArg(T->getColumnExpr());
+ mangleType(T->getElementType());
+}
+
void CXXNameMangler::mangleType(const DependentAddressSpaceType *T) {
SplitQualType split = T->getPointeeType().split();
mangleQualifiers(split.Quals, T);
@@ -3459,6 +3598,28 @@ void CXXNameMangler::mangleType(const PipeType *T) {
Out << "8ocl_pipe";
}
+void CXXNameMangler::mangleType(const ExtIntType *T) {
+ Out << "U7_ExtInt";
+ llvm::APSInt BW(32, true);
+ BW = T->getNumBits();
+ TemplateArgument TA(Context.getASTContext(), BW, getASTContext().IntTy);
+ mangleTemplateArgs(&TA, 1);
+ if (T->isUnsigned())
+ Out << "j";
+ else
+ Out << "i";
+}
+
+void CXXNameMangler::mangleType(const DependentExtIntType *T) {
+ Out << "U7_ExtInt";
+ TemplateArgument TA(T->getNumBitsExpr());
+ mangleTemplateArgs(&TA, 1);
+ if (T->isUnsigned())
+ Out << "j";
+ else
+ Out << "i";
+}
+
void CXXNameMangler::mangleIntegerLiteral(QualType T,
const llvm::APSInt &Value) {
// <expr-primary> ::= L <type> <value number> E # integer literal
@@ -3633,8 +3794,11 @@ recurse:
case Expr::LambdaExprClass:
case Expr::MSPropertyRefExprClass:
case Expr::MSPropertySubscriptExprClass:
- case Expr::TypoExprClass: // This should no longer exist in the AST by now.
+ case Expr::TypoExprClass: // This should no longer exist in the AST by now.
+ case Expr::RecoveryExprClass:
case Expr::OMPArraySectionExprClass:
+ case Expr::OMPArrayShapingExprClass:
+ case Expr::OMPIteratorExprClass:
case Expr::CXXInheritedCtorInitExprClass:
llvm_unreachable("unexpected statement kind");
@@ -3668,6 +3832,7 @@ recurse:
case Expr::ConvertVectorExprClass:
case Expr::StmtExprClass:
case Expr::TypeTraitExprClass:
+ case Expr::RequiresExprClass:
case Expr::ArrayTypeTraitExprClass:
case Expr::ExpressionTraitExprClass:
case Expr::VAArgExprClass:
@@ -4087,6 +4252,15 @@ recurse:
break;
}
+ case Expr::MatrixSubscriptExprClass: {
+ const MatrixSubscriptExpr *ME = cast<MatrixSubscriptExpr>(E);
+ Out << "ixix";
+ mangleExpression(ME->getBase());
+ mangleExpression(ME->getRowIdx());
+ mangleExpression(ME->getColumnIdx());
+ break;
+ }
+
case Expr::CompoundAssignOperatorClass: // fallthrough
case Expr::BinaryOperatorClass: {
const BinaryOperator *BO = cast<BinaryOperator>(E);
@@ -4170,6 +4344,9 @@ recurse:
case Expr::CXXConstCastExprClass:
mangleCastExpression(E, "cc");
break;
+ case Expr::CXXAddrspaceCastExprClass:
+ mangleCastExpression(E, "ac");
+ break;
case Expr::CXXOperatorCallExprClass: {
const CXXOperatorCallExpr *CE = cast<CXXOperatorCallExpr>(E);
@@ -4941,45 +5118,42 @@ bool CXXNameMangler::shouldHaveAbiTags(ItaniumMangleContextImpl &C,
/// and this routine will return false. In this case, the caller should just
/// emit the identifier of the declaration (\c D->getIdentifier()) as its
/// name.
-void ItaniumMangleContextImpl::mangleCXXName(const NamedDecl *D,
+void ItaniumMangleContextImpl::mangleCXXName(GlobalDecl GD,
raw_ostream &Out) {
+ const NamedDecl *D = cast<NamedDecl>(GD.getDecl());
assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) &&
"Invalid mangleName() call, argument is not a variable or function!");
- assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) &&
- "Invalid mangleName() call on 'structor decl!");
PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
getASTContext().getSourceManager(),
"Mangling declaration");
- CXXNameMangler Mangler(*this, Out, D);
- Mangler.mangle(D);
-}
+ if (auto *CD = dyn_cast<CXXConstructorDecl>(D)) {
+ auto Type = GD.getCtorType();
+ CXXNameMangler Mangler(*this, Out, CD, Type);
+ return Mangler.mangle(GlobalDecl(CD, Type));
+ }
-void ItaniumMangleContextImpl::mangleCXXCtor(const CXXConstructorDecl *D,
- CXXCtorType Type,
- raw_ostream &Out) {
- CXXNameMangler Mangler(*this, Out, D, Type);
- Mangler.mangle(D);
-}
+ if (auto *DD = dyn_cast<CXXDestructorDecl>(D)) {
+ auto Type = GD.getDtorType();
+ CXXNameMangler Mangler(*this, Out, DD, Type);
+ return Mangler.mangle(GlobalDecl(DD, Type));
+ }
-void ItaniumMangleContextImpl::mangleCXXDtor(const CXXDestructorDecl *D,
- CXXDtorType Type,
- raw_ostream &Out) {
- CXXNameMangler Mangler(*this, Out, D, Type);
- Mangler.mangle(D);
+ CXXNameMangler Mangler(*this, Out, D);
+ Mangler.mangle(GD);
}
void ItaniumMangleContextImpl::mangleCXXCtorComdat(const CXXConstructorDecl *D,
raw_ostream &Out) {
CXXNameMangler Mangler(*this, Out, D, Ctor_Comdat);
- Mangler.mangle(D);
+ Mangler.mangle(GlobalDecl(D, Ctor_Comdat));
}
void ItaniumMangleContextImpl::mangleCXXDtorComdat(const CXXDestructorDecl *D,
raw_ostream &Out) {
CXXNameMangler Mangler(*this, Out, D, Dtor_Comdat);
- Mangler.mangle(D);
+ Mangler.mangle(GlobalDecl(D, Dtor_Comdat));
}
void ItaniumMangleContextImpl::mangleThunk(const CXXMethodDecl *MD,
@@ -5023,7 +5197,7 @@ void ItaniumMangleContextImpl::mangleCXXDtorThunk(
Mangler.mangleCallOffset(ThisAdjustment.NonVirtual,
ThisAdjustment.Virtual.Itanium.VCallOffsetOffset);
- Mangler.mangleFunctionEncoding(DD);
+ Mangler.mangleFunctionEncoding(GlobalDecl(DD, Type));
}
/// Returns the mangled name for a guard variable for the passed in VarDecl.
@@ -5057,6 +5231,18 @@ void ItaniumMangleContextImpl::mangleDynamicAtExitDestructor(const VarDecl *D,
Mangler.getStream() << D->getName();
}
+void ItaniumMangleContextImpl::mangleDynamicStermFinalizer(const VarDecl *D,
+ raw_ostream &Out) {
+ // Clang generates these internal-linkage functions as part of its
+ // implementation of the XL ABI.
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "__finalize_";
+ if (shouldMangleDeclName(D))
+ Mangler.mangle(D);
+ else
+ Mangler.getStream() << D->getName();
+}
+
void ItaniumMangleContextImpl::mangleSEHFilterExpression(
const NamedDecl *EnclosingDecl, raw_ostream &Out) {
CXXNameMangler Mangler(*this, Out);
@@ -5165,7 +5351,8 @@ void ItaniumMangleContextImpl::mangleLambdaSig(const CXXRecordDecl *Lambda,
Mangler.mangleLambdaSig(Lambda);
}
-ItaniumMangleContext *
-ItaniumMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags) {
- return new ItaniumMangleContextImpl(Context, Diags);
+ItaniumMangleContext *ItaniumMangleContext::create(ASTContext &Context,
+ DiagnosticsEngine &Diags,
+ bool IsUniqueNameMangler) {
+ return new ItaniumMangleContextImpl(Context, Diags, IsUniqueNameMangler);
}
diff --git a/clang/lib/AST/JSONNodeDumper.cpp b/clang/lib/AST/JSONNodeDumper.cpp
index c30b07137edc..4bd00ece86ab 100644
--- a/clang/lib/AST/JSONNodeDumper.cpp
+++ b/clang/lib/AST/JSONNodeDumper.cpp
@@ -1,4 +1,6 @@
#include "clang/AST/JSONNodeDumper.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Specifiers.h"
#include "clang/Lex/Lexer.h"
#include "llvm/ADT/StringSwitch.h"
@@ -72,6 +74,7 @@ void JSONNodeDumper::Visit(const Type *T) {
JOS.attribute("kind", (llvm::Twine(T->getTypeClassName()) + "Type").str());
JOS.attribute("type", createQualType(QualType(T, 0), /*Desugar*/ false));
+ attributeOnlyIfTrue("containsErrors", T->containsErrors());
attributeOnlyIfTrue("isDependent", T->isDependentType());
attributeOnlyIfTrue("isInstantiationDependent",
T->isInstantiationDependentType());
@@ -109,7 +112,7 @@ void JSONNodeDumper::Visit(const Decl *D) {
JOS.attribute("isReferenced", true);
if (const auto *ND = dyn_cast<NamedDecl>(D))
- attributeOnlyIfTrue("isHidden", ND->isHidden());
+ attributeOnlyIfTrue("isHidden", !ND->isUnconditionallyVisible());
if (D->getLexicalDeclContext() != D->getDeclContext()) {
// Because of multiple inheritance, a DeclContext pointer does not produce
@@ -180,6 +183,13 @@ void JSONNodeDumper::Visit(const GenericSelectionExpr::ConstAssociation &A) {
attributeOnlyIfTrue("selected", A.isSelected());
}
+void JSONNodeDumper::Visit(const APValue &Value, QualType Ty) {
+ std::string Str;
+ llvm::raw_string_ostream OS(Str);
+ Value.printPretty(OS, Ctx, Ty);
+ JOS.attribute("value", OS.str());
+}
+
void JSONNodeDumper::writeIncludeStack(PresumedLoc Loc, bool JustFirst) {
if (Loc.isInvalid())
return;
@@ -384,6 +394,7 @@ static llvm::json::Object
createCopyAssignmentDefinitionData(const CXXRecordDecl *RD) {
llvm::json::Object Ret;
+ FIELD2("simple", hasSimpleCopyAssignment);
FIELD2("trivial", hasTrivialCopyAssignment);
FIELD2("nonTrivial", hasNonTrivialCopyAssignment);
FIELD2("hasConstParam", hasCopyAssignmentWithConstParam);
@@ -464,13 +475,10 @@ JSONNodeDumper::createCXXRecordDefinitionData(const CXXRecordDecl *RD) {
#undef FIELD2
std::string JSONNodeDumper::createAccessSpecifier(AccessSpecifier AS) {
- switch (AS) {
- case AS_none: return "none";
- case AS_private: return "private";
- case AS_protected: return "protected";
- case AS_public: return "public";
- }
- llvm_unreachable("Unknown access specifier");
+ const auto AccessSpelling = getAccessSpelling(AS);
+ if (AccessSpelling.empty())
+ return "none";
+ return AccessSpelling.str();
}
llvm::json::Object
@@ -997,32 +1005,33 @@ void JSONNodeDumper::VisitObjCPropertyDecl(const ObjCPropertyDecl *D) {
case ObjCPropertyDecl::Required: JOS.attribute("control", "required"); break;
case ObjCPropertyDecl::Optional: JOS.attribute("control", "optional"); break;
}
-
- ObjCPropertyDecl::PropertyAttributeKind Attrs = D->getPropertyAttributes();
- if (Attrs != ObjCPropertyDecl::OBJC_PR_noattr) {
- if (Attrs & ObjCPropertyDecl::OBJC_PR_getter)
+
+ ObjCPropertyAttribute::Kind Attrs = D->getPropertyAttributes();
+ if (Attrs != ObjCPropertyAttribute::kind_noattr) {
+ if (Attrs & ObjCPropertyAttribute::kind_getter)
JOS.attribute("getter", createBareDeclRef(D->getGetterMethodDecl()));
- if (Attrs & ObjCPropertyDecl::OBJC_PR_setter)
+ if (Attrs & ObjCPropertyAttribute::kind_setter)
JOS.attribute("setter", createBareDeclRef(D->getSetterMethodDecl()));
- attributeOnlyIfTrue("readonly", Attrs & ObjCPropertyDecl::OBJC_PR_readonly);
- attributeOnlyIfTrue("assign", Attrs & ObjCPropertyDecl::OBJC_PR_assign);
+ attributeOnlyIfTrue("readonly",
+ Attrs & ObjCPropertyAttribute::kind_readonly);
+ attributeOnlyIfTrue("assign", Attrs & ObjCPropertyAttribute::kind_assign);
attributeOnlyIfTrue("readwrite",
- Attrs & ObjCPropertyDecl::OBJC_PR_readwrite);
- attributeOnlyIfTrue("retain", Attrs & ObjCPropertyDecl::OBJC_PR_retain);
- attributeOnlyIfTrue("copy", Attrs & ObjCPropertyDecl::OBJC_PR_copy);
+ Attrs & ObjCPropertyAttribute::kind_readwrite);
+ attributeOnlyIfTrue("retain", Attrs & ObjCPropertyAttribute::kind_retain);
+ attributeOnlyIfTrue("copy", Attrs & ObjCPropertyAttribute::kind_copy);
attributeOnlyIfTrue("nonatomic",
- Attrs & ObjCPropertyDecl::OBJC_PR_nonatomic);
- attributeOnlyIfTrue("atomic", Attrs & ObjCPropertyDecl::OBJC_PR_atomic);
- attributeOnlyIfTrue("weak", Attrs & ObjCPropertyDecl::OBJC_PR_weak);
- attributeOnlyIfTrue("strong", Attrs & ObjCPropertyDecl::OBJC_PR_strong);
+ Attrs & ObjCPropertyAttribute::kind_nonatomic);
+ attributeOnlyIfTrue("atomic", Attrs & ObjCPropertyAttribute::kind_atomic);
+ attributeOnlyIfTrue("weak", Attrs & ObjCPropertyAttribute::kind_weak);
+ attributeOnlyIfTrue("strong", Attrs & ObjCPropertyAttribute::kind_strong);
attributeOnlyIfTrue("unsafe_unretained",
- Attrs & ObjCPropertyDecl::OBJC_PR_unsafe_unretained);
- attributeOnlyIfTrue("class", Attrs & ObjCPropertyDecl::OBJC_PR_class);
- attributeOnlyIfTrue("direct", Attrs & ObjCPropertyDecl::OBJC_PR_direct);
+ Attrs & ObjCPropertyAttribute::kind_unsafe_unretained);
+ attributeOnlyIfTrue("class", Attrs & ObjCPropertyAttribute::kind_class);
+ attributeOnlyIfTrue("direct", Attrs & ObjCPropertyAttribute::kind_direct);
attributeOnlyIfTrue("nullability",
- Attrs & ObjCPropertyDecl::OBJC_PR_nullability);
+ Attrs & ObjCPropertyAttribute::kind_nullability);
attributeOnlyIfTrue("null_resettable",
- Attrs & ObjCPropertyDecl::OBJC_PR_null_resettable);
+ Attrs & ObjCPropertyAttribute::kind_null_resettable);
}
}
@@ -1234,14 +1243,7 @@ void JSONNodeDumper::VisitCallExpr(const CallExpr *CE) {
void JSONNodeDumper::VisitUnaryExprOrTypeTraitExpr(
const UnaryExprOrTypeTraitExpr *TTE) {
- switch (TTE->getKind()) {
- case UETT_SizeOf: JOS.attribute("name", "sizeof"); break;
- case UETT_AlignOf: JOS.attribute("name", "alignof"); break;
- case UETT_VecStep: JOS.attribute("name", "vec_step"); break;
- case UETT_PreferredAlignOf: JOS.attribute("name", "__alignof"); break;
- case UETT_OpenMPRequiredSimdAlign:
- JOS.attribute("name", "__builtin_omp_required_simd_align"); break;
- }
+ JOS.attribute("name", getTraitSpelling(TTE->getKind()));
if (TTE->isArgumentType())
JOS.attribute("argType", createQualType(TTE->getArgumentType()));
}
@@ -1277,12 +1279,8 @@ void JSONNodeDumper::VisitCXXTypeidExpr(const CXXTypeidExpr *CTE) {
}
void JSONNodeDumper::VisitConstantExpr(const ConstantExpr *CE) {
- if (CE->getResultAPValueKind() != APValue::None) {
- std::string Str;
- llvm::raw_string_ostream OS(Str);
- CE->getAPValueResult().printPretty(OS, Ctx, CE->getType());
- JOS.attribute("value", OS.str());
- }
+ if (CE->getResultAPValueKind() != APValue::None)
+ Visit(CE->getAPValueResult(), CE->getType());
}
void JSONNodeDumper::VisitInitListExpr(const InitListExpr *ILE) {
@@ -1333,7 +1331,16 @@ void JSONNodeDumper::VisitExprWithCleanups(const ExprWithCleanups *EWC) {
if (EWC->getNumObjects()) {
JOS.attributeArray("cleanups", [this, EWC] {
for (const ExprWithCleanups::CleanupObject &CO : EWC->getObjects())
- JOS.value(createBareDeclRef(CO));
+ if (auto *BD = CO.dyn_cast<BlockDecl *>()) {
+ JOS.value(createBareDeclRef(BD));
+ } else if (auto *CLE = CO.dyn_cast<CompoundLiteralExpr *>()) {
+ llvm::json::Object Obj;
+ Obj["id"] = createPointerRepresentation(CLE);
+ Obj["kind"] = CLE->getStmtClassName();
+ JOS.value(std::move(Obj));
+ } else {
+ llvm_unreachable("unexpected cleanup object type");
+ }
});
}
}
diff --git a/clang/lib/AST/Linkage.h b/clang/lib/AST/Linkage.h
index 4e913540de86..5d8acf0016f4 100644
--- a/clang/lib/AST/Linkage.h
+++ b/clang/lib/AST/Linkage.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_LIB_AST_LINKAGE_H
#define LLVM_CLANG_LIB_AST_LINKAGE_H
+#include "clang/AST/ASTFwd.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Type.h"
diff --git a/clang/lib/AST/Mangle.cpp b/clang/lib/AST/Mangle.cpp
index e106b31f59f0..a732325006c6 100644
--- a/clang/lib/AST/Mangle.cpp
+++ b/clang/lib/AST/Mangle.cpp
@@ -25,6 +25,7 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Mangler.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -50,19 +51,32 @@ enum CCMangling {
CCM_Fast,
CCM_RegCall,
CCM_Vector,
- CCM_Std
+ CCM_Std,
+ CCM_WasmMainArgcArgv
};
static bool isExternC(const NamedDecl *ND) {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
return FD->isExternC();
- return cast<VarDecl>(ND)->isExternC();
+ if (const VarDecl *VD = dyn_cast<VarDecl>(ND))
+ return VD->isExternC();
+ return false;
}
static CCMangling getCallingConvMangling(const ASTContext &Context,
const NamedDecl *ND) {
const TargetInfo &TI = Context.getTargetInfo();
const llvm::Triple &Triple = TI.getTriple();
+
+ // On wasm, the argc/argv form of "main" is renamed so that the startup code
+ // can call it with the correct function signature.
+ // On Emscripten, users may be exporting "main" and expecting to call it
+ // themselves, so we can't mangle it.
+ if (Triple.isWasm() && !Triple.isOSEmscripten())
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
+ if (FD->isMain() && FD->hasPrototype() && FD->param_size() == 2)
+ return CCM_WasmMainArgcArgv;
+
if (!Triple.isOSWindows() || !Triple.isX86())
return CCM_Other;
@@ -111,10 +125,15 @@ bool MangleContext::shouldMangleDeclName(const NamedDecl *D) {
if (D->hasAttr<AsmLabelAttr>())
return true;
+ // Declarations that don't have identifier names always need to be mangled.
+ if (isa<MSGuidDecl>(D))
+ return true;
+
return shouldMangleCXXName(D);
}
-void MangleContext::mangleName(const NamedDecl *D, raw_ostream &Out) {
+void MangleContext::mangleName(GlobalDecl GD, raw_ostream &Out) {
+ const NamedDecl *D = cast<NamedDecl>(GD.getDecl());
// Any decl can be declared with __asm("foo") on it, and this takes precedence
// over all other naming in the .o file.
if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) {
@@ -141,15 +160,24 @@ void MangleContext::mangleName(const NamedDecl *D, raw_ostream &Out) {
return;
}
+ if (auto *GD = dyn_cast<MSGuidDecl>(D))
+ return mangleMSGuidDecl(GD, Out);
+
const ASTContext &ASTContext = getASTContext();
CCMangling CC = getCallingConvMangling(ASTContext, D);
+
+ if (CC == CCM_WasmMainArgcArgv) {
+ Out << "__main_argc_argv";
+ return;
+ }
+
bool MCXX = shouldMangleCXXName(D);
const TargetInfo &TI = Context.getTargetInfo();
if (CC == CCM_Other || (MCXX && TI.getCXXABI() == TargetCXXABI::Microsoft)) {
if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D))
mangleObjCMethodName(OMD, Out);
else
- mangleCXXName(D, Out);
+ mangleCXXName(GD, Out);
return;
}
@@ -166,7 +194,7 @@ void MangleContext::mangleName(const NamedDecl *D, raw_ostream &Out) {
else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D))
mangleObjCMethodName(OMD, Out);
else
- mangleCXXName(D, Out);
+ mangleCXXName(GD, Out);
const FunctionDecl *FD = cast<FunctionDecl>(D);
const FunctionType *FT = FD->getType()->castAs<FunctionType>();
@@ -191,6 +219,20 @@ void MangleContext::mangleName(const NamedDecl *D, raw_ostream &Out) {
Out << ((TI.getPointerWidth(0) / 8) * ArgWords);
}
+void MangleContext::mangleMSGuidDecl(const MSGuidDecl *GD, raw_ostream &Out) {
+ // For now, follow the MSVC naming convention for GUID objects on all
+ // targets.
+ MSGuidDecl::Parts P = GD->getParts();
+ Out << llvm::format("_GUID_%08" PRIx32 "_%04" PRIx32 "_%04" PRIx32 "_",
+ P.Part1, P.Part2, P.Part3);
+ unsigned I = 0;
+ for (uint8_t C : P.Part4And5) {
+ Out << llvm::format("%02" PRIx8, C);
+ if (++I == 2)
+ Out << "_";
+ }
+}
+
void MangleContext::mangleGlobalBlock(const BlockDecl *BD,
const NamedDecl *ID,
raw_ostream &Out) {
@@ -213,7 +255,7 @@ void MangleContext::mangleCtorBlock(const CXXConstructorDecl *CD,
raw_ostream &ResStream) {
SmallString<64> Buffer;
llvm::raw_svector_ostream Out(Buffer);
- mangleCXXCtor(CD, CT, Out);
+ mangleName(GlobalDecl(CD, CT), Out);
mangleFunctionBlock(*this, Buffer, BD, ResStream);
}
@@ -222,7 +264,7 @@ void MangleContext::mangleDtorBlock(const CXXDestructorDecl *DD,
raw_ostream &ResStream) {
SmallString<64> Buffer;
llvm::raw_svector_ostream Out(Buffer);
- mangleCXXDtor(DD, DT, Out);
+ mangleName(GlobalDecl(DD, DT), Out);
mangleFunctionBlock(*this, Buffer, BD, ResStream);
}
@@ -358,7 +400,7 @@ public:
SmallString<40> Mangled;
auto Prefix = getClassSymbolPrefix(Kind, OCD->getASTContext());
llvm::Mangler::getNameWithPrefix(Mangled, Prefix + ClassName, DL);
- return Mangled.str();
+ return std::string(Mangled.str());
};
return {
@@ -420,12 +462,16 @@ public:
private:
bool writeFuncOrVarName(const NamedDecl *D, raw_ostream &OS) {
if (MC->shouldMangleDeclName(D)) {
+ GlobalDecl GD;
if (const auto *CtorD = dyn_cast<CXXConstructorDecl>(D))
- MC->mangleCXXCtor(CtorD, Ctor_Complete, OS);
+ GD = GlobalDecl(CtorD, Ctor_Complete);
else if (const auto *DtorD = dyn_cast<CXXDestructorDecl>(D))
- MC->mangleCXXDtor(DtorD, Dtor_Complete, OS);
+ GD = GlobalDecl(DtorD, Dtor_Complete);
+ else if (D->hasAttr<CUDAGlobalAttr>())
+ GD = GlobalDecl(cast<FunctionDecl>(D));
else
- MC->mangleName(D, OS);
+ GD = GlobalDecl(D);
+ MC->mangleName(GD, OS);
return false;
} else {
IdentifierInfo *II = D->getIdentifier();
@@ -445,10 +491,12 @@ private:
std::string FrontendBuf;
llvm::raw_string_ostream FOS(FrontendBuf);
+ GlobalDecl GD;
if (const auto *CD = dyn_cast_or_null<CXXConstructorDecl>(ND))
- MC->mangleCXXCtor(CD, static_cast<CXXCtorType>(StructorType), FOS);
+ GD = GlobalDecl(CD, static_cast<CXXCtorType>(StructorType));
else if (const auto *DD = dyn_cast_or_null<CXXDestructorDecl>(ND))
- MC->mangleCXXDtor(DD, static_cast<CXXDtorType>(StructorType), FOS);
+ GD = GlobalDecl(DD, static_cast<CXXDtorType>(StructorType));
+ MC->mangleName(GD, FOS);
std::string BackendBuf;
llvm::raw_string_ostream BOS(BackendBuf);
diff --git a/clang/lib/AST/MicrosoftMangle.cpp b/clang/lib/AST/MicrosoftMangle.cpp
index a286c5335584..529f301e4696 100644
--- a/clang/lib/AST/MicrosoftMangle.cpp
+++ b/clang/lib/AST/MicrosoftMangle.cpp
@@ -10,7 +10,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/Mangle.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
@@ -22,9 +21,12 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/Mangle.h"
#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CRC.h"
@@ -135,7 +137,7 @@ public:
MicrosoftMangleContextImpl(ASTContext &Context, DiagnosticsEngine &Diags);
bool shouldMangleCXXName(const NamedDecl *D) override;
bool shouldMangleStringLiteral(const StringLiteral *SL) override;
- void mangleCXXName(const NamedDecl *D, raw_ostream &Out) override;
+ void mangleCXXName(GlobalDecl GD, raw_ostream &Out) override;
void mangleVirtualMemPtrThunk(const CXXMethodDecl *MD,
const MethodVFTableLocation &ML,
raw_ostream &Out) override;
@@ -177,10 +179,6 @@ public:
ArrayRef<const CXXRecordDecl *> BasePath,
raw_ostream &Out) override;
void mangleTypeName(QualType T, raw_ostream &) override;
- void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
- raw_ostream &) override;
- void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
- raw_ostream &) override;
void mangleReferenceTemporary(const VarDecl *, unsigned ManglingNumber,
raw_ostream &) override;
void mangleStaticGuardVariable(const VarDecl *D, raw_ostream &Out) override;
@@ -464,7 +462,7 @@ bool MicrosoftMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) {
if (VD->isExternC())
return false;
- // Variables at global scope with non-internal linkage are not mangled.
+ // Variables at global scope with internal linkage are not mangled.
const DeclContext *DC = getEffectiveDeclContext(D);
// Check for extern variable declared locally.
if (DC->isFunctionOrMethod() && D->hasLinkage())
@@ -499,6 +497,10 @@ void MicrosoftCXXNameMangler::mangle(const NamedDecl *D, StringRef Prefix) {
mangleFunctionEncoding(FD, Context.shouldMangleDeclName(FD));
else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
mangleVariableEncoding(VD);
+ else if (isa<MSGuidDecl>(D))
+ // MSVC appears to mangle GUIDs as if they were variables of type
+ // 'const struct __s_GUID'.
+ Out << "3U__s_GUID@@B";
else
llvm_unreachable("Tried to mangle unexpected NamedDecl!");
}
@@ -895,6 +897,16 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
break;
}
+ if (const MSGuidDecl *GD = dyn_cast<MSGuidDecl>(ND)) {
+ // Mangle a GUID object as if it were a variable with the corresponding
+ // mangled name.
+ SmallString<sizeof("_GUID_12345678_1234_1234_1234_1234567890ab")> GUID;
+ llvm::raw_svector_ostream GUIDOS(GUID);
+ Context.mangleMSGuidDecl(GD, GUIDOS);
+ mangleSourceName(GUID);
+ break;
+ }
+
// We must have an anonymous struct.
const TagDecl *TD = cast<TagDecl>(ND);
if (const TypedefNameDecl *D = TD->getTypedefNameForAnonDecl()) {
@@ -935,12 +947,12 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
mangleSourceName(Name);
- // If the context of a closure type is an initializer for a class
- // member (static or nonstatic), it is encoded in a qualified name.
+ // If the context is a variable or a class member and not a parameter,
+ // it is encoded in a qualified name.
if (LambdaManglingNumber && LambdaContextDecl) {
if ((isa<VarDecl>(LambdaContextDecl) ||
isa<FieldDecl>(LambdaContextDecl)) &&
- LambdaContextDecl->getDeclContext()->isRecord()) {
+ !isa<ParmVarDecl>(LambdaContextDecl)) {
mangleUnqualifiedName(cast<NamedDecl>(LambdaContextDecl));
}
}
@@ -1301,7 +1313,7 @@ void MicrosoftCXXNameMangler::mangleSourceName(StringRef Name) {
BackRefVec::iterator Found = llvm::find(NameBackReferences, Name);
if (Found == NameBackReferences.end()) {
if (NameBackReferences.size() < 10)
- NameBackReferences.push_back(Name);
+ NameBackReferences.push_back(std::string(Name));
Out << Name << '@';
} else {
Out << (Found - NameBackReferences.begin());
@@ -1366,45 +1378,6 @@ void MicrosoftCXXNameMangler::mangleExpression(const Expr *E) {
return;
}
- // Look through no-op casts like template parameter substitutions.
- E = E->IgnoreParenNoopCasts(Context.getASTContext());
-
- const CXXUuidofExpr *UE = nullptr;
- if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
- if (UO->getOpcode() == UO_AddrOf)
- UE = dyn_cast<CXXUuidofExpr>(UO->getSubExpr());
- } else
- UE = dyn_cast<CXXUuidofExpr>(E);
-
- if (UE) {
- // If we had to peek through an address-of operator, treat this like we are
- // dealing with a pointer type. Otherwise, treat it like a const reference.
- //
- // N.B. This matches up with the handling of TemplateArgument::Declaration
- // in mangleTemplateArg
- if (UE == E)
- Out << "$E?";
- else
- Out << "$1?";
-
- // This CXXUuidofExpr is mangled as-if it were actually a VarDecl from
- // const __s_GUID _GUID_{lower case UUID with underscores}
- StringRef Uuid = UE->getUuidStr();
- std::string Name = "_GUID_" + Uuid.lower();
- std::replace(Name.begin(), Name.end(), '-', '_');
-
- mangleSourceName(Name);
- // Terminate the whole name with an '@'.
- Out << '@';
- // It's a global variable.
- Out << '3';
- // It's a struct called __s_GUID.
- mangleArtificialTagType(TTK_Struct, "__s_GUID");
- // It's const.
- Out << 'B';
- return;
- }
-
// As bad as this diagnostic is, it's better than crashing.
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
@@ -2141,6 +2114,7 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
case BuiltinType::SatUShortFract:
case BuiltinType::SatUFract:
case BuiltinType::SatULongFract:
+ case BuiltinType::BFloat16:
case BuiltinType::Float128: {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
@@ -2757,6 +2731,23 @@ void MicrosoftCXXNameMangler::mangleType(const DependentSizedExtVectorType *T,
<< Range;
}
+void MicrosoftCXXNameMangler::mangleType(const ConstantMatrixType *T,
+ Qualifiers quals, SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "Cannot mangle this matrix type yet");
+ Diags.Report(Range.getBegin(), DiagID) << Range;
+}
+
+void MicrosoftCXXNameMangler::mangleType(const DependentSizedMatrixType *T,
+ Qualifiers quals, SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "Cannot mangle this dependent-sized matrix type yet");
+ Diags.Report(Range.getBegin(), DiagID) << Range;
+}
+
void MicrosoftCXXNameMangler::mangleType(const DependentAddressSpaceType *T,
Qualifiers, SourceRange Range) {
DiagnosticsEngine &Diags = Context.getDiags();
@@ -2942,29 +2933,68 @@ void MicrosoftCXXNameMangler::mangleType(const AtomicType *T, Qualifiers,
void MicrosoftCXXNameMangler::mangleType(const PipeType *T, Qualifiers,
SourceRange Range) {
- DiagnosticsEngine &Diags = Context.getDiags();
- unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
- "cannot mangle this OpenCL pipe type yet");
- Diags.Report(Range.getBegin(), DiagID)
- << Range;
+ QualType ElementType = T->getElementType();
+
+ llvm::SmallString<64> TemplateMangling;
+ llvm::raw_svector_ostream Stream(TemplateMangling);
+ MicrosoftCXXNameMangler Extra(Context, Stream);
+ Stream << "?$";
+ Extra.mangleSourceName("ocl_pipe");
+ Extra.mangleType(ElementType, Range, QMM_Escape);
+ Extra.mangleIntegerLiteral(llvm::APSInt::get(T->isReadOnly()), true);
+
+ mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__clang"});
}
-void MicrosoftMangleContextImpl::mangleCXXName(const NamedDecl *D,
+void MicrosoftMangleContextImpl::mangleCXXName(GlobalDecl GD,
raw_ostream &Out) {
- assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) &&
- "Invalid mangleName() call, argument is not a variable or function!");
- assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) &&
- "Invalid mangleName() call on 'structor decl!");
-
+ const NamedDecl *D = cast<NamedDecl>(GD.getDecl());
PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
getASTContext().getSourceManager(),
"Mangling declaration");
msvc_hashing_ostream MHO(Out);
+
+ if (auto *CD = dyn_cast<CXXConstructorDecl>(D)) {
+ auto Type = GD.getCtorType();
+ MicrosoftCXXNameMangler mangler(*this, MHO, CD, Type);
+ return mangler.mangle(D);
+ }
+
+ if (auto *DD = dyn_cast<CXXDestructorDecl>(D)) {
+ auto Type = GD.getDtorType();
+ MicrosoftCXXNameMangler mangler(*this, MHO, DD, Type);
+ return mangler.mangle(D);
+ }
+
MicrosoftCXXNameMangler Mangler(*this, MHO);
return Mangler.mangle(D);
}
+void MicrosoftCXXNameMangler::mangleType(const ExtIntType *T, Qualifiers,
+ SourceRange Range) {
+ llvm::SmallString<64> TemplateMangling;
+ llvm::raw_svector_ostream Stream(TemplateMangling);
+ MicrosoftCXXNameMangler Extra(Context, Stream);
+ Stream << "?$";
+ if (T->isUnsigned())
+ Extra.mangleSourceName("_UExtInt");
+ else
+ Extra.mangleSourceName("_ExtInt");
+ Extra.mangleIntegerLiteral(llvm::APSInt::getUnsigned(T->getNumBits()),
+ /*IsBoolean=*/false);
+
+ mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__clang"});
+}
+
+void MicrosoftCXXNameMangler::mangleType(const DependentExtIntType *T,
+ Qualifiers, SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error, "cannot mangle this DependentExtInt type yet");
+ Diags.Report(Range.getBegin(), DiagID) << Range;
+}
+
// <this-adjustment> ::= <no-adjustment> | <static-adjustment> |
// <virtual-adjustment>
// <no-adjustment> ::= A # private near
@@ -3218,7 +3248,7 @@ void MicrosoftMangleContextImpl::mangleCXXCatchableType(
if (!OmitCopyCtor && CD) {
llvm::raw_svector_ostream Stream(CopyCtorMangling);
msvc_hashing_ostream MHO(Stream);
- mangleCXXCtor(CD, CT, MHO);
+ mangleCXXName(GlobalDecl(CD, CT), MHO);
}
Mangler.getStream() << CopyCtorMangling;
@@ -3321,22 +3351,6 @@ void MicrosoftMangleContextImpl::mangleTypeName(QualType T, raw_ostream &Out) {
Mangler.mangleType(T, SourceRange());
}
-void MicrosoftMangleContextImpl::mangleCXXCtor(const CXXConstructorDecl *D,
- CXXCtorType Type,
- raw_ostream &Out) {
- msvc_hashing_ostream MHO(Out);
- MicrosoftCXXNameMangler mangler(*this, MHO, D, Type);
- mangler.mangle(D);
-}
-
-void MicrosoftMangleContextImpl::mangleCXXDtor(const CXXDestructorDecl *D,
- CXXDtorType Type,
- raw_ostream &Out) {
- msvc_hashing_ostream MHO(Out);
- MicrosoftCXXNameMangler mangler(*this, MHO, D, Type);
- mangler.mangle(D);
-}
-
void MicrosoftMangleContextImpl::mangleReferenceTemporary(
const VarDecl *VD, unsigned ManglingNumber, raw_ostream &Out) {
msvc_hashing_ostream MHO(Out);
diff --git a/clang/lib/AST/NSAPI.cpp b/clang/lib/AST/NSAPI.cpp
index ae6ff04f5126..ace7f1ceebe7 100644
--- a/clang/lib/AST/NSAPI.cpp
+++ b/clang/lib/AST/NSAPI.cpp
@@ -482,7 +482,11 @@ NSAPI::getNSNumberFactoryMethodKind(QualType T) const {
case BuiltinType::Half:
case BuiltinType::PseudoObject:
case BuiltinType::BuiltinFn:
+ case BuiltinType::IncompleteMatrixIdx:
case BuiltinType::OMPArraySection:
+ case BuiltinType::OMPArrayShaping:
+ case BuiltinType::OMPIterator:
+ case BuiltinType::BFloat16:
break;
}
diff --git a/clang/lib/AST/NestedNameSpecifier.cpp b/clang/lib/AST/NestedNameSpecifier.cpp
index 137953fa8203..08e8819a4d69 100644
--- a/clang/lib/AST/NestedNameSpecifier.cpp
+++ b/clang/lib/AST/NestedNameSpecifier.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
@@ -197,75 +198,53 @@ CXXRecordDecl *NestedNameSpecifier::getAsRecordDecl() const {
llvm_unreachable("Invalid NNS Kind!");
}
-/// Whether this nested name specifier refers to a dependent
-/// type or not.
-bool NestedNameSpecifier::isDependent() const {
+NestedNameSpecifierDependence NestedNameSpecifier::getDependence() const {
switch (getKind()) {
- case Identifier:
+ case Identifier: {
// Identifier specifiers always represent dependent types
- return true;
+ auto F = NestedNameSpecifierDependence::Dependent |
+ NestedNameSpecifierDependence::Instantiation;
+ // Prefix can contain unexpanded template parameters.
+ if (getPrefix())
+ return F | getPrefix()->getDependence();
+ return F;
+ }
case Namespace:
case NamespaceAlias:
case Global:
- return false;
+ return NestedNameSpecifierDependence::None;
case Super: {
CXXRecordDecl *RD = static_cast<CXXRecordDecl *>(Specifier);
for (const auto &Base : RD->bases())
if (Base.getType()->isDependentType())
- return true;
-
- return false;
+ // FIXME: must also be instantiation-dependent.
+ return NestedNameSpecifierDependence::Dependent;
+ return NestedNameSpecifierDependence::None;
}
case TypeSpec:
case TypeSpecWithTemplate:
- return getAsType()->isDependentType();
+ return toNestedNameSpecifierDependendence(getAsType()->getDependence());
}
-
llvm_unreachable("Invalid NNS Kind!");
}
-/// Whether this nested name specifier refers to a dependent
-/// type or not.
-bool NestedNameSpecifier::isInstantiationDependent() const {
- switch (getKind()) {
- case Identifier:
- // Identifier specifiers always represent dependent types
- return true;
-
- case Namespace:
- case NamespaceAlias:
- case Global:
- case Super:
- return false;
-
- case TypeSpec:
- case TypeSpecWithTemplate:
- return getAsType()->isInstantiationDependentType();
- }
+bool NestedNameSpecifier::isDependent() const {
+ return getDependence() & NestedNameSpecifierDependence::Dependent;
+}
- llvm_unreachable("Invalid NNS Kind!");
+bool NestedNameSpecifier::isInstantiationDependent() const {
+ return getDependence() & NestedNameSpecifierDependence::Instantiation;
}
bool NestedNameSpecifier::containsUnexpandedParameterPack() const {
- switch (getKind()) {
- case Identifier:
- return getPrefix() && getPrefix()->containsUnexpandedParameterPack();
-
- case Namespace:
- case NamespaceAlias:
- case Global:
- case Super:
- return false;
-
- case TypeSpec:
- case TypeSpecWithTemplate:
- return getAsType()->containsUnexpandedParameterPack();
- }
+ return getDependence() & NestedNameSpecifierDependence::UnexpandedPack;
+}
- llvm_unreachable("Invalid NNS Kind!");
+bool NestedNameSpecifier::containsErrors() const {
+ return getDependence() & NestedNameSpecifierDependence::Error;
}
/// Print this nested name specifier to the given output
@@ -336,6 +315,14 @@ void NestedNameSpecifier::print(raw_ostream &OS, const PrintingPolicy &Policy,
// Print the template argument list.
printTemplateArgumentList(OS, SpecType->template_arguments(),
InnerPolicy);
+ } else if (const auto *DepSpecType =
+ dyn_cast<DependentTemplateSpecializationType>(T)) {
+ // Print the template name without its corresponding
+ // nested-name-specifier.
+ OS << DepSpecType->getIdentifier()->getName();
+ // Print the template argument list.
+ printTemplateArgumentList(OS, DepSpecType->template_arguments(),
+ InnerPolicy);
} else {
// Print the type normally
QualType(T, 0).print(OS, InnerPolicy);
@@ -481,12 +468,14 @@ static void Append(char *Start, char *End, char *&Buffer, unsigned &BufferSize,
unsigned NewCapacity = std::max(
(unsigned)(BufferCapacity ? BufferCapacity * 2 : sizeof(void *) * 2),
(unsigned)(BufferSize + (End - Start)));
- char *NewBuffer = static_cast<char *>(llvm::safe_malloc(NewCapacity));
- if (BufferCapacity) {
- memcpy(NewBuffer, Buffer, BufferSize);
- free(Buffer);
+ if (!BufferCapacity) {
+ char *NewBuffer = static_cast<char *>(llvm::safe_malloc(NewCapacity));
+ if (Buffer)
+ memcpy(NewBuffer, Buffer, BufferSize);
+ Buffer = NewBuffer;
+ } else {
+ Buffer = static_cast<char *>(llvm::safe_realloc(Buffer, NewCapacity));
}
- Buffer = NewBuffer;
BufferCapacity = NewCapacity;
}
assert(Buffer && Start && End && End > Start && "Illegal memory buffer copy");
diff --git a/clang/lib/AST/ODRHash.cpp b/clang/lib/AST/ODRHash.cpp
index 27fdca1c4b9c..735bcff8f113 100644
--- a/clang/lib/AST/ODRHash.cpp
+++ b/clang/lib/AST/ODRHash.cpp
@@ -440,7 +440,7 @@ public:
// Only allow a small portion of Decl's to be processed. Remove this once
// all Decl's can be handled.
-bool ODRHash::isWhitelistedDecl(const Decl *D, const DeclContext *Parent) {
+bool ODRHash::isDeclToBeProcessed(const Decl *D, const DeclContext *Parent) {
if (D->isImplicit()) return false;
if (D->getDeclContext() != Parent) return false;
@@ -487,7 +487,7 @@ void ODRHash::AddCXXRecordDecl(const CXXRecordDecl *Record) {
// accurate count of Decl's.
llvm::SmallVector<const Decl *, 16> Decls;
for (Decl *SubDecl : Record->decls()) {
- if (isWhitelistedDecl(SubDecl, Record)) {
+ if (isDeclToBeProcessed(SubDecl, Record)) {
Decls.push_back(SubDecl);
if (auto *Function = dyn_cast<FunctionDecl>(SubDecl)) {
// Compute/Preload ODRHash into FunctionDecl.
@@ -588,7 +588,7 @@ void ODRHash::AddFunctionDecl(const FunctionDecl *Function,
// accurate count of Decl's.
llvm::SmallVector<const Decl *, 16> Decls;
for (Decl *SubDecl : Function->decls()) {
- if (isWhitelistedDecl(SubDecl, Function)) {
+ if (isDeclToBeProcessed(SubDecl, Function)) {
Decls.push_back(SubDecl);
}
}
@@ -614,7 +614,7 @@ void ODRHash::AddEnumDecl(const EnumDecl *Enum) {
// accurate count of Decl's.
llvm::SmallVector<const Decl *, 16> Decls;
for (Decl *SubDecl : Enum->decls()) {
- if (isWhitelistedDecl(SubDecl, Enum)) {
+ if (isDeclToBeProcessed(SubDecl, Enum)) {
assert(isa<EnumConstantDecl>(SubDecl) && "Unexpected Decl");
Decls.push_back(SubDecl);
}
@@ -857,6 +857,13 @@ public:
void VisitAutoType(const AutoType *T) {
ID.AddInteger((unsigned)T->getKeyword());
+ ID.AddInteger(T->isConstrained());
+ if (T->isConstrained()) {
+ AddDecl(T->getTypeConstraintConcept());
+ ID.AddInteger(T->getNumArgs());
+ for (const auto &TA : T->getTypeConstraintArguments())
+ Hash.AddTemplateArgument(TA);
+ }
VisitDeducedType(T);
}
diff --git a/clang/lib/AST/OSLog.cpp b/clang/lib/AST/OSLog.cpp
index df2f808728cf..094c0102854b 100644
--- a/clang/lib/AST/OSLog.cpp
+++ b/clang/lib/AST/OSLog.cpp
@@ -55,9 +55,9 @@ public:
ArgsData.reserve(Args.size());
}
- virtual bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
- const char *StartSpecifier,
- unsigned SpecifierLen) {
+ bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
+ const char *StartSpecifier,
+ unsigned SpecifierLen) override {
if (!FS.consumesDataArgument() &&
FS.getConversionSpecifier().getKind() !=
clang::analyze_format_string::ConversionSpecifier::PrintErrno)
diff --git a/clang/lib/AST/OpenMPClause.cpp b/clang/lib/AST/OpenMPClause.cpp
index 5ef82648c89d..a0b0dca55390 100644
--- a/clang/lib/AST/OpenMPClause.cpp
+++ b/clang/lib/AST/OpenMPClause.cpp
@@ -12,6 +12,7 @@
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/Basic/LLVM.h"
@@ -23,31 +24,35 @@
#include <cassert>
using namespace clang;
+using namespace llvm;
+using namespace omp;
OMPClause::child_range OMPClause::children() {
switch (getClauseKind()) {
default:
break;
-#define OPENMP_CLAUSE(Name, Class) \
- case OMPC_##Name: \
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
+ case Enum: \
return static_cast<Class *>(this)->children();
-#include "clang/Basic/OpenMPKinds.def"
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
}
llvm_unreachable("unknown OMPClause");
}
OMPClause::child_range OMPClause::used_children() {
switch (getClauseKind()) {
-#define OPENMP_CLAUSE(Name, Class) \
- case OMPC_##Name: \
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
+ case Enum: \
return static_cast<Class *>(this)->used_children();
-#include "clang/Basic/OpenMPKinds.def"
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
case OMPC_threadprivate:
case OMPC_uniform:
case OMPC_device_type:
case OMPC_match:
case OMPC_unknown:
break;
+ default:
+ break;
}
llvm_unreachable("unknown OMPClause");
}
@@ -111,11 +116,16 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) {
case OMPC_mergeable:
case OMPC_threadprivate:
case OMPC_flush:
+ case OMPC_depobj:
case OMPC_read:
case OMPC_write:
case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed:
case OMPC_depend:
case OMPC_threads:
case OMPC_simd:
@@ -128,6 +138,7 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) {
case OMPC_to:
case OMPC_from:
case OMPC_use_device_ptr:
+ case OMPC_use_device_addr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
@@ -137,6 +148,15 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) {
case OMPC_device_type:
case OMPC_match:
case OMPC_nontemporal:
+ case OMPC_order:
+ case OMPC_destroy:
+ case OMPC_detach:
+ case OMPC_inclusive:
+ case OMPC_exclusive:
+ case OMPC_uses_allocators:
+ case OMPC_affinity:
+ break;
+ default:
break;
}
@@ -184,11 +204,16 @@ const OMPClauseWithPostUpdate *OMPClauseWithPostUpdate::get(const OMPClause *C)
case OMPC_mergeable:
case OMPC_threadprivate:
case OMPC_flush:
+ case OMPC_depobj:
case OMPC_read:
case OMPC_write:
case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed:
case OMPC_depend:
case OMPC_device:
case OMPC_threads:
@@ -207,6 +232,7 @@ const OMPClauseWithPostUpdate *OMPClauseWithPostUpdate::get(const OMPClause *C)
case OMPC_to:
case OMPC_from:
case OMPC_use_device_ptr:
+ case OMPC_use_device_addr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
@@ -216,6 +242,15 @@ const OMPClauseWithPostUpdate *OMPClauseWithPostUpdate::get(const OMPClause *C)
case OMPC_device_type:
case OMPC_match:
case OMPC_nontemporal:
+ case OMPC_order:
+ case OMPC_destroy:
+ case OMPC_detach:
+ case OMPC_inclusive:
+ case OMPC_exclusive:
+ case OMPC_uses_allocators:
+ case OMPC_affinity:
+ break;
+ default:
break;
}
@@ -316,6 +351,39 @@ const Expr *OMPOrderedClause::getLoopCounter(unsigned NumLoop) const {
return getTrailingObjects<Expr *>()[NumberOfLoops + NumLoop];
}
+OMPUpdateClause *OMPUpdateClause::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (C) OMPUpdateClause(StartLoc, EndLoc, /*IsExtended=*/false);
+}
+
+OMPUpdateClause *
+OMPUpdateClause::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation ArgumentLoc,
+ OpenMPDependClauseKind DK, SourceLocation EndLoc) {
+ void *Mem =
+ C.Allocate(totalSizeToAlloc<SourceLocation, OpenMPDependClauseKind>(2, 1),
+ alignof(OMPUpdateClause));
+ auto *Clause =
+ new (Mem) OMPUpdateClause(StartLoc, EndLoc, /*IsExtended=*/true);
+ Clause->setLParenLoc(LParenLoc);
+ Clause->setArgumentLoc(ArgumentLoc);
+ Clause->setDependencyKind(DK);
+ return Clause;
+}
+
+OMPUpdateClause *OMPUpdateClause::CreateEmpty(const ASTContext &C,
+ bool IsExtended) {
+ if (!IsExtended)
+ return new (C) OMPUpdateClause(/*IsExtended=*/false);
+ void *Mem =
+ C.Allocate(totalSizeToAlloc<SourceLocation, OpenMPDependClauseKind>(2, 1),
+ alignof(OMPUpdateClause));
+ auto *Clause = new (Mem) OMPUpdateClause(/*IsExtended=*/true);
+ Clause->IsExtended = true;
+ return Clause;
+}
+
void OMPPrivateClause::setPrivateCopies(ArrayRef<Expr *> VL) {
assert(VL.size() == varlist_size() &&
"Number of private copies is not the same as the preallocated buffer");
@@ -647,16 +715,46 @@ void OMPReductionClause::setReductionOps(ArrayRef<Expr *> ReductionOps) {
std::copy(ReductionOps.begin(), ReductionOps.end(), getRHSExprs().end());
}
+void OMPReductionClause::setInscanCopyOps(ArrayRef<Expr *> Ops) {
+ assert(Modifier == OMPC_REDUCTION_inscan && "Expected inscan reduction.");
+ assert(Ops.size() == varlist_size() && "Number of copy "
+ "expressions is not the same "
+ "as the preallocated buffer");
+ llvm::copy(Ops, getReductionOps().end());
+}
+
+void OMPReductionClause::setInscanCopyArrayTemps(
+ ArrayRef<Expr *> CopyArrayTemps) {
+ assert(Modifier == OMPC_REDUCTION_inscan && "Expected inscan reduction.");
+ assert(CopyArrayTemps.size() == varlist_size() &&
+ "Number of copy temp expressions is not the same as the preallocated "
+ "buffer");
+ llvm::copy(CopyArrayTemps, getInscanCopyOps().end());
+}
+
+void OMPReductionClause::setInscanCopyArrayElems(
+ ArrayRef<Expr *> CopyArrayElems) {
+ assert(Modifier == OMPC_REDUCTION_inscan && "Expected inscan reduction.");
+ assert(CopyArrayElems.size() == varlist_size() &&
+ "Number of copy temp expressions is not the same as the preallocated "
+ "buffer");
+ llvm::copy(CopyArrayElems, getInscanCopyArrayTemps().end());
+}
+
OMPReductionClause *OMPReductionClause::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
- SourceLocation EndLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL,
+ SourceLocation ModifierLoc, SourceLocation EndLoc, SourceLocation ColonLoc,
+ OpenMPReductionClauseModifier Modifier, ArrayRef<Expr *> VL,
NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo,
ArrayRef<Expr *> Privates, ArrayRef<Expr *> LHSExprs,
- ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps, Stmt *PreInit,
- Expr *PostUpdate) {
- void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(5 * VL.size()));
- OMPReductionClause *Clause = new (Mem) OMPReductionClause(
- StartLoc, LParenLoc, EndLoc, ColonLoc, VL.size(), QualifierLoc, NameInfo);
+ ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps,
+ ArrayRef<Expr *> CopyOps, ArrayRef<Expr *> CopyArrayTemps,
+ ArrayRef<Expr *> CopyArrayElems, Stmt *PreInit, Expr *PostUpdate) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(
+ (Modifier == OMPC_REDUCTION_inscan ? 8 : 5) * VL.size()));
+ auto *Clause = new (Mem)
+ OMPReductionClause(StartLoc, LParenLoc, ModifierLoc, EndLoc, ColonLoc,
+ Modifier, VL.size(), QualifierLoc, NameInfo);
Clause->setVarRefs(VL);
Clause->setPrivates(Privates);
Clause->setLHSExprs(LHSExprs);
@@ -664,13 +762,29 @@ OMPReductionClause *OMPReductionClause::Create(
Clause->setReductionOps(ReductionOps);
Clause->setPreInitStmt(PreInit);
Clause->setPostUpdateExpr(PostUpdate);
+ if (Modifier == OMPC_REDUCTION_inscan) {
+ Clause->setInscanCopyOps(CopyOps);
+ Clause->setInscanCopyArrayTemps(CopyArrayTemps);
+ Clause->setInscanCopyArrayElems(CopyArrayElems);
+ } else {
+ assert(CopyOps.empty() &&
+ "copy operations are expected in inscan reductions only.");
+ assert(CopyArrayTemps.empty() &&
+ "copy array temps are expected in inscan reductions only.");
+ assert(CopyArrayElems.empty() &&
+ "copy array temps are expected in inscan reductions only.");
+ }
return Clause;
}
-OMPReductionClause *OMPReductionClause::CreateEmpty(const ASTContext &C,
- unsigned N) {
- void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(5 * N));
- return new (Mem) OMPReductionClause(N);
+OMPReductionClause *
+OMPReductionClause::CreateEmpty(const ASTContext &C, unsigned N,
+ OpenMPReductionClauseModifier Modifier) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(
+ (Modifier == OMPC_REDUCTION_inscan ? 8 : 5) * N));
+ auto *Clause = new (Mem) OMPReductionClause(N);
+ Clause->setModifier(Modifier);
+ return Clause;
}
void OMPTaskReductionClause::setPrivates(ArrayRef<Expr *> Privates) {
@@ -825,19 +939,36 @@ OMPFlushClause *OMPFlushClause::CreateEmpty(const ASTContext &C, unsigned N) {
return new (Mem) OMPFlushClause(N);
}
+OMPDepobjClause *OMPDepobjClause::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc,
+ Expr *Depobj) {
+ auto *Clause = new (C) OMPDepobjClause(StartLoc, LParenLoc, RParenLoc);
+ Clause->setDepobj(Depobj);
+ return Clause;
+}
+
+OMPDepobjClause *OMPDepobjClause::CreateEmpty(const ASTContext &C) {
+ return new (C) OMPDepobjClause();
+}
+
OMPDependClause *
OMPDependClause::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
- OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
- SourceLocation ColonLoc, ArrayRef<Expr *> VL,
- unsigned NumLoops) {
- void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size() + NumLoops));
+ Expr *DepModifier, OpenMPDependClauseKind DepKind,
+ SourceLocation DepLoc, SourceLocation ColonLoc,
+ ArrayRef<Expr *> VL, unsigned NumLoops) {
+ void *Mem = C.Allocate(
+ totalSizeToAlloc<Expr *>(VL.size() + /*depend-modifier*/ 1 + NumLoops),
+ alignof(OMPDependClause));
OMPDependClause *Clause = new (Mem)
OMPDependClause(StartLoc, LParenLoc, EndLoc, VL.size(), NumLoops);
Clause->setVarRefs(VL);
Clause->setDependencyKind(DepKind);
Clause->setDependencyLoc(DepLoc);
Clause->setColonLoc(ColonLoc);
+ Clause->setModifier(DepModifier);
for (unsigned I = 0 ; I < NumLoops; ++I)
Clause->setLoopData(I, nullptr);
return Clause;
@@ -845,7 +976,9 @@ OMPDependClause::Create(const ASTContext &C, SourceLocation StartLoc,
OMPDependClause *OMPDependClause::CreateEmpty(const ASTContext &C, unsigned N,
unsigned NumLoops) {
- void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N + NumLoops));
+ void *Mem =
+ C.Allocate(totalSizeToAlloc<Expr *>(N + /*depend-modifier*/ 1 + NumLoops),
+ alignof(OMPDependClause));
return new (Mem) OMPDependClause(N, NumLoops);
}
@@ -855,7 +988,7 @@ void OMPDependClause::setLoopData(unsigned NumLoop, Expr *Cnt) {
NumLoop < NumLoops &&
"Expected sink or source depend + loop index must be less number of "
"loops.");
- auto It = std::next(getVarRefs().end(), NumLoop);
+ auto *It = std::next(getVarRefs().end(), NumLoop + 1);
*It = Cnt;
}
@@ -865,7 +998,7 @@ Expr *OMPDependClause::getLoopData(unsigned NumLoop) {
NumLoop < NumLoops &&
"Expected sink or source depend + loop index must be less number of "
"loops.");
- auto It = std::next(getVarRefs().end(), NumLoop);
+ auto *It = std::next(getVarRefs().end(), NumLoop + 1);
return *It;
}
@@ -875,10 +1008,15 @@ const Expr *OMPDependClause::getLoopData(unsigned NumLoop) const {
NumLoop < NumLoops &&
"Expected sink or source depend + loop index must be less number of "
"loops.");
- auto It = std::next(getVarRefs().end(), NumLoop);
+ const auto *It = std::next(getVarRefs().end(), NumLoop + 1);
return *It;
}
+void OMPDependClause::setModifier(Expr *DepModifier) {
+ *getVarRefs().end() = DepModifier;
+}
+Expr *OMPDependClause::getModifier() { return *getVarRefs().end(); }
+
unsigned OMPClauseMappableExprCommon::getComponentsTotalNumber(
MappableExprComponentListsRef ComponentLists) {
unsigned TotalNum = 0u;
@@ -1075,8 +1213,8 @@ OMPUseDevicePtrClause *OMPUseDevicePtrClause::Create(
Sizes.NumComponents = getComponentsTotalNumber(ComponentLists);
// We need to allocate:
- // 3 x NumVars x Expr* - we have an original list expression for each clause
- // list entry and an equal number of private copies and inits.
+ // NumVars x Expr* - we have an original list expression for each clause
+ // list entry.
// NumUniqueDeclarations x ValueDecl* - unique base declarations associated
// with each component list.
// (NumUniqueDeclarations + NumComponentLists) x unsigned - we specify the
@@ -1112,6 +1250,53 @@ OMPUseDevicePtrClause::CreateEmpty(const ASTContext &C,
return new (Mem) OMPUseDevicePtrClause(Sizes);
}
+OMPUseDeviceAddrClause *
+OMPUseDeviceAddrClause::Create(const ASTContext &C, const OMPVarListLocTy &Locs,
+ ArrayRef<Expr *> Vars,
+ ArrayRef<ValueDecl *> Declarations,
+ MappableExprComponentListsRef ComponentLists) {
+ OMPMappableExprListSizeTy Sizes;
+ Sizes.NumVars = Vars.size();
+ Sizes.NumUniqueDeclarations = getUniqueDeclarationsTotalNumber(Declarations);
+ Sizes.NumComponentLists = ComponentLists.size();
+ Sizes.NumComponents = getComponentsTotalNumber(ComponentLists);
+
+ // We need to allocate:
+ // 3 x NumVars x Expr* - we have an original list expression for each clause
+ // list entry and an equal number of private copies and inits.
+ // NumUniqueDeclarations x ValueDecl* - unique base declarations associated
+ // with each component list.
+ // (NumUniqueDeclarations + NumComponentLists) x unsigned - we specify the
+ // number of lists for each unique declaration and the size of each component
+ // list.
+ // NumComponents x MappableComponent - the total of all the components in all
+ // the lists.
+ void *Mem = C.Allocate(
+ totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
+ OMPClauseMappableExprCommon::MappableComponent>(
+ Sizes.NumVars, Sizes.NumUniqueDeclarations,
+ Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
+ Sizes.NumComponents));
+
+ auto *Clause = new (Mem) OMPUseDeviceAddrClause(Locs, Sizes);
+
+ Clause->setVarRefs(Vars);
+ Clause->setClauseInfo(Declarations, ComponentLists);
+ return Clause;
+}
+
+OMPUseDeviceAddrClause *
+OMPUseDeviceAddrClause::CreateEmpty(const ASTContext &C,
+ const OMPMappableExprListSizeTy &Sizes) {
+ void *Mem = C.Allocate(
+ totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
+ OMPClauseMappableExprCommon::MappableComponent>(
+ Sizes.NumVars, Sizes.NumUniqueDeclarations,
+ Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
+ Sizes.NumComponents));
+ return new (Mem) OMPUseDeviceAddrClause(Sizes);
+}
+
OMPIsDevicePtrClause *
OMPIsDevicePtrClause::Create(const ASTContext &C, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> Vars,
@@ -1184,13 +1369,132 @@ void OMPNontemporalClause::setPrivateRefs(ArrayRef<Expr *> VL) {
std::copy(VL.begin(), VL.end(), varlist_end());
}
+OMPInclusiveClause *OMPInclusiveClause::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc,
+ ArrayRef<Expr *> VL) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size()));
+ auto *Clause =
+ new (Mem) OMPInclusiveClause(StartLoc, LParenLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ return Clause;
+}
+
+OMPInclusiveClause *OMPInclusiveClause::CreateEmpty(const ASTContext &C,
+ unsigned N) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N));
+ return new (Mem) OMPInclusiveClause(N);
+}
+
+OMPExclusiveClause *OMPExclusiveClause::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc,
+ ArrayRef<Expr *> VL) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size()));
+ auto *Clause =
+ new (Mem) OMPExclusiveClause(StartLoc, LParenLoc, EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ return Clause;
+}
+
+OMPExclusiveClause *OMPExclusiveClause::CreateEmpty(const ASTContext &C,
+ unsigned N) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N));
+ return new (Mem) OMPExclusiveClause(N);
+}
+
+void OMPUsesAllocatorsClause::setAllocatorsData(
+ ArrayRef<OMPUsesAllocatorsClause::Data> Data) {
+ assert(Data.size() == NumOfAllocators &&
+ "Size of allocators data is not the same as the preallocated buffer.");
+ for (unsigned I = 0, E = Data.size(); I < E; ++I) {
+ const OMPUsesAllocatorsClause::Data &D = Data[I];
+ getTrailingObjects<Expr *>()[I * static_cast<int>(ExprOffsets::Total) +
+ static_cast<int>(ExprOffsets::Allocator)] =
+ D.Allocator;
+ getTrailingObjects<Expr *>()[I * static_cast<int>(ExprOffsets::Total) +
+ static_cast<int>(
+ ExprOffsets::AllocatorTraits)] =
+ D.AllocatorTraits;
+ getTrailingObjects<
+ SourceLocation>()[I * static_cast<int>(ParenLocsOffsets::Total) +
+ static_cast<int>(ParenLocsOffsets::LParen)] =
+ D.LParenLoc;
+ getTrailingObjects<
+ SourceLocation>()[I * static_cast<int>(ParenLocsOffsets::Total) +
+ static_cast<int>(ParenLocsOffsets::RParen)] =
+ D.RParenLoc;
+ }
+}
+
+OMPUsesAllocatorsClause::Data
+OMPUsesAllocatorsClause::getAllocatorData(unsigned I) const {
+ OMPUsesAllocatorsClause::Data Data;
+ Data.Allocator =
+ getTrailingObjects<Expr *>()[I * static_cast<int>(ExprOffsets::Total) +
+ static_cast<int>(ExprOffsets::Allocator)];
+ Data.AllocatorTraits =
+ getTrailingObjects<Expr *>()[I * static_cast<int>(ExprOffsets::Total) +
+ static_cast<int>(
+ ExprOffsets::AllocatorTraits)];
+ Data.LParenLoc = getTrailingObjects<
+ SourceLocation>()[I * static_cast<int>(ParenLocsOffsets::Total) +
+ static_cast<int>(ParenLocsOffsets::LParen)];
+ Data.RParenLoc = getTrailingObjects<
+ SourceLocation>()[I * static_cast<int>(ParenLocsOffsets::Total) +
+ static_cast<int>(ParenLocsOffsets::RParen)];
+ return Data;
+}
+
+OMPUsesAllocatorsClause *
+OMPUsesAllocatorsClause::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc,
+ ArrayRef<OMPUsesAllocatorsClause::Data> Data) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *, SourceLocation>(
+ static_cast<int>(ExprOffsets::Total) * Data.size(),
+ static_cast<int>(ParenLocsOffsets::Total) * Data.size()));
+ auto *Clause = new (Mem)
+ OMPUsesAllocatorsClause(StartLoc, LParenLoc, EndLoc, Data.size());
+ Clause->setAllocatorsData(Data);
+ return Clause;
+}
+
+OMPUsesAllocatorsClause *
+OMPUsesAllocatorsClause::CreateEmpty(const ASTContext &C, unsigned N) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *, SourceLocation>(
+ static_cast<int>(ExprOffsets::Total) * N,
+ static_cast<int>(ParenLocsOffsets::Total) * N));
+ return new (Mem) OMPUsesAllocatorsClause(N);
+}
+
+OMPAffinityClause *
+OMPAffinityClause::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation ColonLoc,
+ SourceLocation EndLoc, Expr *Modifier,
+ ArrayRef<Expr *> Locators) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(Locators.size() + 1));
+ auto *Clause = new (Mem)
+ OMPAffinityClause(StartLoc, LParenLoc, ColonLoc, EndLoc, Locators.size());
+ Clause->setModifier(Modifier);
+ Clause->setVarRefs(Locators);
+ return Clause;
+}
+
+OMPAffinityClause *OMPAffinityClause::CreateEmpty(const ASTContext &C,
+ unsigned N) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N + 1));
+ return new (Mem) OMPAffinityClause(N);
+}
+
//===----------------------------------------------------------------------===//
// OpenMP clauses printing methods
//===----------------------------------------------------------------------===//
void OMPClausePrinter::VisitOMPIfClause(OMPIfClause *Node) {
OS << "if(";
- if (Node->getNameModifier() != llvm::omp::OMPD_unknown)
+ if (Node->getNameModifier() != OMPD_unknown)
OS << getOpenMPDirectiveName(Node->getNameModifier()) << ": ";
Node->getCondition()->printPretty(OS, nullptr, Policy, 0);
OS << ")";
@@ -1232,9 +1536,16 @@ void OMPClausePrinter::VisitOMPCollapseClause(OMPCollapseClause *Node) {
OS << ")";
}
+void OMPClausePrinter::VisitOMPDetachClause(OMPDetachClause *Node) {
+ OS << "detach(";
+ Node->getEventHandler()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
void OMPClausePrinter::VisitOMPDefaultClause(OMPDefaultClause *Node) {
OS << "default("
- << getOpenMPSimpleClauseTypeName(OMPC_default, Node->getDefaultKind())
+ << getOpenMPSimpleClauseTypeName(OMPC_default,
+ unsigned(Node->getDefaultKind()))
<< ")";
}
@@ -1320,8 +1631,14 @@ void OMPClausePrinter::VisitOMPReadClause(OMPReadClause *) { OS << "read"; }
void OMPClausePrinter::VisitOMPWriteClause(OMPWriteClause *) { OS << "write"; }
-void OMPClausePrinter::VisitOMPUpdateClause(OMPUpdateClause *) {
+void OMPClausePrinter::VisitOMPUpdateClause(OMPUpdateClause *Node) {
OS << "update";
+ if (Node->isExtended()) {
+ OS << "(";
+ OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(),
+ Node->getDependencyKind());
+ OS << ")";
+ }
}
void OMPClausePrinter::VisitOMPCaptureClause(OMPCaptureClause *) {
@@ -1332,6 +1649,22 @@ void OMPClausePrinter::VisitOMPSeqCstClause(OMPSeqCstClause *) {
OS << "seq_cst";
}
+void OMPClausePrinter::VisitOMPAcqRelClause(OMPAcqRelClause *) {
+ OS << "acq_rel";
+}
+
+void OMPClausePrinter::VisitOMPAcquireClause(OMPAcquireClause *) {
+ OS << "acquire";
+}
+
+void OMPClausePrinter::VisitOMPReleaseClause(OMPReleaseClause *) {
+ OS << "release";
+}
+
+void OMPClausePrinter::VisitOMPRelaxedClause(OMPRelaxedClause *) {
+ OS << "relaxed";
+}
+
void OMPClausePrinter::VisitOMPThreadsClause(OMPThreadsClause *) {
OS << "threads";
}
@@ -1340,6 +1673,11 @@ void OMPClausePrinter::VisitOMPSIMDClause(OMPSIMDClause *) { OS << "simd"; }
void OMPClausePrinter::VisitOMPDeviceClause(OMPDeviceClause *Node) {
OS << "device(";
+ OpenMPDeviceClauseModifier Modifier = Node->getModifier();
+ if (Modifier != OMPC_DEVICE_unknown) {
+ OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(), Modifier)
+ << ": ";
+ }
Node->getDevice()->printPretty(OS, nullptr, Policy, 0);
OS << ")";
}
@@ -1380,6 +1718,10 @@ void OMPClausePrinter::VisitOMPHintClause(OMPHintClause *Node) {
OS << ")";
}
+void OMPClausePrinter::VisitOMPDestroyClause(OMPDestroyClause *) {
+ OS << "destroy";
+}
+
template<typename T>
void OMPClausePrinter::VisitOMPClauseList(T *Node, char StartSym) {
for (typename T::varlist_iterator I = Node->varlist_begin(),
@@ -1453,6 +1795,9 @@ void OMPClausePrinter::VisitOMPSharedClause(OMPSharedClause *Node) {
void OMPClausePrinter::VisitOMPReductionClause(OMPReductionClause *Node) {
if (!Node->varlist_empty()) {
OS << "reduction(";
+ if (Node->getModifierLoc().isValid())
+ OS << getOpenMPSimpleClauseTypeName(OMPC_reduction, Node->getModifier())
+ << ", ";
NestedNameSpecifier *QualifierLoc =
Node->getQualifierLoc().getNestedNameSpecifier();
OverloadedOperatorKind OOK =
@@ -1570,8 +1915,18 @@ void OMPClausePrinter::VisitOMPFlushClause(OMPFlushClause *Node) {
}
}
+void OMPClausePrinter::VisitOMPDepobjClause(OMPDepobjClause *Node) {
+ OS << "(";
+ Node->getDepobj()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
void OMPClausePrinter::VisitOMPDependClause(OMPDependClause *Node) {
OS << "depend(";
+ if (Expr *DepModifier = Node->getModifier()) {
+ DepModifier->printPretty(OS, nullptr, Policy);
+ OS << ", ";
+ }
OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(),
Node->getDependencyKind());
if (!Node->varlist_empty()) {
@@ -1585,7 +1940,7 @@ void OMPClausePrinter::VisitOMPMapClause(OMPMapClause *Node) {
if (!Node->varlist_empty()) {
OS << "map(";
if (Node->getMapType() != OMPC_MAP_unknown) {
- for (unsigned I = 0; I < OMPMapClause::NumberOfModifiers; ++I) {
+ for (unsigned I = 0; I < NumberOfOMPMapClauseModifiers; ++I) {
if (Node->getMapTypeModifier(I) != OMPC_MAP_MODIFIER_unknown) {
OS << getOpenMPSimpleClauseTypeName(OMPC_map,
Node->getMapTypeModifier(I));
@@ -1662,9 +2017,11 @@ void OMPClausePrinter::VisitOMPDefaultmapClause(OMPDefaultmapClause *Node) {
OS << "defaultmap(";
OS << getOpenMPSimpleClauseTypeName(OMPC_defaultmap,
Node->getDefaultmapModifier());
- OS << ": ";
- OS << getOpenMPSimpleClauseTypeName(OMPC_defaultmap,
- Node->getDefaultmapKind());
+ if (Node->getDefaultmapKind() != OMPC_DEFAULTMAP_unknown) {
+ OS << ": ";
+ OS << getOpenMPSimpleClauseTypeName(OMPC_defaultmap,
+ Node->getDefaultmapKind());
+ }
OS << ")";
}
@@ -1676,6 +2033,15 @@ void OMPClausePrinter::VisitOMPUseDevicePtrClause(OMPUseDevicePtrClause *Node) {
}
}
+void OMPClausePrinter::VisitOMPUseDeviceAddrClause(
+ OMPUseDeviceAddrClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "use_device_addr";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
void OMPClausePrinter::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *Node) {
if (!Node->varlist_empty()) {
OS << "is_device_ptr";
@@ -1691,3 +2057,226 @@ void OMPClausePrinter::VisitOMPNontemporalClause(OMPNontemporalClause *Node) {
OS << ")";
}
}
+
+void OMPClausePrinter::VisitOMPOrderClause(OMPOrderClause *Node) {
+ OS << "order(" << getOpenMPSimpleClauseTypeName(OMPC_order, Node->getKind())
+ << ")";
+}
+
+void OMPClausePrinter::VisitOMPInclusiveClause(OMPInclusiveClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "inclusive";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPExclusiveClause(OMPExclusiveClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "exclusive";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPUsesAllocatorsClause(
+ OMPUsesAllocatorsClause *Node) {
+ if (Node->getNumberOfAllocators() == 0)
+ return;
+ OS << "uses_allocators(";
+ for (unsigned I = 0, E = Node->getNumberOfAllocators(); I < E; ++I) {
+ OMPUsesAllocatorsClause::Data Data = Node->getAllocatorData(I);
+ Data.Allocator->printPretty(OS, nullptr, Policy);
+ if (Data.AllocatorTraits) {
+ OS << "(";
+ Data.AllocatorTraits->printPretty(OS, nullptr, Policy);
+ OS << ")";
+ }
+ if (I < E - 1)
+ OS << ",";
+ }
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPAffinityClause(OMPAffinityClause *Node) {
+ if (Node->varlist_empty())
+ return;
+ OS << "affinity";
+ char StartSym = '(';
+ if (Expr *Modifier = Node->getModifier()) {
+ OS << "(";
+ Modifier->printPretty(OS, nullptr, Policy);
+ OS << " :";
+ StartSym = ' ';
+ }
+ VisitOMPClauseList(Node, StartSym);
+ OS << ")";
+}
+
+void OMPTraitInfo::getAsVariantMatchInfo(ASTContext &ASTCtx,
+ VariantMatchInfo &VMI) const {
+ for (const OMPTraitSet &Set : Sets) {
+ for (const OMPTraitSelector &Selector : Set.Selectors) {
+
+ // User conditions are special as we evaluate the condition here.
+ if (Selector.Kind == TraitSelector::user_condition) {
+ assert(Selector.ScoreOrCondition &&
+ "Ill-formed user condition, expected condition expression!");
+ assert(Selector.Properties.size() == 1 &&
+ Selector.Properties.front().Kind ==
+ TraitProperty::user_condition_unknown &&
+ "Ill-formed user condition, expected unknown trait property!");
+
+ llvm::APSInt CondVal;
+ if (Selector.ScoreOrCondition->isIntegerConstantExpr(CondVal, ASTCtx))
+ VMI.addTrait(CondVal.isNullValue()
+ ? TraitProperty::user_condition_false
+ : TraitProperty::user_condition_true);
+ else
+ VMI.addTrait(TraitProperty::user_condition_false);
+ continue;
+ }
+
+ llvm::APSInt Score;
+ llvm::APInt *ScorePtr = nullptr;
+ if (Selector.ScoreOrCondition) {
+ if (Selector.ScoreOrCondition->isIntegerConstantExpr(Score, ASTCtx))
+ ScorePtr = &Score;
+ else
+ VMI.addTrait(TraitProperty::user_condition_false);
+ }
+
+ for (const OMPTraitProperty &Property : Selector.Properties)
+ VMI.addTrait(Set.Kind, Property.Kind, ScorePtr);
+
+ if (Set.Kind != TraitSet::construct)
+ continue;
+
+ // TODO: This might not hold once we implement SIMD properly.
+ assert(Selector.Properties.size() == 1 &&
+ Selector.Properties.front().Kind ==
+ getOpenMPContextTraitPropertyForSelector(
+ Selector.Kind) &&
+ "Ill-formed construct selector!");
+
+ VMI.ConstructTraits.push_back(Selector.Properties.front().Kind);
+ }
+ }
+}
+
+void OMPTraitInfo::print(llvm::raw_ostream &OS,
+ const PrintingPolicy &Policy) const {
+ bool FirstSet = true;
+ for (const OMPTraitSet &Set : Sets) {
+ if (!FirstSet)
+ OS << ", ";
+ FirstSet = false;
+ OS << getOpenMPContextTraitSetName(Set.Kind) << "={";
+
+ bool FirstSelector = true;
+ for (const OMPTraitSelector &Selector : Set.Selectors) {
+ if (!FirstSelector)
+ OS << ", ";
+ FirstSelector = false;
+ OS << getOpenMPContextTraitSelectorName(Selector.Kind);
+
+ bool AllowsTraitScore = false;
+ bool RequiresProperty = false;
+ isValidTraitSelectorForTraitSet(
+ Selector.Kind, Set.Kind, AllowsTraitScore, RequiresProperty);
+
+ if (!RequiresProperty)
+ continue;
+
+ OS << "(";
+ if (Selector.Kind == TraitSelector::user_condition) {
+ Selector.ScoreOrCondition->printPretty(OS, nullptr, Policy);
+ } else {
+
+ if (Selector.ScoreOrCondition) {
+ OS << "score(";
+ Selector.ScoreOrCondition->printPretty(OS, nullptr, Policy);
+ OS << "): ";
+ }
+
+ bool FirstProperty = true;
+ for (const OMPTraitProperty &Property : Selector.Properties) {
+ if (!FirstProperty)
+ OS << ", ";
+ FirstProperty = false;
+ OS << getOpenMPContextTraitPropertyName(Property.Kind);
+ }
+ }
+ OS << ")";
+ }
+ OS << "}";
+ }
+}
+
+std::string OMPTraitInfo::getMangledName() const {
+ std::string MangledName;
+ llvm::raw_string_ostream OS(MangledName);
+ for (const OMPTraitSet &Set : Sets) {
+ OS << '$' << 'S' << unsigned(Set.Kind);
+ for (const OMPTraitSelector &Selector : Set.Selectors) {
+
+ bool AllowsTraitScore = false;
+ bool RequiresProperty = false;
+ isValidTraitSelectorForTraitSet(
+ Selector.Kind, Set.Kind, AllowsTraitScore, RequiresProperty);
+ OS << '$' << 's' << unsigned(Selector.Kind);
+
+ if (!RequiresProperty ||
+ Selector.Kind == TraitSelector::user_condition)
+ continue;
+
+ for (const OMPTraitProperty &Property : Selector.Properties)
+ OS << '$' << 'P' << getOpenMPContextTraitPropertyName(Property.Kind);
+ }
+ }
+ return OS.str();
+}
+
+OMPTraitInfo::OMPTraitInfo(StringRef MangledName) {
+ unsigned long U;
+ do {
+ if (!MangledName.consume_front("$S"))
+ break;
+ if (MangledName.consumeInteger(10, U))
+ break;
+ Sets.push_back(OMPTraitSet());
+ OMPTraitSet &Set = Sets.back();
+ Set.Kind = TraitSet(U);
+ do {
+ if (!MangledName.consume_front("$s"))
+ break;
+ if (MangledName.consumeInteger(10, U))
+ break;
+ Set.Selectors.push_back(OMPTraitSelector());
+ OMPTraitSelector &Selector = Set.Selectors.back();
+ Selector.Kind = TraitSelector(U);
+ do {
+ if (!MangledName.consume_front("$P"))
+ break;
+ Selector.Properties.push_back(OMPTraitProperty());
+ OMPTraitProperty &Property = Selector.Properties.back();
+ std::pair<StringRef, StringRef> PropRestPair = MangledName.split('$');
+ Property.Kind =
+ getOpenMPContextTraitPropertyKind(Set.Kind, PropRestPair.first);
+ MangledName = PropRestPair.second;
+ } while (true);
+ } while (true);
+ } while (true);
+}
+
+llvm::raw_ostream &clang::operator<<(llvm::raw_ostream &OS,
+ const OMPTraitInfo &TI) {
+ LangOptions LO;
+ PrintingPolicy Policy(LO);
+ TI.print(OS, Policy);
+ return OS;
+}
+llvm::raw_ostream &clang::operator<<(llvm::raw_ostream &OS,
+ const OMPTraitInfo *TI) {
+ return TI ? OS << *TI : OS;
+}
diff --git a/clang/lib/AST/ParentMapContext.cpp b/clang/lib/AST/ParentMapContext.cpp
new file mode 100644
index 000000000000..b73b32774b53
--- /dev/null
+++ b/clang/lib/AST/ParentMapContext.cpp
@@ -0,0 +1,321 @@
+//===- ParentMapContext.cpp - Map of parents using DynTypedNode -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Similar to ParentMap.cpp, but generalizes to non-Stmt nodes, which can have
+// multiple parents.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ParentMapContext.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/TemplateBase.h"
+
+using namespace clang;
+
+ParentMapContext::ParentMapContext(ASTContext &Ctx) : ASTCtx(Ctx) {}
+
+ParentMapContext::~ParentMapContext() = default;
+
+void ParentMapContext::clear() { Parents.reset(); }
+
+const Expr *ParentMapContext::traverseIgnored(const Expr *E) const {
+ return traverseIgnored(const_cast<Expr *>(E));
+}
+
+Expr *ParentMapContext::traverseIgnored(Expr *E) const {
+ if (!E)
+ return nullptr;
+
+ switch (Traversal) {
+ case TK_AsIs:
+ return E;
+ case TK_IgnoreImplicitCastsAndParentheses:
+ return E->IgnoreParenImpCasts();
+ case TK_IgnoreUnlessSpelledInSource:
+ return E->IgnoreUnlessSpelledInSource();
+ }
+ llvm_unreachable("Invalid Traversal type!");
+}
+
+DynTypedNode ParentMapContext::traverseIgnored(const DynTypedNode &N) const {
+ if (const auto *E = N.get<Expr>()) {
+ return DynTypedNode::create(*traverseIgnored(E));
+ }
+ return N;
+}
+
+class ParentMapContext::ParentMap {
+ /// Contains parents of a node.
+ using ParentVector = llvm::SmallVector<DynTypedNode, 2>;
+
+ /// Maps from a node to its parents. This is used for nodes that have
+ /// pointer identity only, which are more common and we can save space by
+ /// only storing a unique pointer to them.
+ using ParentMapPointers =
+ llvm::DenseMap<const void *,
+ llvm::PointerUnion<const Decl *, const Stmt *,
+ DynTypedNode *, ParentVector *>>;
+
+ /// Parent map for nodes without pointer identity. We store a full
+ /// DynTypedNode for all keys.
+ using ParentMapOtherNodes =
+ llvm::DenseMap<DynTypedNode,
+ llvm::PointerUnion<const Decl *, const Stmt *,
+ DynTypedNode *, ParentVector *>>;
+
+ ParentMapPointers PointerParents;
+ ParentMapOtherNodes OtherParents;
+ class ASTVisitor;
+
+ static DynTypedNode
+ getSingleDynTypedNodeFromParentMap(ParentMapPointers::mapped_type U) {
+ if (const auto *D = U.dyn_cast<const Decl *>())
+ return DynTypedNode::create(*D);
+ if (const auto *S = U.dyn_cast<const Stmt *>())
+ return DynTypedNode::create(*S);
+ return *U.get<DynTypedNode *>();
+ }
+
+ template <typename NodeTy, typename MapTy>
+ static DynTypedNodeList getDynNodeFromMap(const NodeTy &Node,
+ const MapTy &Map) {
+ auto I = Map.find(Node);
+ if (I == Map.end()) {
+ return llvm::ArrayRef<DynTypedNode>();
+ }
+ if (const auto *V = I->second.template dyn_cast<ParentVector *>()) {
+ return llvm::makeArrayRef(*V);
+ }
+ return getSingleDynTypedNodeFromParentMap(I->second);
+ }
+
+public:
+ ParentMap(ASTContext &Ctx);
+ ~ParentMap() {
+ for (const auto &Entry : PointerParents) {
+ if (Entry.second.is<DynTypedNode *>()) {
+ delete Entry.second.get<DynTypedNode *>();
+ } else if (Entry.second.is<ParentVector *>()) {
+ delete Entry.second.get<ParentVector *>();
+ }
+ }
+ for (const auto &Entry : OtherParents) {
+ if (Entry.second.is<DynTypedNode *>()) {
+ delete Entry.second.get<DynTypedNode *>();
+ } else if (Entry.second.is<ParentVector *>()) {
+ delete Entry.second.get<ParentVector *>();
+ }
+ }
+ }
+
+ DynTypedNodeList getParents(TraversalKind TK, const DynTypedNode &Node) {
+ if (Node.getNodeKind().hasPointerIdentity()) {
+ auto ParentList =
+ getDynNodeFromMap(Node.getMemoizationData(), PointerParents);
+ if (ParentList.size() == 1 && TK == TK_IgnoreUnlessSpelledInSource) {
+ const auto *E = ParentList[0].get<Expr>();
+ const auto *Child = Node.get<Expr>();
+ if (E && Child)
+ return AscendIgnoreUnlessSpelledInSource(E, Child);
+ }
+ return ParentList;
+ }
+ return getDynNodeFromMap(Node, OtherParents);
+ }
+
+ DynTypedNodeList AscendIgnoreUnlessSpelledInSource(const Expr *E,
+ const Expr *Child) {
+
+ auto ShouldSkip = [](const Expr *E, const Expr *Child) {
+ if (isa<ImplicitCastExpr>(E))
+ return true;
+
+ if (isa<FullExpr>(E))
+ return true;
+
+ if (isa<MaterializeTemporaryExpr>(E))
+ return true;
+
+ if (isa<CXXBindTemporaryExpr>(E))
+ return true;
+
+ if (isa<ParenExpr>(E))
+ return true;
+
+ if (isa<ExprWithCleanups>(E))
+ return true;
+
+ auto SR = Child->getSourceRange();
+
+ if (const auto *C = dyn_cast<CXXConstructExpr>(E)) {
+ if (C->getSourceRange() == SR || !isa<CXXTemporaryObjectExpr>(C))
+ return true;
+ }
+
+ if (const auto *C = dyn_cast<CXXMemberCallExpr>(E)) {
+ if (C->getSourceRange() == SR)
+ return true;
+ }
+
+ if (const auto *C = dyn_cast<MemberExpr>(E)) {
+ if (C->getSourceRange() == SR)
+ return true;
+ }
+ return false;
+ };
+
+ while (ShouldSkip(E, Child)) {
+ auto It = PointerParents.find(E);
+ if (It == PointerParents.end())
+ break;
+ const auto *S = It->second.dyn_cast<const Stmt *>();
+ if (!S) {
+ if (auto *Vec = It->second.dyn_cast<ParentVector *>())
+ return llvm::makeArrayRef(*Vec);
+ return getSingleDynTypedNodeFromParentMap(It->second);
+ }
+ const auto *P = dyn_cast<Expr>(S);
+ if (!P)
+ return DynTypedNode::create(*S);
+ Child = E;
+ E = P;
+ }
+ return DynTypedNode::create(*E);
+ }
+};
+
+/// Template specializations to abstract away from pointers and TypeLocs.
+/// @{
+template <typename T> static DynTypedNode createDynTypedNode(const T &Node) {
+ return DynTypedNode::create(*Node);
+}
+template <> DynTypedNode createDynTypedNode(const TypeLoc &Node) {
+ return DynTypedNode::create(Node);
+}
+template <>
+DynTypedNode createDynTypedNode(const NestedNameSpecifierLoc &Node) {
+ return DynTypedNode::create(Node);
+}
+/// @}
+
+/// A \c RecursiveASTVisitor that builds a map from nodes to their
+/// parents as defined by the \c RecursiveASTVisitor.
+///
+/// Note that the relationship described here is purely in terms of AST
+/// traversal - there are other relationships (for example declaration context)
+/// in the AST that are better modeled by special matchers.
+///
+/// FIXME: Currently only builds up the map using \c Stmt and \c Decl nodes.
+class ParentMapContext::ParentMap::ASTVisitor
+ : public RecursiveASTVisitor<ASTVisitor> {
+public:
+ ASTVisitor(ParentMap &Map) : Map(Map) {}
+
+private:
+ friend class RecursiveASTVisitor<ASTVisitor>;
+
+ using VisitorBase = RecursiveASTVisitor<ASTVisitor>;
+
+ bool shouldVisitTemplateInstantiations() const { return true; }
+
+ bool shouldVisitImplicitCode() const { return true; }
+
+ template <typename T, typename MapNodeTy, typename BaseTraverseFn,
+ typename MapTy>
+ bool TraverseNode(T Node, MapNodeTy MapNode, BaseTraverseFn BaseTraverse,
+ MapTy *Parents) {
+ if (!Node)
+ return true;
+ if (ParentStack.size() > 0) {
+ // FIXME: Currently we add the same parent multiple times, but only
+ // when no memoization data is available for the type.
+ // For example when we visit all subexpressions of template
+ // instantiations; this is suboptimal, but benign: the only way to
+ // visit those is with hasAncestor / hasParent, and those do not create
+ // new matches.
+ // The plan is to enable DynTypedNode to be storable in a map or hash
+ // map. The main problem there is to implement hash functions /
+ // comparison operators for all types that DynTypedNode supports that
+ // do not have pointer identity.
+ auto &NodeOrVector = (*Parents)[MapNode];
+ if (NodeOrVector.isNull()) {
+ if (const auto *D = ParentStack.back().get<Decl>())
+ NodeOrVector = D;
+ else if (const auto *S = ParentStack.back().get<Stmt>())
+ NodeOrVector = S;
+ else
+ NodeOrVector = new DynTypedNode(ParentStack.back());
+ } else {
+ if (!NodeOrVector.template is<ParentVector *>()) {
+ auto *Vector = new ParentVector(
+ 1, getSingleDynTypedNodeFromParentMap(NodeOrVector));
+ delete NodeOrVector.template dyn_cast<DynTypedNode *>();
+ NodeOrVector = Vector;
+ }
+
+ auto *Vector = NodeOrVector.template get<ParentVector *>();
+ // Skip duplicates for types that have memoization data.
+ // We must check that the type has memoization data before calling
+ // std::find() because DynTypedNode::operator== can't compare all
+ // types.
+ bool Found = ParentStack.back().getMemoizationData() &&
+ std::find(Vector->begin(), Vector->end(),
+ ParentStack.back()) != Vector->end();
+ if (!Found)
+ Vector->push_back(ParentStack.back());
+ }
+ }
+ ParentStack.push_back(createDynTypedNode(Node));
+ bool Result = BaseTraverse();
+ ParentStack.pop_back();
+ return Result;
+ }
+
+ bool TraverseDecl(Decl *DeclNode) {
+ return TraverseNode(
+ DeclNode, DeclNode, [&] { return VisitorBase::TraverseDecl(DeclNode); },
+ &Map.PointerParents);
+ }
+
+ bool TraverseStmt(Stmt *StmtNode) {
+ return TraverseNode(StmtNode, StmtNode,
+ [&] { return VisitorBase::TraverseStmt(StmtNode); },
+ &Map.PointerParents);
+ }
+
+ bool TraverseTypeLoc(TypeLoc TypeLocNode) {
+ return TraverseNode(
+ TypeLocNode, DynTypedNode::create(TypeLocNode),
+ [&] { return VisitorBase::TraverseTypeLoc(TypeLocNode); },
+ &Map.OtherParents);
+ }
+
+ bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNSLocNode) {
+ return TraverseNode(
+ NNSLocNode, DynTypedNode::create(NNSLocNode),
+ [&] { return VisitorBase::TraverseNestedNameSpecifierLoc(NNSLocNode); },
+ &Map.OtherParents);
+ }
+
+ ParentMap &Map;
+ llvm::SmallVector<DynTypedNode, 16> ParentStack;
+};
+
+ParentMapContext::ParentMap::ParentMap(ASTContext &Ctx) {
+ ASTVisitor(*this).TraverseAST(Ctx);
+}
+
+DynTypedNodeList ParentMapContext::getParents(const DynTypedNode &Node) {
+ if (!Parents)
+ // We build the parent map for the traversal scope (usually whole TU), as
+ // hasAncestor can escape any subtree.
+ Parents = std::make_unique<ParentMap>(ASTCtx);
+ return Parents->getParents(getTraversalKind(), Node);
+}
diff --git a/clang/lib/AST/PrintfFormatString.cpp b/clang/lib/AST/PrintfFormatString.cpp
index bae60d464407..f3ac181214ac 100644
--- a/clang/lib/AST/PrintfFormatString.cpp
+++ b/clang/lib/AST/PrintfFormatString.cpp
@@ -11,10 +11,11 @@
//
//===----------------------------------------------------------------------===//
+#include "FormatStringParsing.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/OSLog.h"
-#include "FormatStringParsing.h"
#include "clang/Basic/TargetInfo.h"
+#include "llvm/Support/Regex.h"
using clang::analyze_format_string::ArgType;
using clang::analyze_format_string::FormatStringHandler;
@@ -751,6 +752,7 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
case BuiltinType::UInt128:
case BuiltinType::Int128:
case BuiltinType::Half:
+ case BuiltinType::BFloat16:
case BuiltinType::Float16:
case BuiltinType::Float128:
case BuiltinType::ShortAccum:
diff --git a/clang/lib/AST/RawCommentList.cpp b/clang/lib/AST/RawCommentList.cpp
index 83e8a0b942a4..a8d15036cab9 100644
--- a/clang/lib/AST/RawCommentList.cpp
+++ b/clang/lib/AST/RawCommentList.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/CommentSema.h"
#include "clang/Basic/CharInfo.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Allocator.h"
using namespace clang;
@@ -430,7 +431,7 @@ std::string RawComment::getFormattedText(const SourceManager &SourceMgr,
};
auto DropTrailingNewLines = [](std::string &Str) {
- while (Str.back() == '\n')
+ while (!Str.empty() && Str.back() == '\n')
Str.pop_back();
};
diff --git a/clang/lib/AST/RecordLayoutBuilder.cpp b/clang/lib/AST/RecordLayoutBuilder.cpp
index 9a21732b63e3..d56c7e2ab8c0 100644
--- a/clang/lib/AST/RecordLayoutBuilder.cpp
+++ b/clang/lib/AST/RecordLayoutBuilder.cpp
@@ -15,6 +15,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Support/Format.h"
@@ -1186,11 +1187,10 @@ ItaniumRecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
// Query the external layout to see if it provides an offset.
bool HasExternalLayout = false;
if (UseExternalLayout) {
- // FIXME: This appears to be reversed.
if (Base->IsVirtual)
- HasExternalLayout = External.getExternalNVBaseOffset(Base->Class, Offset);
- else
HasExternalLayout = External.getExternalVBaseOffset(Base->Class, Offset);
+ else
+ HasExternalLayout = External.getExternalNVBaseOffset(Base->Class, Offset);
}
// Clang <= 6 incorrectly applied the 'packed' attribute to base classes.
@@ -2107,7 +2107,7 @@ static const CXXMethodDecl *computeKeyFunction(ASTContext &Context,
if (MD->isImplicit())
continue;
- if (MD->isInlineSpecified())
+ if (MD->isInlineSpecified() || MD->isConstexpr())
continue;
if (MD->hasInlineBody())
@@ -2568,9 +2568,11 @@ MicrosoftRecordLayoutBuilder::layoutNonVirtualBases(const CXXRecordDecl *RD) {
// information about the bases, such as required alignment and the presence of
// zero sized members.
const ASTRecordLayout *PreviousBaseLayout = nullptr;
+ bool HasPolymorphicBaseClass = false;
// Iterate through the bases and lay out the non-virtual ones.
for (const CXXBaseSpecifier &Base : RD->bases()) {
const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+ HasPolymorphicBaseClass |= BaseDecl->isPolymorphic();
const ASTRecordLayout &BaseLayout = Context.getASTRecordLayout(BaseDecl);
// Mark and skip virtual bases.
if (Base.isVirtual()) {
@@ -2594,11 +2596,23 @@ MicrosoftRecordLayoutBuilder::layoutNonVirtualBases(const CXXRecordDecl *RD) {
layoutNonVirtualBase(RD, BaseDecl, BaseLayout, PreviousBaseLayout);
}
// Figure out if we need a fresh VFPtr for this class.
- if (!PrimaryBase && RD->isDynamicClass())
- for (CXXRecordDecl::method_iterator i = RD->method_begin(),
- e = RD->method_end();
- !HasOwnVFPtr && i != e; ++i)
- HasOwnVFPtr = i->isVirtual() && i->size_overridden_methods() == 0;
+ if (RD->isPolymorphic()) {
+ if (!HasPolymorphicBaseClass)
+ // This class introduces polymorphism, so we need a vftable to store the
+ // RTTI information.
+ HasOwnVFPtr = true;
+ else if (!PrimaryBase) {
+ // We have a polymorphic base class but can't extend its vftable. Add a
+ // new vfptr if we would use any vftable slots.
+ for (CXXMethodDecl *M : RD->methods()) {
+ if (MicrosoftVTableContext::hasVtableSlot(M) &&
+ M->size_overridden_methods() == 0) {
+ HasOwnVFPtr = true;
+ break;
+ }
+ }
+ }
+ }
// If we don't have a primary base then we have a leading object that could
// itself lead with a zero-sized object, something we track.
bool CheckLeadingLayout = !PrimaryBase;
@@ -2993,7 +3007,8 @@ void MicrosoftRecordLayoutBuilder::computeVtorDispSet(
llvm::SmallPtrSet<const CXXRecordDecl *, 2> BasesWithOverriddenMethods;
// Seed the working set with our non-destructor, non-pure virtual methods.
for (const CXXMethodDecl *MD : RD->methods())
- if (MD->isVirtual() && !isa<CXXDestructorDecl>(MD) && !MD->isPure())
+ if (MicrosoftVTableContext::hasVtableSlot(MD) &&
+ !isa<CXXDestructorDecl>(MD) && !MD->isPure())
Work.insert(MD);
while (!Work.empty()) {
const CXXMethodDecl *MD = *Work.begin();
@@ -3222,7 +3237,8 @@ ASTContext::getObjCLayout(const ObjCInterfaceDecl *D,
if (D->hasExternalLexicalStorage() && !D->getDefinition())
getExternalSource()->CompleteType(const_cast<ObjCInterfaceDecl*>(D));
D = D->getDefinition();
- assert(D && D->isThisDeclarationADefinition() && "Invalid interface decl!");
+ assert(D && !D->isInvalidDecl() && D->isThisDeclarationADefinition() &&
+ "Invalid interface decl!");
// Look up this layout, if already laid out, return what we have.
const ObjCContainerDecl *Key =
diff --git a/clang/lib/AST/Stmt.cpp b/clang/lib/AST/Stmt.cpp
index b6e4d8aff21e..25e685be3e9b 100644
--- a/clang/lib/AST/Stmt.cpp
+++ b/clang/lib/AST/Stmt.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclGroup.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
@@ -272,7 +273,6 @@ SourceRange Stmt::getSourceRange() const {
}
SourceLocation Stmt::getBeginLoc() const {
- // llvm::errs() << "getBeginLoc() for " << getStmtClassName() << "\n";
switch (getStmtClass()) {
case Stmt::NoStmtClass: llvm_unreachable("statement without class");
#define ABSTRACT_STMT(type)
@@ -456,7 +456,7 @@ void GCCAsmStmt::setInputExpr(unsigned i, Expr *E) {
}
AddrLabelExpr *GCCAsmStmt::getLabelExpr(unsigned i) const {
- return cast<AddrLabelExpr>(Exprs[i + NumInputs]);
+ return cast<AddrLabelExpr>(Exprs[i + NumOutputs + NumInputs]);
}
StringRef GCCAsmStmt::getLabelName(unsigned i) const {
@@ -522,7 +522,7 @@ int GCCAsmStmt::getNamedOperand(StringRef SymbolicName) const {
for (unsigned i = 0, e = getNumLabels(); i != e; ++i)
if (getLabelName(i) == SymbolicName)
- return i + getNumInputs();
+ return i + getNumOutputs() + getNumInputs();
// Not found.
return -1;
@@ -731,7 +731,7 @@ std::string GCCAsmStmt::generateAsmString(const ASTContext &C) const {
/// Assemble final IR asm string (MS-style).
std::string MSAsmStmt::generateAsmString(const ASTContext &C) const {
// FIXME: This needs to be translated into the IR string representation.
- return AsmStr;
+ return std::string(AsmStr);
}
Expr *MSAsmStmt::getOutputExpr(unsigned i) {
@@ -1012,7 +1012,8 @@ void SwitchStmt::setConditionVariable(const ASTContext &Ctx, VarDecl *V) {
}
WhileStmt::WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
- Stmt *Body, SourceLocation WL)
+ Stmt *Body, SourceLocation WL, SourceLocation LParenLoc,
+ SourceLocation RParenLoc)
: Stmt(WhileStmtClass) {
bool HasVar = Var != nullptr;
WhileStmtBits.HasVar = HasVar;
@@ -1023,6 +1024,8 @@ WhileStmt::WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
setConditionVariable(Ctx, Var);
setWhileLoc(WL);
+ setLParenLoc(LParenLoc);
+ setRParenLoc(RParenLoc);
}
WhileStmt::WhileStmt(EmptyShell Empty, bool HasVar)
@@ -1031,12 +1034,14 @@ WhileStmt::WhileStmt(EmptyShell Empty, bool HasVar)
}
WhileStmt *WhileStmt::Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
- Stmt *Body, SourceLocation WL) {
+ Stmt *Body, SourceLocation WL,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc) {
bool HasVar = Var != nullptr;
void *Mem =
Ctx.Allocate(totalSizeToAlloc<Stmt *>(NumMandatoryStmtPtr + HasVar),
alignof(WhileStmt));
- return new (Mem) WhileStmt(Ctx, Var, Cond, Body, WL);
+ return new (Mem) WhileStmt(Ctx, Var, Cond, Body, WL, LParenLoc, RParenLoc);
}
WhileStmt *WhileStmt::CreateEmpty(const ASTContext &Ctx, bool HasVar) {
diff --git a/clang/lib/AST/StmtOpenMP.cpp b/clang/lib/AST/StmtOpenMP.cpp
index da6d308ad15b..788fac789270 100644
--- a/clang/lib/AST/StmtOpenMP.cpp
+++ b/clang/lib/AST/StmtOpenMP.cpp
@@ -10,9 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/StmtOpenMP.h"
-
#include "clang/AST/ASTContext.h"
+#include "clang/AST/StmtOpenMP.h"
using namespace clang;
using namespace llvm::omp;
@@ -162,7 +161,8 @@ void OMPLoopDirective::setFinalsConditions(ArrayRef<Expr *> A) {
OMPParallelDirective *OMPParallelDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel) {
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
+ bool HasCancel) {
unsigned Size =
llvm::alignTo(sizeof(OMPParallelDirective), alignof(OMPClause *));
void *Mem =
@@ -171,6 +171,7 @@ OMPParallelDirective *OMPParallelDirective::Create(
new (Mem) OMPParallelDirective(StartLoc, EndLoc, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setTaskReductionRefExpr(TaskRedRef);
Dir->setHasCancel(HasCancel);
return Dir;
}
@@ -228,11 +229,10 @@ OMPSimdDirective *OMPSimdDirective::CreateEmpty(const ASTContext &C,
return new (Mem) OMPSimdDirective(CollapsedNum, NumClauses);
}
-OMPForDirective *
-OMPForDirective::Create(const ASTContext &C, SourceLocation StartLoc,
- SourceLocation EndLoc, unsigned CollapsedNum,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs, bool HasCancel) {
+OMPForDirective *OMPForDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel) {
unsigned Size = llvm::alignTo(sizeof(OMPForDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
@@ -265,6 +265,7 @@ OMPForDirective::Create(const ASTContext &C, SourceLocation StartLoc,
Dir->setDependentInits(Exprs.DependentInits);
Dir->setFinalsConditions(Exprs.FinalsConditions);
Dir->setPreInits(Exprs.PreInits);
+ Dir->setTaskReductionRefExpr(TaskRedRef);
Dir->setHasCancel(HasCancel);
return Dir;
}
@@ -335,7 +336,8 @@ OMPForSimdDirective *OMPForSimdDirective::CreateEmpty(const ASTContext &C,
OMPSectionsDirective *OMPSectionsDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel) {
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
+ bool HasCancel) {
unsigned Size =
llvm::alignTo(sizeof(OMPSectionsDirective), alignof(OMPClause *));
void *Mem =
@@ -344,6 +346,7 @@ OMPSectionsDirective *OMPSectionsDirective::Create(
new (Mem) OMPSectionsDirective(StartLoc, EndLoc, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setTaskReductionRefExpr(TaskRedRef);
Dir->setHasCancel(HasCancel);
return Dir;
}
@@ -450,7 +453,7 @@ OMPCriticalDirective *OMPCriticalDirective::CreateEmpty(const ASTContext &C,
OMPParallelForDirective *OMPParallelForDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs, bool HasCancel) {
+ const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel) {
unsigned Size =
llvm::alignTo(sizeof(OMPParallelForDirective), alignof(OMPClause *));
void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
@@ -484,6 +487,7 @@ OMPParallelForDirective *OMPParallelForDirective::Create(
Dir->setDependentInits(Exprs.DependentInits);
Dir->setFinalsConditions(Exprs.FinalsConditions);
Dir->setPreInits(Exprs.PreInits);
+ Dir->setTaskReductionRefExpr(TaskRedRef);
Dir->setHasCancel(HasCancel);
return Dir;
}
@@ -553,7 +557,7 @@ OMPParallelForSimdDirective::CreateEmpty(const ASTContext &C,
OMPParallelMasterDirective *OMPParallelMasterDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef) {
unsigned Size =
llvm::alignTo(sizeof(OMPParallelMasterDirective), alignof(OMPClause *));
void *Mem =
@@ -562,6 +566,7 @@ OMPParallelMasterDirective *OMPParallelMasterDirective::Create(
new (Mem) OMPParallelMasterDirective(StartLoc, EndLoc, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setTaskReductionRefExpr(TaskRedRef);
return Dir;
}
@@ -577,7 +582,8 @@ OMPParallelMasterDirective *OMPParallelMasterDirective::CreateEmpty(const ASTCon
OMPParallelSectionsDirective *OMPParallelSectionsDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel) {
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
+ bool HasCancel) {
unsigned Size =
llvm::alignTo(sizeof(OMPParallelSectionsDirective), alignof(OMPClause *));
void *Mem =
@@ -586,6 +592,7 @@ OMPParallelSectionsDirective *OMPParallelSectionsDirective::Create(
new (Mem) OMPParallelSectionsDirective(StartLoc, EndLoc, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setTaskReductionRefExpr(TaskRedRef);
Dir->setHasCancel(HasCancel);
return Dir;
}
@@ -759,6 +766,50 @@ OMPFlushDirective *OMPFlushDirective::CreateEmpty(const ASTContext &C,
return new (Mem) OMPFlushDirective(NumClauses);
}
+OMPDepobjDirective *OMPDepobjDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses) {
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPDepobjDirective), alignof(OMPClause *));
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size(),
+ alignof(OMPDepobjDirective));
+ auto *Dir = new (Mem) OMPDepobjDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ return Dir;
+}
+
+OMPDepobjDirective *OMPDepobjDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPDepobjDirective), alignof(OMPClause *));
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses,
+ alignof(OMPDepobjDirective));
+ return new (Mem) OMPDepobjDirective(NumClauses);
+}
+
+OMPScanDirective *OMPScanDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses) {
+ unsigned Size = llvm::alignTo(sizeof(OMPScanDirective), alignof(OMPClause *));
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size(),
+ alignof(OMPScanDirective));
+ auto *Dir = new (Mem) OMPScanDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ return Dir;
+}
+
+OMPScanDirective *OMPScanDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ unsigned Size = llvm::alignTo(sizeof(OMPScanDirective), alignof(OMPClause *));
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses,
+ alignof(OMPScanDirective));
+ return new (Mem) OMPScanDirective(NumClauses);
+}
+
OMPOrderedDirective *OMPOrderedDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
@@ -844,7 +895,8 @@ OMPTargetDirective *OMPTargetDirective::CreateEmpty(const ASTContext &C,
OMPTargetParallelDirective *OMPTargetParallelDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
+ bool HasCancel) {
unsigned Size =
llvm::alignTo(sizeof(OMPTargetParallelDirective), alignof(OMPClause *));
void *Mem =
@@ -853,6 +905,8 @@ OMPTargetParallelDirective *OMPTargetParallelDirective::Create(
new (Mem) OMPTargetParallelDirective(StartLoc, EndLoc, Clauses.size());
Dir->setClauses(Clauses);
Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setTaskReductionRefExpr(TaskRedRef);
+ Dir->setHasCancel(HasCancel);
return Dir;
}
@@ -869,7 +923,7 @@ OMPTargetParallelDirective::CreateEmpty(const ASTContext &C,
OMPTargetParallelForDirective *OMPTargetParallelForDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs, bool HasCancel) {
+ const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel) {
unsigned Size = llvm::alignTo(sizeof(OMPTargetParallelForDirective),
alignof(OMPClause *));
void *Mem = C.Allocate(
@@ -903,6 +957,7 @@ OMPTargetParallelForDirective *OMPTargetParallelForDirective::Create(
Dir->setDependentInits(Exprs.DependentInits);
Dir->setFinalsConditions(Exprs.FinalsConditions);
Dir->setPreInits(Exprs.PreInits);
+ Dir->setTaskReductionRefExpr(TaskRedRef);
Dir->setHasCancel(HasCancel);
return Dir;
}
@@ -1014,7 +1069,7 @@ OMPTeamsDirective *OMPTeamsDirective::CreateEmpty(const ASTContext &C,
OMPTaskLoopDirective *OMPTaskLoopDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs) {
+ const HelperExprs &Exprs, bool HasCancel) {
unsigned Size =
llvm::alignTo(sizeof(OMPTaskLoopDirective), alignof(OMPClause *));
void *Mem =
@@ -1048,6 +1103,7 @@ OMPTaskLoopDirective *OMPTaskLoopDirective::Create(
Dir->setDependentInits(Exprs.DependentInits);
Dir->setFinalsConditions(Exprs.FinalsConditions);
Dir->setPreInits(Exprs.PreInits);
+ Dir->setHasCancel(HasCancel);
return Dir;
}
@@ -1117,7 +1173,7 @@ OMPTaskLoopSimdDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
OMPMasterTaskLoopDirective *OMPMasterTaskLoopDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs) {
+ const HelperExprs &Exprs, bool HasCancel) {
unsigned Size =
llvm::alignTo(sizeof(OMPMasterTaskLoopDirective), alignof(OMPClause *));
void *Mem = C.Allocate(
@@ -1151,6 +1207,7 @@ OMPMasterTaskLoopDirective *OMPMasterTaskLoopDirective::Create(
Dir->setDependentInits(Exprs.DependentInits);
Dir->setFinalsConditions(Exprs.FinalsConditions);
Dir->setPreInits(Exprs.PreInits);
+ Dir->setHasCancel(HasCancel);
return Dir;
}
@@ -1223,7 +1280,7 @@ OMPMasterTaskLoopSimdDirective::CreateEmpty(const ASTContext &C,
OMPParallelMasterTaskLoopDirective *OMPParallelMasterTaskLoopDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs) {
+ const HelperExprs &Exprs, bool HasCancel) {
unsigned Size = llvm::alignTo(sizeof(OMPParallelMasterTaskLoopDirective),
alignof(OMPClause *));
void *Mem = C.Allocate(
@@ -1258,6 +1315,7 @@ OMPParallelMasterTaskLoopDirective *OMPParallelMasterTaskLoopDirective::Create(
Dir->setDependentInits(Exprs.DependentInits);
Dir->setFinalsConditions(Exprs.FinalsConditions);
Dir->setPreInits(Exprs.PreInits);
+ Dir->setHasCancel(HasCancel);
return Dir;
}
@@ -1410,7 +1468,7 @@ OMPTargetUpdateDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
OMPDistributeParallelForDirective *OMPDistributeParallelForDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs, bool HasCancel) {
+ const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel) {
unsigned Size = llvm::alignTo(sizeof(OMPDistributeParallelForDirective),
alignof(OMPClause *));
void *Mem = C.Allocate(
@@ -1459,6 +1517,7 @@ OMPDistributeParallelForDirective *OMPDistributeParallelForDirective::Create(
Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
+ Dir->setTaskReductionRefExpr(TaskRedRef);
Dir->HasCancel = HasCancel;
return Dir;
}
@@ -1885,7 +1944,7 @@ OMPTeamsDistributeParallelForDirective *
OMPTeamsDistributeParallelForDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs, bool HasCancel) {
+ const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel) {
auto Size = llvm::alignTo(sizeof(OMPTeamsDistributeParallelForDirective),
alignof(OMPClause *));
void *Mem = C.Allocate(
@@ -1934,6 +1993,7 @@ OMPTeamsDistributeParallelForDirective::Create(
Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
+ Dir->setTaskReductionRefExpr(TaskRedRef);
Dir->HasCancel = HasCancel;
return Dir;
}
@@ -2037,7 +2097,7 @@ OMPTargetTeamsDistributeParallelForDirective *
OMPTargetTeamsDistributeParallelForDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs, bool HasCancel) {
+ const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel) {
auto Size =
llvm::alignTo(sizeof(OMPTargetTeamsDistributeParallelForDirective),
alignof(OMPClause *));
@@ -2088,6 +2148,7 @@ OMPTargetTeamsDistributeParallelForDirective::Create(
Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
+ Dir->setTaskReductionRefExpr(TaskRedRef);
Dir->HasCancel = HasCancel;
return Dir;
}
diff --git a/clang/lib/AST/StmtPrinter.cpp b/clang/lib/AST/StmtPrinter.cpp
index c14bb886bb11..f797f5fe8e6d 100644
--- a/clang/lib/AST/StmtPrinter.cpp
+++ b/clang/lib/AST/StmtPrinter.cpp
@@ -75,14 +75,11 @@ namespace {
public:
StmtPrinter(raw_ostream &os, PrinterHelper *helper,
const PrintingPolicy &Policy, unsigned Indentation = 0,
- StringRef NL = "\n",
- const ASTContext *Context = nullptr)
+ StringRef NL = "\n", const ASTContext *Context = nullptr)
: OS(os), IndentLevel(Indentation), Helper(helper), Policy(Policy),
NL(NL), Context(Context) {}
- void PrintStmt(Stmt *S) {
- PrintStmt(S, Policy.Indentation);
- }
+ void PrintStmt(Stmt *S) { PrintStmt(S, Policy.Indentation); }
void PrintStmt(Stmt *S, int SubIndent) {
IndentLevel += SubIndent;
@@ -756,6 +753,16 @@ void StmtPrinter::VisitOMPFlushDirective(OMPFlushDirective *Node) {
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPDepobjDirective(OMPDepobjDirective *Node) {
+ Indent() << "#pragma omp depobj";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPScanDirective(OMPScanDirective *Node) {
+ Indent() << "#pragma omp scan";
+ PrintOMPExecutableDirective(Node);
+}
+
void StmtPrinter::VisitOMPOrderedDirective(OMPOrderedDirective *Node) {
Indent() << "#pragma omp ordered";
PrintOMPExecutableDirective(Node, Node->hasClausesOfKind<OMPDependClause>());
@@ -1274,29 +1281,20 @@ void StmtPrinter::VisitOffsetOfExpr(OffsetOfExpr *Node) {
OS << ")";
}
-void StmtPrinter::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *Node){
- switch(Node->getKind()) {
- case UETT_SizeOf:
- OS << "sizeof";
- break;
- case UETT_AlignOf:
+void StmtPrinter::VisitUnaryExprOrTypeTraitExpr(
+ UnaryExprOrTypeTraitExpr *Node) {
+ const char *Spelling = getTraitSpelling(Node->getKind());
+ if (Node->getKind() == UETT_AlignOf) {
if (Policy.Alignof)
- OS << "alignof";
+ Spelling = "alignof";
else if (Policy.UnderscoreAlignof)
- OS << "_Alignof";
+ Spelling = "_Alignof";
else
- OS << "__alignof";
- break;
- case UETT_PreferredAlignOf:
- OS << "__alignof";
- break;
- case UETT_VecStep:
- OS << "vec_step";
- break;
- case UETT_OpenMPRequiredSimdAlign:
- OS << "__builtin_omp_required_simd_align";
- break;
+ Spelling = "__alignof";
}
+
+ OS << Spelling;
+
if (Node->isArgumentType()) {
OS << '(';
Node->getArgumentType().print(OS, Policy);
@@ -1330,19 +1328,65 @@ void StmtPrinter::VisitArraySubscriptExpr(ArraySubscriptExpr *Node) {
OS << "]";
}
+void StmtPrinter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *Node) {
+ PrintExpr(Node->getBase());
+ OS << "[";
+ PrintExpr(Node->getRowIdx());
+ OS << "]";
+ OS << "[";
+ PrintExpr(Node->getColumnIdx());
+ OS << "]";
+}
+
void StmtPrinter::VisitOMPArraySectionExpr(OMPArraySectionExpr *Node) {
PrintExpr(Node->getBase());
OS << "[";
if (Node->getLowerBound())
PrintExpr(Node->getLowerBound());
- if (Node->getColonLoc().isValid()) {
+ if (Node->getColonLocFirst().isValid()) {
OS << ":";
if (Node->getLength())
PrintExpr(Node->getLength());
}
+ if (Node->getColonLocSecond().isValid()) {
+ OS << ":";
+ if (Node->getStride())
+ PrintExpr(Node->getStride());
+ }
OS << "]";
}
+void StmtPrinter::VisitOMPArrayShapingExpr(OMPArrayShapingExpr *Node) {
+ OS << "(";
+ for (Expr *E : Node->getDimensions()) {
+ OS << "[";
+ PrintExpr(E);
+ OS << "]";
+ }
+ OS << ")";
+ PrintExpr(Node->getBase());
+}
+
+void StmtPrinter::VisitOMPIteratorExpr(OMPIteratorExpr *Node) {
+ OS << "iterator(";
+ for (unsigned I = 0, E = Node->numOfIterators(); I < E; ++I) {
+ auto *VD = cast<ValueDecl>(Node->getIteratorDecl(I));
+ VD->getType().print(OS, Policy);
+ const OMPIteratorExpr::IteratorRange Range = Node->getIteratorRange(I);
+ OS << " " << VD->getName() << " = ";
+ PrintExpr(Range.Begin);
+ OS << ":";
+ PrintExpr(Range.End);
+ if (Range.Step) {
+ OS << ":";
+ PrintExpr(Range.Step);
+ }
+ if (I < E - 1)
+ OS << ", ";
+ }
+ OS << ")";
+}
+
void StmtPrinter::PrintCallArgs(CallExpr *Call) {
for (unsigned i = 0, e = Call->getNumArgs(); i != e; ++i) {
if (isa<CXXDefaultArgExpr>(Call->getArg(i))) {
@@ -1750,6 +1794,10 @@ void StmtPrinter::VisitBuiltinBitCastExpr(BuiltinBitCastExpr *Node) {
OS << ")";
}
+void StmtPrinter::VisitCXXAddrspaceCastExpr(CXXAddrspaceCastExpr *Node) {
+ VisitCXXNamedCastExpr(Node);
+}
+
void StmtPrinter::VisitCXXTypeidExpr(CXXTypeidExpr *Node) {
OS << "typeid(";
if (Node->isTypeOperand()) {
@@ -2008,7 +2056,7 @@ void StmtPrinter::VisitLambdaExpr(LambdaExpr *Node) {
if (Policy.TerseOutput)
OS << "{}";
else
- PrintRawCompoundStmt(Node->getBody());
+ PrintRawCompoundStmt(Node->getCompoundStmtBody());
}
void StmtPrinter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *Node) {
@@ -2160,37 +2208,8 @@ void StmtPrinter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *Node) {
printTemplateArgumentList(OS, Node->template_arguments(), Policy);
}
-static const char *getTypeTraitName(TypeTrait TT) {
- switch (TT) {
-#define TYPE_TRAIT_1(Spelling, Name, Key) \
-case clang::UTT_##Name: return #Spelling;
-#define TYPE_TRAIT_2(Spelling, Name, Key) \
-case clang::BTT_##Name: return #Spelling;
-#define TYPE_TRAIT_N(Spelling, Name, Key) \
- case clang::TT_##Name: return #Spelling;
-#include "clang/Basic/TokenKinds.def"
- }
- llvm_unreachable("Type trait not covered by switch");
-}
-
-static const char *getTypeTraitName(ArrayTypeTrait ATT) {
- switch (ATT) {
- case ATT_ArrayRank: return "__array_rank";
- case ATT_ArrayExtent: return "__array_extent";
- }
- llvm_unreachable("Array type trait not covered by switch");
-}
-
-static const char *getExpressionTraitName(ExpressionTrait ET) {
- switch (ET) {
- case ET_IsLValueExpr: return "__is_lvalue_expr";
- case ET_IsRValueExpr: return "__is_rvalue_expr";
- }
- llvm_unreachable("Expression type trait not covered by switch");
-}
-
void StmtPrinter::VisitTypeTraitExpr(TypeTraitExpr *E) {
- OS << getTypeTraitName(E->getTrait()) << "(";
+ OS << getTraitSpelling(E->getTrait()) << "(";
for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I) {
if (I > 0)
OS << ", ";
@@ -2200,13 +2219,13 @@ void StmtPrinter::VisitTypeTraitExpr(TypeTraitExpr *E) {
}
void StmtPrinter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
- OS << getTypeTraitName(E->getTrait()) << '(';
+ OS << getTraitSpelling(E->getTrait()) << '(';
E->getQueriedType().print(OS, Policy);
OS << ')';
}
void StmtPrinter::VisitExpressionTraitExpr(ExpressionTraitExpr *E) {
- OS << getExpressionTraitName(E->getTrait()) << '(';
+ OS << getTraitSpelling(E->getTrait()) << '(';
PrintExpr(E->getQueriedExpression());
OS << ')';
}
@@ -2269,6 +2288,60 @@ void StmtPrinter::VisitConceptSpecializationExpr(ConceptSpecializationExpr *E) {
Policy);
}
+void StmtPrinter::VisitRequiresExpr(RequiresExpr *E) {
+ OS << "requires ";
+ auto LocalParameters = E->getLocalParameters();
+ if (!LocalParameters.empty()) {
+ OS << "(";
+ for (ParmVarDecl *LocalParam : LocalParameters) {
+ PrintRawDecl(LocalParam);
+ if (LocalParam != LocalParameters.back())
+ OS << ", ";
+ }
+
+ OS << ") ";
+ }
+ OS << "{ ";
+ auto Requirements = E->getRequirements();
+ for (concepts::Requirement *Req : Requirements) {
+ if (auto *TypeReq = dyn_cast<concepts::TypeRequirement>(Req)) {
+ if (TypeReq->isSubstitutionFailure())
+ OS << "<<error-type>>";
+ else
+ TypeReq->getType()->getType().print(OS, Policy);
+ } else if (auto *ExprReq = dyn_cast<concepts::ExprRequirement>(Req)) {
+ if (ExprReq->isCompound())
+ OS << "{ ";
+ if (ExprReq->isExprSubstitutionFailure())
+ OS << "<<error-expression>>";
+ else
+ PrintExpr(ExprReq->getExpr());
+ if (ExprReq->isCompound()) {
+ OS << " }";
+ if (ExprReq->getNoexceptLoc().isValid())
+ OS << " noexcept";
+ const auto &RetReq = ExprReq->getReturnTypeRequirement();
+ if (!RetReq.isEmpty()) {
+ OS << " -> ";
+ if (RetReq.isSubstitutionFailure())
+ OS << "<<error-type>>";
+ else if (RetReq.isTypeConstraint())
+ RetReq.getTypeConstraint()->print(OS, Policy);
+ }
+ }
+ } else {
+ auto *NestedReq = cast<concepts::NestedRequirement>(Req);
+ OS << "requires ";
+ if (NestedReq->isSubstitutionFailure())
+ OS << "<<error-expression>>";
+ else
+ PrintExpr(NestedReq->getConstraintExpr());
+ }
+ OS << "; ";
+ }
+ OS << "}";
+}
+
// C++ Coroutines TS
void StmtPrinter::VisitCoroutineBodyStmt(CoroutineBodyStmt *S) {
@@ -2445,6 +2518,17 @@ void StmtPrinter::VisitTypoExpr(TypoExpr *Node) {
llvm_unreachable("Cannot print TypoExpr nodes");
}
+void StmtPrinter::VisitRecoveryExpr(RecoveryExpr *Node) {
+ OS << "<recovery-expr>(";
+ const char *Sep = "";
+ for (Expr *E : Node->subExpressions()) {
+ OS << Sep;
+ PrintExpr(E);
+ Sep = ", ";
+ }
+ OS << ')';
+}
+
void StmtPrinter::VisitAsTypeExpr(AsTypeExpr *Node) {
OS << "__builtin_astype(";
PrintExpr(Node->getSrcExpr());
diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp
index 2aa5106e90fa..bf3b43b816f1 100644
--- a/clang/lib/AST/StmtProfile.cpp
+++ b/clang/lib/AST/StmtProfile.cpp
@@ -19,6 +19,7 @@
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ODRHash.h"
+#include "clang/AST/OpenMPClause.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/ADT/FoldingSet.h"
using namespace clang;
@@ -413,9 +414,8 @@ class OMPClauseProfiler : public ConstOMPClauseVisitor<OMPClauseProfiler> {
public:
OMPClauseProfiler(StmtProfiler *P) : Profiler(P) { }
-#define OPENMP_CLAUSE(Name, Class) \
- void Visit##Class(const Class *C);
-#include "clang/Basic/OpenMPKinds.def"
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) void Visit##Class(const Class *C);
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
void VistOMPClauseWithPreInit(const OMPClauseWithPreInit *C);
void VistOMPClauseWithPostUpdate(const OMPClauseWithPostUpdate *C);
};
@@ -471,6 +471,11 @@ void OMPClauseProfiler::VisitOMPCollapseClause(const OMPCollapseClause *C) {
Profiler->VisitStmt(C->getNumForLoops());
}
+void OMPClauseProfiler::VisitOMPDetachClause(const OMPDetachClause *C) {
+ if (Expr *Evt = C->getEventHandler())
+ Profiler->VisitStmt(Evt);
+}
+
void OMPClauseProfiler::VisitOMPDefaultClause(const OMPDefaultClause *C) { }
void OMPClauseProfiler::VisitOMPProcBindClause(const OMPProcBindClause *C) { }
@@ -517,12 +522,22 @@ void OMPClauseProfiler::VisitOMPCaptureClause(const OMPCaptureClause *) {}
void OMPClauseProfiler::VisitOMPSeqCstClause(const OMPSeqCstClause *) {}
+void OMPClauseProfiler::VisitOMPAcqRelClause(const OMPAcqRelClause *) {}
+
+void OMPClauseProfiler::VisitOMPAcquireClause(const OMPAcquireClause *) {}
+
+void OMPClauseProfiler::VisitOMPReleaseClause(const OMPReleaseClause *) {}
+
+void OMPClauseProfiler::VisitOMPRelaxedClause(const OMPRelaxedClause *) {}
+
void OMPClauseProfiler::VisitOMPThreadsClause(const OMPThreadsClause *) {}
void OMPClauseProfiler::VisitOMPSIMDClause(const OMPSIMDClause *) {}
void OMPClauseProfiler::VisitOMPNogroupClause(const OMPNogroupClause *) {}
+void OMPClauseProfiler::VisitOMPDestroyClause(const OMPDestroyClause *) {}
+
template<typename T>
void OMPClauseProfiler::VisitOMPClauseList(T *Node) {
for (auto *E : Node->varlists()) {
@@ -594,6 +609,20 @@ void OMPClauseProfiler::VisitOMPReductionClause(
if (E)
Profiler->VisitStmt(E);
}
+ if (C->getModifier() == clang::OMPC_REDUCTION_inscan) {
+ for (auto *E : C->copy_ops()) {
+ if (E)
+ Profiler->VisitStmt(E);
+ }
+ for (auto *E : C->copy_array_temps()) {
+ if (E)
+ Profiler->VisitStmt(E);
+ }
+ for (auto *E : C->copy_array_elems()) {
+ if (E)
+ Profiler->VisitStmt(E);
+ }
+ }
}
void OMPClauseProfiler::VisitOMPTaskReductionClause(
const OMPTaskReductionClause *C) {
@@ -710,6 +739,10 @@ OMPClauseProfiler::VisitOMPCopyprivateClause(const OMPCopyprivateClause *C) {
void OMPClauseProfiler::VisitOMPFlushClause(const OMPFlushClause *C) {
VisitOMPClauseList(C);
}
+void OMPClauseProfiler::VisitOMPDepobjClause(const OMPDepobjClause *C) {
+ if (const Expr *Depobj = C->getDepobj())
+ Profiler->VisitStmt(Depobj);
+}
void OMPClauseProfiler::VisitOMPDependClause(const OMPDependClause *C) {
VisitOMPClauseList(C);
}
@@ -765,6 +798,10 @@ void OMPClauseProfiler::VisitOMPUseDevicePtrClause(
const OMPUseDevicePtrClause *C) {
VisitOMPClauseList(C);
}
+void OMPClauseProfiler::VisitOMPUseDeviceAddrClause(
+ const OMPUseDeviceAddrClause *C) {
+ VisitOMPClauseList(C);
+}
void OMPClauseProfiler::VisitOMPIsDevicePtrClause(
const OMPIsDevicePtrClause *C) {
VisitOMPClauseList(C);
@@ -775,6 +812,28 @@ void OMPClauseProfiler::VisitOMPNontemporalClause(
for (auto *E : C->private_refs())
Profiler->VisitStmt(E);
}
+void OMPClauseProfiler::VisitOMPInclusiveClause(const OMPInclusiveClause *C) {
+ VisitOMPClauseList(C);
+}
+void OMPClauseProfiler::VisitOMPExclusiveClause(const OMPExclusiveClause *C) {
+ VisitOMPClauseList(C);
+}
+void OMPClauseProfiler::VisitOMPUsesAllocatorsClause(
+ const OMPUsesAllocatorsClause *C) {
+ for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
+ OMPUsesAllocatorsClause::Data D = C->getAllocatorData(I);
+ Profiler->VisitStmt(D.Allocator);
+ if (D.AllocatorTraits)
+ Profiler->VisitStmt(D.AllocatorTraits);
+ }
+}
+void OMPClauseProfiler::VisitOMPAffinityClause(const OMPAffinityClause *C) {
+ if (const Expr *Modifier = C->getModifier())
+ Profiler->VisitStmt(Modifier);
+ for (const Expr *E : C->varlists())
+ Profiler->VisitStmt(E);
+}
+void OMPClauseProfiler::VisitOMPOrderClause(const OMPOrderClause *C) {}
} // namespace
void
@@ -875,6 +934,14 @@ void StmtProfiler::VisitOMPFlushDirective(const OMPFlushDirective *S) {
VisitOMPExecutableDirective(S);
}
+void StmtProfiler::VisitOMPDepobjDirective(const OMPDepobjDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPScanDirective(const OMPScanDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
void StmtProfiler::VisitOMPOrderedDirective(const OMPOrderedDirective *S) {
VisitOMPExecutableDirective(S);
}
@@ -1155,10 +1222,24 @@ void StmtProfiler::VisitArraySubscriptExpr(const ArraySubscriptExpr *S) {
VisitExpr(S);
}
+void StmtProfiler::VisitMatrixSubscriptExpr(const MatrixSubscriptExpr *S) {
+ VisitExpr(S);
+}
+
void StmtProfiler::VisitOMPArraySectionExpr(const OMPArraySectionExpr *S) {
VisitExpr(S);
}
+void StmtProfiler::VisitOMPArrayShapingExpr(const OMPArrayShapingExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitOMPIteratorExpr(const OMPIteratorExpr *S) {
+ VisitExpr(S);
+ for (unsigned I = 0, E = S->numOfIterators(); I < E; ++I)
+ VisitDecl(S->getIteratorDecl(I));
+}
+
void StmtProfiler::VisitCallExpr(const CallExpr *S) {
VisitExpr(S);
}
@@ -1335,9 +1416,52 @@ void StmtProfiler::VisitAtomicExpr(const AtomicExpr *S) {
void StmtProfiler::VisitConceptSpecializationExpr(
const ConceptSpecializationExpr *S) {
VisitExpr(S);
- VisitDecl(S->getFoundDecl());
- VisitTemplateArguments(S->getTemplateArgsAsWritten()->getTemplateArgs(),
- S->getTemplateArgsAsWritten()->NumTemplateArgs);
+ VisitDecl(S->getNamedConcept());
+ for (const TemplateArgument &Arg : S->getTemplateArguments())
+ VisitTemplateArgument(Arg);
+}
+
+void StmtProfiler::VisitRequiresExpr(const RequiresExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getLocalParameters().size());
+ for (ParmVarDecl *LocalParam : S->getLocalParameters())
+ VisitDecl(LocalParam);
+ ID.AddInteger(S->getRequirements().size());
+ for (concepts::Requirement *Req : S->getRequirements()) {
+ if (auto *TypeReq = dyn_cast<concepts::TypeRequirement>(Req)) {
+ ID.AddInteger(concepts::Requirement::RK_Type);
+ ID.AddBoolean(TypeReq->isSubstitutionFailure());
+ if (!TypeReq->isSubstitutionFailure())
+ VisitType(TypeReq->getType()->getType());
+ } else if (auto *ExprReq = dyn_cast<concepts::ExprRequirement>(Req)) {
+ ID.AddInteger(concepts::Requirement::RK_Compound);
+ ID.AddBoolean(ExprReq->isExprSubstitutionFailure());
+ if (!ExprReq->isExprSubstitutionFailure())
+ Visit(ExprReq->getExpr());
+ // C++2a [expr.prim.req.compound]p1 Example:
+ // [...] The compound-requirement in C1 requires that x++ is a valid
+ // expression. It is equivalent to the simple-requirement x++; [...]
+ // We therefore do not profile isSimple() here.
+ ID.AddBoolean(ExprReq->getNoexceptLoc().isValid());
+ const concepts::ExprRequirement::ReturnTypeRequirement &RetReq =
+ ExprReq->getReturnTypeRequirement();
+ if (RetReq.isEmpty()) {
+ ID.AddInteger(0);
+ } else if (RetReq.isTypeConstraint()) {
+ ID.AddInteger(1);
+ Visit(RetReq.getTypeConstraint()->getImmediatelyDeclaredConstraint());
+ } else {
+ assert(RetReq.isSubstitutionFailure());
+ ID.AddInteger(2);
+ }
+ } else {
+ ID.AddInteger(concepts::Requirement::RK_Nested);
+ auto *NestedReq = cast<concepts::NestedRequirement>(Req);
+ ID.AddBoolean(NestedReq->isSubstitutionFailure());
+ if (!NestedReq->isSubstitutionFailure())
+ Visit(NestedReq->getConstraintExpr());
+ }
+ }
}
static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
@@ -1350,7 +1474,6 @@ static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
case OO_Array_New:
case OO_Array_Delete:
case OO_Arrow:
- case OO_Call:
case OO_Conditional:
case NUM_OVERLOADED_OPERATORS:
llvm_unreachable("Invalid operator call kind");
@@ -1492,8 +1615,8 @@ static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
return Stmt::BinaryOperatorClass;
case OO_Spaceship:
- // FIXME: Update this once we support <=> expressions.
- llvm_unreachable("<=> expressions not supported yet");
+ BinaryOp = BO_Cmp;
+ return Stmt::BinaryOperatorClass;
case OO_AmpAmp:
BinaryOp = BO_LAnd;
@@ -1524,6 +1647,9 @@ static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
case OO_Subscript:
return Stmt::ArraySubscriptExprClass;
+ case OO_Call:
+ return Stmt::CallExprClass;
+
case OO_Coawait:
UnaryOp = UO_Coawait;
return Stmt::UnaryOperatorClass;
@@ -1564,7 +1690,7 @@ void StmtProfiler::VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *S) {
SC == Stmt::CompoundAssignOperatorClass)
ID.AddInteger(BinaryOp);
else
- assert(SC == Stmt::ArraySubscriptExprClass);
+ assert(SC == Stmt::ArraySubscriptExprClass || SC == Stmt::CallExprClass);
return;
}
@@ -1627,6 +1753,10 @@ void StmtProfiler::VisitBuiltinBitCastExpr(const BuiltinBitCastExpr *S) {
VisitType(S->getTypeInfoAsWritten()->getType());
}
+void StmtProfiler::VisitCXXAddrspaceCastExpr(const CXXAddrspaceCastExpr *S) {
+ VisitCXXNamedCastExpr(S);
+}
+
void StmtProfiler::VisitUserDefinedLiteral(const UserDefinedLiteral *S) {
VisitCallExpr(S);
}
@@ -1947,6 +2077,8 @@ void StmtProfiler::VisitSourceLocExpr(const SourceLocExpr *E) {
VisitExpr(E);
}
+void StmtProfiler::VisitRecoveryExpr(const RecoveryExpr *E) { VisitExpr(E); }
+
void StmtProfiler::VisitObjCStringLiteral(const ObjCStringLiteral *S) {
VisitExpr(S);
}
diff --git a/clang/lib/AST/TemplateBase.cpp b/clang/lib/AST/TemplateBase.cpp
index db16c2a06b64..6a3d2b30e46e 100644
--- a/clang/lib/AST/TemplateBase.cpp
+++ b/clang/lib/AST/TemplateBase.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/PrettyPrinter.h"
@@ -111,84 +112,60 @@ TemplateArgument::CreatePackCopy(ASTContext &Context,
return TemplateArgument(Args.copy(Context));
}
-bool TemplateArgument::isDependent() const {
+TemplateArgumentDependence TemplateArgument::getDependence() const {
+ auto Deps = TemplateArgumentDependence::None;
switch (getKind()) {
case Null:
llvm_unreachable("Should not have a NULL template argument");
case Type:
- return getAsType()->isDependentType() ||
- isa<PackExpansionType>(getAsType());
+ Deps = toTemplateArgumentDependence(getAsType()->getDependence());
+ if (isa<PackExpansionType>(getAsType()))
+ Deps |= TemplateArgumentDependence::Dependent;
+ return Deps;
case Template:
- return getAsTemplate().isDependent();
+ return toTemplateArgumentDependence(getAsTemplate().getDependence());
case TemplateExpansion:
- return true;
+ return TemplateArgumentDependence::Dependent |
+ TemplateArgumentDependence::Instantiation;
- case Declaration:
- if (DeclContext *DC = dyn_cast<DeclContext>(getAsDecl()))
- return DC->isDependentContext();
- return getAsDecl()->getDeclContext()->isDependentContext();
+ case Declaration: {
+ auto *DC = dyn_cast<DeclContext>(getAsDecl());
+ if (!DC)
+ DC = getAsDecl()->getDeclContext();
+ if (DC->isDependentContext())
+ Deps = TemplateArgumentDependence::Dependent |
+ TemplateArgumentDependence::Instantiation;
+ return Deps;
+ }
case NullPtr:
- return false;
-
case Integral:
- // Never dependent
- return false;
+ return TemplateArgumentDependence::None;
case Expression:
- return (getAsExpr()->isTypeDependent() || getAsExpr()->isValueDependent() ||
- isa<PackExpansionExpr>(getAsExpr()));
+ Deps = toTemplateArgumentDependence(getAsExpr()->getDependence());
+ if (isa<PackExpansionExpr>(getAsExpr()))
+ Deps |= TemplateArgumentDependence::Dependent |
+ TemplateArgumentDependence::Instantiation;
+ return Deps;
case Pack:
for (const auto &P : pack_elements())
- if (P.isDependent())
- return true;
- return false;
+ Deps |= P.getDependence();
+ return Deps;
}
+ llvm_unreachable("unhandled ArgKind");
+}
- llvm_unreachable("Invalid TemplateArgument Kind!");
+bool TemplateArgument::isDependent() const {
+ return getDependence() & TemplateArgumentDependence::Dependent;
}
bool TemplateArgument::isInstantiationDependent() const {
- switch (getKind()) {
- case Null:
- llvm_unreachable("Should not have a NULL template argument");
-
- case Type:
- return getAsType()->isInstantiationDependentType();
-
- case Template:
- return getAsTemplate().isInstantiationDependent();
-
- case TemplateExpansion:
- return true;
-
- case Declaration:
- if (DeclContext *DC = dyn_cast<DeclContext>(getAsDecl()))
- return DC->isDependentContext();
- return getAsDecl()->getDeclContext()->isDependentContext();
-
- case NullPtr:
- return false;
-
- case Integral:
- // Never dependent
- return false;
-
- case Expression:
- return getAsExpr()->isInstantiationDependent();
-
- case Pack:
- for (const auto &P : pack_elements())
- if (P.isInstantiationDependent())
- return true;
- return false;
- }
-
- llvm_unreachable("Invalid TemplateArgument Kind!");
+ return getDependence() & TemplateArgumentDependence::Instantiation;
}
bool TemplateArgument::isPackExpansion() const {
@@ -215,38 +192,7 @@ bool TemplateArgument::isPackExpansion() const {
}
bool TemplateArgument::containsUnexpandedParameterPack() const {
- switch (getKind()) {
- case Null:
- case Declaration:
- case Integral:
- case TemplateExpansion:
- case NullPtr:
- break;
-
- case Type:
- if (getAsType()->containsUnexpandedParameterPack())
- return true;
- break;
-
- case Template:
- if (getAsTemplate().containsUnexpandedParameterPack())
- return true;
- break;
-
- case Expression:
- if (getAsExpr()->containsUnexpandedParameterPack())
- return true;
- break;
-
- case Pack:
- for (const auto &P : pack_elements())
- if (P.containsUnexpandedParameterPack())
- return true;
-
- break;
- }
-
- return false;
+ return getDependence() & TemplateArgumentDependence::UnexpandedPack;
}
Optional<unsigned> TemplateArgument::getNumTemplateExpansions() const {
@@ -406,13 +352,9 @@ void TemplateArgument::print(const PrintingPolicy &Policy,
case Declaration: {
NamedDecl *ND = getAsDecl();
- Out << '&';
- if (ND->getDeclName()) {
- // FIXME: distinguish between pointer and reference args?
- ND->printQualifiedName(Out);
- } else {
- Out << "(anonymous)";
- }
+ if (!getParamTypeForDecl()->isReferenceType())
+ Out << '&';
+ ND->printQualifiedName(Out);
break;
}
@@ -561,7 +503,7 @@ const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
}
const ASTTemplateArgumentListInfo *
-ASTTemplateArgumentListInfo::Create(ASTContext &C,
+ASTTemplateArgumentListInfo::Create(const ASTContext &C,
const TemplateArgumentListInfo &List) {
std::size_t size = totalSizeToAlloc<TemplateArgumentLoc>(List.size());
void *Mem = C.Allocate(size, alignof(ASTTemplateArgumentListInfo));
@@ -601,20 +543,14 @@ void ASTTemplateKWAndArgsInfo::initializeFrom(SourceLocation TemplateKWLoc) {
void ASTTemplateKWAndArgsInfo::initializeFrom(
SourceLocation TemplateKWLoc, const TemplateArgumentListInfo &Info,
- TemplateArgumentLoc *OutArgArray, bool &Dependent,
- bool &InstantiationDependent, bool &ContainsUnexpandedParameterPack) {
+ TemplateArgumentLoc *OutArgArray, TemplateArgumentDependence &Deps) {
this->TemplateKWLoc = TemplateKWLoc;
LAngleLoc = Info.getLAngleLoc();
RAngleLoc = Info.getRAngleLoc();
NumTemplateArgs = Info.size();
for (unsigned i = 0; i != NumTemplateArgs; ++i) {
- Dependent = Dependent || Info[i].getArgument().isDependent();
- InstantiationDependent = InstantiationDependent ||
- Info[i].getArgument().isInstantiationDependent();
- ContainsUnexpandedParameterPack =
- ContainsUnexpandedParameterPack ||
- Info[i].getArgument().containsUnexpandedParameterPack();
+ Deps |= Info[i].getArgument().getDependence();
new (&OutArgArray[i]) TemplateArgumentLoc(Info[i]);
}
diff --git a/clang/lib/AST/TemplateName.cpp b/clang/lib/AST/TemplateName.cpp
index 06e1dcec7449..40a8736ae1af 100644
--- a/clang/lib/AST/TemplateName.cpp
+++ b/clang/lib/AST/TemplateName.cpp
@@ -11,8 +11,10 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/TemplateName.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TemplateBase.h"
@@ -168,52 +170,54 @@ TemplateName TemplateName::getNameToSubstitute() const {
return TemplateName(Decl);
}
-bool TemplateName::isDependent() const {
+TemplateNameDependence TemplateName::getDependence() const {
+ auto D = TemplateNameDependence::None;
+ switch (getKind()) {
+ case TemplateName::NameKind::QualifiedTemplate:
+ D |= toTemplateNameDependence(
+ getAsQualifiedTemplateName()->getQualifier()->getDependence());
+ break;
+ case TemplateName::NameKind::DependentTemplate:
+ D |= toTemplateNameDependence(
+ getAsDependentTemplateName()->getQualifier()->getDependence());
+ break;
+ case TemplateName::NameKind::SubstTemplateTemplateParmPack:
+ D |= TemplateNameDependence::UnexpandedPack;
+ break;
+ case TemplateName::NameKind::OverloadedTemplate:
+ llvm_unreachable("overloaded templates shouldn't survive to here.");
+ default:
+ break;
+ }
if (TemplateDecl *Template = getAsTemplateDecl()) {
- if (isa<TemplateTemplateParmDecl>(Template))
- return true;
+ if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template)) {
+ D |= TemplateNameDependence::DependentInstantiation;
+ if (TTP->isParameterPack())
+ D |= TemplateNameDependence::UnexpandedPack;
+ }
// FIXME: Hack, getDeclContext() can be null if Template is still
// initializing due to PCH reading, so we check it before using it.
// Should probably modify TemplateSpecializationType to allow constructing
// it without the isDependent() checking.
- return Template->getDeclContext() &&
- Template->getDeclContext()->isDependentContext();
+ if (Template->getDeclContext() &&
+ Template->getDeclContext()->isDependentContext())
+ D |= TemplateNameDependence::DependentInstantiation;
+ } else {
+ D |= TemplateNameDependence::DependentInstantiation;
}
+ return D;
+}
- assert(!getAsOverloadedTemplate() &&
- "overloaded templates shouldn't survive to here");
-
- return true;
+bool TemplateName::isDependent() const {
+ return getDependence() & TemplateNameDependence::Dependent;
}
bool TemplateName::isInstantiationDependent() const {
- if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) {
- if (QTN->getQualifier()->isInstantiationDependent())
- return true;
- }
-
- return isDependent();
+ return getDependence() & TemplateNameDependence::Instantiation;
}
bool TemplateName::containsUnexpandedParameterPack() const {
- if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) {
- if (QTN->getQualifier()->containsUnexpandedParameterPack())
- return true;
- }
-
- if (TemplateDecl *Template = getAsTemplateDecl()) {
- if (TemplateTemplateParmDecl *TTP
- = dyn_cast<TemplateTemplateParmDecl>(Template))
- return TTP->isParameterPack();
-
- return false;
- }
-
- if (DependentTemplateName *DTN = getAsDependentTemplateName())
- return DTN->getQualifier() &&
- DTN->getQualifier()->containsUnexpandedParameterPack();
-
- return getAsSubstTemplateTemplateParmPack() != nullptr;
+ return getDependence() & TemplateNameDependence::UnexpandedPack;
}
void
diff --git a/clang/lib/AST/TextNodeDumper.cpp b/clang/lib/AST/TextNodeDumper.cpp
index 965ad17fcfa5..5b0a0ac392c0 100644
--- a/clang/lib/AST/TextNodeDumper.cpp
+++ b/clang/lib/AST/TextNodeDumper.cpp
@@ -11,10 +11,19 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/TextNodeDumper.h"
+#include "clang/AST/APValue.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/LocInfoType.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/Module.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Specifiers.h"
+#include "clang/Basic/TypeTraits.h"
+
+#include <algorithm>
+#include <utility>
using namespace clang;
@@ -47,12 +56,15 @@ static void dumpPreviousDecl(raw_ostream &OS, const Decl *D) {
llvm_unreachable("Decl that isn't part of DeclNodes.inc!");
}
-TextNodeDumper::TextNodeDumper(raw_ostream &OS, bool ShowColors,
- const SourceManager *SM,
- const PrintingPolicy &PrintPolicy,
- const comments::CommandTraits *Traits)
- : TextTreeStructure(OS, ShowColors), OS(OS), ShowColors(ShowColors), SM(SM),
- PrintPolicy(PrintPolicy), Traits(Traits) {}
+TextNodeDumper::TextNodeDumper(raw_ostream &OS, const ASTContext &Context,
+ bool ShowColors)
+ : TextTreeStructure(OS, ShowColors), OS(OS), ShowColors(ShowColors),
+ Context(&Context), SM(&Context.getSourceManager()),
+ PrintPolicy(Context.getPrintingPolicy()),
+ Traits(&Context.getCommentCommandTraits()) {}
+
+TextNodeDumper::TextNodeDumper(raw_ostream &OS, bool ShowColors)
+ : TextTreeStructure(OS, ShowColors), OS(OS), ShowColors(ShowColors) {}
void TextNodeDumper::Visit(const comments::Comment *C,
const comments::FullComment *FC) {
@@ -121,12 +133,14 @@ void TextNodeDumper::Visit(const Stmt *Node) {
dumpPointer(Node);
dumpSourceRange(Node->getSourceRange());
- if (Node->isOMPStructuredBlock())
- OS << " openmp_structured_block";
-
if (const auto *E = dyn_cast<Expr>(Node)) {
dumpType(E->getType());
+ if (E->containsErrors()) {
+ ColorScope Color(OS, ShowColors, ErrorsColor);
+ OS << " contains-errors";
+ }
+
{
ColorScope Color(OS, ShowColors, ValueKindColor);
switch (E->getValueKind()) {
@@ -158,6 +172,9 @@ void TextNodeDumper::Visit(const Stmt *Node) {
case OK_VectorComponent:
OS << " vectorcomponent";
break;
+ case OK_MatrixComponent:
+ OS << " matrixcomponent";
+ break;
}
}
}
@@ -193,6 +210,11 @@ void TextNodeDumper::Visit(const Type *T) {
if (SingleStepDesugar != QualType(T, 0))
OS << " sugar";
+ if (T->containsErrors()) {
+ ColorScope Color(OS, ShowColors, ErrorsColor);
+ OS << " contains-errors";
+ }
+
if (T->isDependentType())
OS << " dependent";
else if (T->isInstantiationDependentType())
@@ -243,7 +265,7 @@ void TextNodeDumper::Visit(const Decl *D) {
const_cast<NamedDecl *>(ND)))
AddChild([=] { OS << "also in " << M->getFullModuleName(); });
if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
- if (ND->isHidden())
+ if (!ND->isUnconditionallyVisible())
OS << " hidden";
if (D->isImplicit())
OS << " implicit";
@@ -310,7 +332,7 @@ void TextNodeDumper::Visit(const OMPClause *C) {
}
{
ColorScope Color(OS, ShowColors, AttrColor);
- StringRef ClauseName(getOpenMPClauseName(C->getClauseKind()));
+ StringRef ClauseName(llvm::omp::getOpenMPClauseName(C->getClauseKind()));
OS << "OMP" << ClauseName.substr(/*Start=*/0, /*N=*/1).upper()
<< ClauseName.drop_front() << "Clause";
}
@@ -333,6 +355,218 @@ void TextNodeDumper::Visit(const GenericSelectionExpr::ConstAssociation &A) {
OS << " selected";
}
+static double GetApproxValue(const llvm::APFloat &F) {
+ llvm::APFloat V = F;
+ bool ignored;
+ V.convert(llvm::APFloat::IEEEdouble(), llvm::APFloat::rmNearestTiesToEven,
+ &ignored);
+ return V.convertToDouble();
+}
+
+/// True if the \p APValue \p Value can be folded onto the current line.
+static bool isSimpleAPValue(const APValue &Value) {
+ switch (Value.getKind()) {
+ case APValue::None:
+ case APValue::Indeterminate:
+ case APValue::Int:
+ case APValue::Float:
+ case APValue::FixedPoint:
+ case APValue::ComplexInt:
+ case APValue::ComplexFloat:
+ case APValue::LValue:
+ case APValue::MemberPointer:
+ case APValue::AddrLabelDiff:
+ return true;
+ case APValue::Vector:
+ case APValue::Array:
+ case APValue::Struct:
+ return false;
+ case APValue::Union:
+ return isSimpleAPValue(Value.getUnionValue());
+ }
+ llvm_unreachable("unexpected APValue kind!");
+}
+
+/// Dump the children of the \p APValue \p Value.
+///
+/// \param[in] Value The \p APValue to visit
+/// \param[in] Ty The \p QualType passed to \p Visit
+///
+/// \param[in] IdxToChildFun A function mapping an \p APValue and an index
+/// to one of the child of the \p APValue
+///
+/// \param[in] NumChildren \p IdxToChildFun will be called on \p Value with
+/// the indices in the range \p [0,NumChildren(
+///
+/// \param[in] LabelSingular The label to use on a line with a single child
+/// \param[in] LabelPlurial The label to use on a line with multiple children
+void TextNodeDumper::dumpAPValueChildren(
+ const APValue &Value, QualType Ty,
+ const APValue &(*IdxToChildFun)(const APValue &, unsigned),
+ unsigned NumChildren, StringRef LabelSingular, StringRef LabelPlurial) {
+ // To save some vertical space we print up to MaxChildrenPerLine APValues
+ // considered to be simple (by isSimpleAPValue) on a single line.
+ constexpr unsigned MaxChildrenPerLine = 4;
+ unsigned I = 0;
+ while (I < NumChildren) {
+ unsigned J = I;
+ while (J < NumChildren) {
+ if (isSimpleAPValue(IdxToChildFun(Value, J)) &&
+ (J - I < MaxChildrenPerLine)) {
+ ++J;
+ continue;
+ }
+ break;
+ }
+
+ J = std::max(I + 1, J);
+
+ // Print [I,J) on a single line.
+ AddChild(J - I > 1 ? LabelPlurial : LabelSingular, [=]() {
+ for (unsigned X = I; X < J; ++X) {
+ Visit(IdxToChildFun(Value, X), Ty);
+ if (X + 1 != J)
+ OS << ", ";
+ }
+ });
+ I = J;
+ }
+}
+
+void TextNodeDumper::Visit(const APValue &Value, QualType Ty) {
+ ColorScope Color(OS, ShowColors, ValueKindColor);
+ switch (Value.getKind()) {
+ case APValue::None:
+ OS << "None";
+ return;
+ case APValue::Indeterminate:
+ OS << "Indeterminate";
+ return;
+ case APValue::Int:
+ OS << "Int ";
+ {
+ ColorScope Color(OS, ShowColors, ValueColor);
+ OS << Value.getInt();
+ }
+ return;
+ case APValue::Float:
+ OS << "Float ";
+ {
+ ColorScope Color(OS, ShowColors, ValueColor);
+ OS << GetApproxValue(Value.getFloat());
+ }
+ return;
+ case APValue::FixedPoint:
+ OS << "FixedPoint ";
+ {
+ ColorScope Color(OS, ShowColors, ValueColor);
+ OS << Value.getFixedPoint();
+ }
+ return;
+ case APValue::Vector: {
+ unsigned VectorLength = Value.getVectorLength();
+ OS << "Vector length=" << VectorLength;
+
+ dumpAPValueChildren(
+ Value, Ty,
+ [](const APValue &Value, unsigned Index) -> const APValue & {
+ return Value.getVectorElt(Index);
+ },
+ VectorLength, "element", "elements");
+ return;
+ }
+ case APValue::ComplexInt:
+ OS << "ComplexInt ";
+ {
+ ColorScope Color(OS, ShowColors, ValueColor);
+ OS << Value.getComplexIntReal() << " + " << Value.getComplexIntImag()
+ << 'i';
+ }
+ return;
+ case APValue::ComplexFloat:
+ OS << "ComplexFloat ";
+ {
+ ColorScope Color(OS, ShowColors, ValueColor);
+ OS << GetApproxValue(Value.getComplexFloatReal()) << " + "
+ << GetApproxValue(Value.getComplexFloatImag()) << 'i';
+ }
+ return;
+ case APValue::LValue:
+ (void)Context;
+ OS << "LValue <todo>";
+ return;
+ case APValue::Array: {
+ unsigned ArraySize = Value.getArraySize();
+ unsigned NumInitializedElements = Value.getArrayInitializedElts();
+ OS << "Array size=" << ArraySize;
+
+ dumpAPValueChildren(
+ Value, Ty,
+ [](const APValue &Value, unsigned Index) -> const APValue & {
+ return Value.getArrayInitializedElt(Index);
+ },
+ NumInitializedElements, "element", "elements");
+
+ if (Value.hasArrayFiller()) {
+ AddChild("filler", [=] {
+ {
+ ColorScope Color(OS, ShowColors, ValueColor);
+ OS << ArraySize - NumInitializedElements << " x ";
+ }
+ Visit(Value.getArrayFiller(), Ty);
+ });
+ }
+
+ return;
+ }
+ case APValue::Struct: {
+ OS << "Struct";
+
+ dumpAPValueChildren(
+ Value, Ty,
+ [](const APValue &Value, unsigned Index) -> const APValue & {
+ return Value.getStructBase(Index);
+ },
+ Value.getStructNumBases(), "base", "bases");
+
+ dumpAPValueChildren(
+ Value, Ty,
+ [](const APValue &Value, unsigned Index) -> const APValue & {
+ return Value.getStructField(Index);
+ },
+ Value.getStructNumFields(), "field", "fields");
+
+ return;
+ }
+ case APValue::Union: {
+ OS << "Union";
+ {
+ ColorScope Color(OS, ShowColors, ValueColor);
+ if (const FieldDecl *FD = Value.getUnionField())
+ OS << " ." << *cast<NamedDecl>(FD);
+ }
+ // If the union value is considered to be simple, fold it into the
+ // current line to save some vertical space.
+ const APValue &UnionValue = Value.getUnionValue();
+ if (isSimpleAPValue(UnionValue)) {
+ OS << ' ';
+ Visit(UnionValue, Ty);
+ } else {
+ AddChild([=] { Visit(UnionValue, Ty); });
+ }
+
+ return;
+ }
+ case APValue::MemberPointer:
+ OS << "MemberPointer <todo>";
+ return;
+ case APValue::AddrLabelDiff:
+ OS << "AddrLabelDiff <todo>";
+ return;
+ }
+ llvm_unreachable("Unknown APValue kind!");
+}
+
void TextNodeDumper::dumpPointer(const void *Ptr) {
ColorScope Color(OS, ShowColors, AddressColor);
OS << ' ' << Ptr;
@@ -432,19 +666,27 @@ void TextNodeDumper::dumpName(const NamedDecl *ND) {
}
void TextNodeDumper::dumpAccessSpecifier(AccessSpecifier AS) {
- switch (AS) {
- case AS_none:
- break;
- case AS_public:
- OS << "public";
- break;
- case AS_protected:
- OS << "protected";
- break;
- case AS_private:
- OS << "private";
- break;
- }
+ const auto AccessSpelling = getAccessSpelling(AS);
+ if (AccessSpelling.empty())
+ return;
+ OS << AccessSpelling;
+}
+
+void TextNodeDumper::dumpCleanupObject(
+ const ExprWithCleanups::CleanupObject &C) {
+ if (auto *BD = C.dyn_cast<BlockDecl *>())
+ dumpDeclRef(BD, "cleanup");
+ else if (auto *CLE = C.dyn_cast<CompoundLiteralExpr *>())
+ AddChild([=] {
+ OS << "cleanup ";
+ {
+ ColorScope Color(OS, ShowColors, StmtColor);
+ OS << CLE->getStmtClassName();
+ }
+ dumpPointer(CLE);
+ });
+ else
+ llvm_unreachable("unexpected cleanup type");
}
void TextNodeDumper::dumpDeclRef(const Decl *D, StringRef Label) {
@@ -687,11 +929,9 @@ void TextNodeDumper::VisitCaseStmt(const CaseStmt *Node) {
}
void TextNodeDumper::VisitConstantExpr(const ConstantExpr *Node) {
- if (Node->getResultAPValueKind() != APValue::None) {
- ColorScope Color(OS, ShowColors, ValueColor);
- OS << " ";
- Node->getAPValueResult().dump(OS);
- }
+ if (Node->hasAPValueResult())
+ AddChild("value",
+ [=] { Visit(Node->getAPValueResult(), Node->getType()); });
}
void TextNodeDumper::VisitCallExpr(const CallExpr *Node) {
@@ -699,6 +939,14 @@ void TextNodeDumper::VisitCallExpr(const CallExpr *Node) {
OS << " adl";
}
+void TextNodeDumper::VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *Node) {
+ const char *OperatorSpelling = clang::getOperatorSpelling(Node->getOperator());
+ if (OperatorSpelling)
+ OS << " '" << OperatorSpelling << "'";
+
+ VisitCallExpr(Node);
+}
+
void TextNodeDumper::VisitCastExpr(const CastExpr *Node) {
OS << " <";
{
@@ -809,23 +1057,8 @@ void TextNodeDumper::VisitUnaryOperator(const UnaryOperator *Node) {
void TextNodeDumper::VisitUnaryExprOrTypeTraitExpr(
const UnaryExprOrTypeTraitExpr *Node) {
- switch (Node->getKind()) {
- case UETT_SizeOf:
- OS << " sizeof";
- break;
- case UETT_AlignOf:
- OS << " alignof";
- break;
- case UETT_VecStep:
- OS << " vec_step";
- break;
- case UETT_OpenMPRequiredSimdAlign:
- OS << " __builtin_omp_required_simd_align";
- break;
- case UETT_PreferredAlignOf:
- OS << " __alignof";
- break;
- }
+ OS << " " << getTraitSpelling(Node->getKind());
+
if (Node->isArgumentType())
dumpType(Node->getArgumentType());
}
@@ -939,6 +1172,18 @@ void TextNodeDumper::VisitCXXDeleteExpr(const CXXDeleteExpr *Node) {
}
}
+void TextNodeDumper::VisitTypeTraitExpr(const TypeTraitExpr *Node) {
+ OS << " " << getTraitSpelling(Node->getTrait());
+}
+
+void TextNodeDumper::VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *Node) {
+ OS << " " << getTraitSpelling(Node->getTrait());
+}
+
+void TextNodeDumper::VisitExpressionTraitExpr(const ExpressionTraitExpr *Node) {
+ OS << " " << getTraitSpelling(Node->getTrait());
+}
+
void TextNodeDumper::VisitMaterializeTemporaryExpr(
const MaterializeTemporaryExpr *Node) {
if (const ValueDecl *VD = Node->getExtendingDecl()) {
@@ -949,7 +1194,7 @@ void TextNodeDumper::VisitMaterializeTemporaryExpr(
void TextNodeDumper::VisitExprWithCleanups(const ExprWithCleanups *Node) {
for (unsigned i = 0, e = Node->getNumObjects(); i != e; ++i)
- dumpDeclRef(Node->getObject(i), "cleanup");
+ dumpCleanupObject(Node->getObject(i));
}
void TextNodeDumper::VisitSizeOfPackExpr(const SizeOfPackExpr *Node) {
@@ -1065,6 +1310,23 @@ void TextNodeDumper::VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *Node) {
OS << " " << (Node->getValue() ? "__objc_yes" : "__objc_no");
}
+void TextNodeDumper::VisitOMPIteratorExpr(const OMPIteratorExpr *Node) {
+ OS << " ";
+ for (unsigned I = 0, E = Node->numOfIterators(); I < E; ++I) {
+ Visit(Node->getIteratorDecl(I));
+ OS << " = ";
+ const OMPIteratorExpr::IteratorRange Range = Node->getIteratorRange(I);
+ OS << " begin ";
+ Visit(Range.Begin);
+ OS << " end ";
+ Visit(Range.End);
+ if (Range.Step) {
+ OS << " step ";
+ Visit(Range.Step);
+ }
+ }
+}
+
void TextNodeDumper::VisitRValueReferenceType(const ReferenceType *T) {
if (T->isSpelledAsLValue())
OS << " written as lvalue reference";
@@ -1201,6 +1463,11 @@ void TextNodeDumper::VisitAutoType(const AutoType *T) {
OS << " decltype(auto)";
if (!T->isDeduced())
OS << " undeduced";
+ if (T->isConstrained()) {
+ dumpDeclRef(T->getTypeConstraintConcept());
+ for (const auto &Arg : T->getTypeConstraintArguments())
+ VisitTemplateArgument(Arg);
+ }
}
void TextNodeDumper::VisitTemplateSpecializationType(
@@ -1402,6 +1669,16 @@ void TextNodeDumper::VisitVarDecl(const VarDecl *D) {
OS << " destroyed";
if (D->isParameterPack())
OS << " pack";
+
+ if (D->hasInit()) {
+ const Expr *E = D->getInit();
+ // Only dump the value of constexpr VarDecls for now.
+ if (E && !E->isValueDependent() && D->isConstexpr()) {
+ const APValue *Value = D->evaluateValue();
+ if (Value)
+ AddChild("value", [=] { Visit(*Value, E->getType()); });
+ }
+ }
}
void TextNodeDumper::VisitBindingDecl(const BindingDecl *D) {
@@ -1491,7 +1768,8 @@ void TextNodeDumper::VisitOMPRequiresDecl(const OMPRequiresDecl *D) {
}
{
ColorScope Color(OS, ShowColors, AttrColor);
- StringRef ClauseName(getOpenMPClauseName(C->getClauseKind()));
+ StringRef ClauseName(
+ llvm::omp::getOpenMPClauseName(C->getClauseKind()));
OS << "OMP" << ClauseName.substr(/*Start=*/0, /*N=*/1).upper()
<< ClauseName.drop_front() << "Clause";
}
@@ -1624,6 +1902,7 @@ void TextNodeDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) {
ColorScope Color(OS, ShowColors, DeclKindNameColor);
OS << "CopyAssignment";
}
+ FLAG(hasSimpleCopyAssignment, simple);
FLAG(hasTrivialCopyAssignment, trivial);
FLAG(hasNonTrivialCopyAssignment, non_trivial);
FLAG(hasCopyAssignmentWithConstParam, has_const_param);
@@ -1914,35 +2193,35 @@ void TextNodeDumper::VisitObjCPropertyDecl(const ObjCPropertyDecl *D) {
else if (D->getPropertyImplementation() == ObjCPropertyDecl::Optional)
OS << " optional";
- ObjCPropertyDecl::PropertyAttributeKind Attrs = D->getPropertyAttributes();
- if (Attrs != ObjCPropertyDecl::OBJC_PR_noattr) {
- if (Attrs & ObjCPropertyDecl::OBJC_PR_readonly)
+ ObjCPropertyAttribute::Kind Attrs = D->getPropertyAttributes();
+ if (Attrs != ObjCPropertyAttribute::kind_noattr) {
+ if (Attrs & ObjCPropertyAttribute::kind_readonly)
OS << " readonly";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_assign)
+ if (Attrs & ObjCPropertyAttribute::kind_assign)
OS << " assign";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_readwrite)
+ if (Attrs & ObjCPropertyAttribute::kind_readwrite)
OS << " readwrite";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_retain)
+ if (Attrs & ObjCPropertyAttribute::kind_retain)
OS << " retain";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_copy)
+ if (Attrs & ObjCPropertyAttribute::kind_copy)
OS << " copy";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ if (Attrs & ObjCPropertyAttribute::kind_nonatomic)
OS << " nonatomic";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_atomic)
+ if (Attrs & ObjCPropertyAttribute::kind_atomic)
OS << " atomic";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_weak)
+ if (Attrs & ObjCPropertyAttribute::kind_weak)
OS << " weak";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_strong)
+ if (Attrs & ObjCPropertyAttribute::kind_strong)
OS << " strong";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_unsafe_unretained)
+ if (Attrs & ObjCPropertyAttribute::kind_unsafe_unretained)
OS << " unsafe_unretained";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_class)
+ if (Attrs & ObjCPropertyAttribute::kind_class)
OS << " class";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_direct)
+ if (Attrs & ObjCPropertyAttribute::kind_direct)
OS << " direct";
- if (Attrs & ObjCPropertyDecl::OBJC_PR_getter)
+ if (Attrs & ObjCPropertyAttribute::kind_getter)
dumpDeclRef(D->getGetterMethodDecl(), "getter");
- if (Attrs & ObjCPropertyDecl::OBJC_PR_setter)
+ if (Attrs & ObjCPropertyAttribute::kind_setter)
dumpDeclRef(D->getSetterMethodDecl(), "setter");
}
}
diff --git a/clang/lib/AST/Type.cpp b/clang/lib/AST/Type.cpp
index c5ad711d872e..10a6a2610130 100644
--- a/clang/lib/AST/Type.cpp
+++ b/clang/lib/AST/Type.cpp
@@ -20,6 +20,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
@@ -123,14 +124,15 @@ ArrayType::ArrayType(TypeClass tc, QualType et, QualType can,
//
// template<int ...N> int arr[] = {N...};
: Type(tc, can,
- et->isDependentType() || (sz && sz->isValueDependent()) ||
- tc == DependentSizedArray,
- et->isInstantiationDependentType() ||
- (sz && sz->isInstantiationDependent()) ||
- tc == DependentSizedArray,
- (tc == VariableArray || et->isVariablyModifiedType()),
- et->containsUnexpandedParameterPack() ||
- (sz && sz->containsUnexpandedParameterPack())),
+ et->getDependence() |
+ (sz ? toTypeDependence(
+ turnValueToTypeDependence(sz->getDependence()))
+ : TypeDependence::None) |
+ (tc == VariableArray ? TypeDependence::VariablyModified
+ : TypeDependence::None) |
+ (tc == DependentSizedArray
+ ? TypeDependence::DependentInstantiation
+ : TypeDependence::None)),
ElementType(et) {
ArrayTypeBits.IndexTypeQuals = tq;
ArrayTypeBits.SizeModifier = sm;
@@ -217,14 +219,16 @@ void DependentSizedArrayType::Profile(llvm::FoldingSetNodeID &ID,
E->Profile(ID, Context, true);
}
-DependentVectorType::DependentVectorType(
- const ASTContext &Context, QualType ElementType, QualType CanonType,
- Expr *SizeExpr, SourceLocation Loc, VectorType::VectorKind VecKind)
- : Type(DependentVector, CanonType, /*Dependent=*/true,
- /*InstantiationDependent=*/true,
- ElementType->isVariablyModifiedType(),
- ElementType->containsUnexpandedParameterPack() ||
- (SizeExpr && SizeExpr->containsUnexpandedParameterPack())),
+DependentVectorType::DependentVectorType(const ASTContext &Context,
+ QualType ElementType,
+ QualType CanonType, Expr *SizeExpr,
+ SourceLocation Loc,
+ VectorType::VectorKind VecKind)
+ : Type(DependentVector, CanonType,
+ TypeDependence::DependentInstantiation |
+ ElementType->getDependence() |
+ (SizeExpr ? toTypeDependence(SizeExpr->getDependence())
+ : TypeDependence::None)),
Context(Context), ElementType(ElementType), SizeExpr(SizeExpr), Loc(Loc) {
VectorTypeBits.VecKind = VecKind;
}
@@ -238,19 +242,16 @@ void DependentVectorType::Profile(llvm::FoldingSetNodeID &ID,
SizeExpr->Profile(ID, Context, true);
}
-DependentSizedExtVectorType::DependentSizedExtVectorType(const
- ASTContext &Context,
- QualType ElementType,
- QualType can,
- Expr *SizeExpr,
- SourceLocation loc)
- : Type(DependentSizedExtVector, can, /*Dependent=*/true,
- /*InstantiationDependent=*/true,
- ElementType->isVariablyModifiedType(),
- (ElementType->containsUnexpandedParameterPack() ||
- (SizeExpr && SizeExpr->containsUnexpandedParameterPack()))),
- Context(Context), SizeExpr(SizeExpr), ElementType(ElementType),
- loc(loc) {}
+DependentSizedExtVectorType::DependentSizedExtVectorType(
+ const ASTContext &Context, QualType ElementType, QualType can,
+ Expr *SizeExpr, SourceLocation loc)
+ : Type(DependentSizedExtVector, can,
+ TypeDependence::DependentInstantiation |
+ ElementType->getDependence() |
+ (SizeExpr ? toTypeDependence(SizeExpr->getDependence())
+ : TypeDependence::None)),
+ Context(Context), SizeExpr(SizeExpr), ElementType(ElementType), loc(loc) {
+}
void
DependentSizedExtVectorType::Profile(llvm::FoldingSetNodeID &ID,
@@ -260,15 +261,16 @@ DependentSizedExtVectorType::Profile(llvm::FoldingSetNodeID &ID,
SizeExpr->Profile(ID, Context, true);
}
-DependentAddressSpaceType::DependentAddressSpaceType(
- const ASTContext &Context, QualType PointeeType, QualType can,
- Expr *AddrSpaceExpr, SourceLocation loc)
- : Type(DependentAddressSpace, can, /*Dependent=*/true,
- /*InstantiationDependent=*/true,
- PointeeType->isVariablyModifiedType(),
- (PointeeType->containsUnexpandedParameterPack() ||
- (AddrSpaceExpr &&
- AddrSpaceExpr->containsUnexpandedParameterPack()))),
+DependentAddressSpaceType::DependentAddressSpaceType(const ASTContext &Context,
+ QualType PointeeType,
+ QualType can,
+ Expr *AddrSpaceExpr,
+ SourceLocation loc)
+ : Type(DependentAddressSpace, can,
+ TypeDependence::DependentInstantiation |
+ PointeeType->getDependence() |
+ (AddrSpaceExpr ? toTypeDependence(AddrSpaceExpr->getDependence())
+ : TypeDependence::None)),
Context(Context), AddrSpaceExpr(AddrSpaceExpr), PointeeType(PointeeType),
loc(loc) {}
@@ -280,21 +282,89 @@ void DependentAddressSpaceType::Profile(llvm::FoldingSetNodeID &ID,
AddrSpaceExpr->Profile(ID, Context, true);
}
+MatrixType::MatrixType(TypeClass tc, QualType matrixType, QualType canonType,
+ const Expr *RowExpr, const Expr *ColumnExpr)
+ : Type(tc, canonType,
+ (RowExpr ? (matrixType->getDependence() | TypeDependence::Dependent |
+ TypeDependence::Instantiation |
+ (matrixType->isVariablyModifiedType()
+ ? TypeDependence::VariablyModified
+ : TypeDependence::None) |
+ (matrixType->containsUnexpandedParameterPack() ||
+ (RowExpr &&
+ RowExpr->containsUnexpandedParameterPack()) ||
+ (ColumnExpr &&
+ ColumnExpr->containsUnexpandedParameterPack())
+ ? TypeDependence::UnexpandedPack
+ : TypeDependence::None))
+ : matrixType->getDependence())),
+ ElementType(matrixType) {}
+
+ConstantMatrixType::ConstantMatrixType(QualType matrixType, unsigned nRows,
+ unsigned nColumns, QualType canonType)
+ : ConstantMatrixType(ConstantMatrix, matrixType, nRows, nColumns,
+ canonType) {}
+
+ConstantMatrixType::ConstantMatrixType(TypeClass tc, QualType matrixType,
+ unsigned nRows, unsigned nColumns,
+ QualType canonType)
+ : MatrixType(tc, matrixType, canonType) {
+ ConstantMatrixTypeBits.NumRows = nRows;
+ ConstantMatrixTypeBits.NumColumns = nColumns;
+}
+
+DependentSizedMatrixType::DependentSizedMatrixType(
+ const ASTContext &CTX, QualType ElementType, QualType CanonicalType,
+ Expr *RowExpr, Expr *ColumnExpr, SourceLocation loc)
+ : MatrixType(DependentSizedMatrix, ElementType, CanonicalType, RowExpr,
+ ColumnExpr),
+ Context(CTX), RowExpr(RowExpr), ColumnExpr(ColumnExpr), loc(loc) {}
+
+void DependentSizedMatrixType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &CTX,
+ QualType ElementType, Expr *RowExpr,
+ Expr *ColumnExpr) {
+ ID.AddPointer(ElementType.getAsOpaquePtr());
+ RowExpr->Profile(ID, CTX, true);
+ ColumnExpr->Profile(ID, CTX, true);
+}
+
VectorType::VectorType(QualType vecType, unsigned nElements, QualType canonType,
VectorKind vecKind)
: VectorType(Vector, vecType, nElements, canonType, vecKind) {}
VectorType::VectorType(TypeClass tc, QualType vecType, unsigned nElements,
QualType canonType, VectorKind vecKind)
- : Type(tc, canonType, vecType->isDependentType(),
- vecType->isInstantiationDependentType(),
- vecType->isVariablyModifiedType(),
- vecType->containsUnexpandedParameterPack()),
- ElementType(vecType) {
+ : Type(tc, canonType, vecType->getDependence()), ElementType(vecType) {
VectorTypeBits.VecKind = vecKind;
VectorTypeBits.NumElements = nElements;
}
+ExtIntType::ExtIntType(bool IsUnsigned, unsigned NumBits)
+ : Type(ExtInt, QualType{}, TypeDependence::None), IsUnsigned(IsUnsigned),
+ NumBits(NumBits) {}
+
+DependentExtIntType::DependentExtIntType(const ASTContext &Context,
+ bool IsUnsigned, Expr *NumBitsExpr)
+ : Type(DependentExtInt, QualType{},
+ toTypeDependence(NumBitsExpr->getDependence())),
+ Context(Context), ExprAndUnsigned(NumBitsExpr, IsUnsigned) {}
+
+bool DependentExtIntType::isUnsigned() const {
+ return ExprAndUnsigned.getInt();
+}
+
+clang::Expr *DependentExtIntType::getNumBitsExpr() const {
+ return ExprAndUnsigned.getPointer();
+}
+
+void DependentExtIntType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Context, bool IsUnsigned,
+ Expr *NumBitsExpr) {
+ ID.AddBoolean(IsUnsigned);
+ NumBitsExpr->Profile(ID, Context, true);
+}
+
/// getArrayElementTypeNoTypeQual - If this is an array type, return the
/// element type of the array, potentially with type qualifiers missing.
/// This method should never be used when type qualifiers are meaningful.
@@ -652,14 +722,11 @@ bool Type::isObjCClassOrClassKindOfType() const {
return OPT->isObjCClassType() || OPT->isObjCQualifiedClassType();
}
-ObjCTypeParamType::ObjCTypeParamType(const ObjCTypeParamDecl *D,
- QualType can,
+ObjCTypeParamType::ObjCTypeParamType(const ObjCTypeParamDecl *D, QualType can,
ArrayRef<ObjCProtocolDecl *> protocols)
- : Type(ObjCTypeParam, can, can->isDependentType(),
- can->isInstantiationDependentType(),
- can->isVariablyModifiedType(),
- /*ContainsUnexpandedParameterPack=*/false),
- OTPDecl(const_cast<ObjCTypeParamDecl*>(D)) {
+ : Type(ObjCTypeParam, can,
+ can->getDependence() & ~TypeDependence::UnexpandedPack),
+ OTPDecl(const_cast<ObjCTypeParamDecl *>(D)) {
initialize(protocols);
}
@@ -667,11 +734,7 @@ ObjCObjectType::ObjCObjectType(QualType Canonical, QualType Base,
ArrayRef<QualType> typeArgs,
ArrayRef<ObjCProtocolDecl *> protocols,
bool isKindOf)
- : Type(ObjCObject, Canonical, Base->isDependentType(),
- Base->isInstantiationDependentType(),
- Base->isVariablyModifiedType(),
- Base->containsUnexpandedParameterPack()),
- BaseType(Base) {
+ : Type(ObjCObject, Canonical, Base->getDependence()), BaseType(Base) {
ObjCObjectTypeBits.IsKindOf = isKindOf;
ObjCObjectTypeBits.NumTypeArgs = typeArgs.size();
@@ -682,13 +745,7 @@ ObjCObjectType::ObjCObjectType(QualType Canonical, QualType Base,
typeArgs.size() * sizeof(QualType));
for (auto typeArg : typeArgs) {
- if (typeArg->isDependentType())
- setDependent();
- else if (typeArg->isInstantiationDependentType())
- setInstantiationDependent();
-
- if (typeArg->containsUnexpandedParameterPack())
- setContainsUnexpandedParameterPack();
+ addDependence(typeArg->getDependence() & ~TypeDependence::VariablyModified);
}
// Initialize the protocol qualifiers. The protocol storage is known
// after we set number of type arguments.
@@ -953,6 +1010,17 @@ public:
return Ctx.getExtVectorType(elementType, T->getNumElements());
}
+ QualType VisitConstantMatrixType(const ConstantMatrixType *T) {
+ QualType elementType = recurse(T->getElementType());
+ if (elementType.isNull())
+ return {};
+ if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr())
+ return QualType(T, 0);
+
+ return Ctx.getConstantMatrixType(elementType, T->getNumRows(),
+ T->getNumColumns());
+ }
+
QualType VisitFunctionNoProtoType(const FunctionNoProtoType *T) {
QualType returnType = recurse(T->getReturnType());
if (returnType.isNull())
@@ -1114,7 +1182,9 @@ public:
return QualType(T, 0);
return Ctx.getAutoType(deducedType, T->getKeyword(),
- T->isDependentType());
+ T->isDependentType(), /*IsPack=*/false,
+ T->getTypeConstraintConcept(),
+ T->getTypeConstraintArguments());
}
// FIXME: Non-trivial to implement, but important for C++
@@ -1770,6 +1840,14 @@ namespace {
return Visit(T->getElementType());
}
+ Type *VisitDependentSizedMatrixType(const DependentSizedMatrixType *T) {
+ return Visit(T->getElementType());
+ }
+
+ Type *VisitConstantMatrixType(const ConstantMatrixType *T) {
+ return Visit(T->getElementType());
+ }
+
Type *VisitFunctionProtoType(const FunctionProtoType *T) {
if (Syntactic && T->hasTrailingReturn())
return const_cast<FunctionProtoType*>(T);
@@ -1849,13 +1927,17 @@ bool Type::isIntegralType(const ASTContext &Ctx) const {
if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
return ET->getDecl()->isComplete();
- return false;
+ return isExtIntType();
}
bool Type::isIntegralOrUnscopedEnumerationType() const {
if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() >= BuiltinType::Bool &&
BT->getKind() <= BuiltinType::Int128;
+
+ if (isExtIntType())
+ return true;
+
return isUnscopedEnumerationType();
}
@@ -1936,6 +2018,9 @@ bool Type::isSignedIntegerType() const {
return ET->getDecl()->getIntegerType()->isSignedIntegerType();
}
+ if (const ExtIntType *IT = dyn_cast<ExtIntType>(CanonicalType))
+ return IT->isSigned();
+
return false;
}
@@ -1950,6 +2035,10 @@ bool Type::isSignedIntegerOrEnumerationType() const {
return ET->getDecl()->getIntegerType()->isSignedIntegerType();
}
+ if (const ExtIntType *IT = dyn_cast<ExtIntType>(CanonicalType))
+ return IT->isSigned();
+
+
return false;
}
@@ -1976,6 +2065,9 @@ bool Type::isUnsignedIntegerType() const {
return ET->getDecl()->getIntegerType()->isUnsignedIntegerType();
}
+ if (const ExtIntType *IT = dyn_cast<ExtIntType>(CanonicalType))
+ return IT->isUnsigned();
+
return false;
}
@@ -1990,6 +2082,9 @@ bool Type::isUnsignedIntegerOrEnumerationType() const {
return ET->getDecl()->getIntegerType()->isUnsignedIntegerType();
}
+ if (const ExtIntType *IT = dyn_cast<ExtIntType>(CanonicalType))
+ return IT->isUnsigned();
+
return false;
}
@@ -2028,13 +2123,14 @@ bool Type::isRealType() const {
BT->getKind() <= BuiltinType::Float128;
if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped();
- return false;
+ return isExtIntType();
}
bool Type::isArithmeticType() const {
if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() >= BuiltinType::Bool &&
- BT->getKind() <= BuiltinType::Float128;
+ BT->getKind() <= BuiltinType::Float128 &&
+ BT->getKind() != BuiltinType::BFloat16;
if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
// GCC allows forward declaration of enum types (forbid by C99 6.7.2.3p2).
// If a body isn't seen by the time we get here, return false.
@@ -2043,7 +2139,7 @@ bool Type::isArithmeticType() const {
// false for scoped enumerations since that will disable any
// unwanted implicit conversions.
return !ET->getDecl()->isScoped() && ET->getDecl()->isComplete();
- return isa<ComplexType>(CanonicalType);
+ return isa<ComplexType>(CanonicalType) || isExtIntType();
}
Type::ScalarTypeKind Type::getScalarTypeKind() const {
@@ -2072,6 +2168,8 @@ Type::ScalarTypeKind Type::getScalarTypeKind() const {
if (CT->getElementType()->isRealFloatingType())
return STK_FloatingComplex;
return STK_IntegralComplex;
+ } else if (isExtIntType()) {
+ return STK_Integral;
}
llvm_unreachable("unknown scalar type");
@@ -2180,6 +2278,22 @@ bool Type::isIncompleteType(NamedDecl **Def) const {
}
}
+bool Type::isSizelessBuiltinType() const {
+ if (const BuiltinType *BT = getAs<BuiltinType>()) {
+ switch (BT->getKind()) {
+ // SVE Types
+#define SVE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/AArch64SVEACLETypes.def"
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+}
+
+bool Type::isSizelessType() const { return isSizelessBuiltinType(); }
+
bool QualType::isPODType(const ASTContext &Context) const {
// C++11 has a more relaxed definition of POD.
if (Context.getLangOpts().CPlusPlus11)
@@ -2221,6 +2335,7 @@ bool QualType::isCXX98PODType(const ASTContext &Context) const {
case Type::MemberPointer:
case Type::Vector:
case Type::ExtVector:
+ case Type::ExtInt:
return true;
case Type::Enum:
@@ -2246,6 +2361,9 @@ bool QualType::isTrivialType(const ASTContext &Context) const {
if ((*this)->isArrayType())
return Context.getBaseElementType(*this).isTrivialType(Context);
+ if ((*this)->isSizelessBuiltinType())
+ return true;
+
// Return false for incomplete types after skipping any incomplete array
// types which are expressly allowed by the standard and thus our API.
if ((*this)->isIncompleteType())
@@ -2300,6 +2418,9 @@ bool QualType::isTriviallyCopyableType(const ASTContext &Context) const {
if (CanonicalType->isDependentType())
return false;
+ if (CanonicalType->isSizelessBuiltinType())
+ return true;
+
// Return false for incomplete types after skipping any incomplete array types
// which are expressly allowed by the standard and thus our API.
if (CanonicalType->isIncompleteType())
@@ -2493,6 +2614,9 @@ bool QualType::isCXX11PODType(const ASTContext &Context) const {
const Type *BaseTy = ty->getBaseElementTypeUnsafe();
assert(BaseTy && "NULL element type");
+ if (BaseTy->isSizelessBuiltinType())
+ return true;
+
// Return false for incomplete types after skipping any incomplete array
// types which are expressly allowed by the standard and thus our API.
if (BaseTy->isIncompleteType())
@@ -2697,21 +2821,20 @@ StringRef TypeWithKeyword::getKeywordName(ElaboratedTypeKeyword Keyword) {
}
DependentTemplateSpecializationType::DependentTemplateSpecializationType(
- ElaboratedTypeKeyword Keyword,
- NestedNameSpecifier *NNS, const IdentifierInfo *Name,
- ArrayRef<TemplateArgument> Args,
- QualType Canon)
- : TypeWithKeyword(Keyword, DependentTemplateSpecialization, Canon, true, true,
- /*VariablyModified=*/false,
- NNS && NNS->containsUnexpandedParameterPack()),
- NNS(NNS), Name(Name) {
+ ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name, ArrayRef<TemplateArgument> Args, QualType Canon)
+ : TypeWithKeyword(Keyword, DependentTemplateSpecialization, Canon,
+ TypeDependence::DependentInstantiation |
+ (NNS ? toTypeDependence(NNS->getDependence())
+ : TypeDependence::None)),
+ NNS(NNS), Name(Name) {
DependentTemplateSpecializationTypeBits.NumArgs = Args.size();
assert((!NNS || NNS->isDependent()) &&
"DependentTemplateSpecializatonType requires dependent qualifier");
TemplateArgument *ArgBuffer = getArgBuffer();
for (const TemplateArgument &Arg : Args) {
- if (Arg.containsUnexpandedParameterPack())
- setContainsUnexpandedParameterPack();
+ addDependence(toTypeDependence(Arg.getDependence() &
+ TemplateArgumentDependence::UnexpandedPack));
new (ArgBuffer++) TemplateArgument(Arg);
}
@@ -2792,6 +2915,8 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
return "unsigned __int128";
case Half:
return Policy.Half ? "half" : "__fp16";
+ case BFloat16:
+ return "__bf16";
case Float:
return "float";
case Double:
@@ -2895,8 +3020,14 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
return "queue_t";
case OCLReserveID:
return "reserve_id_t";
+ case IncompleteMatrixIdx:
+ return "<incomplete matrix index type>";
case OMPArraySection:
return "<OpenMP array section type>";
+ case OMPArrayShaping:
+ return "<OpenMP array shaping type>";
+ case OMPIterator:
+ return "<OpenMP iterator type>";
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
case Id: \
return #ExtType;
@@ -2910,6 +3041,13 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
llvm_unreachable("Invalid builtin type.");
}
+QualType QualType::getNonPackExpansionType() const {
+ // We never wrap type sugar around a PackExpansionType.
+ if (auto *PET = dyn_cast<PackExpansionType>(getTypePtr()))
+ return PET->getPattern();
+ return *this;
+}
+
QualType QualType::getNonLValueExprType(const ASTContext &Context) const {
if (const auto *RefType = getTypePtr()->getAs<ReferenceType>())
return RefType->getPointeeType();
@@ -2954,10 +3092,8 @@ StringRef FunctionType::getNameForCallConv(CallingConv CC) {
FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
QualType canonical,
const ExtProtoInfo &epi)
- : FunctionType(FunctionProto, result, canonical, result->isDependentType(),
- result->isInstantiationDependentType(),
- result->isVariablyModifiedType(),
- result->containsUnexpandedParameterPack(), epi.ExtInfo) {
+ : FunctionType(FunctionProto, result, canonical, result->getDependence(),
+ epi.ExtInfo) {
FunctionTypeBits.FastTypeQuals = epi.TypeQuals.getFastQualifiers();
FunctionTypeBits.RefQualifier = epi.RefQualifier;
FunctionTypeBits.NumParams = params.size();
@@ -2976,14 +3112,8 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
// Fill in the trailing argument array.
auto *argSlot = getTrailingObjects<QualType>();
for (unsigned i = 0; i != getNumParams(); ++i) {
- if (params[i]->isDependentType())
- setDependent();
- else if (params[i]->isInstantiationDependentType())
- setInstantiationDependent();
-
- if (params[i]->containsUnexpandedParameterPack())
- setContainsUnexpandedParameterPack();
-
+ addDependence(params[i]->getDependence() &
+ ~TypeDependence::VariablyModified);
argSlot[i] = params[i];
}
@@ -2997,11 +3127,9 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
// Note that, before C++17, a dependent exception specification does
// *not* make a type dependent; it's not even part of the C++ type
// system.
- if (ExceptionType->isInstantiationDependentType())
- setInstantiationDependent();
-
- if (ExceptionType->containsUnexpandedParameterPack())
- setContainsUnexpandedParameterPack();
+ addDependence(
+ ExceptionType->getDependence() &
+ (TypeDependence::Instantiation | TypeDependence::UnexpandedPack));
exnSlot[I++] = ExceptionType;
}
@@ -3015,12 +3143,9 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
// Store the noexcept expression and context.
*getTrailingObjects<Expr *>() = epi.ExceptionSpec.NoexceptExpr;
- if (epi.ExceptionSpec.NoexceptExpr->isValueDependent() ||
- epi.ExceptionSpec.NoexceptExpr->isInstantiationDependent())
- setInstantiationDependent();
-
- if (epi.ExceptionSpec.NoexceptExpr->containsUnexpandedParameterPack())
- setContainsUnexpandedParameterPack();
+ addDependence(
+ toTypeDependence(epi.ExceptionSpec.NoexceptExpr->getDependence()) &
+ (TypeDependence::Instantiation | TypeDependence::UnexpandedPack));
}
// Fill in the FunctionDecl * in the exception specification if present.
else if (getExceptionSpecType() == EST_Uninstantiated) {
@@ -3044,11 +3169,11 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
if (getExceptionSpecType() == EST_Dynamic ||
getExceptionSpecType() == EST_DependentNoexcept) {
assert(hasDependentExceptionSpec() && "type should not be canonical");
- setDependent();
+ addDependence(TypeDependence::DependentInstantiation);
}
} else if (getCanonicalTypeInternal()->isDependentType()) {
// Ask our canonical type whether our exception specification was dependent.
- setDependent();
+ addDependence(TypeDependence::DependentInstantiation);
}
// Fill in the extra parameter info if present.
@@ -3211,10 +3336,10 @@ QualType MacroQualifiedType::getModifiedType() const {
}
TypeOfExprType::TypeOfExprType(Expr *E, QualType can)
- : Type(TypeOfExpr, can, E->isTypeDependent(),
- E->isInstantiationDependent(),
- E->getType()->isVariablyModifiedType(),
- E->containsUnexpandedParameterPack()),
+ : Type(TypeOfExpr, can,
+ toTypeDependence(E->getDependence()) |
+ (E->getType()->getDependence() &
+ TypeDependence::VariablyModified)),
TOExpr(E) {}
bool TypeOfExprType::isSugared() const {
@@ -3234,13 +3359,15 @@ void DependentTypeOfExprType::Profile(llvm::FoldingSetNodeID &ID,
}
DecltypeType::DecltypeType(Expr *E, QualType underlyingType, QualType can)
- // C++11 [temp.type]p2: "If an expression e involves a template parameter,
- // decltype(e) denotes a unique dependent type." Hence a decltype type is
- // type-dependent even if its expression is only instantiation-dependent.
- : Type(Decltype, can, E->isInstantiationDependent(),
- E->isInstantiationDependent(),
- E->getType()->isVariablyModifiedType(),
- E->containsUnexpandedParameterPack()),
+ // C++11 [temp.type]p2: "If an expression e involves a template parameter,
+ // decltype(e) denotes a unique dependent type." Hence a decltype type is
+ // type-dependent even if its expression is only instantiation-dependent.
+ : Type(Decltype, can,
+ toTypeDependence(E->getDependence()) |
+ (E->isInstantiationDependent() ? TypeDependence::Dependent
+ : TypeDependence::None) |
+ (E->getType()->getDependence() &
+ TypeDependence::VariablyModified)),
E(E), UnderlyingType(underlyingType) {}
bool DecltypeType::isSugared() const { return !E->isInstantiationDependent(); }
@@ -3261,13 +3388,9 @@ void DependentDecltypeType::Profile(llvm::FoldingSetNodeID &ID,
}
UnaryTransformType::UnaryTransformType(QualType BaseType,
- QualType UnderlyingType,
- UTTKind UKind,
+ QualType UnderlyingType, UTTKind UKind,
QualType CanonicalType)
- : Type(UnaryTransform, CanonicalType, BaseType->isDependentType(),
- BaseType->isInstantiationDependentType(),
- BaseType->isVariablyModifiedType(),
- BaseType->containsUnexpandedParameterPack()),
+ : Type(UnaryTransform, CanonicalType, BaseType->getDependence()),
BaseType(BaseType), UnderlyingType(UnderlyingType), UKind(UKind) {}
DependentUnaryTransformType::DependentUnaryTransformType(const ASTContext &C,
@@ -3276,11 +3399,10 @@ DependentUnaryTransformType::DependentUnaryTransformType(const ASTContext &C,
: UnaryTransformType(BaseType, C.DependentTy, UKind, QualType()) {}
TagType::TagType(TypeClass TC, const TagDecl *D, QualType can)
- : Type(TC, can, D->isDependentType(),
- /*InstantiationDependent=*/D->isDependentType(),
- /*VariablyModified=*/false,
- /*ContainsUnexpandedParameterPack=*/false),
- decl(const_cast<TagDecl*>(D)) {}
+ : Type(TC, can,
+ D->isDependentType() ? TypeDependence::DependentInstantiation
+ : TypeDependence::None),
+ decl(const_cast<TagDecl *>(D)) {}
static TagDecl *getInterestingTagDecl(TagDecl *decl) {
for (auto I : decl->redecls()) {
@@ -3389,11 +3511,12 @@ IdentifierInfo *TemplateTypeParmType::getIdentifier() const {
return isCanonicalUnqualified() ? nullptr : getDecl()->getIdentifier();
}
-SubstTemplateTypeParmPackType::
-SubstTemplateTypeParmPackType(const TemplateTypeParmType *Param,
- QualType Canon,
- const TemplateArgument &ArgPack)
- : Type(SubstTemplateTypeParmPack, Canon, true, true, false, true),
+SubstTemplateTypeParmPackType::SubstTemplateTypeParmPackType(
+ const TemplateTypeParmType *Param, QualType Canon,
+ const TemplateArgument &ArgPack)
+ : Type(SubstTemplateTypeParmPack, Canon,
+ TypeDependence::DependentInstantiation |
+ TypeDependence::UnexpandedPack),
Replaced(Param), Arguments(ArgPack.pack_begin()) {
SubstTemplateTypeParmPackTypeBits.NumArgs = ArgPack.pack_size();
}
@@ -3437,16 +3560,17 @@ anyDependentTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
return false;
}
-TemplateSpecializationType::
-TemplateSpecializationType(TemplateName T,
- ArrayRef<TemplateArgument> Args,
- QualType Canon, QualType AliasedType)
- : Type(TemplateSpecialization,
- Canon.isNull()? QualType(this, 0) : Canon,
- Canon.isNull()? true : Canon->isDependentType(),
- Canon.isNull()? true : Canon->isInstantiationDependentType(),
- false,
- T.containsUnexpandedParameterPack()), Template(T) {
+TemplateSpecializationType::TemplateSpecializationType(
+ TemplateName T, ArrayRef<TemplateArgument> Args, QualType Canon,
+ QualType AliasedType)
+ : Type(TemplateSpecialization, Canon.isNull() ? QualType(this, 0) : Canon,
+ (Canon.isNull()
+ ? TypeDependence::DependentInstantiation
+ : Canon->getDependence() & ~(TypeDependence::VariablyModified |
+ TypeDependence::UnexpandedPack)) |
+ (toTypeDependence(T.getDependence()) &
+ TypeDependence::UnexpandedPack)),
+ Template(T) {
TemplateSpecializationTypeBits.NumArgs = Args.size();
TemplateSpecializationTypeBits.TypeAlias = !AliasedType.isNull();
@@ -3459,7 +3583,7 @@ TemplateSpecializationType(TemplateName T,
auto *TemplateArgs = reinterpret_cast<TemplateArgument *>(this + 1);
for (const TemplateArgument &Arg : Args) {
- // Update instantiation-dependent and variably-modified bits.
+ // Update instantiation-dependent, variably-modified, and error bits.
// If the canonical type exists and is non-dependent, the template
// specialization type can be non-dependent even if one of the type
// arguments is. Given:
@@ -3467,13 +3591,11 @@ TemplateSpecializationType(TemplateName T,
// U<T> is always non-dependent, irrespective of the type T.
// However, U<Ts> contains an unexpanded parameter pack, even though
// its expansion (and thus its desugared type) doesn't.
- if (Arg.isInstantiationDependent())
- setInstantiationDependent();
- if (Arg.getKind() == TemplateArgument::Type &&
- Arg.getAsType()->isVariablyModifiedType())
- setVariablyModified();
- if (Arg.containsUnexpandedParameterPack())
- setContainsUnexpandedParameterPack();
+ addDependence(toTypeDependence(Arg.getDependence()) &
+ ~TypeDependence::Dependent);
+ if (Arg.getKind() == TemplateArgument::Type)
+ addDependence(Arg.getAsType()->getDependence() &
+ TypeDependence::VariablyModified);
new (TemplateArgs++) TemplateArgument(Arg);
}
@@ -3533,15 +3655,17 @@ void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID) {
void ObjCTypeParamType::Profile(llvm::FoldingSetNodeID &ID,
const ObjCTypeParamDecl *OTPDecl,
+ QualType CanonicalType,
ArrayRef<ObjCProtocolDecl *> protocols) {
ID.AddPointer(OTPDecl);
+ ID.AddPointer(CanonicalType.getAsOpaquePtr());
ID.AddInteger(protocols.size());
for (auto proto : protocols)
ID.AddPointer(proto);
}
void ObjCTypeParamType::Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getDecl(),
+ Profile(ID, getDecl(), getCanonicalTypeInternal(),
llvm::makeArrayRef(qual_begin(), getNumProtocols()));
}
@@ -3644,6 +3768,7 @@ static CachedProperties computeCachedProperties(const Type *T) {
// here in error recovery.
return CachedProperties(ExternalLinkage, false);
+ case Type::ExtInt:
case Type::Builtin:
// C++ [basic.link]p8:
// A type is said to have linkage if and only if:
@@ -3689,6 +3814,8 @@ static CachedProperties computeCachedProperties(const Type *T) {
case Type::Vector:
case Type::ExtVector:
return Cache::get(cast<VectorType>(T)->getElementType());
+ case Type::ConstantMatrix:
+ return Cache::get(cast<ConstantMatrixType>(T)->getElementType());
case Type::FunctionNoProto:
return Cache::get(cast<FunctionType>(T)->getReturnType());
case Type::FunctionProto: {
@@ -3741,6 +3868,7 @@ LinkageInfo LinkageComputer::computeTypeLinkageInfo(const Type *T) {
assert(T->isInstantiationDependentType());
return LinkageInfo::external();
+ case Type::ExtInt:
case Type::Builtin:
return LinkageInfo::external();
@@ -3774,6 +3902,9 @@ LinkageInfo LinkageComputer::computeTypeLinkageInfo(const Type *T) {
case Type::Vector:
case Type::ExtVector:
return computeTypeLinkageInfo(cast<VectorType>(T)->getElementType());
+ case Type::ConstantMatrix:
+ return computeTypeLinkageInfo(
+ cast<ConstantMatrixType>(T)->getElementType());
case Type::FunctionNoProto:
return computeTypeLinkageInfo(cast<FunctionType>(T)->getReturnType());
case Type::FunctionProto: {
@@ -3918,7 +4049,10 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
#include "clang/Basic/AArch64SVEACLETypes.def"
case BuiltinType::BuiltinFn:
case BuiltinType::NullPtr:
+ case BuiltinType::IncompleteMatrixIdx:
case BuiltinType::OMPArraySection:
+ case BuiltinType::OMPArrayShaping:
+ case BuiltinType::OMPIterator:
return false;
}
llvm_unreachable("unknown builtin type");
@@ -3935,6 +4069,8 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
case Type::DependentSizedExtVector:
case Type::Vector:
case Type::ExtVector:
+ case Type::ConstantMatrix:
+ case Type::DependentSizedMatrix:
case Type::DependentAddressSpace:
case Type::FunctionProto:
case Type::FunctionNoProto:
@@ -3947,6 +4083,8 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
case Type::ObjCInterface:
case Type::Atomic:
case Type::Pipe:
+ case Type::ExtInt:
+ case Type::DependentExtInt:
return false;
}
llvm_unreachable("bad type kind!");
@@ -4098,6 +4236,20 @@ bool Type::isCARCBridgableType() const {
return Pointee->isVoidType() || Pointee->isRecordType();
}
+/// Check if the specified type is the CUDA device builtin surface type.
+bool Type::isCUDADeviceBuiltinSurfaceType() const {
+ if (const auto *RT = getAs<RecordType>())
+ return RT->getDecl()->hasAttr<CUDADeviceBuiltinSurfaceTypeAttr>();
+ return false;
+}
+
+/// Check if the specified type is the CUDA device builtin texture type.
+bool Type::isCUDADeviceBuiltinTextureType() const {
+ if (const auto *RT = getAs<RecordType>())
+ return RT->getDecl()->hasAttr<CUDADeviceBuiltinTextureTypeAttr>();
+ return false;
+}
+
bool Type::hasSizedVLAType() const {
if (!isVariablyModifiedType()) return false;
@@ -4158,3 +4310,34 @@ void clang::FixedPointValueToString(SmallVectorImpl<char> &Str,
/*HasUnsignedPadding=*/false);
APFixedPoint(Val, FXSema).toString(Str);
}
+
+AutoType::AutoType(QualType DeducedAsType, AutoTypeKeyword Keyword,
+ TypeDependence ExtraDependence,
+ ConceptDecl *TypeConstraintConcept,
+ ArrayRef<TemplateArgument> TypeConstraintArgs)
+ : DeducedType(Auto, DeducedAsType, ExtraDependence) {
+ AutoTypeBits.Keyword = (unsigned)Keyword;
+ AutoTypeBits.NumArgs = TypeConstraintArgs.size();
+ this->TypeConstraintConcept = TypeConstraintConcept;
+ if (TypeConstraintConcept) {
+ TemplateArgument *ArgBuffer = getArgBuffer();
+ for (const TemplateArgument &Arg : TypeConstraintArgs) {
+ addDependence(toTypeDependence(
+ Arg.getDependence() & TemplateArgumentDependence::UnexpandedPack));
+
+ new (ArgBuffer++) TemplateArgument(Arg);
+ }
+ }
+}
+
+void AutoType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ QualType Deduced, AutoTypeKeyword Keyword,
+ bool IsDependent, ConceptDecl *CD,
+ ArrayRef<TemplateArgument> Arguments) {
+ ID.AddPointer(Deduced.getAsOpaquePtr());
+ ID.AddInteger((unsigned)Keyword);
+ ID.AddBoolean(IsDependent);
+ ID.AddPointer(CD);
+ for (const TemplateArgument &Arg : Arguments)
+ Arg.Profile(ID, Context);
+}
diff --git a/clang/lib/AST/TypeLoc.cpp b/clang/lib/AST/TypeLoc.cpp
index 6e67ca8e0af7..57c11ca5571d 100644
--- a/clang/lib/AST/TypeLoc.cpp
+++ b/clang/lib/AST/TypeLoc.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/TypeLoc.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
@@ -374,6 +375,7 @@ TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
case BuiltinType::SatUShortFract:
case BuiltinType::SatUFract:
case BuiltinType::SatULongFract:
+ case BuiltinType::BFloat16:
llvm_unreachable("Builtin type needs extra local data!");
// Fall through, if the impossible happens.
@@ -402,7 +404,10 @@ TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
case BuiltinType::BuiltinFn:
+ case BuiltinType::IncompleteMatrixIdx:
case BuiltinType::OMPArraySection:
+ case BuiltinType::OMPArrayShaping:
+ case BuiltinType::OMPIterator:
return TST_unspecified;
}
@@ -589,3 +594,97 @@ void TemplateSpecializationTypeLoc::initializeArgLocs(ASTContext &Context,
}
}
}
+
+DeclarationNameInfo AutoTypeLoc::getConceptNameInfo() const {
+ return DeclarationNameInfo(getNamedConcept()->getDeclName(),
+ getLocalData()->ConceptNameLoc);
+}
+
+void AutoTypeLoc::initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ setNestedNameSpecifierLoc(NestedNameSpecifierLoc());
+ setTemplateKWLoc(Loc);
+ setConceptNameLoc(Loc);
+ setFoundDecl(nullptr);
+ setRAngleLoc(Loc);
+ setLAngleLoc(Loc);
+ TemplateSpecializationTypeLoc::initializeArgLocs(Context, getNumArgs(),
+ getTypePtr()->getArgs(),
+ getArgInfos(), Loc);
+ setNameLoc(Loc);
+}
+
+
+namespace {
+
+ class GetContainedAutoTypeLocVisitor :
+ public TypeLocVisitor<GetContainedAutoTypeLocVisitor, TypeLoc> {
+ public:
+ using TypeLocVisitor<GetContainedAutoTypeLocVisitor, TypeLoc>::Visit;
+
+ TypeLoc VisitAutoTypeLoc(AutoTypeLoc TL) {
+ return TL;
+ }
+
+ // Only these types can contain the desired 'auto' type.
+
+ TypeLoc VisitElaboratedTypeLoc(ElaboratedTypeLoc T) {
+ return Visit(T.getNamedTypeLoc());
+ }
+
+ TypeLoc VisitQualifiedTypeLoc(QualifiedTypeLoc T) {
+ return Visit(T.getUnqualifiedLoc());
+ }
+
+ TypeLoc VisitPointerTypeLoc(PointerTypeLoc T) {
+ return Visit(T.getPointeeLoc());
+ }
+
+ TypeLoc VisitBlockPointerTypeLoc(BlockPointerTypeLoc T) {
+ return Visit(T.getPointeeLoc());
+ }
+
+ TypeLoc VisitReferenceTypeLoc(ReferenceTypeLoc T) {
+ return Visit(T.getPointeeLoc());
+ }
+
+ TypeLoc VisitMemberPointerTypeLoc(MemberPointerTypeLoc T) {
+ return Visit(T.getPointeeLoc());
+ }
+
+ TypeLoc VisitArrayTypeLoc(ArrayTypeLoc T) {
+ return Visit(T.getElementLoc());
+ }
+
+ TypeLoc VisitFunctionTypeLoc(FunctionTypeLoc T) {
+ return Visit(T.getReturnLoc());
+ }
+
+ TypeLoc VisitParenTypeLoc(ParenTypeLoc T) {
+ return Visit(T.getInnerLoc());
+ }
+
+ TypeLoc VisitAttributedTypeLoc(AttributedTypeLoc T) {
+ return Visit(T.getModifiedLoc());
+ }
+
+ TypeLoc VisitMacroQualifiedTypeLoc(MacroQualifiedTypeLoc T) {
+ return Visit(T.getInnerLoc());
+ }
+
+ TypeLoc VisitAdjustedTypeLoc(AdjustedTypeLoc T) {
+ return Visit(T.getOriginalLoc());
+ }
+
+ TypeLoc VisitPackExpansionTypeLoc(PackExpansionTypeLoc T) {
+ return Visit(T.getPatternLoc());
+ }
+ };
+
+} // namespace
+
+AutoTypeLoc TypeLoc::getContainedAutoTypeLoc() const {
+ TypeLoc Res = GetContainedAutoTypeLocVisitor().Visit(*this);
+ if (Res.isNull())
+ return AutoTypeLoc();
+ return Res.getAs<AutoTypeLoc>();
+}
diff --git a/clang/lib/AST/TypePrinter.cpp b/clang/lib/AST/TypePrinter.cpp
index c2f4baec989e..6f6932e65214 100644
--- a/clang/lib/AST/TypePrinter.cpp
+++ b/clang/lib/AST/TypePrinter.cpp
@@ -227,6 +227,8 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
case Type::ObjCInterface:
case Type::Atomic:
case Type::Pipe:
+ case Type::ExtInt:
+ case Type::DependentExtInt:
CanPrefixQualifiers = true;
break;
@@ -254,6 +256,8 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
case Type::DependentSizedExtVector:
case Type::Vector:
case Type::ExtVector:
+ case Type::ConstantMatrix:
+ case Type::DependentSizedMatrix:
case Type::FunctionProto:
case Type::FunctionNoProto:
case Type::Paren:
@@ -718,6 +722,38 @@ void TypePrinter::printExtVectorAfter(const ExtVectorType *T, raw_ostream &OS) {
OS << ")))";
}
+void TypePrinter::printConstantMatrixBefore(const ConstantMatrixType *T,
+ raw_ostream &OS) {
+ printBefore(T->getElementType(), OS);
+ OS << " __attribute__((matrix_type(";
+ OS << T->getNumRows() << ", " << T->getNumColumns();
+ OS << ")))";
+}
+
+void TypePrinter::printConstantMatrixAfter(const ConstantMatrixType *T,
+ raw_ostream &OS) {
+ printAfter(T->getElementType(), OS);
+}
+
+void TypePrinter::printDependentSizedMatrixBefore(
+ const DependentSizedMatrixType *T, raw_ostream &OS) {
+ printBefore(T->getElementType(), OS);
+ OS << " __attribute__((matrix_type(";
+ if (T->getRowExpr()) {
+ T->getRowExpr()->printPretty(OS, nullptr, Policy);
+ }
+ OS << ", ";
+ if (T->getColumnExpr()) {
+ T->getColumnExpr()->printPretty(OS, nullptr, Policy);
+ }
+ OS << ")))";
+}
+
+void TypePrinter::printDependentSizedMatrixAfter(
+ const DependentSizedMatrixType *T, raw_ostream &OS) {
+ printAfter(T->getElementType(), OS);
+}
+
void
FunctionProtoType::printExceptionSpecification(raw_ostream &OS,
const PrintingPolicy &Policy)
@@ -909,6 +945,8 @@ void TypePrinter::printFunctionAfter(const FunctionType::ExtInfo &Info,
if (Info.getNoReturn())
OS << " __attribute__((noreturn))";
+ if (Info.getCmseNSCall())
+ OS << " __attribute__((cmse_nonsecure_call))";
if (Info.getProducesResult())
OS << " __attribute__((ns_returns_retained))";
if (Info.getRegParm())
@@ -1046,6 +1084,13 @@ void TypePrinter::printAutoBefore(const AutoType *T, raw_ostream &OS) {
if (!T->getDeducedType().isNull()) {
printBefore(T->getDeducedType(), OS);
} else {
+ if (T->isConstrained()) {
+ OS << T->getTypeConstraintConcept()->getName();
+ auto Args = T->getTypeConstraintArguments();
+ if (!Args.empty())
+ printTemplateArgumentList(OS, Args, Policy);
+ OS << ' ';
+ }
switch (T->getKeyword()) {
case AutoTypeKeyword::Auto: OS << "auto"; break;
case AutoTypeKeyword::DecltypeAuto: OS << "decltype(auto)"; break;
@@ -1105,6 +1150,28 @@ void TypePrinter::printPipeBefore(const PipeType *T, raw_ostream &OS) {
void TypePrinter::printPipeAfter(const PipeType *T, raw_ostream &OS) {}
+void TypePrinter::printExtIntBefore(const ExtIntType *T, raw_ostream &OS) {
+ if (T->isUnsigned())
+ OS << "unsigned ";
+ OS << "_ExtInt(" << T->getNumBits() << ")";
+ spaceBeforePlaceHolder(OS);
+}
+
+void TypePrinter::printExtIntAfter(const ExtIntType *T, raw_ostream &OS) {}
+
+void TypePrinter::printDependentExtIntBefore(const DependentExtIntType *T,
+ raw_ostream &OS) {
+ if (T->isUnsigned())
+ OS << "unsigned ";
+ OS << "_ExtInt(";
+ T->getNumBitsExpr()->printPretty(OS, nullptr, Policy);
+ OS << ")";
+ spaceBeforePlaceHolder(OS);
+}
+
+void TypePrinter::printDependentExtIntAfter(const DependentExtIntType *T,
+ raw_ostream &OS) {}
+
/// Appends the given scope to the end of a string.
void TypePrinter::AppendScope(DeclContext *DC, raw_ostream &OS) {
if (DC->isTranslationUnit()) return;
@@ -1234,20 +1301,18 @@ void TypePrinter::printEnumAfter(const EnumType *T, raw_ostream &OS) {}
void TypePrinter::printTemplateTypeParmBefore(const TemplateTypeParmType *T,
raw_ostream &OS) {
- if (IdentifierInfo *Id = T->getIdentifier())
- OS << Id->getName();
- else {
- bool IsLambdaAutoParam = false;
- if (auto D = T->getDecl()) {
- if (auto M = dyn_cast_or_null<CXXMethodDecl>(D->getDeclContext()))
- IsLambdaAutoParam = D->isImplicit() && M->getParent()->isLambda();
+ TemplateTypeParmDecl *D = T->getDecl();
+ if (D && D->isImplicit()) {
+ if (auto *TC = D->getTypeConstraint()) {
+ TC->print(OS, Policy);
+ OS << ' ';
}
+ OS << "auto";
+ } else if (IdentifierInfo *Id = T->getIdentifier())
+ OS << Id->getName();
+ else
+ OS << "type-parameter-" << T->getDepth() << '-' << T->getIndex();
- if (IsLambdaAutoParam)
- OS << "auto";
- else
- OS << "type-parameter-" << T->getDepth() << '-' << T->getIndex();
- }
spaceBeforePlaceHolder(OS);
}
@@ -1298,7 +1363,12 @@ void TypePrinter::printTemplateSpecializationAfter(
void TypePrinter::printInjectedClassNameBefore(const InjectedClassNameType *T,
raw_ostream &OS) {
- printTemplateSpecializationBefore(T->getInjectedTST(), OS);
+ if (Policy.PrintInjectedClassNameWithArguments)
+ return printTemplateSpecializationBefore(T->getInjectedTST(), OS);
+
+ IncludeStrongLifetimeRAII Strong(Policy);
+ T->getTemplateName().print(OS, Policy);
+ spaceBeforePlaceHolder(OS);
}
void TypePrinter::printInjectedClassNameAfter(const InjectedClassNameType *T,
@@ -1381,7 +1451,7 @@ void TypePrinter::printDependentTemplateSpecializationBefore(
if (T->getQualifier())
T->getQualifier()->print(OS, Policy);
- OS << T->getIdentifier()->getName();
+ OS << "template " << T->getIdentifier()->getName();
printTemplateArgumentList(OS, T->template_arguments(), Policy);
spaceBeforePlaceHolder(OS);
}
@@ -1514,6 +1584,7 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
case attr::SPtr:
case attr::UPtr:
case attr::AddressSpace:
+ case attr::CmseNSCall:
llvm_unreachable("This attribute should have been handled already");
case attr::NSReturnsRetained:
@@ -1558,6 +1629,9 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
case attr::AcquireHandle:
OS << "acquire_handle";
break;
+ case attr::ArmMveStrictPolymorphism:
+ OS << "__clang_arm_mve_strict_polymorphism";
+ break;
}
OS << "))";
}
@@ -1711,13 +1785,13 @@ static void printTo(raw_ostream &OS, ArrayRef<TA> Args,
OS << ArgString;
- NeedSpace = (!ArgString.empty() && ArgString.back() == '>');
+ // If the last character of our string is '>', add another space to
+ // keep the two '>''s separate tokens.
+ NeedSpace = Policy.SplitTemplateClosers && !ArgString.empty() &&
+ ArgString.back() == '>';
FirstArg = false;
}
- // If the last character of our string is '>', add another space to
- // keep the two '>''s separate tokens. We don't *have* to do this in
- // C++0x, but it's still good hygiene.
if (NeedSpace)
OS << ' ';
@@ -1755,7 +1829,7 @@ std::string Qualifiers::getAsString(const PrintingPolicy &Policy) const {
SmallString<64> Buf;
llvm::raw_svector_ostream StrOS(Buf);
print(StrOS, Policy);
- return StrOS.str();
+ return std::string(StrOS.str());
}
bool Qualifiers::isEmptyWhenPrinted(const PrintingPolicy &Policy) const {
@@ -1913,6 +1987,6 @@ void QualType::getAsStringInternal(const Type *ty, Qualifiers qs,
SmallString<256> Buf;
llvm::raw_svector_ostream StrOS(Buf);
TypePrinter(policy).print(ty, qs, StrOS, buffer);
- std::string str = StrOS.str();
+ std::string str = std::string(StrOS.str());
buffer.swap(str);
}
diff --git a/clang/lib/AST/VTableBuilder.cpp b/clang/lib/AST/VTableBuilder.cpp
index 2b5b74be5961..f5865ce96b64 100644
--- a/clang/lib/AST/VTableBuilder.cpp
+++ b/clang/lib/AST/VTableBuilder.cpp
@@ -408,7 +408,7 @@ void FinalOverriders::dump(raw_ostream &Out, BaseSubobject Base,
// Now dump the overriders for this base subobject.
for (const auto *MD : RD->methods()) {
- if (!MD->isVirtual())
+ if (!VTableContextBase::hasVtableSlot(MD))
continue;
MD = MD->getCanonicalDecl();
@@ -486,8 +486,8 @@ static bool HasSameVirtualSignature(const CXXMethodDecl *LHS,
bool VCallOffsetMap::MethodsCanShareVCallOffset(const CXXMethodDecl *LHS,
const CXXMethodDecl *RHS) {
- assert(LHS->isVirtual() && "LHS must be virtual!");
- assert(RHS->isVirtual() && "LHS must be virtual!");
+ assert(VTableContextBase::hasVtableSlot(LHS) && "LHS must be virtual!");
+ assert(VTableContextBase::hasVtableSlot(RHS) && "LHS must be virtual!");
// A destructor can share a vcall offset with another destructor.
if (isa<CXXDestructorDecl>(LHS))
@@ -535,6 +535,8 @@ public:
VBaseOffsetOffsetsMapTy;
private:
+ const ItaniumVTableContext &VTables;
+
/// MostDerivedClass - The most derived class for which we're building vcall
/// and vbase offsets.
const CXXRecordDecl *MostDerivedClass;
@@ -583,13 +585,15 @@ private:
CharUnits getCurrentOffsetOffset() const;
public:
- VCallAndVBaseOffsetBuilder(const CXXRecordDecl *MostDerivedClass,
+ VCallAndVBaseOffsetBuilder(const ItaniumVTableContext &VTables,
+ const CXXRecordDecl *MostDerivedClass,
const CXXRecordDecl *LayoutClass,
const FinalOverriders *Overriders,
BaseSubobject Base, bool BaseIsVirtual,
CharUnits OffsetInLayoutClass)
- : MostDerivedClass(MostDerivedClass), LayoutClass(LayoutClass),
- Context(MostDerivedClass->getASTContext()), Overriders(Overriders) {
+ : VTables(VTables), MostDerivedClass(MostDerivedClass),
+ LayoutClass(LayoutClass), Context(MostDerivedClass->getASTContext()),
+ Overriders(Overriders) {
// Add vcall and vbase offsets.
AddVCallAndVBaseOffsets(Base, BaseIsVirtual, OffsetInLayoutClass);
@@ -662,9 +666,13 @@ CharUnits VCallAndVBaseOffsetBuilder::getCurrentOffsetOffset() const {
// vcall offset itself).
int64_t OffsetIndex = -(int64_t)(3 + Components.size());
- CharUnits PointerWidth =
- Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
- CharUnits OffsetOffset = PointerWidth * OffsetIndex;
+ // Under the relative ABI, the offset widths are 32-bit ints instead of
+ // pointer widths.
+ CharUnits OffsetWidth = Context.toCharUnitsFromBits(
+ VTables.isRelativeLayout() ? 32
+ : Context.getTargetInfo().getPointerWidth(0));
+ CharUnits OffsetOffset = OffsetWidth * OffsetIndex;
+
return OffsetOffset;
}
@@ -689,7 +697,7 @@ void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
// Add the vcall offsets.
for (const auto *MD : RD->methods()) {
- if (!MD->isVirtual())
+ if (!VTableContextBase::hasVtableSlot(MD))
continue;
MD = MD->getCanonicalDecl();
@@ -1077,7 +1085,7 @@ typedef llvm::SmallPtrSet<const CXXMethodDecl *, 8> OverriddenMethodsSetTy;
template <class VisitorTy>
static void
visitAllOverriddenMethods(const CXXMethodDecl *MD, VisitorTy &Visitor) {
- assert(MD->isVirtual() && "Method is not virtual!");
+ assert(VTableContextBase::hasVtableSlot(MD) && "Method is not virtual!");
for (const CXXMethodDecl *OverriddenMD : MD->overridden_methods()) {
if (!Visitor(OverriddenMD))
@@ -1271,13 +1279,13 @@ ThisAdjustment ItaniumVTableBuilder::ComputeThisAdjustment(
if (VCallOffsets.empty()) {
// We don't have vcall offsets for this virtual base, go ahead and
// build them.
- VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, MostDerivedClass,
- /*Overriders=*/nullptr,
- BaseSubobject(Offset.VirtualBase,
- CharUnits::Zero()),
- /*BaseIsVirtual=*/true,
- /*OffsetInLayoutClass=*/
- CharUnits::Zero());
+ VCallAndVBaseOffsetBuilder Builder(
+ VTables, MostDerivedClass, MostDerivedClass,
+ /*Overriders=*/nullptr,
+ BaseSubobject(Offset.VirtualBase, CharUnits::Zero()),
+ /*BaseIsVirtual=*/true,
+ /*OffsetInLayoutClass=*/
+ CharUnits::Zero());
VCallOffsets = Builder.getVCallOffsets();
}
@@ -1474,14 +1482,14 @@ void ItaniumVTableBuilder::AddMethods(
llvm_unreachable("Found a duplicate primary base!");
}
- const CXXDestructorDecl *ImplicitVirtualDtor = nullptr;
-
typedef llvm::SmallVector<const CXXMethodDecl *, 8> NewVirtualFunctionsTy;
NewVirtualFunctionsTy NewVirtualFunctions;
+ llvm::SmallVector<const CXXMethodDecl*, 4> NewImplicitVirtualFunctions;
+
// Now go through all virtual member functions and add them.
for (const auto *MD : RD->methods()) {
- if (!MD->isVirtual())
+ if (!ItaniumVTableContext::hasVtableSlot(MD))
continue;
MD = MD->getCanonicalDecl();
@@ -1542,24 +1550,30 @@ void ItaniumVTableBuilder::AddMethods(
}
}
- if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
- if (MD->isImplicit()) {
- // Itanium C++ ABI 2.5.2:
- // If a class has an implicitly-defined virtual destructor,
- // its entries come after the declared virtual function pointers.
-
- assert(!ImplicitVirtualDtor &&
- "Did already see an implicit virtual dtor!");
- ImplicitVirtualDtor = DD;
- continue;
- }
- }
-
- NewVirtualFunctions.push_back(MD);
- }
-
- if (ImplicitVirtualDtor)
- NewVirtualFunctions.push_back(ImplicitVirtualDtor);
+ if (MD->isImplicit())
+ NewImplicitVirtualFunctions.push_back(MD);
+ else
+ NewVirtualFunctions.push_back(MD);
+ }
+
+ std::stable_sort(
+ NewImplicitVirtualFunctions.begin(), NewImplicitVirtualFunctions.end(),
+ [](const CXXMethodDecl *A, const CXXMethodDecl *B) {
+ if (A->isCopyAssignmentOperator() != B->isCopyAssignmentOperator())
+ return A->isCopyAssignmentOperator();
+ if (A->isMoveAssignmentOperator() != B->isMoveAssignmentOperator())
+ return A->isMoveAssignmentOperator();
+ if (isa<CXXDestructorDecl>(A) != isa<CXXDestructorDecl>(B))
+ return isa<CXXDestructorDecl>(A);
+ assert(A->getOverloadedOperator() == OO_EqualEqual &&
+ B->getOverloadedOperator() == OO_EqualEqual &&
+ "unexpected or duplicate implicit virtual function");
+ // We rely on Sema to have declared the operator== members in the
+ // same order as the corresponding operator<=> members.
+ return false;
+ });
+ NewVirtualFunctions.append(NewImplicitVirtualFunctions.begin(),
+ NewImplicitVirtualFunctions.end());
for (const CXXMethodDecl *MD : NewVirtualFunctions) {
// Get the final overrider.
@@ -1629,9 +1643,9 @@ void ItaniumVTableBuilder::LayoutPrimaryAndSecondaryVTables(
VTableIndices.push_back(VTableIndex);
// Add vcall and vbase offsets for this vtable.
- VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, LayoutClass, &Overriders,
- Base, BaseIsVirtualInLayoutClass,
- OffsetInLayoutClass);
+ VCallAndVBaseOffsetBuilder Builder(
+ VTables, MostDerivedClass, LayoutClass, &Overriders, Base,
+ BaseIsVirtualInLayoutClass, OffsetInLayoutClass);
Components.append(Builder.components_begin(), Builder.components_end());
// Check if we need to add these vcall offsets.
@@ -2155,7 +2169,7 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
for (const auto *MD : MostDerivedClass->methods()) {
// We only want virtual member functions.
- if (!MD->isVirtual())
+ if (!ItaniumVTableContext::hasVtableSlot(MD))
continue;
MD = MD->getCanonicalDecl();
@@ -2194,12 +2208,40 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
}
}
+static VTableLayout::AddressPointsIndexMapTy
+MakeAddressPointIndices(const VTableLayout::AddressPointsMapTy &addressPoints,
+ unsigned numVTables) {
+ VTableLayout::AddressPointsIndexMapTy indexMap(numVTables);
+
+ for (auto it = addressPoints.begin(); it != addressPoints.end(); ++it) {
+ const auto &addressPointLoc = it->second;
+ unsigned vtableIndex = addressPointLoc.VTableIndex;
+ unsigned addressPoint = addressPointLoc.AddressPointIndex;
+ if (indexMap[vtableIndex]) {
+ // Multiple BaseSubobjects can map to the same AddressPointLocation, but
+ // every vtable index should have a unique address point.
+ assert(indexMap[vtableIndex] == addressPoint &&
+ "Every vtable index should have a unique address point. Found a "
+ "vtable that has two different address points.");
+ } else {
+ indexMap[vtableIndex] = addressPoint;
+ }
+ }
+
+ // Note that by this point, not all the addresses may be initialized if the
+ // AddressPoints map is empty. This is ok if the map isn't needed. See
+ // MicrosoftVTableContext::computeVTableRelatedInformation() which uses an
+ // empty map.
+ return indexMap;
+}
+
VTableLayout::VTableLayout(ArrayRef<size_t> VTableIndices,
ArrayRef<VTableComponent> VTableComponents,
ArrayRef<VTableThunkTy> VTableThunks,
const AddressPointsMapTy &AddressPoints)
: VTableComponents(VTableComponents), VTableThunks(VTableThunks),
- AddressPoints(AddressPoints) {
+ AddressPoints(AddressPoints), AddressPointIndices(MakeAddressPointIndices(
+ AddressPoints, VTableIndices.size())) {
if (VTableIndices.size() <= 1)
assert(VTableIndices.size() == 1 && VTableIndices[0] == 0);
else
@@ -2215,8 +2257,13 @@ VTableLayout::VTableLayout(ArrayRef<size_t> VTableIndices,
VTableLayout::~VTableLayout() { }
-ItaniumVTableContext::ItaniumVTableContext(ASTContext &Context)
- : VTableContextBase(/*MS=*/false) {}
+bool VTableContextBase::hasVtableSlot(const CXXMethodDecl *MD) {
+ return MD->isVirtual() && !MD->isConsteval();
+}
+
+ItaniumVTableContext::ItaniumVTableContext(
+ ASTContext &Context, VTableComponentLayout ComponentLayout)
+ : VTableContextBase(/*MS=*/false), ComponentLayout(ComponentLayout) {}
ItaniumVTableContext::~ItaniumVTableContext() {}
@@ -2245,7 +2292,7 @@ ItaniumVTableContext::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
if (I != VirtualBaseClassOffsetOffsets.end())
return I->second;
- VCallAndVBaseOffsetBuilder Builder(RD, RD, /*Overriders=*/nullptr,
+ VCallAndVBaseOffsetBuilder Builder(*this, RD, RD, /*Overriders=*/nullptr,
BaseSubobject(RD, CharUnits::Zero()),
/*BaseIsVirtual=*/false,
/*OffsetInLayoutClass=*/CharUnits::Zero());
@@ -2494,8 +2541,9 @@ private:
BasesSetVectorTy VisitedBases;
AddMethods(BaseSubobject(MostDerivedClass, CharUnits::Zero()), 0, nullptr,
VisitedBases);
- assert((HasRTTIComponent ? Components.size() - 1 : Components.size()) &&
- "vftable can't be empty");
+ // Note that it is possible for the vftable to contain only an RTTI
+ // pointer, if all virtual functions are consteval.
+ assert(!Components.empty() && "vftable can't be empty");
assert(MethodVFTableLocations.empty());
for (const auto &I : MethodInfoMap) {
@@ -2874,7 +2922,7 @@ static void GroupNewVirtualOverloads(
if (Inserted)
Groups.push_back(MethodGroup());
if (const auto *MD = dyn_cast<CXXMethodDecl>(ND))
- if (MD->isVirtual())
+ if (MicrosoftVTableContext::hasVtableSlot(MD))
Groups[J->second].push_back(MD->getCanonicalDecl());
}
@@ -3470,7 +3518,7 @@ static const FullPathTy *selectBestPath(ASTContext &Context,
getOffsetOfFullPath(Context, TopLevelRD, SpecificPath);
FinalOverriders Overriders(TopLevelRD, CharUnits::Zero(), TopLevelRD);
for (const CXXMethodDecl *MD : Info.IntroducingObject->methods()) {
- if (!MD->isVirtual())
+ if (!MicrosoftVTableContext::hasVtableSlot(MD))
continue;
FinalOverriders::OverriderInfo OI =
Overriders.getOverrider(MD->getCanonicalDecl(), BaseOffset);
@@ -3609,7 +3657,7 @@ void MicrosoftVTableContext::dumpMethodLocations(
for (const auto &I : NewMethods) {
const CXXMethodDecl *MD = cast<const CXXMethodDecl>(I.first.getDecl());
- assert(MD->isVirtual());
+ assert(hasVtableSlot(MD));
std::string MethodName = PredefinedExpr::ComputeName(
PredefinedExpr::PrettyFunctionNoVirtual, MD);
@@ -3729,7 +3777,7 @@ MicrosoftVTableContext::getVFTableLayout(const CXXRecordDecl *RD,
MethodVFTableLocation
MicrosoftVTableContext::getMethodVFTableLocation(GlobalDecl GD) {
- assert(cast<CXXMethodDecl>(GD.getDecl())->isVirtual() &&
+ assert(hasVtableSlot(cast<CXXMethodDecl>(GD.getDecl())) &&
"Only use this method for virtual methods or dtors");
if (isa<CXXDestructorDecl>(GD.getDecl()))
assert(GD.getDtorType() == Dtor_Deleting);
diff --git a/clang/lib/ASTMatchers/ASTMatchFinder.cpp b/clang/lib/ASTMatchers/ASTMatchFinder.cpp
index 0d1f713db8d3..e88da16dd3d4 100644
--- a/clang/lib/ASTMatchers/ASTMatchFinder.cpp
+++ b/clang/lib/ASTMatchers/ASTMatchFinder.cpp
@@ -43,6 +43,13 @@ typedef MatchFinder::MatchCallback MatchCallback;
// optimize this on.
static const unsigned MaxMemoizationEntries = 10000;
+enum class MatchType {
+ Ancestors,
+
+ Descendants,
+ Child,
+};
+
// We use memoization to avoid running the same matcher on the same
// AST node twice. This struct is the key for looking up match
// result. It consists of an ID of the MatcherInterface (for
@@ -57,14 +64,15 @@ static const unsigned MaxMemoizationEntries = 10000;
// provides enough benefit for the additional amount of code.
struct MatchKey {
DynTypedMatcher::MatcherIDType MatcherID;
- ast_type_traits::DynTypedNode Node;
+ DynTypedNode Node;
BoundNodesTreeBuilder BoundNodes;
- ast_type_traits::TraversalKind Traversal = ast_type_traits::TK_AsIs;
+ TraversalKind Traversal = TK_AsIs;
+ MatchType Type;
bool operator<(const MatchKey &Other) const {
- return std::tie(MatcherID, Node, BoundNodes, Traversal) <
- std::tie(Other.MatcherID, Other.Node, Other.BoundNodes,
- Other.Traversal);
+ return std::tie(Traversal, Type, MatcherID, Node, BoundNodes) <
+ std::tie(Other.Traversal, Other.Type, Other.MatcherID, Other.Node,
+ Other.BoundNodes);
}
};
@@ -87,8 +95,7 @@ public:
// matching the descendants.
MatchChildASTVisitor(const DynTypedMatcher *Matcher, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder, int MaxDepth,
- ast_type_traits::TraversalKind Traversal,
- ASTMatchFinder::BindKind Bind)
+ TraversalKind Traversal, ASTMatchFinder::BindKind Bind)
: Matcher(Matcher), Finder(Finder), Builder(Builder), CurrentDepth(0),
MaxDepth(MaxDepth), Traversal(Traversal), Bind(Bind), Matches(false) {}
@@ -103,7 +110,7 @@ public:
// Traverse*(c) for each child c of 'node'.
// - Traverse*(c) in turn calls Traverse(c), completing the
// recursion.
- bool findMatch(const ast_type_traits::DynTypedNode &DynNode) {
+ bool findMatch(const DynTypedNode &DynNode) {
reset();
if (const Decl *D = DynNode.get<Decl>())
traverse(*D);
@@ -143,14 +150,16 @@ public:
Stmt *StmtToTraverse = StmtNode;
if (auto *ExprNode = dyn_cast_or_null<Expr>(StmtNode)) {
auto *LambdaNode = dyn_cast_or_null<LambdaExpr>(StmtNode);
- if (LambdaNode && Finder->getASTContext().getTraversalKind() ==
- ast_type_traits::TK_IgnoreUnlessSpelledInSource)
+ if (LambdaNode &&
+ Finder->getASTContext().getParentMapContext().getTraversalKind() ==
+ TK_IgnoreUnlessSpelledInSource)
StmtToTraverse = LambdaNode;
else
- StmtToTraverse = Finder->getASTContext().traverseIgnored(ExprNode);
+ StmtToTraverse =
+ Finder->getASTContext().getParentMapContext().traverseIgnored(
+ ExprNode);
}
- if (Traversal ==
- ast_type_traits::TraversalKind::TK_IgnoreImplicitCastsAndParentheses) {
+ if (Traversal == TraversalKind::TK_IgnoreImplicitCastsAndParentheses) {
if (Expr *ExprNode = dyn_cast_or_null<Expr>(StmtNode))
StmtToTraverse = ExprNode->IgnoreParenImpCasts();
}
@@ -216,8 +225,8 @@ public:
return traverse(*CtorInit);
}
bool TraverseLambdaExpr(LambdaExpr *Node) {
- if (Finder->getASTContext().getTraversalKind() !=
- ast_type_traits::TK_IgnoreUnlessSpelledInSource)
+ if (Finder->getASTContext().getParentMapContext().getTraversalKind() !=
+ TK_IgnoreUnlessSpelledInSource)
return VisitorBase::TraverseLambdaExpr(Node);
if (!Node)
return true;
@@ -308,7 +317,7 @@ private:
}
if (Bind != ASTMatchFinder::BK_All) {
BoundNodesTreeBuilder RecursiveBuilder(*Builder);
- if (Matcher->matches(ast_type_traits::DynTypedNode::create(Node), Finder,
+ if (Matcher->matches(DynTypedNode::create(Node), Finder,
&RecursiveBuilder)) {
Matches = true;
ResultBindings.addMatch(RecursiveBuilder);
@@ -316,7 +325,7 @@ private:
}
} else {
BoundNodesTreeBuilder RecursiveBuilder(*Builder);
- if (Matcher->matches(ast_type_traits::DynTypedNode::create(Node), Finder,
+ if (Matcher->matches(DynTypedNode::create(Node), Finder,
&RecursiveBuilder)) {
// After the first match the matcher succeeds.
Matches = true;
@@ -343,7 +352,7 @@ private:
BoundNodesTreeBuilder ResultBindings;
int CurrentDepth;
const int MaxDepth;
- const ast_type_traits::TraversalKind Traversal;
+ const TraversalKind Traversal;
const ASTMatchFinder::BindKind Bind;
bool Matches;
};
@@ -440,12 +449,10 @@ public:
bool TraverseConstructorInitializer(CXXCtorInitializer *CtorInit);
// Matches children or descendants of 'Node' with 'BaseMatcher'.
- bool memoizedMatchesRecursively(const ast_type_traits::DynTypedNode &Node,
- ASTContext &Ctx,
+ bool memoizedMatchesRecursively(const DynTypedNode &Node, ASTContext &Ctx,
const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder, int MaxDepth,
- ast_type_traits::TraversalKind Traversal,
- BindKind Bind) {
+ TraversalKind Traversal, BindKind Bind) {
// For AST-nodes that don't have an identity, we can't memoize.
if (!Node.getMemoizationData() || !Builder->isComparable())
return matchesRecursively(Node, Matcher, Builder, MaxDepth, Traversal,
@@ -456,8 +463,9 @@ public:
Key.Node = Node;
// Note that we key on the bindings *before* the match.
Key.BoundNodes = *Builder;
- Key.Traversal = Ctx.getTraversalKind();
-
+ Key.Traversal = Ctx.getParentMapContext().getTraversalKind();
+ // Memoize result even doing a single-level match, it might be expensive.
+ Key.Type = MaxDepth == 1 ? MatchType::Child : MatchType::Descendants;
MemoizationMap::iterator I = ResultCache.find(Key);
if (I != ResultCache.end()) {
*Builder = I->second.Nodes;
@@ -477,11 +485,10 @@ public:
}
// Matches children or descendants of 'Node' with 'BaseMatcher'.
- bool matchesRecursively(const ast_type_traits::DynTypedNode &Node,
+ bool matchesRecursively(const DynTypedNode &Node,
const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder, int MaxDepth,
- ast_type_traits::TraversalKind Traversal,
- BindKind Bind) {
+ TraversalKind Traversal, BindKind Bind) {
MatchChildASTVisitor Visitor(
&Matcher, this, Builder, MaxDepth, Traversal, Bind);
return Visitor.findMatch(Node);
@@ -498,10 +505,9 @@ public:
bool Directly) override;
// Implements ASTMatchFinder::matchesChildOf.
- bool matchesChildOf(const ast_type_traits::DynTypedNode &Node,
- ASTContext &Ctx, const DynTypedMatcher &Matcher,
- BoundNodesTreeBuilder *Builder,
- ast_type_traits::TraversalKind Traversal,
+ bool matchesChildOf(const DynTypedNode &Node, ASTContext &Ctx,
+ const DynTypedMatcher &Matcher,
+ BoundNodesTreeBuilder *Builder, TraversalKind Traversal,
BindKind Bind) override {
if (ResultCache.size() > MaxMemoizationEntries)
ResultCache.clear();
@@ -509,19 +515,18 @@ public:
Bind);
}
// Implements ASTMatchFinder::matchesDescendantOf.
- bool matchesDescendantOf(const ast_type_traits::DynTypedNode &Node,
- ASTContext &Ctx, const DynTypedMatcher &Matcher,
+ bool matchesDescendantOf(const DynTypedNode &Node, ASTContext &Ctx,
+ const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder,
BindKind Bind) override {
if (ResultCache.size() > MaxMemoizationEntries)
ResultCache.clear();
return memoizedMatchesRecursively(Node, Ctx, Matcher, Builder, INT_MAX,
- ast_type_traits::TraversalKind::TK_AsIs,
- Bind);
+ TraversalKind::TK_AsIs, Bind);
}
// Implements ASTMatchFinder::matchesAncestorOf.
- bool matchesAncestorOf(const ast_type_traits::DynTypedNode &Node,
- ASTContext &Ctx, const DynTypedMatcher &Matcher,
+ bool matchesAncestorOf(const DynTypedNode &Node, ASTContext &Ctx,
+ const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder,
AncestorMatchMode MatchMode) override {
// Reset the cache outside of the recursive call to make sure we
@@ -534,7 +539,7 @@ public:
// Matches all registered matchers on the given node and calls the
// result callback for every node that matches.
- void match(const ast_type_traits::DynTypedNode &Node) {
+ void match(const DynTypedNode &Node) {
// FIXME: Improve this with a switch or a visitor pattern.
if (auto *N = Node.get<Decl>()) {
match(*N);
@@ -612,7 +617,7 @@ private:
}
}
- void matchWithFilter(const ast_type_traits::DynTypedNode &DynNode) {
+ void matchWithFilter(const DynTypedNode &DynNode) {
auto Kind = DynNode.getNodeKind();
auto it = MatcherFiltersMap.find(Kind);
const auto &Filter =
@@ -636,8 +641,7 @@ private:
}
}
- const std::vector<unsigned short> &
- getFilterForKind(ast_type_traits::ASTNodeKind Kind) {
+ const std::vector<unsigned short> &getFilterForKind(ASTNodeKind Kind) {
auto &Filter = MatcherFiltersMap[Kind];
auto &Matchers = this->Matchers->DeclOrStmt;
assert((Matchers.size() < USHRT_MAX) && "Too many matchers.");
@@ -652,10 +656,10 @@ private:
/// @{
/// Overloads to pair the different node types to their matchers.
void matchDispatch(const Decl *Node) {
- return matchWithFilter(ast_type_traits::DynTypedNode::create(*Node));
+ return matchWithFilter(DynTypedNode::create(*Node));
}
void matchDispatch(const Stmt *Node) {
- return matchWithFilter(ast_type_traits::DynTypedNode::create(*Node));
+ return matchWithFilter(DynTypedNode::create(*Node));
}
void matchDispatch(const Type *Node) {
@@ -692,12 +696,16 @@ private:
// Once there are multiple parents, the breadth first search order does not
// allow simple memoization on the ancestors. Thus, we only memoize as long
// as there is a single parent.
- bool memoizedMatchesAncestorOfRecursively(
- const ast_type_traits::DynTypedNode &Node, ASTContext &Ctx,
- const DynTypedMatcher &Matcher, BoundNodesTreeBuilder *Builder,
- AncestorMatchMode MatchMode) {
+ bool memoizedMatchesAncestorOfRecursively(const DynTypedNode &Node,
+ ASTContext &Ctx,
+ const DynTypedMatcher &Matcher,
+ BoundNodesTreeBuilder *Builder,
+ AncestorMatchMode MatchMode) {
// For AST-nodes that don't have an identity, we can't memoize.
- if (!Builder->isComparable())
+ // When doing a single-level match, we don't need to memoize because
+ // ParentMap (in ASTContext) already memoizes the result.
+ if (!Builder->isComparable() ||
+ MatchMode == AncestorMatchMode::AMM_ParentOnly)
return matchesAncestorOfRecursively(Node, Ctx, Matcher, Builder,
MatchMode);
@@ -705,7 +713,8 @@ private:
Key.MatcherID = Matcher.getID();
Key.Node = Node;
Key.BoundNodes = *Builder;
- Key.Traversal = Ctx.getTraversalKind();
+ Key.Traversal = Ctx.getParentMapContext().getTraversalKind();
+ Key.Type = MatchType::Ancestors;
// Note that we cannot use insert and reuse the iterator, as recursive
// calls to match might invalidate the result cache iterators.
@@ -727,8 +736,7 @@ private:
return CachedResult.ResultOfMatch;
}
- bool matchesAncestorOfRecursively(const ast_type_traits::DynTypedNode &Node,
- ASTContext &Ctx,
+ bool matchesAncestorOfRecursively(const DynTypedNode &Node, ASTContext &Ctx,
const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder,
AncestorMatchMode MatchMode) {
@@ -747,7 +755,7 @@ private:
return D->getKind() == Decl::TranslationUnit;
})) {
llvm::errs() << "Tried to match orphan node:\n";
- Node.dump(llvm::errs(), ActiveASTContext->getSourceManager());
+ Node.dump(llvm::errs(), *ActiveASTContext);
llvm_unreachable("Parent map should be complete!");
}
#endif
@@ -755,7 +763,7 @@ private:
}
if (Parents.size() == 1) {
// Only one parent - do recursive memoization.
- const ast_type_traits::DynTypedNode Parent = Parents[0];
+ const DynTypedNode Parent = Parents[0];
BoundNodesTreeBuilder BuilderCopy = *Builder;
if (Matcher.matches(Parent, this, &BuilderCopy)) {
*Builder = std::move(BuilderCopy);
@@ -770,8 +778,7 @@ private:
} else {
// Multiple parents - BFS over the rest of the nodes.
llvm::DenseSet<const void *> Visited;
- std::deque<ast_type_traits::DynTypedNode> Queue(Parents.begin(),
- Parents.end());
+ std::deque<DynTypedNode> Queue(Parents.begin(), Parents.end());
while (!Queue.empty()) {
BoundNodesTreeBuilder BuilderCopy = *Builder;
if (Matcher.matches(Queue.front(), this, &BuilderCopy)) {
@@ -861,8 +868,7 @@ private:
/// kind (and derived kinds) so it is a waste to try every matcher on every
/// node.
/// We precalculate a list of matchers that pass the toplevel restrict check.
- llvm::DenseMap<ast_type_traits::ASTNodeKind, std::vector<unsigned short>>
- MatcherFiltersMap;
+ llvm::DenseMap<ASTNodeKind, std::vector<unsigned short>> MatcherFiltersMap;
const MatchFinder::MatchFinderOptions &Options;
ASTContext *ActiveASTContext;
@@ -923,9 +929,8 @@ bool MatchASTVisitor::classIsDerivedFrom(const CXXRecordDecl *Declaration,
if (!ClassDecl)
continue;
if (ClassDecl == Declaration) {
- // This can happen for recursive template definitions; if the
- // current declaration did not match, we can safely return false.
- return false;
+ // This can happen for recursive template definitions.
+ continue;
}
BoundNodesTreeBuilder Result(*Builder);
if (Base.matches(*ClassDecl, this, &Result)) {
@@ -1137,8 +1142,7 @@ std::unique_ptr<ASTConsumer> MatchFinder::newASTConsumer() {
return std::make_unique<internal::MatchASTConsumer>(this, ParsingDone);
}
-void MatchFinder::match(const clang::ast_type_traits::DynTypedNode &Node,
- ASTContext &Context) {
+void MatchFinder::match(const clang::DynTypedNode &Node, ASTContext &Context) {
internal::MatchASTVisitor Visitor(&Matchers, Options);
Visitor.set_active_ast_context(&Context);
Visitor.match(Node);
diff --git a/clang/lib/ASTMatchers/ASTMatchersInternal.cpp b/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
index 199a6d839e2e..4b9baf7a0e75 100644
--- a/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
+++ b/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
@@ -15,9 +15,11 @@
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ParentMapContext.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/Basic/LLVM.h"
+#include "clang/Lex/Lexer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/None.h"
@@ -27,6 +29,8 @@
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/Support/WithColor.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
@@ -40,39 +44,56 @@ namespace ast_matchers {
AST_MATCHER_P(ObjCMessageExpr, hasAnySelectorMatcher, std::vector<std::string>,
Matches) {
- std::string SelString = Node.getSelector().getAsString();
- for (const std::string &S : Matches)
- if (S == SelString)
- return true;
- return false;
+ return llvm::is_contained(Matches, Node.getSelector().getAsString());
}
namespace internal {
-bool NotUnaryOperator(const ast_type_traits::DynTypedNode &DynNode,
- ASTMatchFinder *Finder, BoundNodesTreeBuilder *Builder,
+bool NotUnaryOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
ArrayRef<DynTypedMatcher> InnerMatchers);
-bool AllOfVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
- ASTMatchFinder *Finder,
+bool AllOfVariadicOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder,
ArrayRef<DynTypedMatcher> InnerMatchers);
-bool EachOfVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
- ASTMatchFinder *Finder,
+bool EachOfVariadicOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder,
ArrayRef<DynTypedMatcher> InnerMatchers);
-bool AnyOfVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
- ASTMatchFinder *Finder,
+bool AnyOfVariadicOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder,
ArrayRef<DynTypedMatcher> InnerMatchers);
-bool OptionallyVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
+bool OptionallyVariadicOperator(const DynTypedNode &DynNode,
ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder,
ArrayRef<DynTypedMatcher> InnerMatchers);
+bool matchesAnyBase(const CXXRecordDecl &Node,
+ const Matcher<CXXBaseSpecifier> &BaseSpecMatcher,
+ ASTMatchFinder *Finder, BoundNodesTreeBuilder *Builder) {
+ if (!Node.hasDefinition())
+ return false;
+
+ CXXBasePaths Paths;
+ Paths.setOrigin(&Node);
+
+ const auto basePredicate =
+ [Finder, Builder, &BaseSpecMatcher](const CXXBaseSpecifier *BaseSpec,
+ CXXBasePath &IgnoredParam) {
+ BoundNodesTreeBuilder Result(*Builder);
+ if (BaseSpecMatcher.matches(*BaseSpec, Finder, Builder)) {
+ *Builder = std::move(Result);
+ return true;
+ }
+ return false;
+ };
+
+ return Node.lookupInBases(basePredicate, Paths,
+ /*LookupInDependent =*/true);
+}
+
void BoundNodesTreeBuilder::visitMatches(Visitor *ResultVisitor) {
if (Bindings.empty())
Bindings.push_back(BoundNodesMap());
@@ -84,7 +105,7 @@ void BoundNodesTreeBuilder::visitMatches(Visitor *ResultVisitor) {
namespace {
using VariadicOperatorFunction = bool (*)(
- const ast_type_traits::DynTypedNode &DynNode, ASTMatchFinder *Finder,
+ const DynTypedNode &DynNode, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder, ArrayRef<DynTypedMatcher> InnerMatchers);
template <VariadicOperatorFunction Func>
@@ -93,8 +114,7 @@ public:
VariadicMatcher(std::vector<DynTypedMatcher> InnerMatchers)
: InnerMatchers(std::move(InnerMatchers)) {}
- bool dynMatches(const ast_type_traits::DynTypedNode &DynNode,
- ASTMatchFinder *Finder,
+ bool dynMatches(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const override {
return Func(DynNode, Finder, Builder, InnerMatchers);
}
@@ -109,16 +129,14 @@ public:
IntrusiveRefCntPtr<DynMatcherInterface> InnerMatcher)
: ID(ID), InnerMatcher(std::move(InnerMatcher)) {}
- bool dynMatches(const ast_type_traits::DynTypedNode &DynNode,
- ASTMatchFinder *Finder,
+ bool dynMatches(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const override {
bool Result = InnerMatcher->dynMatches(DynNode, Finder, Builder);
if (Result) Builder->setBinding(ID, DynNode);
return Result;
}
- llvm::Optional<ast_type_traits::TraversalKind>
- TraversalKind() const override {
+ llvm::Optional<clang::TraversalKind> TraversalKind() const override {
return InnerMatcher->TraversalKind();
}
@@ -138,20 +156,45 @@ public:
Retain(); // Reference count will never become zero.
}
- bool dynMatches(const ast_type_traits::DynTypedNode &, ASTMatchFinder *,
+ bool dynMatches(const DynTypedNode &, ASTMatchFinder *,
BoundNodesTreeBuilder *) const override {
return true;
}
};
+/// A matcher that specifies a particular \c TraversalKind.
+///
+/// The kind provided to the constructor overrides any kind that may be
+/// specified by the `InnerMatcher`.
+class DynTraversalMatcherImpl : public DynMatcherInterface {
+public:
+ explicit DynTraversalMatcherImpl(
+ clang::TraversalKind TK,
+ IntrusiveRefCntPtr<DynMatcherInterface> InnerMatcher)
+ : TK(TK), InnerMatcher(std::move(InnerMatcher)) {}
+
+ bool dynMatches(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const override {
+ return this->InnerMatcher->dynMatches(DynNode, Finder, Builder);
+ }
+
+ llvm::Optional<clang::TraversalKind> TraversalKind() const override {
+ return TK;
+ }
+
+private:
+ clang::TraversalKind TK;
+ IntrusiveRefCntPtr<DynMatcherInterface> InnerMatcher;
+};
+
} // namespace
static llvm::ManagedStatic<TrueMatcherImpl> TrueMatcherInstance;
-DynTypedMatcher DynTypedMatcher::constructVariadic(
- DynTypedMatcher::VariadicOperator Op,
- ast_type_traits::ASTNodeKind SupportedKind,
- std::vector<DynTypedMatcher> InnerMatchers) {
+DynTypedMatcher
+DynTypedMatcher::constructVariadic(DynTypedMatcher::VariadicOperator Op,
+ ASTNodeKind SupportedKind,
+ std::vector<DynTypedMatcher> InnerMatchers) {
assert(!InnerMatchers.empty() && "Array must not be empty.");
assert(llvm::all_of(InnerMatchers,
[SupportedKind](const DynTypedMatcher &M) {
@@ -172,8 +215,8 @@ DynTypedMatcher DynTypedMatcher::constructVariadic(
// invalid types earlier and we can elide the kind checks inside the
// matcher.
for (auto &IM : InnerMatchers) {
- RestrictKind = ast_type_traits::ASTNodeKind::getMostDerivedType(
- RestrictKind, IM.RestrictKind);
+ RestrictKind =
+ ASTNodeKind::getMostDerivedType(RestrictKind, IM.RestrictKind);
}
return DynTypedMatcher(
SupportedKind, RestrictKind,
@@ -204,40 +247,45 @@ DynTypedMatcher DynTypedMatcher::constructVariadic(
llvm_unreachable("Invalid Op value.");
}
-DynTypedMatcher DynTypedMatcher::constructRestrictedWrapper(
- const DynTypedMatcher &InnerMatcher,
- ast_type_traits::ASTNodeKind RestrictKind) {
+DynTypedMatcher
+DynTypedMatcher::constructRestrictedWrapper(const DynTypedMatcher &InnerMatcher,
+ ASTNodeKind RestrictKind) {
DynTypedMatcher Copy = InnerMatcher;
Copy.RestrictKind = RestrictKind;
return Copy;
}
-DynTypedMatcher DynTypedMatcher::trueMatcher(
- ast_type_traits::ASTNodeKind NodeKind) {
+DynTypedMatcher
+DynTypedMatcher::withTraversalKind(ast_type_traits::TraversalKind TK) {
+ auto Copy = *this;
+ Copy.Implementation =
+ new DynTraversalMatcherImpl(TK, std::move(Copy.Implementation));
+ return Copy;
+}
+
+DynTypedMatcher DynTypedMatcher::trueMatcher(ASTNodeKind NodeKind) {
return DynTypedMatcher(NodeKind, NodeKind, &*TrueMatcherInstance);
}
-bool DynTypedMatcher::canMatchNodesOfKind(
- ast_type_traits::ASTNodeKind Kind) const {
+bool DynTypedMatcher::canMatchNodesOfKind(ASTNodeKind Kind) const {
return RestrictKind.isBaseOf(Kind);
}
-DynTypedMatcher DynTypedMatcher::dynCastTo(
- const ast_type_traits::ASTNodeKind Kind) const {
+DynTypedMatcher DynTypedMatcher::dynCastTo(const ASTNodeKind Kind) const {
auto Copy = *this;
Copy.SupportedKind = Kind;
- Copy.RestrictKind =
- ast_type_traits::ASTNodeKind::getMostDerivedType(Kind, RestrictKind);
+ Copy.RestrictKind = ASTNodeKind::getMostDerivedType(Kind, RestrictKind);
return Copy;
}
-bool DynTypedMatcher::matches(const ast_type_traits::DynTypedNode &DynNode,
+bool DynTypedMatcher::matches(const DynTypedNode &DynNode,
ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const {
TraversalKindScope RAII(Finder->getASTContext(),
Implementation->TraversalKind());
- auto N = Finder->getASTContext().traverseIgnored(DynNode);
+ auto N =
+ Finder->getASTContext().getParentMapContext().traverseIgnored(DynNode);
if (RestrictKind.isBaseOf(N.getNodeKind()) &&
Implementation->dynMatches(N, Finder, Builder)) {
@@ -250,13 +298,14 @@ bool DynTypedMatcher::matches(const ast_type_traits::DynTypedNode &DynNode,
return false;
}
-bool DynTypedMatcher::matchesNoKindCheck(
- const ast_type_traits::DynTypedNode &DynNode, ASTMatchFinder *Finder,
- BoundNodesTreeBuilder *Builder) const {
+bool DynTypedMatcher::matchesNoKindCheck(const DynTypedNode &DynNode,
+ ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder) const {
TraversalKindScope raii(Finder->getASTContext(),
Implementation->TraversalKind());
- auto N = Finder->getASTContext().traverseIgnored(DynNode);
+ auto N =
+ Finder->getASTContext().getParentMapContext().traverseIgnored(DynNode);
assert(RestrictKind.isBaseOf(N.getNodeKind()));
if (Implementation->dynMatches(N, Finder, Builder)) {
@@ -277,10 +326,10 @@ llvm::Optional<DynTypedMatcher> DynTypedMatcher::tryBind(StringRef ID) const {
return std::move(Result);
}
-bool DynTypedMatcher::canConvertTo(ast_type_traits::ASTNodeKind To) const {
+bool DynTypedMatcher::canConvertTo(ASTNodeKind To) const {
const auto From = getSupportedKind();
- auto QualKind = ast_type_traits::ASTNodeKind::getFromNodeKind<QualType>();
- auto TypeKind = ast_type_traits::ASTNodeKind::getFromNodeKind<Type>();
+ auto QualKind = ASTNodeKind::getFromNodeKind<QualType>();
+ auto TypeKind = ASTNodeKind::getFromNodeKind<Type>();
/// Mimic the implicit conversions of Matcher<>.
/// - From Matcher<Type> to Matcher<QualType>
if (From.isSame(TypeKind) && To.isSame(QualKind)) return true;
@@ -292,8 +341,8 @@ void BoundNodesTreeBuilder::addMatch(const BoundNodesTreeBuilder &Other) {
Bindings.append(Other.Bindings.begin(), Other.Bindings.end());
}
-bool NotUnaryOperator(const ast_type_traits::DynTypedNode &DynNode,
- ASTMatchFinder *Finder, BoundNodesTreeBuilder *Builder,
+bool NotUnaryOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
+ BoundNodesTreeBuilder *Builder,
ArrayRef<DynTypedMatcher> InnerMatchers) {
if (InnerMatchers.size() != 1)
return false;
@@ -312,22 +361,18 @@ bool NotUnaryOperator(const ast_type_traits::DynTypedNode &DynNode,
return !InnerMatchers[0].matches(DynNode, Finder, &Discard);
}
-bool AllOfVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
- ASTMatchFinder *Finder,
+bool AllOfVariadicOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder,
ArrayRef<DynTypedMatcher> InnerMatchers) {
// allOf leads to one matcher for each alternative in the first
// matcher combined with each alternative in the second matcher.
// Thus, we can reuse the same Builder.
- for (const DynTypedMatcher &InnerMatcher : InnerMatchers) {
- if (!InnerMatcher.matchesNoKindCheck(DynNode, Finder, Builder))
- return false;
- }
- return true;
+ return llvm::all_of(InnerMatchers, [&](const DynTypedMatcher &InnerMatcher) {
+ return InnerMatcher.matchesNoKindCheck(DynNode, Finder, Builder);
+ });
}
-bool EachOfVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
- ASTMatchFinder *Finder,
+bool EachOfVariadicOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder,
ArrayRef<DynTypedMatcher> InnerMatchers) {
BoundNodesTreeBuilder Result;
@@ -343,8 +388,7 @@ bool EachOfVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
return Matched;
}
-bool AnyOfVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
- ASTMatchFinder *Finder,
+bool AnyOfVariadicOperator(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder,
ArrayRef<DynTypedMatcher> InnerMatchers) {
for (const DynTypedMatcher &InnerMatcher : InnerMatchers) {
@@ -357,31 +401,31 @@ bool AnyOfVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
return false;
}
-bool OptionallyVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
+bool OptionallyVariadicOperator(const DynTypedNode &DynNode,
ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder,
ArrayRef<DynTypedMatcher> InnerMatchers) {
- BoundNodesTreeBuilder Result;
- for (const DynTypedMatcher &InnerMatcher : InnerMatchers) {
- BoundNodesTreeBuilder BuilderInner(*Builder);
- if (InnerMatcher.matches(DynNode, Finder, &BuilderInner))
- Result.addMatch(BuilderInner);
- }
- *Builder = std::move(Result);
+ if (InnerMatchers.size() != 1)
+ return false;
+
+ BoundNodesTreeBuilder Result(*Builder);
+ if (InnerMatchers[0].matches(DynNode, Finder, &Result))
+ *Builder = std::move(Result);
return true;
}
inline static
std::vector<std::string> vectorFromRefs(ArrayRef<const StringRef *> NameRefs) {
std::vector<std::string> Names;
+ Names.reserve(NameRefs.size());
for (auto *Name : NameRefs)
Names.emplace_back(*Name);
return Names;
}
Matcher<NamedDecl> hasAnyNameFunc(ArrayRef<const StringRef *> NameRefs) {
- std::vector<std::string> Names = vectorFromRefs(NameRefs);
- return internal::Matcher<NamedDecl>(new internal::HasNameMatcher(Names));
+ return internal::Matcher<NamedDecl>(
+ new internal::HasNameMatcher(vectorFromRefs(NameRefs)));
}
Matcher<ObjCMessageExpr> hasAnySelectorFunc(
@@ -389,10 +433,18 @@ Matcher<ObjCMessageExpr> hasAnySelectorFunc(
return hasAnySelectorMatcher(vectorFromRefs(NameRefs));
}
+HasOpNameMatcher hasAnyOperatorNameFunc(ArrayRef<const StringRef *> NameRefs) {
+ return HasOpNameMatcher(vectorFromRefs(NameRefs));
+}
+
+HasOverloadOpNameMatcher
+hasAnyOverloadedOperatorNameFunc(ArrayRef<const StringRef *> NameRefs) {
+ return HasOverloadOpNameMatcher(vectorFromRefs(NameRefs));
+}
+
HasNameMatcher::HasNameMatcher(std::vector<std::string> N)
- : UseUnqualifiedMatch(std::all_of(
- N.begin(), N.end(),
- [](StringRef Name) { return Name.find("::") == Name.npos; })),
+ : UseUnqualifiedMatch(llvm::all_of(
+ N, [](StringRef Name) { return Name.find("::") == Name.npos; })),
Names(std::move(N)) {
#ifndef NDEBUG
for (StringRef Name : Names)
@@ -450,6 +502,7 @@ namespace {
class PatternSet {
public:
PatternSet(ArrayRef<std::string> Names) {
+ Patterns.reserve(Names.size());
for (StringRef Name : Names)
Patterns.push_back({Name, Name.startswith("::")});
}
@@ -474,10 +527,10 @@ public:
/// A match will be a pattern that was fully consumed, that also matches the
/// 'fully qualified' requirement.
bool foundMatch(bool AllowFullyQualified) const {
- for (auto& P: Patterns)
- if (P.P.empty() && (AllowFullyQualified || !P.IsFullyQualified))
- return true;
- return false;
+ return llvm::any_of(Patterns, [&](const Pattern &Pattern) {
+ return Pattern.P.empty() &&
+ (AllowFullyQualified || !Pattern.IsFullyQualified);
+ });
}
private:
@@ -523,7 +576,13 @@ bool HasNameMatcher::matchesNodeFullFast(const NamedDecl &Node) const {
if (Ctx->isFunctionOrMethod())
return Patterns.foundMatch(/*AllowFullyQualified=*/false);
- for (; Ctx && isa<NamedDecl>(Ctx); Ctx = Ctx->getParent()) {
+ for (; Ctx; Ctx = Ctx->getParent()) {
+ // Linkage Spec can just be ignored
+ // FIXME: Any other DeclContext kinds that can be safely disregarded
+ if (isa<LinkageSpecDecl>(Ctx))
+ continue;
+ if (!isa<NamedDecl>(Ctx))
+ break;
if (Patterns.foundMatch(/*AllowFullyQualified=*/false))
return true;
@@ -592,6 +651,52 @@ bool HasNameMatcher::matchesNode(const NamedDecl &Node) const {
return matchesNodeFullFast(Node);
}
+// Checks whether \p Loc points to a token with source text of \p TokenText.
+static bool isTokenAtLoc(const SourceManager &SM, const LangOptions &LangOpts,
+ StringRef Text, SourceLocation Loc) {
+ llvm::SmallString<16> Buffer;
+ bool Invalid = false;
+ // Since `Loc` may point into an expansion buffer, which has no corresponding
+ // source, we need to look at the spelling location to read the actual source.
+ StringRef TokenText = Lexer::getSpelling(SM.getSpellingLoc(Loc), Buffer, SM,
+ LangOpts, &Invalid);
+ return !Invalid && Text == TokenText;
+}
+
+llvm::Optional<SourceLocation>
+getExpansionLocOfMacro(StringRef MacroName, SourceLocation Loc,
+ const ASTContext &Context) {
+ auto &SM = Context.getSourceManager();
+ const LangOptions &LangOpts = Context.getLangOpts();
+ while (Loc.isMacroID()) {
+ SrcMgr::ExpansionInfo Expansion =
+ SM.getSLocEntry(SM.getFileID(Loc)).getExpansion();
+ if (Expansion.isMacroArgExpansion())
+ // Check macro argument for an expansion of the given macro. For example,
+ // `F(G(3))`, where `MacroName` is `G`.
+ if (llvm::Optional<SourceLocation> ArgLoc = getExpansionLocOfMacro(
+ MacroName, Expansion.getSpellingLoc(), Context))
+ return ArgLoc;
+ Loc = Expansion.getExpansionLocStart();
+ if (isTokenAtLoc(SM, LangOpts, MacroName, Loc))
+ return Loc;
+ }
+ return llvm::None;
+}
+
+std::shared_ptr<llvm::Regex> createAndVerifyRegex(StringRef Regex,
+ llvm::Regex::RegexFlags Flags,
+ StringRef MatcherID) {
+ assert(!Regex.empty() && "Empty regex string");
+ auto SharedRegex = std::make_shared<llvm::Regex>(Regex, Flags);
+ std::string Error;
+ if (!SharedRegex->isValid(Error)) {
+ llvm::WithColor::error()
+ << "building matcher '" << MatcherID << "': " << Error << "\n";
+ llvm::WithColor::note() << " input was '" << Regex << "'\n";
+ }
+ return SharedRegex;
+}
} // end namespace internal
const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAutoreleasePoolStmt>
@@ -647,6 +752,7 @@ const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl>
const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl>
enumConstantDecl;
+const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl> cxxMethodDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
cxxConversionDecl;
@@ -727,6 +833,8 @@ const internal::VariadicDynCastAllOfMatcher<Stmt, MaterializeTemporaryExpr>
materializeTemporaryExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr> cxxDeleteExpr;
+const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr>
+ cxxNoexceptExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr>
arraySubscriptExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr>
@@ -768,6 +876,8 @@ const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
integerLiteral;
const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral> floatLiteral;
const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral> imaginaryLiteral;
+const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral>
+ fixedPointLiteral;
const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
userDefinedLiteral;
const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
@@ -821,12 +931,18 @@ const internal::VariadicOperatorMatcherFunc<
const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
allOf = {internal::DynTypedMatcher::VO_AllOf};
-const internal::VariadicOperatorMatcherFunc<
- 1, std::numeric_limits<unsigned>::max()>
- optionally = {internal::DynTypedMatcher::VO_Optionally};
+const internal::VariadicOperatorMatcherFunc<1, 1> optionally = {
+ internal::DynTypedMatcher::VO_Optionally};
const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
internal::hasAnyNameFunc>
hasAnyName = {};
+
+const internal::VariadicFunction<internal::HasOpNameMatcher, StringRef,
+ internal::hasAnyOperatorNameFunc>
+ hasAnyOperatorName = {};
+const internal::VariadicFunction<internal::HasOverloadOpNameMatcher, StringRef,
+ internal::hasAnyOverloadedOperatorNameFunc>
+ hasAnyOverloadedOperatorName = {};
const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>, StringRef,
internal::hasAnySelectorFunc>
hasAnySelector = {};
@@ -858,6 +974,8 @@ const AstTypeMatcher<BuiltinType> builtinType;
const AstTypeMatcher<ArrayType> arrayType;
const AstTypeMatcher<ComplexType> complexType;
const AstTypeMatcher<ConstantArrayType> constantArrayType;
+const AstTypeMatcher<DeducedTemplateSpecializationType>
+ deducedTemplateSpecializationType;
const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType;
const AstTypeMatcher<IncompleteArrayType> incompleteArrayType;
const AstTypeMatcher<VariableArrayType> variableArrayType;
diff --git a/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp b/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp
index 8656bca870ec..88c2279afb2e 100644
--- a/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp
+++ b/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp
@@ -1,4 +1,4 @@
-//===--- Diagnostics.cpp - Helper class for error diagnostics -----*- C++ -*-===//
+//===--- Diagnostics.cpp - Helper class for error diagnostics ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -98,6 +98,8 @@ static StringRef errorTypeToFormatString(Diagnostics::ErrorType Type) {
return "Ambiguous matcher overload.";
case Diagnostics::ET_RegistryValueNotFound:
return "Value not found: $0";
+ case Diagnostics::ET_RegistryUnknownEnumWithReplace:
+ return "Unknown value '$1' for arg $0; did you mean '$2'";
case Diagnostics::ET_ParserStringError:
return "Error parsing string token: <$0>";
diff --git a/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp b/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp
new file mode 100644
index 000000000000..989ee0fa75cd
--- /dev/null
+++ b/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp
@@ -0,0 +1,172 @@
+//===--- Marshallers.cpp ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "Marshallers.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Regex.h"
+#include <string>
+
+static llvm::Optional<std::string>
+getBestGuess(llvm::StringRef Search, llvm::ArrayRef<llvm::StringRef> Allowed,
+ llvm::StringRef DropPrefix = "", unsigned MaxEditDistance = 3) {
+ if (MaxEditDistance != ~0U)
+ ++MaxEditDistance;
+ llvm::StringRef Res;
+ for (const llvm::StringRef &Item : Allowed) {
+ if (Item.equals_lower(Search)) {
+ assert(!Item.equals(Search) && "This should be handled earlier on.");
+ MaxEditDistance = 1;
+ Res = Item;
+ continue;
+ }
+ unsigned Distance = Item.edit_distance(Search);
+ if (Distance < MaxEditDistance) {
+ MaxEditDistance = Distance;
+ Res = Item;
+ }
+ }
+ if (!Res.empty())
+ return Res.str();
+ if (!DropPrefix.empty()) {
+ --MaxEditDistance; // Treat dropping the prefix as 1 edit
+ for (const llvm::StringRef &Item : Allowed) {
+ auto NoPrefix = Item;
+ if (!NoPrefix.consume_front(DropPrefix))
+ continue;
+ if (NoPrefix.equals_lower(Search)) {
+ if (NoPrefix.equals(Search))
+ return Item.str();
+ MaxEditDistance = 1;
+ Res = Item;
+ continue;
+ }
+ unsigned Distance = NoPrefix.edit_distance(Search);
+ if (Distance < MaxEditDistance) {
+ MaxEditDistance = Distance;
+ Res = Item;
+ }
+ }
+ if (!Res.empty())
+ return Res.str();
+ }
+ return llvm::None;
+}
+
+llvm::Optional<std::string>
+clang::ast_matchers::dynamic::internal::ArgTypeTraits<
+ clang::attr::Kind>::getBestGuess(const VariantValue &Value) {
+ static constexpr llvm::StringRef Allowed[] = {
+#define ATTR(X) "attr::" #X,
+#include "clang/Basic/AttrList.inc"
+ };
+ if (Value.isString())
+ return ::getBestGuess(Value.getString(), llvm::makeArrayRef(Allowed),
+ "attr::");
+ return llvm::None;
+}
+
+llvm::Optional<std::string>
+clang::ast_matchers::dynamic::internal::ArgTypeTraits<
+ clang::CastKind>::getBestGuess(const VariantValue &Value) {
+ static constexpr llvm::StringRef Allowed[] = {
+#define CAST_OPERATION(Name) "CK_" #Name,
+#include "clang/AST/OperationKinds.def"
+ };
+ if (Value.isString())
+ return ::getBestGuess(Value.getString(), llvm::makeArrayRef(Allowed),
+ "CK_");
+ return llvm::None;
+}
+
+llvm::Optional<std::string>
+clang::ast_matchers::dynamic::internal::ArgTypeTraits<
+ clang::OpenMPClauseKind>::getBestGuess(const VariantValue &Value) {
+ static constexpr llvm::StringRef Allowed[] = {
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) #Enum,
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+ };
+ if (Value.isString())
+ return ::getBestGuess(Value.getString(), llvm::makeArrayRef(Allowed),
+ "OMPC_");
+ return llvm::None;
+}
+
+llvm::Optional<std::string>
+clang::ast_matchers::dynamic::internal::ArgTypeTraits<
+ clang::UnaryExprOrTypeTrait>::getBestGuess(const VariantValue &Value) {
+ static constexpr llvm::StringRef Allowed[] = {
+#define UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) "UETT_" #Name,
+#define CXX11_UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) "UETT_" #Name,
+#include "clang/Basic/TokenKinds.def"
+ };
+ if (Value.isString())
+ return ::getBestGuess(Value.getString(), llvm::makeArrayRef(Allowed),
+ "UETT_");
+ return llvm::None;
+}
+
+static constexpr std::pair<llvm::StringRef, llvm::Regex::RegexFlags>
+ RegexMap[] = {
+ {"NoFlags", llvm::Regex::RegexFlags::NoFlags},
+ {"IgnoreCase", llvm::Regex::RegexFlags::IgnoreCase},
+ {"Newline", llvm::Regex::RegexFlags::Newline},
+ {"BasicRegex", llvm::Regex::RegexFlags::BasicRegex},
+};
+
+llvm::Optional<llvm::Regex::RegexFlags> getRegexFlag(llvm::StringRef Flag) {
+ for (const auto &StringFlag : RegexMap) {
+ if (Flag == StringFlag.first)
+ return StringFlag.second;
+ }
+ return llvm::None;
+}
+
+llvm::Optional<llvm::StringRef> getCloseRegexMatch(llvm::StringRef Flag) {
+ for (const auto &StringFlag : RegexMap) {
+ if (Flag.edit_distance(StringFlag.first) < 3)
+ return StringFlag.first;
+ }
+ return llvm::None;
+}
+
+llvm::Optional<llvm::Regex::RegexFlags>
+clang::ast_matchers::dynamic::internal::ArgTypeTraits<
+ llvm::Regex::RegexFlags>::getFlags(llvm::StringRef Flags) {
+ llvm::Optional<llvm::Regex::RegexFlags> Flag;
+ SmallVector<StringRef, 4> Split;
+ Flags.split(Split, '|', -1, false);
+ for (StringRef OrFlag : Split) {
+ if (llvm::Optional<llvm::Regex::RegexFlags> NextFlag =
+ getRegexFlag(OrFlag.trim()))
+ Flag = Flag.getValueOr(llvm::Regex::NoFlags) | *NextFlag;
+ else
+ return None;
+ }
+ return Flag;
+}
+
+llvm::Optional<std::string>
+clang::ast_matchers::dynamic::internal::ArgTypeTraits<
+ llvm::Regex::RegexFlags>::getBestGuess(const VariantValue &Value) {
+ if (!Value.isString())
+ return llvm::None;
+ SmallVector<StringRef, 4> Split;
+ llvm::StringRef(Value.getString()).split(Split, '|', -1, false);
+ for (llvm::StringRef &Flag : Split) {
+ if (llvm::Optional<llvm::StringRef> BestGuess =
+ getCloseRegexMatch(Flag.trim()))
+ Flag = *BestGuess;
+ else
+ return None;
+ }
+ if (Split.empty())
+ return None;
+ return llvm::join(Split, " | ");
+}
diff --git a/clang/lib/ASTMatchers/Dynamic/Marshallers.h b/clang/lib/ASTMatchers/Dynamic/Marshallers.h
index 9f46108d1848..33f6d1e4155c 100644
--- a/clang/lib/ASTMatchers/Dynamic/Marshallers.h
+++ b/clang/lib/ASTMatchers/Dynamic/Marshallers.h
@@ -27,12 +27,15 @@
#include "clang/Basic/AttrKinds.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OpenMPKinds.h"
+#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/Support/Regex.h"
#include <cassert>
#include <cstddef>
#include <iterator>
@@ -64,6 +67,10 @@ template <> struct ArgTypeTraits<std::string> {
static ArgKind getKind() {
return ArgKind(ArgKind::AK_String);
}
+
+ static llvm::Optional<std::string> getBestGuess(const VariantValue &) {
+ return llvm::None;
+ }
};
template <>
@@ -80,7 +87,11 @@ template <class T> struct ArgTypeTraits<ast_matchers::internal::Matcher<T>> {
}
static ArgKind getKind() {
- return ArgKind(ast_type_traits::ASTNodeKind::getFromNodeKind<T>());
+ return ArgKind(ASTNodeKind::getFromNodeKind<T>());
+ }
+
+ static llvm::Optional<std::string> getBestGuess(const VariantValue &) {
+ return llvm::None;
}
};
@@ -94,6 +105,10 @@ template <> struct ArgTypeTraits<bool> {
static ArgKind getKind() {
return ArgKind(ArgKind::AK_Boolean);
}
+
+ static llvm::Optional<std::string> getBestGuess(const VariantValue &) {
+ return llvm::None;
+ }
};
template <> struct ArgTypeTraits<double> {
@@ -106,6 +121,10 @@ template <> struct ArgTypeTraits<double> {
static ArgKind getKind() {
return ArgKind(ArgKind::AK_Double);
}
+
+ static llvm::Optional<std::string> getBestGuess(const VariantValue &) {
+ return llvm::None;
+ }
};
template <> struct ArgTypeTraits<unsigned> {
@@ -118,6 +137,10 @@ template <> struct ArgTypeTraits<unsigned> {
static ArgKind getKind() {
return ArgKind(ArgKind::AK_Unsigned);
}
+
+ static llvm::Optional<std::string> getBestGuess(const VariantValue &) {
+ return llvm::None;
+ }
};
template <> struct ArgTypeTraits<attr::Kind> {
@@ -141,13 +164,15 @@ public:
static ArgKind getKind() {
return ArgKind(ArgKind::AK_String);
}
+
+ static llvm::Optional<std::string> getBestGuess(const VariantValue &Value);
};
template <> struct ArgTypeTraits<CastKind> {
private:
static Optional<CastKind> getCastKind(llvm::StringRef AttrKind) {
return llvm::StringSwitch<Optional<CastKind>>(AttrKind)
-#define CAST_OPERATION(Name) .Case( #Name, CK_##Name)
+#define CAST_OPERATION(Name) .Case("CK_" #Name, CK_##Name)
#include "clang/AST/OperationKinds.def"
.Default(llvm::None);
}
@@ -164,15 +189,34 @@ public:
static ArgKind getKind() {
return ArgKind(ArgKind::AK_String);
}
+
+ static llvm::Optional<std::string> getBestGuess(const VariantValue &Value);
+};
+
+template <> struct ArgTypeTraits<llvm::Regex::RegexFlags> {
+private:
+ static Optional<llvm::Regex::RegexFlags> getFlags(llvm::StringRef Flags);
+
+public:
+ static bool is(const VariantValue &Value) {
+ return Value.isString() && getFlags(Value.getString());
+ }
+
+ static llvm::Regex::RegexFlags get(const VariantValue &Value) {
+ return *getFlags(Value.getString());
+ }
+
+ static ArgKind getKind() { return ArgKind(ArgKind::AK_String); }
+
+ static llvm::Optional<std::string> getBestGuess(const VariantValue &Value);
};
template <> struct ArgTypeTraits<OpenMPClauseKind> {
private:
static Optional<OpenMPClauseKind> getClauseKind(llvm::StringRef ClauseKind) {
return llvm::StringSwitch<Optional<OpenMPClauseKind>>(ClauseKind)
-#define OPENMP_CLAUSE(TextualSpelling, Class) \
- .Case("OMPC_" #TextualSpelling, OMPC_##TextualSpelling)
-#include "clang/Basic/OpenMPKinds.def"
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) .Case(#Enum, llvm::omp::Clause::Enum)
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
.Default(llvm::None);
}
@@ -186,6 +230,35 @@ public:
}
static ArgKind getKind() { return ArgKind(ArgKind::AK_String); }
+
+ static llvm::Optional<std::string> getBestGuess(const VariantValue &Value);
+};
+
+template <> struct ArgTypeTraits<UnaryExprOrTypeTrait> {
+private:
+ static Optional<UnaryExprOrTypeTrait>
+ getUnaryOrTypeTraitKind(llvm::StringRef ClauseKind) {
+ return llvm::StringSwitch<Optional<UnaryExprOrTypeTrait>>(ClauseKind)
+#define UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) \
+ .Case("UETT_" #Name, UETT_##Name)
+#define CXX11_UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) \
+ .Case("UETT_" #Name, UETT_##Name)
+#include "clang/Basic/TokenKinds.def"
+ .Default(llvm::None);
+ }
+
+public:
+ static bool is(const VariantValue &Value) {
+ return Value.isString() && getUnaryOrTypeTraitKind(Value.getString());
+ }
+
+ static UnaryExprOrTypeTrait get(const VariantValue &Value) {
+ return *getUnaryOrTypeTraitKind(Value.getString());
+ }
+
+ static ArgKind getKind() { return ArgKind(ArgKind::AK_String); }
+
+ static llvm::Optional<std::string> getBestGuess(const VariantValue &Value);
};
/// Matcher descriptor interface.
@@ -211,7 +284,7 @@ public:
/// set of argument types accepted for argument \p ArgNo to \p ArgKinds.
// FIXME: We should provide the ability to constrain the output of this
// function based on the types of other matcher arguments.
- virtual void getArgKinds(ast_type_traits::ASTNodeKind ThisKind, unsigned ArgNo,
+ virtual void getArgKinds(ASTNodeKind ThisKind, unsigned ArgNo,
std::vector<ArgKind> &ArgKinds) const = 0;
/// Returns whether this matcher is convertible to the given type. If it is
@@ -221,20 +294,19 @@ public:
/// same matcher overload. Zero specificity indicates that this conversion
/// would produce a trivial matcher that will either always or never match.
/// Such matchers are excluded from code completion results.
- virtual bool isConvertibleTo(
- ast_type_traits::ASTNodeKind Kind, unsigned *Specificity = nullptr,
- ast_type_traits::ASTNodeKind *LeastDerivedKind = nullptr) const = 0;
+ virtual bool
+ isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity = nullptr,
+ ASTNodeKind *LeastDerivedKind = nullptr) const = 0;
/// Returns whether the matcher will, given a matcher of any type T, yield a
/// matcher of type T.
virtual bool isPolymorphic() const { return false; }
};
-inline bool isRetKindConvertibleTo(
- ArrayRef<ast_type_traits::ASTNodeKind> RetKinds,
- ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
- ast_type_traits::ASTNodeKind *LeastDerivedKind) {
- for (const ast_type_traits::ASTNodeKind &NodeKind : RetKinds) {
+inline bool isRetKindConvertibleTo(ArrayRef<ASTNodeKind> RetKinds,
+ ASTNodeKind Kind, unsigned *Specificity,
+ ASTNodeKind *LeastDerivedKind) {
+ for (const ASTNodeKind &NodeKind : RetKinds) {
if (ArgKind(NodeKind).isConvertibleTo(Kind, Specificity)) {
if (LeastDerivedKind)
*LeastDerivedKind = NodeKind;
@@ -264,10 +336,10 @@ public:
/// \param RetKinds The list of matcher types to which the matcher is
/// convertible.
/// \param ArgKinds The types of the arguments this matcher takes.
- FixedArgCountMatcherDescriptor(
- MarshallerType Marshaller, void (*Func)(), StringRef MatcherName,
- ArrayRef<ast_type_traits::ASTNodeKind> RetKinds,
- ArrayRef<ArgKind> ArgKinds)
+ FixedArgCountMatcherDescriptor(MarshallerType Marshaller, void (*Func)(),
+ StringRef MatcherName,
+ ArrayRef<ASTNodeKind> RetKinds,
+ ArrayRef<ArgKind> ArgKinds)
: Marshaller(Marshaller), Func(Func), MatcherName(MatcherName),
RetKinds(RetKinds.begin(), RetKinds.end()),
ArgKinds(ArgKinds.begin(), ArgKinds.end()) {}
@@ -281,14 +353,13 @@ public:
bool isVariadic() const override { return false; }
unsigned getNumArgs() const override { return ArgKinds.size(); }
- void getArgKinds(ast_type_traits::ASTNodeKind ThisKind, unsigned ArgNo,
+ void getArgKinds(ASTNodeKind ThisKind, unsigned ArgNo,
std::vector<ArgKind> &Kinds) const override {
Kinds.push_back(ArgKinds[ArgNo]);
}
- bool isConvertibleTo(
- ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
- ast_type_traits::ASTNodeKind *LeastDerivedKind) const override {
+ bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity,
+ ASTNodeKind *LeastDerivedKind) const override {
return isRetKindConvertibleTo(RetKinds, Kind, Specificity,
LeastDerivedKind);
}
@@ -297,7 +368,7 @@ private:
const MarshallerType Marshaller;
void (* const Func)();
const std::string MatcherName;
- const std::vector<ast_type_traits::ASTNodeKind> RetKinds;
+ const std::vector<ASTNodeKind> RetKinds;
const std::vector<ArgKind> ArgKinds;
};
@@ -321,7 +392,7 @@ static void mergePolyMatchers(const PolyMatcher &Poly,
/// polymorphic matcher. For the former, we just construct the VariantMatcher.
/// For the latter, we instantiate all the possible Matcher<T> of the poly
/// matcher.
-static VariantMatcher outvalueToVariantMatcher(const DynTypedMatcher &Matcher) {
+inline VariantMatcher outvalueToVariantMatcher(const DynTypedMatcher &Matcher) {
return VariantMatcher::SingleMatcher(Matcher);
}
@@ -336,36 +407,35 @@ static VariantMatcher outvalueToVariantMatcher(const T &PolyMatcher,
}
template <typename T>
-inline void buildReturnTypeVectorFromTypeList(
- std::vector<ast_type_traits::ASTNodeKind> &RetTypes) {
- RetTypes.push_back(
- ast_type_traits::ASTNodeKind::getFromNodeKind<typename T::head>());
+inline void
+buildReturnTypeVectorFromTypeList(std::vector<ASTNodeKind> &RetTypes) {
+ RetTypes.push_back(ASTNodeKind::getFromNodeKind<typename T::head>());
buildReturnTypeVectorFromTypeList<typename T::tail>(RetTypes);
}
template <>
inline void
buildReturnTypeVectorFromTypeList<ast_matchers::internal::EmptyTypeList>(
- std::vector<ast_type_traits::ASTNodeKind> &RetTypes) {}
+ std::vector<ASTNodeKind> &RetTypes) {}
template <typename T>
struct BuildReturnTypeVector {
- static void build(std::vector<ast_type_traits::ASTNodeKind> &RetTypes) {
+ static void build(std::vector<ASTNodeKind> &RetTypes) {
buildReturnTypeVectorFromTypeList<typename T::ReturnTypes>(RetTypes);
}
};
template <typename T>
struct BuildReturnTypeVector<ast_matchers::internal::Matcher<T>> {
- static void build(std::vector<ast_type_traits::ASTNodeKind> &RetTypes) {
- RetTypes.push_back(ast_type_traits::ASTNodeKind::getFromNodeKind<T>());
+ static void build(std::vector<ASTNodeKind> &RetTypes) {
+ RetTypes.push_back(ASTNodeKind::getFromNodeKind<T>());
}
};
template <typename T>
struct BuildReturnTypeVector<ast_matchers::internal::BindableMatcher<T>> {
- static void build(std::vector<ast_type_traits::ASTNodeKind> &RetTypes) {
- RetTypes.push_back(ast_type_traits::ASTNodeKind::getFromNodeKind<T>());
+ static void build(std::vector<ASTNodeKind> &RetTypes) {
+ RetTypes.push_back(ASTNodeKind::getFromNodeKind<T>());
}
};
@@ -439,14 +509,13 @@ public:
bool isVariadic() const override { return true; }
unsigned getNumArgs() const override { return 0; }
- void getArgKinds(ast_type_traits::ASTNodeKind ThisKind, unsigned ArgNo,
+ void getArgKinds(ASTNodeKind ThisKind, unsigned ArgNo,
std::vector<ArgKind> &Kinds) const override {
Kinds.push_back(ArgsKind);
}
- bool isConvertibleTo(
- ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
- ast_type_traits::ASTNodeKind *LeastDerivedKind) const override {
+ bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity,
+ ASTNodeKind *LeastDerivedKind) const override {
return isRetKindConvertibleTo(RetKinds, Kind, Specificity,
LeastDerivedKind);
}
@@ -454,7 +523,7 @@ public:
private:
const RunFunc Func;
const std::string MatcherName;
- std::vector<ast_type_traits::ASTNodeKind> RetKinds;
+ std::vector<ASTNodeKind> RetKinds;
const ArgKind ArgsKind;
};
@@ -466,12 +535,10 @@ public:
ast_matchers::internal::VariadicDynCastAllOfMatcher<BaseT, DerivedT> Func,
StringRef MatcherName)
: VariadicFuncMatcherDescriptor(Func, MatcherName),
- DerivedKind(ast_type_traits::ASTNodeKind::getFromNodeKind<DerivedT>()) {
- }
+ DerivedKind(ASTNodeKind::getFromNodeKind<DerivedT>()) {}
- bool
- isConvertibleTo(ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
- ast_type_traits::ASTNodeKind *LeastDerivedKind) const override {
+ bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity,
+ ASTNodeKind *LeastDerivedKind) const override {
// If Kind is not a base of DerivedKind, either DerivedKind is a base of
// Kind (in which case the match will always succeed) or Kind and
// DerivedKind are unrelated (in which case it will always fail), so set
@@ -489,7 +556,7 @@ public:
}
private:
- const ast_type_traits::ASTNodeKind DerivedKind;
+ const ASTNodeKind DerivedKind;
};
/// Helper macros to check the arguments on all marshaller functions.
@@ -502,9 +569,16 @@ private:
#define CHECK_ARG_TYPE(index, type) \
if (!ArgTypeTraits<type>::is(Args[index].Value)) { \
- Error->addError(Args[index].Range, Error->ET_RegistryWrongArgType) \
- << (index + 1) << ArgTypeTraits<type>::getKind().asString() \
- << Args[index].Value.getTypeAsString(); \
+ if (llvm::Optional<std::string> BestGuess = \
+ ArgTypeTraits<type>::getBestGuess(Args[index].Value)) { \
+ Error->addError(Args[index].Range, \
+ Error->ET_RegistryUnknownEnumWithReplace) \
+ << index + 1 << Args[index].Value.getString() << *BestGuess; \
+ } else { \
+ Error->addError(Args[index].Range, Error->ET_RegistryWrongArgType) \
+ << (index + 1) << ArgTypeTraits<type>::getKind().asString() \
+ << Args[index].Value.getTypeAsString(); \
+ } \
return VariantMatcher(); \
}
@@ -635,7 +709,7 @@ public:
return Overload0NumArgs;
}
- void getArgKinds(ast_type_traits::ASTNodeKind ThisKind, unsigned ArgNo,
+ void getArgKinds(ASTNodeKind ThisKind, unsigned ArgNo,
std::vector<ArgKind> &Kinds) const override {
for (const auto &O : Overloads) {
if (O->isConvertibleTo(ThisKind))
@@ -643,9 +717,8 @@ public:
}
}
- bool isConvertibleTo(
- ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
- ast_type_traits::ASTNodeKind *LeastDerivedKind) const override {
+ bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity,
+ ASTNodeKind *LeastDerivedKind) const override {
for (const auto &O : Overloads) {
if (O->isConvertibleTo(Kind, Specificity, LeastDerivedKind))
return true;
@@ -657,6 +730,71 @@ private:
std::vector<std::unique_ptr<MatcherDescriptor>> Overloads;
};
+template <typename ReturnType>
+class RegexMatcherDescriptor : public MatcherDescriptor {
+public:
+ RegexMatcherDescriptor(ReturnType (*WithFlags)(StringRef,
+ llvm::Regex::RegexFlags),
+ ReturnType (*NoFlags)(StringRef),
+ ArrayRef<ASTNodeKind> RetKinds)
+ : WithFlags(WithFlags), NoFlags(NoFlags),
+ RetKinds(RetKinds.begin(), RetKinds.end()) {}
+ bool isVariadic() const override { return true; }
+ unsigned getNumArgs() const override { return 0; }
+
+ void getArgKinds(ASTNodeKind ThisKind, unsigned ArgNo,
+ std::vector<ArgKind> &Kinds) const override {
+ assert(ArgNo < 2);
+ Kinds.push_back(ArgKind::AK_String);
+ }
+
+ bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity,
+ ASTNodeKind *LeastDerivedKind) const override {
+ return isRetKindConvertibleTo(RetKinds, Kind, Specificity,
+ LeastDerivedKind);
+ }
+
+ VariantMatcher create(SourceRange NameRange, ArrayRef<ParserValue> Args,
+ Diagnostics *Error) const override {
+ if (Args.size() < 1 || Args.size() > 2) {
+ Error->addError(NameRange, Diagnostics::ET_RegistryWrongArgCount)
+ << "1 or 2" << Args.size();
+ return VariantMatcher();
+ }
+ if (!ArgTypeTraits<StringRef>::is(Args[0].Value)) {
+ Error->addError(Args[0].Range, Error->ET_RegistryWrongArgType)
+ << 1 << ArgTypeTraits<StringRef>::getKind().asString()
+ << Args[0].Value.getTypeAsString();
+ return VariantMatcher();
+ }
+ if (Args.size() == 1) {
+ return outvalueToVariantMatcher(
+ NoFlags(ArgTypeTraits<StringRef>::get(Args[0].Value)));
+ }
+ if (!ArgTypeTraits<llvm::Regex::RegexFlags>::is(Args[1].Value)) {
+ if (llvm::Optional<std::string> BestGuess =
+ ArgTypeTraits<llvm::Regex::RegexFlags>::getBestGuess(
+ Args[1].Value)) {
+ Error->addError(Args[1].Range, Error->ET_RegistryUnknownEnumWithReplace)
+ << 2 << Args[1].Value.getString() << *BestGuess;
+ } else {
+ Error->addError(Args[1].Range, Error->ET_RegistryWrongArgType)
+ << 2 << ArgTypeTraits<llvm::Regex::RegexFlags>::getKind().asString()
+ << Args[1].Value.getTypeAsString();
+ }
+ return VariantMatcher();
+ }
+ return outvalueToVariantMatcher(
+ WithFlags(ArgTypeTraits<StringRef>::get(Args[0].Value),
+ ArgTypeTraits<llvm::Regex::RegexFlags>::get(Args[1].Value)));
+ }
+
+private:
+ ReturnType (*const WithFlags)(StringRef, llvm::Regex::RegexFlags);
+ ReturnType (*const NoFlags)(StringRef);
+ const std::vector<ASTNodeKind> RetKinds;
+};
+
/// Variadic operator marshaller function.
class VariadicOperatorMatcherDescriptor : public MatcherDescriptor {
public:
@@ -697,13 +835,13 @@ public:
bool isVariadic() const override { return true; }
unsigned getNumArgs() const override { return 0; }
- void getArgKinds(ast_type_traits::ASTNodeKind ThisKind, unsigned ArgNo,
+ void getArgKinds(ASTNodeKind ThisKind, unsigned ArgNo,
std::vector<ArgKind> &Kinds) const override {
Kinds.push_back(ThisKind);
}
- bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind, unsigned *Specificity,
- ast_type_traits::ASTNodeKind *LeastDerivedKind) const override {
+ bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity,
+ ASTNodeKind *LeastDerivedKind) const override {
if (Specificity)
*Specificity = 1;
if (LeastDerivedKind)
@@ -727,7 +865,7 @@ private:
template <typename ReturnType>
std::unique_ptr<MatcherDescriptor>
makeMatcherAutoMarshall(ReturnType (*Func)(), StringRef MatcherName) {
- std::vector<ast_type_traits::ASTNodeKind> RetTypes;
+ std::vector<ASTNodeKind> RetTypes;
BuildReturnTypeVector<ReturnType>::build(RetTypes);
return std::make_unique<FixedArgCountMatcherDescriptor>(
matcherMarshall0<ReturnType>, reinterpret_cast<void (*)()>(Func),
@@ -738,7 +876,7 @@ makeMatcherAutoMarshall(ReturnType (*Func)(), StringRef MatcherName) {
template <typename ReturnType, typename ArgType1>
std::unique_ptr<MatcherDescriptor>
makeMatcherAutoMarshall(ReturnType (*Func)(ArgType1), StringRef MatcherName) {
- std::vector<ast_type_traits::ASTNodeKind> RetTypes;
+ std::vector<ASTNodeKind> RetTypes;
BuildReturnTypeVector<ReturnType>::build(RetTypes);
ArgKind AK = ArgTypeTraits<ArgType1>::getKind();
return std::make_unique<FixedArgCountMatcherDescriptor>(
@@ -751,7 +889,7 @@ template <typename ReturnType, typename ArgType1, typename ArgType2>
std::unique_ptr<MatcherDescriptor>
makeMatcherAutoMarshall(ReturnType (*Func)(ArgType1, ArgType2),
StringRef MatcherName) {
- std::vector<ast_type_traits::ASTNodeKind> RetTypes;
+ std::vector<ASTNodeKind> RetTypes;
BuildReturnTypeVector<ReturnType>::build(RetTypes);
ArgKind AKs[] = { ArgTypeTraits<ArgType1>::getKind(),
ArgTypeTraits<ArgType2>::getKind() };
@@ -760,6 +898,16 @@ makeMatcherAutoMarshall(ReturnType (*Func)(ArgType1, ArgType2),
reinterpret_cast<void (*)()>(Func), MatcherName, RetTypes, AKs);
}
+template <typename ReturnType>
+std::unique_ptr<MatcherDescriptor> makeMatcherRegexMarshall(
+ ReturnType (*FuncFlags)(llvm::StringRef, llvm::Regex::RegexFlags),
+ ReturnType (*Func)(llvm::StringRef)) {
+ std::vector<ASTNodeKind> RetTypes;
+ BuildReturnTypeVector<ReturnType>::build(RetTypes);
+ return std::make_unique<RegexMatcherDescriptor<ReturnType>>(FuncFlags, Func,
+ RetTypes);
+}
+
/// Variadic overload.
template <typename ResultT, typename ArgT,
ResultT (*Func)(ArrayRef<const ArgT *>)>
diff --git a/clang/lib/ASTMatchers/Dynamic/Registry.cpp b/clang/lib/ASTMatchers/Dynamic/Registry.cpp
index 1c0930c5983a..ec2215804c09 100644
--- a/clang/lib/ASTMatchers/Dynamic/Registry.cpp
+++ b/clang/lib/ASTMatchers/Dynamic/Registry.cpp
@@ -90,14 +90,14 @@ void RegistryMaps::registerMatcher(
REGISTER_MATCHER_OVERLOAD(name); \
} while (false)
+#define REGISTER_REGEX_MATCHER(name) \
+ registerMatcher(#name, internal::makeMatcherRegexMarshall(name, name))
+
/// Generate a registry map with all the known matchers.
/// Please keep sorted alphabetically!
RegistryMaps::RegistryMaps() {
// TODO: Here is the list of the missing matchers, grouped by reason.
//
- // Need Variant/Parser fixes:
- // ofKind
- //
// Polymorphic + argument overload:
// findAll
//
@@ -124,6 +124,10 @@ RegistryMaps::RegistryMaps() {
};
REGISTER_MATCHER_OVERLOAD(equals);
+ REGISTER_REGEX_MATCHER(isExpansionInFileMatching);
+ REGISTER_REGEX_MATCHER(matchesName);
+ REGISTER_REGEX_MATCHER(matchesSelector);
+
REGISTER_MATCHER(accessSpecDecl);
REGISTER_MATCHER(addrLabelExpr);
REGISTER_MATCHER(alignOfExpr);
@@ -154,6 +158,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(characterLiteral);
REGISTER_MATCHER(chooseExpr);
REGISTER_MATCHER(classTemplateDecl);
+ REGISTER_MATCHER(classTemplatePartialSpecializationDecl);
REGISTER_MATCHER(classTemplateSpecializationDecl);
REGISTER_MATCHER(complexType);
REGISTER_MATCHER(compoundLiteralExpr);
@@ -183,6 +188,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(cxxMemberCallExpr);
REGISTER_MATCHER(cxxMethodDecl);
REGISTER_MATCHER(cxxNewExpr);
+ REGISTER_MATCHER(cxxNoexceptExpr);
REGISTER_MATCHER(cxxNullPtrLiteralExpr);
REGISTER_MATCHER(cxxOperatorCallExpr);
REGISTER_MATCHER(cxxRecordDecl);
@@ -201,6 +207,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(declStmt);
REGISTER_MATCHER(declaratorDecl);
REGISTER_MATCHER(decltypeType);
+ REGISTER_MATCHER(deducedTemplateSpecializationType);
REGISTER_MATCHER(defaultStmt);
REGISTER_MATCHER(dependentSizedArrayType);
REGISTER_MATCHER(designatedInitExpr);
@@ -237,11 +244,15 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(has);
REGISTER_MATCHER(hasAncestor);
REGISTER_MATCHER(hasAnyArgument);
+ REGISTER_MATCHER(hasAnyBase);
REGISTER_MATCHER(hasAnyClause);
REGISTER_MATCHER(hasAnyConstructorInitializer);
REGISTER_MATCHER(hasAnyDeclaration);
REGISTER_MATCHER(hasAnyName);
+ REGISTER_MATCHER(hasAnyOperatorName);
+ REGISTER_MATCHER(hasAnyOverloadedOperatorName);
REGISTER_MATCHER(hasAnyParameter);
+ REGISTER_MATCHER(hasAnyPlacementArg);
REGISTER_MATCHER(hasAnySelector);
REGISTER_MATCHER(hasAnySubstatement);
REGISTER_MATCHER(hasAnyTemplateArgument);
@@ -267,6 +278,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasDefinition);
REGISTER_MATCHER(hasDescendant);
REGISTER_MATCHER(hasDestinationType);
+ REGISTER_MATCHER(hasDirectBase);
REGISTER_MATCHER(hasDynamicExceptionSpec);
REGISTER_MATCHER(hasEitherOperand);
REGISTER_MATCHER(hasElementType);
@@ -292,6 +304,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasName);
REGISTER_MATCHER(hasNullSelector);
REGISTER_MATCHER(hasObjectExpression);
+ REGISTER_MATCHER(hasOperands);
REGISTER_MATCHER(hasOperatorName);
REGISTER_MATCHER(hasOverloadedOperatorName);
REGISTER_MATCHER(hasParameter);
@@ -303,6 +316,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasReceiverType);
REGISTER_MATCHER(hasReplacementType);
REGISTER_MATCHER(hasReturnValue);
+ REGISTER_MATCHER(hasPlacementArg);
REGISTER_MATCHER(hasSelector);
REGISTER_MATCHER(hasSingleDecl);
REGISTER_MATCHER(hasSize);
@@ -347,12 +361,14 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isArray);
REGISTER_MATCHER(isArrow);
REGISTER_MATCHER(isAssignmentOperator);
+ REGISTER_MATCHER(isAtPosition);
REGISTER_MATCHER(isBaseInitializer);
REGISTER_MATCHER(isBitField);
REGISTER_MATCHER(isCatchAll);
REGISTER_MATCHER(isClass);
REGISTER_MATCHER(isClassMessage);
REGISTER_MATCHER(isClassMethod);
+ REGISTER_MATCHER(isComparisonOperator);
REGISTER_MATCHER(isConst);
REGISTER_MATCHER(isConstQualified);
REGISTER_MATCHER(isConstexpr);
@@ -363,8 +379,9 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isDefinition);
REGISTER_MATCHER(isDelegatingConstructor);
REGISTER_MATCHER(isDeleted);
+ REGISTER_MATCHER(isEnum);
REGISTER_MATCHER(isExceptionVariable);
- REGISTER_MATCHER(isExpansionInFileMatching);
+ REGISTER_MATCHER(isExpandedFromMacro);
REGISTER_MATCHER(isExpansionInMainFile);
REGISTER_MATCHER(isExpansionInSystemHeader);
REGISTER_MATCHER(isExplicit);
@@ -372,6 +389,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isExpr);
REGISTER_MATCHER(isExternC);
REGISTER_MATCHER(isFinal);
+ REGISTER_MATCHER(isFirstPrivateKind);
REGISTER_MATCHER(isImplicit);
REGISTER_MATCHER(isInStdNamespace);
REGISTER_MATCHER(isInTemplateInstantiation);
@@ -391,7 +409,6 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isNoReturn);
REGISTER_MATCHER(isNoThrow);
REGISTER_MATCHER(isNoneKind);
- REGISTER_MATCHER(isOMPStructuredBlock);
REGISTER_MATCHER(isOverride);
REGISTER_MATCHER(isPrivate);
REGISTER_MATCHER(isProtected);
@@ -420,8 +437,6 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(labelStmt);
REGISTER_MATCHER(lambdaExpr);
REGISTER_MATCHER(linkageSpecDecl);
- REGISTER_MATCHER(matchesName);
- REGISTER_MATCHER(matchesSelector);
REGISTER_MATCHER(materializeTemporaryExpr);
REGISTER_MATCHER(member);
REGISTER_MATCHER(memberExpr);
@@ -452,6 +467,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(objcThrowStmt);
REGISTER_MATCHER(objcTryStmt);
REGISTER_MATCHER(ofClass);
+ REGISTER_MATCHER(ofKind);
REGISTER_MATCHER(ompDefaultClause);
REGISTER_MATCHER(ompExecutableDirective);
REGISTER_MATCHER(on);
@@ -492,6 +508,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(substTemplateTypeParmType);
REGISTER_MATCHER(switchCase);
REGISTER_MATCHER(switchStmt);
+ REGISTER_MATCHER(tagDecl);
REGISTER_MATCHER(tagType);
REGISTER_MATCHER(templateArgument);
REGISTER_MATCHER(templateArgumentCountIs);
@@ -652,7 +669,7 @@ Registry::getMatcherCompletions(ArrayRef<ArgKind> AcceptedTypes) {
OS << "...";
OS << ")";
- std::string TypedText = Name;
+ std::string TypedText = std::string(Name);
TypedText += "(";
if (ArgsKinds.empty())
TypedText += ")";
diff --git a/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp b/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp
index 118ca2a41cb1..866e2d0e3491 100644
--- a/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp
+++ b/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp
@@ -1,4 +1,4 @@
-//===--- VariantValue.cpp - Polymorphic value type -*- C++ -*-===/
+//===--- VariantValue.cpp - Polymorphic value type --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -101,8 +101,7 @@ public:
return llvm::None;
}
- bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind,
- unsigned *Specificity) const override {
+ bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity) const override {
return ArgKind(Matcher.getSupportedKind())
.isConvertibleTo(Kind, Specificity);
}
@@ -159,8 +158,7 @@ public:
return llvm::None;
}
- bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind,
- unsigned *Specificity) const override {
+ bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity) const override {
unsigned MaxSpecificity = 0;
for (const DynTypedMatcher &Matcher : Matchers) {
unsigned ThisSpecificity;
@@ -202,8 +200,7 @@ public:
return Ops.constructVariadicOperator(Op, Args);
}
- bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind,
- unsigned *Specificity) const override {
+ bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity) const override {
for (const VariantMatcher &Matcher : Args) {
if (!Matcher.isConvertibleTo(Kind, Specificity))
return false;
diff --git a/clang/lib/ASTMatchers/GtestMatchers.cpp b/clang/lib/ASTMatchers/GtestMatchers.cpp
new file mode 100644
index 000000000000..c99fdf6c0fcd
--- /dev/null
+++ b/clang/lib/ASTMatchers/GtestMatchers.cpp
@@ -0,0 +1,104 @@
+//===- GtestMatchers.cpp - AST Matchers for Gtest ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/ASTMatchers/GtestMatchers.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/Timer.h"
+#include <deque>
+#include <memory>
+#include <set>
+
+namespace clang {
+namespace ast_matchers {
+
+static DeclarationMatcher getComparisonDecl(GtestCmp Cmp) {
+ switch (Cmp) {
+ case GtestCmp::Eq:
+ return cxxMethodDecl(hasName("Compare"),
+ ofClass(cxxRecordDecl(isSameOrDerivedFrom(
+ hasName("::testing::internal::EqHelper")))));
+ case GtestCmp::Ne:
+ return functionDecl(hasName("::testing::internal::CmpHelperNE"));
+ case GtestCmp::Ge:
+ return functionDecl(hasName("::testing::internal::CmpHelperGE"));
+ case GtestCmp::Gt:
+ return functionDecl(hasName("::testing::internal::CmpHelperGT"));
+ case GtestCmp::Le:
+ return functionDecl(hasName("::testing::internal::CmpHelperLE"));
+ case GtestCmp::Lt:
+ return functionDecl(hasName("::testing::internal::CmpHelperLT"));
+ }
+ llvm_unreachable("Unhandled GtestCmp enum");
+}
+
+static llvm::StringRef getAssertMacro(GtestCmp Cmp) {
+ switch (Cmp) {
+ case GtestCmp::Eq:
+ return "ASSERT_EQ";
+ case GtestCmp::Ne:
+ return "ASSERT_NE";
+ case GtestCmp::Ge:
+ return "ASSERT_GE";
+ case GtestCmp::Gt:
+ return "ASSERT_GT";
+ case GtestCmp::Le:
+ return "ASSERT_LE";
+ case GtestCmp::Lt:
+ return "ASSERT_LT";
+ }
+ llvm_unreachable("Unhandled GtestCmp enum");
+}
+
+static llvm::StringRef getExpectMacro(GtestCmp Cmp) {
+ switch (Cmp) {
+ case GtestCmp::Eq:
+ return "EXPECT_EQ";
+ case GtestCmp::Ne:
+ return "EXPECT_NE";
+ case GtestCmp::Ge:
+ return "EXPECT_GE";
+ case GtestCmp::Gt:
+ return "EXPECT_GT";
+ case GtestCmp::Le:
+ return "EXPECT_LE";
+ case GtestCmp::Lt:
+ return "EXPECT_LT";
+ }
+ llvm_unreachable("Unhandled GtestCmp enum");
+}
+
+// In general, AST matchers cannot match calls to macros. However, we can
+// simulate such matches if the macro definition has identifiable elements that
+// themselves can be matched. In that case, we can match on those elements and
+// then check that the match occurs within an expansion of the desired
+// macro. The more uncommon the identified elements, the more efficient this
+// process will be.
+//
+// We use this approach to implement the derived matchers gtestAssert and
+// gtestExpect.
+internal::BindableMatcher<Stmt> gtestAssert(GtestCmp Cmp, StatementMatcher Left,
+ StatementMatcher Right) {
+ return callExpr(callee(getComparisonDecl(Cmp)),
+ isExpandedFromMacro(getAssertMacro(Cmp)),
+ hasArgument(2, Left), hasArgument(3, Right));
+}
+
+internal::BindableMatcher<Stmt> gtestExpect(GtestCmp Cmp, StatementMatcher Left,
+ StatementMatcher Right) {
+ return callExpr(callee(getComparisonDecl(Cmp)),
+ isExpandedFromMacro(getExpectMacro(Cmp)),
+ hasArgument(2, Left), hasArgument(3, Right));
+}
+
+} // end namespace ast_matchers
+} // end namespace clang
diff --git a/clang/lib/Analysis/AnalysisDeclContext.cpp b/clang/lib/Analysis/AnalysisDeclContext.cpp
index 9f58b5079c76..783de6442645 100644
--- a/clang/lib/Analysis/AnalysisDeclContext.cpp
+++ b/clang/lib/Analysis/AnalysisDeclContext.cpp
@@ -50,18 +50,18 @@
using namespace clang;
-using ManagedAnalysisMap = llvm::DenseMap<const void *, ManagedAnalysis *>;
+using ManagedAnalysisMap = llvm::DenseMap<const void *, std::unique_ptr<ManagedAnalysis>>;
-AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
- const Decl *d,
- const CFG::BuildOptions &buildOptions)
- : Manager(Mgr), D(d), cfgBuildOptions(buildOptions) {
+AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *ADCMgr,
+ const Decl *D,
+ const CFG::BuildOptions &Options)
+ : ADCMgr(ADCMgr), D(D), cfgBuildOptions(Options) {
cfgBuildOptions.forcedBlkExprs = &forcedBlkExprs;
}
-AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
- const Decl *d)
- : Manager(Mgr), D(d) {
+AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *ADCMgr,
+ const Decl *D)
+ : ADCMgr(ADCMgr), D(D) {
cfgBuildOptions.forcedBlkExprs = &forcedBlkExprs;
}
@@ -96,8 +96,8 @@ Stmt *AnalysisDeclContext::getBody(bool &IsAutosynthesized) const {
Stmt *Body = FD->getBody();
if (auto *CoroBody = dyn_cast_or_null<CoroutineBodyStmt>(Body))
Body = CoroBody->getBody();
- if (Manager && Manager->synthesizeBodies()) {
- Stmt *SynthesizedBody = Manager->getBodyFarm().getBody(FD);
+ if (ADCMgr && ADCMgr->synthesizeBodies()) {
+ Stmt *SynthesizedBody = ADCMgr->getBodyFarm().getBody(FD);
if (SynthesizedBody) {
Body = SynthesizedBody;
IsAutosynthesized = true;
@@ -107,8 +107,8 @@ Stmt *AnalysisDeclContext::getBody(bool &IsAutosynthesized) const {
}
else if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
Stmt *Body = MD->getBody();
- if (Manager && Manager->synthesizeBodies()) {
- Stmt *SynthesizedBody = Manager->getBodyFarm().getBody(MD);
+ if (ADCMgr && ADCMgr->synthesizeBodies()) {
+ Stmt *SynthesizedBody = ADCMgr->getBodyFarm().getBody(MD);
if (SynthesizedBody) {
Body = SynthesizedBody;
IsAutosynthesized = true;
@@ -309,19 +309,17 @@ AnalysisDeclContext *AnalysisDeclContextManager::getContext(const Decl *D) {
BodyFarm &AnalysisDeclContextManager::getBodyFarm() { return FunctionBodyFarm; }
const StackFrameContext *
-AnalysisDeclContext::getStackFrame(LocationContext const *Parent, const Stmt *S,
- const CFGBlock *Blk, unsigned BlockCount,
- unsigned Idx) {
- return getLocationContextManager().getStackFrame(this, Parent, S, Blk,
- BlockCount, Idx);
+AnalysisDeclContext::getStackFrame(const LocationContext *ParentLC,
+ const Stmt *S, const CFGBlock *Blk,
+ unsigned BlockCount, unsigned Index) {
+ return getLocationContextManager().getStackFrame(this, ParentLC, S, Blk,
+ BlockCount, Index);
}
-const BlockInvocationContext *
-AnalysisDeclContext::getBlockInvocationContext(const LocationContext *parent,
- const BlockDecl *BD,
- const void *ContextData) {
- return getLocationContextManager().getBlockInvocationContext(this, parent,
- BD, ContextData);
+const BlockInvocationContext *AnalysisDeclContext::getBlockInvocationContext(
+ const LocationContext *ParentLC, const BlockDecl *BD, const void *Data) {
+ return getLocationContextManager().getBlockInvocationContext(this, ParentLC,
+ BD, Data);
}
bool AnalysisDeclContext::isInStdNamespace(const Decl *D) {
@@ -340,9 +338,10 @@ bool AnalysisDeclContext::isInStdNamespace(const Decl *D) {
}
LocationContextManager &AnalysisDeclContext::getLocationContextManager() {
- assert(Manager &&
- "Cannot create LocationContexts without an AnalysisDeclContextManager!");
- return Manager->getLocationContextManager();
+ assert(
+ ADCMgr &&
+ "Cannot create LocationContexts without an AnalysisDeclContextManager!");
+ return ADCMgr->getLocationContextManager();
}
//===----------------------------------------------------------------------===//
@@ -365,36 +364,14 @@ void StackFrameContext::Profile(llvm::FoldingSetNodeID &ID) {
BlockCount, Index);
}
-void ScopeContext::Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getAnalysisDeclContext(), getParent(), Enter);
-}
-
void BlockInvocationContext::Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getAnalysisDeclContext(), getParent(), BD, ContextData);
+ Profile(ID, getAnalysisDeclContext(), getParent(), BD, Data);
}
//===----------------------------------------------------------------------===//
// LocationContext creation.
//===----------------------------------------------------------------------===//
-template <typename LOC, typename DATA>
-const LOC*
-LocationContextManager::getLocationContext(AnalysisDeclContext *ctx,
- const LocationContext *parent,
- const DATA *d) {
- llvm::FoldingSetNodeID ID;
- LOC::Profile(ID, ctx, parent, d);
- void *InsertPos;
-
- LOC *L = cast_or_null<LOC>(Contexts.FindNodeOrInsertPos(ID, InsertPos));
-
- if (!L) {
- L = new LOC(ctx, parent, d, ++NewID);
- Contexts.InsertNode(L, InsertPos);
- }
- return L;
-}
-
const StackFrameContext *LocationContextManager::getStackFrame(
AnalysisDeclContext *ctx, const LocationContext *parent, const Stmt *s,
const CFGBlock *blk, unsigned blockCount, unsigned idx) {
@@ -410,26 +387,17 @@ const StackFrameContext *LocationContextManager::getStackFrame(
return L;
}
-const ScopeContext *
-LocationContextManager::getScope(AnalysisDeclContext *ctx,
- const LocationContext *parent,
- const Stmt *s) {
- return getLocationContext<ScopeContext, Stmt>(ctx, parent, s);
-}
-
-const BlockInvocationContext *
-LocationContextManager::getBlockInvocationContext(AnalysisDeclContext *ctx,
- const LocationContext *parent,
- const BlockDecl *BD,
- const void *ContextData) {
+const BlockInvocationContext *LocationContextManager::getBlockInvocationContext(
+ AnalysisDeclContext *ADC, const LocationContext *ParentLC,
+ const BlockDecl *BD, const void *Data) {
llvm::FoldingSetNodeID ID;
- BlockInvocationContext::Profile(ID, ctx, parent, BD, ContextData);
+ BlockInvocationContext::Profile(ID, ADC, ParentLC, BD, Data);
void *InsertPos;
auto *L =
cast_or_null<BlockInvocationContext>(Contexts.FindNodeOrInsertPos(ID,
InsertPos));
if (!L) {
- L = new BlockInvocationContext(ctx, parent, BD, ContextData, ++NewID);
+ L = new BlockInvocationContext(ADC, ParentLC, BD, Data, ++NewID);
Contexts.InsertNode(L, InsertPos);
}
return L;
@@ -473,9 +441,7 @@ static void printLocation(raw_ostream &Out, const SourceManager &SM,
Loc.print(Out, SM);
}
-void LocationContext::dumpStack(raw_ostream &Out, const char *NL,
- std::function<void(const LocationContext *)>
- printMoreInfoPerContext) const {
+void LocationContext::dumpStack(raw_ostream &Out) const {
ASTContext &Ctx = getAnalysisDeclContext()->getASTContext();
PrintingPolicy PP(Ctx.getLangOpts());
PP.TerseOutput = 1;
@@ -498,9 +464,6 @@ void LocationContext::dumpStack(raw_ostream &Out, const char *NL,
printLocation(Out, SM, S->getBeginLoc());
}
break;
- case Scope:
- Out << "Entering scope";
- break;
case Block:
Out << "Invoking block";
if (const Decl *D = cast<BlockInvocationContext>(LCtx)->getDecl()) {
@@ -509,9 +472,7 @@ void LocationContext::dumpStack(raw_ostream &Out, const char *NL,
}
break;
}
- Out << NL;
-
- printMoreInfoPerContext(LCtx);
+ Out << '\n';
}
}
@@ -548,9 +509,6 @@ void LocationContext::printJson(raw_ostream &Out, const char *NL,
Out << ", \"items\": ";
break;
- case Scope:
- Out << "Entering scope\" ";
- break;
case Block:
Out << "Invoking block\" ";
if (const Decl *D = cast<BlockInvocationContext>(LCtx)->getDecl()) {
@@ -659,7 +617,7 @@ AnalysisDeclContext::getReferencedBlockVars(const BlockDecl *BD) {
return llvm::make_range(V->begin(), V->end());
}
-ManagedAnalysis *&AnalysisDeclContext::getAnalysisImpl(const void *tag) {
+std::unique_ptr<ManagedAnalysis> &AnalysisDeclContext::getAnalysisImpl(const void *tag) {
if (!ManagedAnalyses)
ManagedAnalyses = new ManagedAnalysisMap();
ManagedAnalysisMap *M = (ManagedAnalysisMap*) ManagedAnalyses;
@@ -675,12 +633,7 @@ ManagedAnalysis::~ManagedAnalysis() = default;
AnalysisDeclContext::~AnalysisDeclContext() {
delete forcedBlkExprs;
delete ReferencedBlockVars;
- // Release the managed analyses.
- if (ManagedAnalyses) {
- ManagedAnalysisMap *M = (ManagedAnalysisMap*) ManagedAnalyses;
- llvm::DeleteContainerSeconds(*M);
- delete M;
- }
+ delete (ManagedAnalysisMap*) ManagedAnalyses;
}
LocationContext::~LocationContext() = default;
diff --git a/clang/lib/Analysis/BodyFarm.cpp b/clang/lib/Analysis/BodyFarm.cpp
index 1a7891550542..f9f0553d28f0 100644
--- a/clang/lib/Analysis/BodyFarm.cpp
+++ b/clang/lib/Analysis/BodyFarm.cpp
@@ -114,21 +114,19 @@ private:
BinaryOperator *ASTMaker::makeAssignment(const Expr *LHS, const Expr *RHS,
QualType Ty) {
- return new (C) BinaryOperator(const_cast<Expr*>(LHS), const_cast<Expr*>(RHS),
- BO_Assign, Ty, VK_RValue,
- OK_Ordinary, SourceLocation(), FPOptions());
+ return BinaryOperator::Create(
+ C, const_cast<Expr *>(LHS), const_cast<Expr *>(RHS), BO_Assign, Ty,
+ VK_RValue, OK_Ordinary, SourceLocation(), FPOptionsOverride());
}
BinaryOperator *ASTMaker::makeComparison(const Expr *LHS, const Expr *RHS,
BinaryOperator::Opcode Op) {
assert(BinaryOperator::isLogicalOp(Op) ||
BinaryOperator::isComparisonOp(Op));
- return new (C) BinaryOperator(const_cast<Expr*>(LHS),
- const_cast<Expr*>(RHS),
- Op,
- C.getLogicalOperationType(),
- VK_RValue,
- OK_Ordinary, SourceLocation(), FPOptions());
+ return BinaryOperator::Create(
+ C, const_cast<Expr *>(LHS), const_cast<Expr *>(RHS), Op,
+ C.getLogicalOperationType(), VK_RValue, OK_Ordinary, SourceLocation(),
+ FPOptionsOverride());
}
CompoundStmt *ASTMaker::makeCompound(ArrayRef<Stmt *> Stmts) {
@@ -147,9 +145,9 @@ DeclRefExpr *ASTMaker::makeDeclRefExpr(
}
UnaryOperator *ASTMaker::makeDereference(const Expr *Arg, QualType Ty) {
- return new (C) UnaryOperator(const_cast<Expr*>(Arg), UO_Deref, Ty,
+ return UnaryOperator::Create(C, const_cast<Expr *>(Arg), UO_Deref, Ty,
VK_LValue, OK_Ordinary, SourceLocation(),
- /*CanOverflow*/ false);
+ /*CanOverflow*/ false, FPOptionsOverride());
}
ImplicitCastExpr *ASTMaker::makeLvalueToRvalue(const Expr *Arg, QualType Ty) {
@@ -296,7 +294,8 @@ static CallExpr *create_call_once_lambda_call(ASTContext &C, ASTMaker M,
/*Args=*/CallArgs,
/*QualType=*/C.VoidTy,
/*ExprValueType=*/VK_RValue,
- /*SourceLocation=*/SourceLocation(), FPOptions());
+ /*SourceLocation=*/SourceLocation(),
+ /*FPFeatures=*/FPOptionsOverride());
}
/// Create a fake body for std::call_once.
@@ -447,15 +446,16 @@ static Stmt *create_call_once(ASTContext &C, const FunctionDecl *D) {
QualType DerefType = Deref->getType();
// Negation predicate.
- UnaryOperator *FlagCheck = new (C) UnaryOperator(
+ UnaryOperator *FlagCheck = UnaryOperator::Create(
+ C,
/* input=*/
M.makeImplicitCast(M.makeLvalueToRvalue(Deref, DerefType), DerefType,
CK_IntegralToBoolean),
- /* opc=*/ UO_LNot,
- /* QualType=*/ C.IntTy,
- /* ExprValueKind=*/ VK_RValue,
- /* ExprObjectKind=*/ OK_Ordinary, SourceLocation(),
- /* CanOverflow*/ false);
+ /* opc=*/UO_LNot,
+ /* QualType=*/C.IntTy,
+ /* ExprValueKind=*/VK_RValue,
+ /* ExprObjectKind=*/OK_Ordinary, SourceLocation(),
+ /* CanOverflow*/ false, FPOptionsOverride());
// Create assignment.
BinaryOperator *FlagAssignment = M.makeAssignment(
@@ -518,9 +518,9 @@ static Stmt *create_dispatch_once(ASTContext &C, const FunctionDecl *D) {
// (2) Create the assignment to the predicate.
Expr *DoneValue =
- new (C) UnaryOperator(M.makeIntegerLiteral(0, C.LongTy), UO_Not, C.LongTy,
- VK_RValue, OK_Ordinary, SourceLocation(),
- /*CanOverflow*/false);
+ UnaryOperator::Create(C, M.makeIntegerLiteral(0, C.LongTy), UO_Not,
+ C.LongTy, VK_RValue, OK_Ordinary, SourceLocation(),
+ /*CanOverflow*/ false, FPOptionsOverride());
BinaryOperator *B =
M.makeAssignment(
@@ -762,7 +762,7 @@ static Stmt *createObjCPropertyGetter(ASTContext &Ctx,
return nullptr;
// Ignore weak variables, which have special behavior.
- if (Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak)
+ if (Prop->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak)
return nullptr;
// Look to see if Sema has synthesized a body for us. This happens in
diff --git a/clang/lib/Analysis/CFG.cpp b/clang/lib/Analysis/CFG.cpp
index 4c1ea8995f9f..fc74226951a4 100644
--- a/clang/lib/Analysis/CFG.cpp
+++ b/clang/lib/Analysis/CFG.cpp
@@ -223,8 +223,6 @@ private:
///
class LocalScope {
public:
- friend class const_iterator;
-
using AutomaticVarsTy = BumpVector<VarDecl *>;
/// const_iterator - Iterates local scope backwards and jumps to previous
@@ -720,10 +718,10 @@ private:
// These sorts of call expressions don't have a common superclass,
// hence strict duck-typing.
template <typename CallLikeExpr,
- typename = typename std::enable_if<
- std::is_same<CallLikeExpr, CallExpr>::value ||
- std::is_same<CallLikeExpr, CXXConstructExpr>::value ||
- std::is_same<CallLikeExpr, ObjCMessageExpr>::value>>
+ typename = std::enable_if_t<
+ std::is_base_of<CallExpr, CallLikeExpr>::value ||
+ std::is_base_of<CXXConstructExpr, CallLikeExpr>::value ||
+ std::is_base_of<ObjCMessageExpr, CallLikeExpr>::value>>
void findConstructionContextsForArguments(CallLikeExpr *E) {
for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
Expr *Arg = E->getArg(i);
@@ -2839,11 +2837,30 @@ CFGBlock *CFGBuilder::VisitDeclStmt(DeclStmt *DS) {
/// DeclStmts and initializers in them.
CFGBlock *CFGBuilder::VisitDeclSubExpr(DeclStmt *DS) {
assert(DS->isSingleDecl() && "Can handle single declarations only.");
+
+ if (const auto *TND = dyn_cast<TypedefNameDecl>(DS->getSingleDecl())) {
+ // If we encounter a VLA, process its size expressions.
+ const Type *T = TND->getUnderlyingType().getTypePtr();
+ if (!T->isVariablyModifiedType())
+ return Block;
+
+ autoCreateBlock();
+ appendStmt(Block, DS);
+
+ CFGBlock *LastBlock = Block;
+ for (const VariableArrayType *VA = FindVA(T); VA != nullptr;
+ VA = FindVA(VA->getElementType().getTypePtr())) {
+ if (CFGBlock *NewBlock = addStmt(VA->getSizeExpr()))
+ LastBlock = NewBlock;
+ }
+ return LastBlock;
+ }
+
VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl());
if (!VD) {
- // Of everything that can be declared in a DeclStmt, only VarDecls impact
- // runtime semantics.
+ // Of everything that can be declared in a DeclStmt, only VarDecls and the
+ // exceptions above impact runtime semantics.
return Block;
}
@@ -2905,6 +2922,8 @@ CFGBlock *CFGBuilder::VisitDeclSubExpr(DeclStmt *DS) {
}
// If the type of VD is a VLA, then we must process its size expressions.
+ // FIXME: This does not find the VLA if it is embedded in other types,
+ // like here: `int (*p_vla)[x];`
for (const VariableArrayType* VA = FindVA(VD->getType().getTypePtr());
VA != nullptr; VA = FindVA(VA->getElementType().getTypePtr())) {
if (CFGBlock *newBlock = addStmt(VA->getSizeExpr()))
@@ -3997,6 +4016,11 @@ CFGBlock *CFGBuilder::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E,
}
// VLA types have expressions that must be evaluated.
+ // Evaluation is done only for `sizeof`.
+
+ if (E->getKind() != UETT_SizeOf)
+ return Block;
+
CFGBlock *lastBlock = Block;
if (E->isArgumentType()) {
diff --git a/clang/lib/Analysis/CallGraph.cpp b/clang/lib/Analysis/CallGraph.cpp
index 76be292dad8d..59cc939b6fd1 100644
--- a/clang/lib/Analysis/CallGraph.cpp
+++ b/clang/lib/Analysis/CallGraph.cpp
@@ -66,16 +66,16 @@ public:
return nullptr;
}
- void addCalledDecl(Decl *D) {
- if (G->includeInGraph(D)) {
+ void addCalledDecl(Decl *D, Expr *CallExpr) {
+ if (G->includeCalleeInGraph(D)) {
CallGraphNode *CalleeNode = G->getOrInsertNode(D);
- CallerNode->addCallee(CalleeNode);
+ CallerNode->addCallee({CalleeNode, CallExpr});
}
}
void VisitCallExpr(CallExpr *CE) {
if (Decl *D = getDeclFromCall(CE))
- addCalledDecl(D);
+ addCalledDecl(D, CE);
VisitChildren(CE);
}
@@ -89,14 +89,14 @@ public:
void VisitCXXNewExpr(CXXNewExpr *E) {
if (FunctionDecl *FD = E->getOperatorNew())
- addCalledDecl(FD);
+ addCalledDecl(FD, E);
VisitChildren(E);
}
void VisitCXXConstructExpr(CXXConstructExpr *E) {
CXXConstructorDecl *Ctor = E->getConstructor();
if (FunctionDecl *Def = Ctor->getDefinition())
- addCalledDecl(Def);
+ addCalledDecl(Def, E);
VisitChildren(E);
}
@@ -122,7 +122,7 @@ public:
else
D = IDecl->lookupPrivateClassMethod(Sel);
if (D) {
- addCalledDecl(D);
+ addCalledDecl(D, ME);
NumObjCCallEdges++;
}
}
@@ -157,6 +157,10 @@ bool CallGraph::includeInGraph(const Decl *D) {
if (!D->hasBody())
return false;
+ return includeCalleeInGraph(D);
+}
+
+bool CallGraph::includeCalleeInGraph(const Decl *D) {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
// We skip function template definitions, as their semantics is
// only determined when they are instantiated.
@@ -207,7 +211,7 @@ CallGraphNode *CallGraph::getOrInsertNode(Decl *F) {
Node = std::make_unique<CallGraphNode>(F);
// Make Root node a parent of all functions to make sure all are reachable.
if (F)
- Root->addCallee(Node.get());
+ Root->addCallee({Node.get(), /*Call=*/nullptr});
return Node.get();
}
@@ -230,8 +234,8 @@ void CallGraph::print(raw_ostream &OS) const {
OS << " calls: ";
for (CallGraphNode::const_iterator CI = N->begin(),
CE = N->end(); CI != CE; ++CI) {
- assert(*CI != Root && "No one can call the root node.");
- (*CI)->print(OS);
+ assert(CI->Callee != Root && "No one can call the root node.");
+ CI->Callee->print(OS);
OS << " ";
}
OS << '\n';
diff --git a/clang/lib/Analysis/CloneDetection.cpp b/clang/lib/Analysis/CloneDetection.cpp
index 5fb5840ce293..0a1122bd5a4a 100644
--- a/clang/lib/Analysis/CloneDetection.cpp
+++ b/clang/lib/Analysis/CloneDetection.cpp
@@ -14,6 +14,7 @@
#include "clang/AST/Attr.h"
#include "clang/AST/DataCollection.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/Basic/SourceManager.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/Path.h"
diff --git a/clang/lib/Analysis/ExprMutationAnalyzer.cpp b/clang/lib/Analysis/ExprMutationAnalyzer.cpp
index fb5a139e82ab..2f80285f17b4 100644
--- a/clang/lib/Analysis/ExprMutationAnalyzer.cpp
+++ b/clang/lib/Analysis/ExprMutationAnalyzer.cpp
@@ -43,9 +43,6 @@ AST_MATCHER(CXXTypeidExpr, isPotentiallyEvaluated) {
return Node.isPotentiallyEvaluated();
}
-const ast_matchers::internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr>
- cxxNoexceptExpr;
-
const ast_matchers::internal::VariadicDynCastAllOfMatcher<Stmt,
GenericSelectionExpr>
genericSelectionExpr;
@@ -76,10 +73,10 @@ const auto isMoveOnly = [] {
};
template <class T> struct NodeID;
-template <> struct NodeID<Expr> { static const std::string value; };
-template <> struct NodeID<Decl> { static const std::string value; };
-const std::string NodeID<Expr>::value = "expr";
-const std::string NodeID<Decl>::value = "decl";
+template <> struct NodeID<Expr> { static constexpr StringRef value = "expr"; };
+template <> struct NodeID<Decl> { static constexpr StringRef value = "decl"; };
+constexpr StringRef NodeID<Expr>::value;
+constexpr StringRef NodeID<Decl>::value;
template <class T, class F = const Stmt *(ExprMutationAnalyzer::*)(const T *)>
const Stmt *tryEachMatch(ArrayRef<ast_matchers::BoundNodes> Matches,
@@ -204,14 +201,15 @@ const Stmt *ExprMutationAnalyzer::findDeclPointeeMutation(
const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
// LHS of any assignment operators.
- const auto AsAssignmentLhs =
- binaryOperator(isAssignmentOperator(),
- hasLHS(maybeEvalCommaExpr(equalsNode(Exp))));
+ const auto AsAssignmentLhs = binaryOperator(
+ isAssignmentOperator(),
+ hasLHS(maybeEvalCommaExpr(ignoringParenImpCasts(equalsNode(Exp)))));
// Operand of increment/decrement operators.
const auto AsIncDecOperand =
unaryOperator(anyOf(hasOperatorName("++"), hasOperatorName("--")),
- hasUnaryOperand(maybeEvalCommaExpr(equalsNode(Exp))));
+ hasUnaryOperand(maybeEvalCommaExpr(
+ ignoringParenImpCasts(equalsNode(Exp)))));
// Invoking non-const member function.
// A member function is assumed to be non-const when it is unresolved.
@@ -283,13 +281,15 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
const auto AsNonConstRefReturn = returnStmt(hasReturnValue(
maybeEvalCommaExpr(equalsNode(Exp))));
- const auto Matches =
- match(findAll(stmt(anyOf(AsAssignmentLhs, AsIncDecOperand, AsNonConstThis,
- AsAmpersandOperand, AsPointerFromArrayDecay,
- AsOperatorArrowThis, AsNonConstRefArg,
- AsLambdaRefCaptureInit, AsNonConstRefReturn))
- .bind("stmt")),
- Stm, Context);
+ const auto Matches = match(
+ traverse(
+ ast_type_traits::TK_AsIs,
+ findAll(stmt(anyOf(AsAssignmentLhs, AsIncDecOperand, AsNonConstThis,
+ AsAmpersandOperand, AsPointerFromArrayDecay,
+ AsOperatorArrowThis, AsNonConstRefArg,
+ AsLambdaRefCaptureInit, AsNonConstRefReturn))
+ .bind("stmt"))),
+ Stm, Context);
return selectFirst<Stmt>("stmt", Matches);
}
@@ -388,12 +388,15 @@ const Stmt *ExprMutationAnalyzer::findFunctionArgMutation(const Expr *Exp) {
const auto IsInstantiated = hasDeclaration(isInstantiated());
const auto FuncDecl = hasDeclaration(functionDecl().bind("func"));
const auto Matches = match(
- findAll(expr(anyOf(callExpr(NonConstRefParam, IsInstantiated, FuncDecl,
+ traverse(
+ ast_type_traits::TK_AsIs,
+ findAll(
+ expr(anyOf(callExpr(NonConstRefParam, IsInstantiated, FuncDecl,
unless(callee(namedDecl(hasAnyName(
"::std::move", "::std::forward"))))),
cxxConstructExpr(NonConstRefParam, IsInstantiated,
FuncDecl)))
- .bind(NodeID<Expr>::value)),
+ .bind(NodeID<Expr>::value))),
Stm, Context);
for (const auto &Nodes : Matches) {
const auto *Exp = Nodes.getNodeAs<Expr>(NodeID<Expr>::value);
diff --git a/clang/lib/Analysis/LiveVariables.cpp b/clang/lib/Analysis/LiveVariables.cpp
index 2cd607d8a493..d24c40b457b4 100644
--- a/clang/lib/Analysis/LiveVariables.cpp
+++ b/clang/lib/Analysis/LiveVariables.cpp
@@ -13,12 +13,10 @@
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
-#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/FlowSensitive/DataflowWorklist.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/PostOrderIterator.h"
-#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <vector>
@@ -26,51 +24,6 @@
using namespace clang;
namespace {
-
-class DataflowWorklist {
- llvm::BitVector enqueuedBlocks;
- PostOrderCFGView *POV;
- llvm::PriorityQueue<const CFGBlock *, SmallVector<const CFGBlock *, 20>,
- PostOrderCFGView::BlockOrderCompare> worklist;
-
-public:
- DataflowWorklist(const CFG &cfg, AnalysisDeclContext &Ctx)
- : enqueuedBlocks(cfg.getNumBlockIDs()),
- POV(Ctx.getAnalysis<PostOrderCFGView>()),
- worklist(POV->getComparator()) {}
-
- void enqueueBlock(const CFGBlock *block);
- void enqueuePredecessors(const CFGBlock *block);
-
- const CFGBlock *dequeue();
-};
-
-}
-
-void DataflowWorklist::enqueueBlock(const clang::CFGBlock *block) {
- if (block && !enqueuedBlocks[block->getBlockID()]) {
- enqueuedBlocks[block->getBlockID()] = true;
- worklist.push(block);
- }
-}
-
-void DataflowWorklist::enqueuePredecessors(const clang::CFGBlock *block) {
- for (CFGBlock::const_pred_iterator I = block->pred_begin(),
- E = block->pred_end(); I != E; ++I) {
- enqueueBlock(*I);
- }
-}
-
-const CFGBlock *DataflowWorklist::dequeue() {
- if (worklist.empty())
- return nullptr;
- const CFGBlock *b = worklist.top();
- worklist.pop();
- enqueuedBlocks[b->getBlockID()] = false;
- return b;
-}
-
-namespace {
class LiveVariablesImpl {
public:
AnalysisDeclContext &analysisContext;
@@ -136,7 +89,7 @@ namespace {
}
return A;
}
-}
+} // namespace
void LiveVariables::Observer::anchor() { }
@@ -218,7 +171,7 @@ public:
void VisitUnaryOperator(UnaryOperator *UO);
void Visit(Stmt *S);
};
-}
+} // namespace
static const VariableArrayType *FindVA(QualType Ty) {
const Type *ty = Ty.getTypePtr();
@@ -537,9 +490,8 @@ LiveVariables::~LiveVariables() {
delete (LiveVariablesImpl*) impl;
}
-LiveVariables *
-LiveVariables::computeLiveness(AnalysisDeclContext &AC,
- bool killAtAssign) {
+std::unique_ptr<LiveVariables>
+LiveVariables::computeLiveness(AnalysisDeclContext &AC, bool killAtAssign) {
// No CFG? Bail out.
CFG *cfg = AC.getCFG();
@@ -555,7 +507,7 @@ LiveVariables::computeLiveness(AnalysisDeclContext &AC,
// Construct the dataflow worklist. Enqueue the exit block as the
// start of the analysis.
- DataflowWorklist worklist(*cfg, AC);
+ BackwardDataflowWorklist worklist(*cfg, AC);
llvm::BitVector everAnalyzedBlock(cfg->getNumBlockIDs());
// FIXME: we should enqueue using post order.
@@ -612,7 +564,7 @@ LiveVariables::computeLiveness(AnalysisDeclContext &AC,
worklist.enqueuePredecessors(block);
}
- return new LiveVariables(LV);
+ return std::unique_ptr<LiveVariables>(new LiveVariables(LV));
}
void LiveVariables::dumpBlockLiveness(const SourceManager &M) {
diff --git a/clang/lib/Analysis/PathDiagnostic.cpp b/clang/lib/Analysis/PathDiagnostic.cpp
index 53235ba07699..c88e6c1e1535 100644
--- a/clang/lib/Analysis/PathDiagnostic.cpp
+++ b/clang/lib/Analysis/PathDiagnostic.cpp
@@ -20,6 +20,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/ParentMap.h"
+#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/AnalysisDeclContext.h"
@@ -909,7 +910,7 @@ static void describeClass(raw_ostream &Out, const CXXRecordDecl *D,
Out << Prefix << '\'' << *D;
if (const auto T = dyn_cast<ClassTemplateSpecializationDecl>(D))
describeTemplateParameters(Out, T->getTemplateArgs().asArray(),
- D->getASTContext().getLangOpts(), "<", ">");
+ D->getLangOpts(), "<", ">");
Out << '\'';
}
@@ -975,8 +976,8 @@ static bool describeCodeDecl(raw_ostream &Out, const Decl *D,
if (const auto FD = dyn_cast<FunctionDecl>(D))
if (const TemplateArgumentList *TAList =
FD->getTemplateSpecializationArgs())
- describeTemplateParameters(Out, TAList->asArray(),
- FD->getASTContext().getLangOpts(), "<", ">");
+ describeTemplateParameters(Out, TAList->asArray(), FD->getLangOpts(), "<",
+ ">");
Out << '\'';
return true;
diff --git a/clang/lib/Analysis/PostOrderCFGView.cpp b/clang/lib/Analysis/PostOrderCFGView.cpp
index f79d0007cb3d..0c09c0f97ff6 100644
--- a/clang/lib/Analysis/PostOrderCFGView.cpp
+++ b/clang/lib/Analysis/PostOrderCFGView.cpp
@@ -29,11 +29,12 @@ PostOrderCFGView::PostOrderCFGView(const CFG *cfg) {
}
}
-PostOrderCFGView *PostOrderCFGView::create(AnalysisDeclContext &ctx) {
+std::unique_ptr<PostOrderCFGView>
+PostOrderCFGView::create(AnalysisDeclContext &ctx) {
const CFG *cfg = ctx.getCFG();
if (!cfg)
return nullptr;
- return new PostOrderCFGView(cfg);
+ return std::make_unique<PostOrderCFGView>(cfg);
}
const void *PostOrderCFGView::getTag() { static int x; return &x; }
diff --git a/clang/lib/Analysis/ProgramPoint.cpp b/clang/lib/Analysis/ProgramPoint.cpp
index 0783fbed5315..2a91749affd2 100644
--- a/clang/lib/Analysis/ProgramPoint.cpp
+++ b/clang/lib/Analysis/ProgramPoint.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/ProgramPoint.h"
+#include "clang/AST/ASTContext.h"
#include "clang/Basic/JsonSupport.h"
using namespace clang;
diff --git a/clang/lib/Analysis/ReachableCode.cpp b/clang/lib/Analysis/ReachableCode.cpp
index 369879ad65f5..221d137dadb8 100644
--- a/clang/lib/Analysis/ReachableCode.cpp
+++ b/clang/lib/Analysis/ReachableCode.cpp
@@ -138,10 +138,10 @@ static bool isDeadReturn(const CFGBlock *B, const Stmt *S) {
static SourceLocation getTopMostMacro(SourceLocation Loc, SourceManager &SM) {
assert(Loc.isMacroID());
SourceLocation Last;
- while (Loc.isMacroID()) {
+ do {
Last = Loc;
Loc = SM.getImmediateMacroCallerLoc(Loc);
- }
+ } while (Loc.isMacroID());
return Last;
}
diff --git a/clang/lib/Analysis/RetainSummaryManager.cpp b/clang/lib/Analysis/RetainSummaryManager.cpp
index 6f46917b2dfc..9f45a8efe546 100644
--- a/clang/lib/Analysis/RetainSummaryManager.cpp
+++ b/clang/lib/Analysis/RetainSummaryManager.cpp
@@ -140,12 +140,15 @@ RetainSummaryManager::getPersistentSummary(const RetainSummary &OldSumm) {
static bool isSubclass(const Decl *D,
StringRef ClassName) {
using namespace ast_matchers;
- DeclarationMatcher SubclassM = cxxRecordDecl(isSameOrDerivedFrom(ClassName));
+ DeclarationMatcher SubclassM =
+ cxxRecordDecl(isSameOrDerivedFrom(std::string(ClassName)));
return !(match(SubclassM, *D, D->getASTContext()).empty());
}
static bool isOSObjectSubclass(const Decl *D) {
- return D && isSubclass(D, "OSMetaClassBase");
+ // OSSymbols are particular OSObjects that are allocated globally
+ // and therefore aren't really refcounted, so we ignore them.
+ return D && isSubclass(D, "OSMetaClassBase") && !isSubclass(D, "OSSymbol");
}
static bool isOSObjectDynamicCast(StringRef S) {
@@ -662,6 +665,7 @@ RetainSummaryManager::getSummary(AnyCall C,
switch (C.getKind()) {
case AnyCall::Function:
case AnyCall::Constructor:
+ case AnyCall::InheritedConstructor:
case AnyCall::Allocator:
case AnyCall::Deallocator:
Summ = getFunctionSummary(cast_or_null<FunctionDecl>(C.getDecl()));
diff --git a/clang/lib/Analysis/ThreadSafety.cpp b/clang/lib/Analysis/ThreadSafety.cpp
index 48f4106b6bae..1208eaf93e25 100644
--- a/clang/lib/Analysis/ThreadSafety.cpp
+++ b/clang/lib/Analysis/ThreadSafety.cpp
@@ -905,11 +905,7 @@ public:
ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc)
: FactEntry(CE, LK_Exclusive, Loc, false) {}
- void addExclusiveLock(const CapabilityExpr &M) {
- UnderlyingMutexes.emplace_back(M.sexpr(), UCK_Acquired);
- }
-
- void addSharedLock(const CapabilityExpr &M) {
+ void addLock(const CapabilityExpr &M) {
UnderlyingMutexes.emplace_back(M.sexpr(), UCK_Acquired);
}
@@ -999,7 +995,10 @@ private:
FSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
!Cp, LK_Exclusive, loc));
} else if (Handler) {
- Handler->handleUnmatchedUnlock(DiagKind, Cp.toString(), loc);
+ SourceLocation PrevLoc;
+ if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
+ PrevLoc = Neg->loc();
+ Handler->handleUnmatchedUnlock(DiagKind, Cp.toString(), loc, PrevLoc);
}
}
};
@@ -1249,8 +1248,7 @@ static StringRef ClassifyDiagnostic(const ValueDecl *VD) {
}
template <typename AttrTy>
-static typename std::enable_if<!has_arg_iterator_range<AttrTy>::value,
- StringRef>::type
+static std::enable_if_t<!has_arg_iterator_range<AttrTy>::value, StringRef>
ClassifyDiagnostic(const AttrTy *A) {
if (const ValueDecl *VD = getValueDecl(A->getArg()))
return ClassifyDiagnostic(VD);
@@ -1258,8 +1256,7 @@ ClassifyDiagnostic(const AttrTy *A) {
}
template <typename AttrTy>
-static typename std::enable_if<has_arg_iterator_range<AttrTy>::value,
- StringRef>::type
+static std::enable_if_t<has_arg_iterator_range<AttrTy>::value, StringRef>
ClassifyDiagnostic(const AttrTy *A) {
for (const auto *Arg : A->args()) {
if (const ValueDecl *VD = getValueDecl(Arg))
@@ -1328,7 +1325,10 @@ void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
const FactEntry *LDat = FSet.findLock(FactMan, Cp);
if (!LDat) {
- Handler.handleUnmatchedUnlock(DiagKind, Cp.toString(), UnlockLoc);
+ SourceLocation PrevLoc;
+ if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
+ PrevLoc = Neg->loc();
+ Handler.handleUnmatchedUnlock(DiagKind, Cp.toString(), UnlockLoc, PrevLoc);
return;
}
@@ -1803,7 +1803,7 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
SourceLocation Loc = Exp->getExprLoc();
CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
- CapExprSet ScopedExclusiveReqs, ScopedSharedReqs;
+ CapExprSet ScopedReqsAndExcludes;
StringRef CapDiagKind = "mutex";
// Figure out if we're constructing an object of scoped lockable class
@@ -1894,19 +1894,20 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
POK_FunctionCall, ClassifyDiagnostic(A),
Exp->getExprLoc());
// use for adopting a lock
- if (isScopedVar) {
- Analyzer->getMutexIDs(A->isShared() ? ScopedSharedReqs
- : ScopedExclusiveReqs,
- A, Exp, D, VD);
- }
+ if (isScopedVar)
+ Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, VD);
}
break;
}
case attr::LocksExcluded: {
const auto *A = cast<LocksExcludedAttr>(At);
- for (auto *Arg : A->args())
+ for (auto *Arg : A->args()) {
warnIfMutexHeld(D, Exp, Arg, ClassifyDiagnostic(A));
+ // use for deferring a lock
+ if (isScopedVar)
+ Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, VD);
+ }
break;
}
@@ -1946,13 +1947,11 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
auto ScopedEntry = std::make_unique<ScopedLockableFactEntry>(Scp, MLoc);
for (const auto &M : ExclusiveLocksToAdd)
- ScopedEntry->addExclusiveLock(M);
- for (const auto &M : ScopedExclusiveReqs)
- ScopedEntry->addExclusiveLock(M);
+ ScopedEntry->addLock(M);
for (const auto &M : SharedLocksToAdd)
- ScopedEntry->addSharedLock(M);
- for (const auto &M : ScopedSharedReqs)
- ScopedEntry->addSharedLock(M);
+ ScopedEntry->addLock(M);
+ for (const auto &M : ScopedReqsAndExcludes)
+ ScopedEntry->addLock(M);
for (const auto &M : ExclusiveLocksToRemove)
ScopedEntry->addExclusiveUnlock(M);
for (const auto &M : SharedLocksToRemove)
@@ -2141,12 +2140,14 @@ void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
// handle constructors that involve temporaries
if (auto *EWC = dyn_cast<ExprWithCleanups>(E))
- E = EWC->getSubExpr();
- if (auto *ICE = dyn_cast<ImplicitCastExpr>(E))
- if (ICE->getCastKind() == CK_NoOp)
- E = ICE->getSubExpr();
+ E = EWC->getSubExpr()->IgnoreParens();
+ if (auto *CE = dyn_cast<CastExpr>(E))
+ if (CE->getCastKind() == CK_NoOp ||
+ CE->getCastKind() == CK_ConstructorConversion ||
+ CE->getCastKind() == CK_UserDefinedConversion)
+ E = CE->getSubExpr()->IgnoreParens();
if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E))
- E = BTE->getSubExpr();
+ E = BTE->getSubExpr()->IgnoreParens();
if (const auto *CE = dyn_cast<CXXConstructExpr>(E)) {
const auto *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor());
diff --git a/clang/lib/Analysis/UninitializedValues.cpp b/clang/lib/Analysis/UninitializedValues.cpp
index 8a233d4a44f1..67cd39728c35 100644
--- a/clang/lib/Analysis/UninitializedValues.cpp
+++ b/clang/lib/Analysis/UninitializedValues.cpp
@@ -24,6 +24,7 @@
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/DomainSpecific/ObjCNoReturn.h"
+#include "clang/Analysis/FlowSensitive/DataflowWorklist.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
@@ -213,68 +214,6 @@ ValueVector::reference CFGBlockValues::operator[](const VarDecl *vd) {
}
//------------------------------------------------------------------------====//
-// Worklist: worklist for dataflow analysis.
-//====------------------------------------------------------------------------//
-
-namespace {
-
-class DataflowWorklist {
- PostOrderCFGView::iterator PO_I, PO_E;
- SmallVector<const CFGBlock *, 20> worklist;
- llvm::BitVector enqueuedBlocks;
-
-public:
- DataflowWorklist(const CFG &cfg, PostOrderCFGView &view)
- : PO_I(view.begin()), PO_E(view.end()),
- enqueuedBlocks(cfg.getNumBlockIDs(), true) {
- // Treat the first block as already analyzed.
- if (PO_I != PO_E) {
- assert(*PO_I == &cfg.getEntry());
- enqueuedBlocks[(*PO_I)->getBlockID()] = false;
- ++PO_I;
- }
- }
-
- void enqueueSuccessors(const CFGBlock *block);
- const CFGBlock *dequeue();
-};
-
-} // namespace
-
-void DataflowWorklist::enqueueSuccessors(const CFGBlock *block) {
- for (CFGBlock::const_succ_iterator I = block->succ_begin(),
- E = block->succ_end(); I != E; ++I) {
- const CFGBlock *Successor = *I;
- if (!Successor || enqueuedBlocks[Successor->getBlockID()])
- continue;
- worklist.push_back(Successor);
- enqueuedBlocks[Successor->getBlockID()] = true;
- }
-}
-
-const CFGBlock *DataflowWorklist::dequeue() {
- const CFGBlock *B = nullptr;
-
- // First dequeue from the worklist. This can represent
- // updates along backedges that we want propagated as quickly as possible.
- if (!worklist.empty())
- B = worklist.pop_back_val();
-
- // Next dequeue from the initial reverse post order. This is the
- // theoretical ideal in the presence of no back edges.
- else if (PO_I != PO_E) {
- B = *PO_I;
- ++PO_I;
- }
- else
- return nullptr;
-
- assert(enqueuedBlocks[B->getBlockID()] == true);
- enqueuedBlocks[B->getBlockID()] = false;
- return B;
-}
-
-//------------------------------------------------------------------------====//
// Classification of DeclRefExprs as use or initialization.
//====------------------------------------------------------------------------//
@@ -329,6 +268,7 @@ public:
Init,
Use,
SelfInit,
+ ConstRefUse,
Ignore
};
@@ -465,6 +405,15 @@ static bool isPointerToConst(const QualType &QT) {
return QT->isAnyPointerType() && QT->getPointeeType().isConstQualified();
}
+static bool hasTrivialBody(CallExpr *CE) {
+ if (FunctionDecl *FD = CE->getDirectCallee()) {
+ if (FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
+ return FTD->getTemplatedDecl()->hasTrivialBody();
+ return FD->hasTrivialBody();
+ }
+ return false;
+}
+
void ClassifyRefs::VisitCallExpr(CallExpr *CE) {
// Classify arguments to std::move as used.
if (CE->isCallToStdMove()) {
@@ -473,15 +422,17 @@ void ClassifyRefs::VisitCallExpr(CallExpr *CE) {
classify(CE->getArg(0), Use);
return;
}
-
- // If a value is passed by const pointer or by const reference to a function,
+ bool isTrivialBody = hasTrivialBody(CE);
+ // If a value is passed by const pointer to a function,
// we should not assume that it is initialized by the call, and we
// conservatively do not assume that it is used.
+ // If a value is passed by const reference to a function,
+ // it should already be initialized.
for (CallExpr::arg_iterator I = CE->arg_begin(), E = CE->arg_end();
I != E; ++I) {
if ((*I)->isGLValue()) {
if ((*I)->getType().isConstQualified())
- classify((*I), Ignore);
+ classify((*I), isTrivialBody ? Ignore : ConstRefUse);
} else if (isPointerToConst((*I)->getType())) {
const Expr *Ex = stripCasts(DC->getParentASTContext(), *I);
const auto *UO = dyn_cast<UnaryOperator>(Ex);
@@ -530,12 +481,14 @@ public:
handler(handler) {}
void reportUse(const Expr *ex, const VarDecl *vd);
+ void reportConstRefUse(const Expr *ex, const VarDecl *vd);
void VisitBinaryOperator(BinaryOperator *bo);
void VisitBlockExpr(BlockExpr *be);
void VisitCallExpr(CallExpr *ce);
void VisitDeclRefExpr(DeclRefExpr *dr);
void VisitDeclStmt(DeclStmt *ds);
+ void VisitGCCAsmStmt(GCCAsmStmt *as);
void VisitObjCForCollectionStmt(ObjCForCollectionStmt *FS);
void VisitObjCMessageExpr(ObjCMessageExpr *ME);
void VisitOMPExecutableDirective(OMPExecutableDirective *ED);
@@ -636,6 +589,28 @@ public:
continue;
}
+ if (AtPredExit == MayUninitialized) {
+ // If the predecessor's terminator is an "asm goto" that initializes
+ // the variable, then it won't be counted as "initialized" on the
+ // non-fallthrough paths.
+ CFGTerminator term = Pred->getTerminator();
+ if (const auto *as = dyn_cast_or_null<GCCAsmStmt>(term.getStmt())) {
+ const CFGBlock *fallthrough = *Pred->succ_begin();
+ if (as->isAsmGoto() &&
+ llvm::any_of(as->outputs(), [&](const Expr *output) {
+ return vd == findVar(output).getDecl() &&
+ llvm::any_of(as->labels(),
+ [&](const AddrLabelExpr *label) {
+ return label->getLabel()->getStmt() == B->Label &&
+ B != fallthrough;
+ });
+ })) {
+ Use.setUninitAfterDecl();
+ continue;
+ }
+ }
+ }
+
unsigned &SV = SuccsVisited[Pred->getBlockID()];
if (!SV) {
// When visiting the first successor of a block, mark all NULL
@@ -705,6 +680,12 @@ void TransferFunctions::reportUse(const Expr *ex, const VarDecl *vd) {
handler.handleUseOfUninitVariable(vd, getUninitUse(ex, vd, v));
}
+void TransferFunctions::reportConstRefUse(const Expr *ex, const VarDecl *vd) {
+ Value v = vals[vd];
+ if (isAlwaysUninit(v))
+ handler.handleConstRefUseOfUninitVariable(vd, getUninitUse(ex, vd, v));
+}
+
void TransferFunctions::VisitObjCForCollectionStmt(ObjCForCollectionStmt *FS) {
// This represents an initialization of the 'element' value.
if (const auto *DS = dyn_cast<DeclStmt>(FS->getElement())) {
@@ -772,7 +753,10 @@ void TransferFunctions::VisitDeclRefExpr(DeclRefExpr *dr) {
vals[cast<VarDecl>(dr->getDecl())] = Initialized;
break;
case ClassifyRefs::SelfInit:
- handler.handleSelfInit(cast<VarDecl>(dr->getDecl()));
+ handler.handleSelfInit(cast<VarDecl>(dr->getDecl()));
+ break;
+ case ClassifyRefs::ConstRefUse:
+ reportConstRefUse(dr, cast<VarDecl>(dr->getDecl()));
break;
}
}
@@ -821,6 +805,20 @@ void TransferFunctions::VisitDeclStmt(DeclStmt *DS) {
}
}
+void TransferFunctions::VisitGCCAsmStmt(GCCAsmStmt *as) {
+ // An "asm goto" statement is a terminator that may initialize some variables.
+ if (!as->isAsmGoto())
+ return;
+
+ for (const Expr *o : as->outputs())
+ if (const VarDecl *VD = findVar(o).getDecl())
+ if (vals[VD] != Initialized)
+ // If the variable isn't initialized by the time we get here, then we
+ // mark it as potentially uninitialized for those cases where it's used
+ // on an indirect path, where it's not guaranteed to be defined.
+ vals[VD] = MayUninitialized;
+}
+
void TransferFunctions::VisitObjCMessageExpr(ObjCMessageExpr *ME) {
// If the Objective-C message expression is an implicit no-return that
// is not modeled in the CFG, set the tracked dataflow values to Unknown.
@@ -858,6 +856,10 @@ static bool runOnBlock(const CFGBlock *block, const CFG &cfg,
if (Optional<CFGStmt> cs = I.getAs<CFGStmt>())
tf.Visit(const_cast<Stmt *>(cs->getStmt()));
}
+ CFGTerminator terminator = block->getTerminator();
+ if (auto *as = dyn_cast_or_null<GCCAsmStmt>(terminator.getStmt()))
+ if (as->isAsmGoto())
+ tf.Visit(as);
return vals.updateValueVectorWithScratch(block);
}
@@ -887,6 +889,12 @@ struct PruneBlocksHandler : public UninitVariablesHandler {
hadAnyUse = true;
}
+ void handleConstRefUseOfUninitVariable(const VarDecl *vd,
+ const UninitUse &use) override {
+ hadUse[currentBlock] = true;
+ hadAnyUse = true;
+ }
+
/// Called when the uninitialized variable analysis detects the
/// idiom 'int x = x'. All other uses of 'x' within the initializer
/// are handled by handleUseOfUninitVariable.
@@ -924,7 +932,7 @@ void clang::runUninitializedVariablesAnalysis(
}
// Proceed with the workist.
- DataflowWorklist worklist(cfg, *ac.getAnalysis<PostOrderCFGView>());
+ ForwardDataflowWorklist worklist(cfg, ac);
llvm::BitVector previouslyVisited(cfg.getNumBlockIDs());
worklist.enqueueSuccessors(&cfg.getEntry());
llvm::BitVector wasAnalyzed(cfg.getNumBlockIDs(), false);
diff --git a/clang/lib/Analysis/plugins/CheckerOptionHandling/CheckerOptionHandling.cpp b/clang/lib/Analysis/plugins/CheckerOptionHandling/CheckerOptionHandling.cpp
index 77de3630ae7e..32fba9c93752 100644
--- a/clang/lib/Analysis/plugins/CheckerOptionHandling/CheckerOptionHandling.cpp
+++ b/clang/lib/Analysis/plugins/CheckerOptionHandling/CheckerOptionHandling.cpp
@@ -21,7 +21,7 @@ void registerMyChecker(CheckerManager &Mgr) {
<< '\n';
}
-bool shouldRegisterMyChecker(const LangOptions &LO) { return true; }
+bool shouldRegisterMyChecker(const CheckerManager &mgr) { return true; }
} // end anonymous namespace
diff --git a/clang/lib/Basic/Attributes.cpp b/clang/lib/Basic/Attributes.cpp
index 74cc3d1d03da..ff6dbf870fcf 100644
--- a/clang/lib/Basic/Attributes.cpp
+++ b/clang/lib/Basic/Attributes.cpp
@@ -36,10 +36,14 @@ const char *attr::getSubjectMatchRuleSpelling(attr::SubjectMatchRule Rule) {
}
static StringRef
-normalizeAttrScopeName(StringRef ScopeName,
+normalizeAttrScopeName(const IdentifierInfo *Scope,
AttributeCommonInfo::Syntax SyntaxUsed) {
+ if (!Scope)
+ return "";
+
// Normalize the "__gnu__" scope name to be "gnu" and the "_Clang" scope name
// to be "clang".
+ StringRef ScopeName = Scope->getName();
if (SyntaxUsed == AttributeCommonInfo::AS_CXX11 ||
SyntaxUsed == AttributeCommonInfo::AS_C2x) {
if (ScopeName == "__gnu__")
@@ -50,7 +54,7 @@ normalizeAttrScopeName(StringRef ScopeName,
return ScopeName;
}
-static StringRef normalizeAttrName(StringRef AttrName,
+static StringRef normalizeAttrName(const IdentifierInfo *Name,
StringRef NormalizedScopeName,
AttributeCommonInfo::Syntax SyntaxUsed) {
// Normalize the attribute name, __foo__ becomes foo. This is only allowable
@@ -61,6 +65,7 @@ static StringRef normalizeAttrName(StringRef AttrName,
SyntaxUsed == AttributeCommonInfo::AS_C2x) &&
(NormalizedScopeName.empty() || NormalizedScopeName == "gnu" ||
NormalizedScopeName == "clang"));
+ StringRef AttrName = Name->getName();
if (ShouldNormalize && AttrName.size() >= 4 && AttrName.startswith("__") &&
AttrName.endswith("__"))
AttrName = AttrName.slice(2, AttrName.size() - 2);
@@ -74,35 +79,41 @@ bool AttributeCommonInfo::isGNUScope() const {
#include "clang/Sema/AttrParsedAttrKinds.inc"
+static SmallString<64> normalizeName(const IdentifierInfo *Name,
+ const IdentifierInfo *Scope,
+ AttributeCommonInfo::Syntax SyntaxUsed) {
+ StringRef ScopeName = normalizeAttrScopeName(Scope, SyntaxUsed);
+ StringRef AttrName = normalizeAttrName(Name, ScopeName, SyntaxUsed);
+
+ SmallString<64> FullName = ScopeName;
+ if (!ScopeName.empty()) {
+ assert(SyntaxUsed == AttributeCommonInfo::AS_CXX11 ||
+ SyntaxUsed == AttributeCommonInfo::AS_C2x);
+ FullName += "::";
+ }
+ FullName += AttrName;
+
+ return FullName;
+}
+
AttributeCommonInfo::Kind
AttributeCommonInfo::getParsedKind(const IdentifierInfo *Name,
const IdentifierInfo *ScopeName,
Syntax SyntaxUsed) {
- StringRef AttrName = Name->getName();
-
- SmallString<64> FullName;
- if (ScopeName)
- FullName += normalizeAttrScopeName(ScopeName->getName(), SyntaxUsed);
-
- AttrName = normalizeAttrName(AttrName, FullName, SyntaxUsed);
-
- // Ensure that in the case of C++11 attributes, we look for '::foo' if it is
- // unscoped.
- if (ScopeName || SyntaxUsed == AS_CXX11 || SyntaxUsed == AS_C2x)
- FullName += "::";
- FullName += AttrName;
+ return ::getAttrKind(normalizeName(Name, ScopeName, SyntaxUsed), SyntaxUsed);
+}
- return ::getAttrKind(FullName, SyntaxUsed);
+std::string AttributeCommonInfo::getNormalizedFullName() const {
+ return static_cast<std::string>(
+ normalizeName(getAttrName(), getScopeName(), getSyntax()));
}
unsigned AttributeCommonInfo::calculateAttributeSpellingListIndex() const {
// Both variables will be used in tablegen generated
// attribute spell list index matching code.
auto Syntax = static_cast<AttributeCommonInfo::Syntax>(getSyntax());
- StringRef Scope =
- getScopeName() ? normalizeAttrScopeName(getScopeName()->getName(), Syntax)
- : "";
- StringRef Name = normalizeAttrName(getAttrName()->getName(), Scope, Syntax);
+ StringRef Scope = normalizeAttrScopeName(getScopeName(), Syntax);
+ StringRef Name = normalizeAttrName(getAttrName(), Scope, Syntax);
#include "clang/Sema/AttrSpellingListIndex.inc"
}
diff --git a/clang/lib/Basic/CodeGenOptions.cpp b/clang/lib/Basic/CodeGenOptions.cpp
index fa186380f109..4fc7a535c9eb 100644
--- a/clang/lib/Basic/CodeGenOptions.cpp
+++ b/clang/lib/Basic/CodeGenOptions.cpp
@@ -17,7 +17,7 @@ CodeGenOptions::CodeGenOptions() {
#include "clang/Basic/CodeGenOptions.def"
RelocationModel = llvm::Reloc::PIC_;
- memcpy(CoverageVersion, "402*", 4);
+ memcpy(CoverageVersion, "408*", 4);
}
bool CodeGenOptions::isNoBuiltinFunc(const char *Name) const {
diff --git a/clang/lib/Basic/Cuda.cpp b/clang/lib/Basic/Cuda.cpp
index f2b6c8cd3ee9..709185707bd9 100644
--- a/clang/lib/Basic/Cuda.cpp
+++ b/clang/lib/Basic/Cuda.cpp
@@ -2,6 +2,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/VersionTuple.h"
@@ -27,12 +28,16 @@ const char *CudaVersionToString(CudaVersion V) {
return "10.0";
case CudaVersion::CUDA_101:
return "10.1";
+ case CudaVersion::CUDA_102:
+ return "10.2";
+ case CudaVersion::CUDA_110:
+ return "11.0";
}
llvm_unreachable("invalid enum");
}
-CudaVersion CudaStringToVersion(llvm::StringRef S) {
- return llvm::StringSwitch<CudaVersion>(S)
+CudaVersion CudaStringToVersion(const llvm::Twine &S) {
+ return llvm::StringSwitch<CudaVersion>(S.str())
.Case("7.0", CudaVersion::CUDA_70)
.Case("7.5", CudaVersion::CUDA_75)
.Case("8.0", CudaVersion::CUDA_80)
@@ -40,253 +45,88 @@ CudaVersion CudaStringToVersion(llvm::StringRef S) {
.Case("9.1", CudaVersion::CUDA_91)
.Case("9.2", CudaVersion::CUDA_92)
.Case("10.0", CudaVersion::CUDA_100)
- .Case("10.1", CudaVersion::CUDA_101);
+ .Case("10.1", CudaVersion::CUDA_101)
+ .Case("10.2", CudaVersion::CUDA_102)
+ .Case("11.0", CudaVersion::CUDA_110)
+ .Default(CudaVersion::UNKNOWN);
}
-const char *CudaArchToString(CudaArch A) {
- switch (A) {
- case CudaArch::LAST:
- break;
- case CudaArch::UNKNOWN:
- return "unknown";
- case CudaArch::SM_20:
- return "sm_20";
- case CudaArch::SM_21:
- return "sm_21";
- case CudaArch::SM_30:
- return "sm_30";
- case CudaArch::SM_32:
- return "sm_32";
- case CudaArch::SM_35:
- return "sm_35";
- case CudaArch::SM_37:
- return "sm_37";
- case CudaArch::SM_50:
- return "sm_50";
- case CudaArch::SM_52:
- return "sm_52";
- case CudaArch::SM_53:
- return "sm_53";
- case CudaArch::SM_60:
- return "sm_60";
- case CudaArch::SM_61:
- return "sm_61";
- case CudaArch::SM_62:
- return "sm_62";
- case CudaArch::SM_70:
- return "sm_70";
- case CudaArch::SM_72:
- return "sm_72";
- case CudaArch::SM_75:
- return "sm_75";
- case CudaArch::GFX600: // tahiti
- return "gfx600";
- case CudaArch::GFX601: // pitcairn, verde, oland,hainan
- return "gfx601";
- case CudaArch::GFX700: // kaveri
- return "gfx700";
- case CudaArch::GFX701: // hawaii
- return "gfx701";
- case CudaArch::GFX702: // 290,290x,R390,R390x
- return "gfx702";
- case CudaArch::GFX703: // kabini mullins
- return "gfx703";
- case CudaArch::GFX704: // bonaire
- return "gfx704";
- case CudaArch::GFX801: // carrizo
- return "gfx801";
- case CudaArch::GFX802: // tonga,iceland
- return "gfx802";
- case CudaArch::GFX803: // fiji,polaris10
- return "gfx803";
- case CudaArch::GFX810: // stoney
- return "gfx810";
- case CudaArch::GFX900: // vega, instinct
- return "gfx900";
- case CudaArch::GFX902: // TBA
- return "gfx902";
- case CudaArch::GFX904: // TBA
- return "gfx904";
- case CudaArch::GFX906: // TBA
- return "gfx906";
- case CudaArch::GFX908: // TBA
- return "gfx908";
- case CudaArch::GFX909: // TBA
- return "gfx909";
- case CudaArch::GFX1010: // TBA
- return "gfx1010";
- case CudaArch::GFX1011: // TBA
- return "gfx1011";
- case CudaArch::GFX1012: // TBA
- return "gfx1012";
- }
- llvm_unreachable("invalid enum");
-}
+struct CudaArchToStringMap {
+ CudaArch arch;
+ const char *arch_name;
+ const char *virtual_arch_name;
+};
-CudaArch StringToCudaArch(llvm::StringRef S) {
- return llvm::StringSwitch<CudaArch>(S)
- .Case("sm_20", CudaArch::SM_20)
- .Case("sm_21", CudaArch::SM_21)
- .Case("sm_30", CudaArch::SM_30)
- .Case("sm_32", CudaArch::SM_32)
- .Case("sm_35", CudaArch::SM_35)
- .Case("sm_37", CudaArch::SM_37)
- .Case("sm_50", CudaArch::SM_50)
- .Case("sm_52", CudaArch::SM_52)
- .Case("sm_53", CudaArch::SM_53)
- .Case("sm_60", CudaArch::SM_60)
- .Case("sm_61", CudaArch::SM_61)
- .Case("sm_62", CudaArch::SM_62)
- .Case("sm_70", CudaArch::SM_70)
- .Case("sm_72", CudaArch::SM_72)
- .Case("sm_75", CudaArch::SM_75)
- .Case("gfx600", CudaArch::GFX600)
- .Case("gfx601", CudaArch::GFX601)
- .Case("gfx700", CudaArch::GFX700)
- .Case("gfx701", CudaArch::GFX701)
- .Case("gfx702", CudaArch::GFX702)
- .Case("gfx703", CudaArch::GFX703)
- .Case("gfx704", CudaArch::GFX704)
- .Case("gfx801", CudaArch::GFX801)
- .Case("gfx802", CudaArch::GFX802)
- .Case("gfx803", CudaArch::GFX803)
- .Case("gfx810", CudaArch::GFX810)
- .Case("gfx900", CudaArch::GFX900)
- .Case("gfx902", CudaArch::GFX902)
- .Case("gfx904", CudaArch::GFX904)
- .Case("gfx906", CudaArch::GFX906)
- .Case("gfx908", CudaArch::GFX908)
- .Case("gfx909", CudaArch::GFX909)
- .Case("gfx1010", CudaArch::GFX1010)
- .Case("gfx1011", CudaArch::GFX1011)
- .Case("gfx1012", CudaArch::GFX1012)
- .Default(CudaArch::UNKNOWN);
-}
+#define SM2(sm, ca) \
+ { CudaArch::SM_##sm, "sm_" #sm, ca }
+#define SM(sm) SM2(sm, "compute_" #sm)
+#define GFX(gpu) \
+ { CudaArch::GFX##gpu, "gfx" #gpu, "compute_amdgcn" }
+CudaArchToStringMap arch_names[] = {
+ // clang-format off
+ SM2(20, "compute_20"), SM2(21, "compute_20"), // Fermi
+ SM(30), SM(32), SM(35), SM(37), // Kepler
+ SM(50), SM(52), SM(53), // Maxwell
+ SM(60), SM(61), SM(62), // Pascal
+ SM(70), SM(72), // Volta
+ SM(75), // Turing
+ SM(80), // Ampere
+ GFX(600), // tahiti
+ GFX(601), // pitcairn, verde, oland,hainan
+ GFX(700), // kaveri
+ GFX(701), // hawaii
+ GFX(702), // 290,290x,R390,R390x
+ GFX(703), // kabini mullins
+ GFX(704), // bonaire
+ GFX(801), // carrizo
+ GFX(802), // tonga,iceland
+ GFX(803), // fiji,polaris10
+ GFX(810), // stoney
+ GFX(900), // vega, instinct
+ GFX(902), GFX(904), GFX(906), GFX(908), GFX(909),
+ GFX(1010), GFX(1011), GFX(1012),
+ // clang-format on
+};
+#undef SM
+#undef SM2
+#undef GFX
-const char *CudaVirtualArchToString(CudaVirtualArch A) {
- switch (A) {
- case CudaVirtualArch::UNKNOWN:
+const char *CudaArchToString(CudaArch A) {
+ auto result = std::find_if(
+ std::begin(arch_names), std::end(arch_names),
+ [A](const CudaArchToStringMap &map) { return A == map.arch; });
+ if (result == std::end(arch_names))
return "unknown";
- case CudaVirtualArch::COMPUTE_20:
- return "compute_20";
- case CudaVirtualArch::COMPUTE_30:
- return "compute_30";
- case CudaVirtualArch::COMPUTE_32:
- return "compute_32";
- case CudaVirtualArch::COMPUTE_35:
- return "compute_35";
- case CudaVirtualArch::COMPUTE_37:
- return "compute_37";
- case CudaVirtualArch::COMPUTE_50:
- return "compute_50";
- case CudaVirtualArch::COMPUTE_52:
- return "compute_52";
- case CudaVirtualArch::COMPUTE_53:
- return "compute_53";
- case CudaVirtualArch::COMPUTE_60:
- return "compute_60";
- case CudaVirtualArch::COMPUTE_61:
- return "compute_61";
- case CudaVirtualArch::COMPUTE_62:
- return "compute_62";
- case CudaVirtualArch::COMPUTE_70:
- return "compute_70";
- case CudaVirtualArch::COMPUTE_72:
- return "compute_72";
- case CudaVirtualArch::COMPUTE_75:
- return "compute_75";
- case CudaVirtualArch::COMPUTE_AMDGCN:
- return "compute_amdgcn";
- }
- llvm_unreachable("invalid enum");
+ return result->arch_name;
}
-CudaVirtualArch StringToCudaVirtualArch(llvm::StringRef S) {
- return llvm::StringSwitch<CudaVirtualArch>(S)
- .Case("compute_20", CudaVirtualArch::COMPUTE_20)
- .Case("compute_30", CudaVirtualArch::COMPUTE_30)
- .Case("compute_32", CudaVirtualArch::COMPUTE_32)
- .Case("compute_35", CudaVirtualArch::COMPUTE_35)
- .Case("compute_37", CudaVirtualArch::COMPUTE_37)
- .Case("compute_50", CudaVirtualArch::COMPUTE_50)
- .Case("compute_52", CudaVirtualArch::COMPUTE_52)
- .Case("compute_53", CudaVirtualArch::COMPUTE_53)
- .Case("compute_60", CudaVirtualArch::COMPUTE_60)
- .Case("compute_61", CudaVirtualArch::COMPUTE_61)
- .Case("compute_62", CudaVirtualArch::COMPUTE_62)
- .Case("compute_70", CudaVirtualArch::COMPUTE_70)
- .Case("compute_72", CudaVirtualArch::COMPUTE_72)
- .Case("compute_75", CudaVirtualArch::COMPUTE_75)
- .Case("compute_amdgcn", CudaVirtualArch::COMPUTE_AMDGCN)
- .Default(CudaVirtualArch::UNKNOWN);
+const char *CudaArchToVirtualArchString(CudaArch A) {
+ auto result = std::find_if(
+ std::begin(arch_names), std::end(arch_names),
+ [A](const CudaArchToStringMap &map) { return A == map.arch; });
+ if (result == std::end(arch_names))
+ return "unknown";
+ return result->virtual_arch_name;
}
-CudaVirtualArch VirtualArchForCudaArch(CudaArch A) {
- switch (A) {
- case CudaArch::LAST:
- break;
- case CudaArch::UNKNOWN:
- return CudaVirtualArch::UNKNOWN;
- case CudaArch::SM_20:
- case CudaArch::SM_21:
- return CudaVirtualArch::COMPUTE_20;
- case CudaArch::SM_30:
- return CudaVirtualArch::COMPUTE_30;
- case CudaArch::SM_32:
- return CudaVirtualArch::COMPUTE_32;
- case CudaArch::SM_35:
- return CudaVirtualArch::COMPUTE_35;
- case CudaArch::SM_37:
- return CudaVirtualArch::COMPUTE_37;
- case CudaArch::SM_50:
- return CudaVirtualArch::COMPUTE_50;
- case CudaArch::SM_52:
- return CudaVirtualArch::COMPUTE_52;
- case CudaArch::SM_53:
- return CudaVirtualArch::COMPUTE_53;
- case CudaArch::SM_60:
- return CudaVirtualArch::COMPUTE_60;
- case CudaArch::SM_61:
- return CudaVirtualArch::COMPUTE_61;
- case CudaArch::SM_62:
- return CudaVirtualArch::COMPUTE_62;
- case CudaArch::SM_70:
- return CudaVirtualArch::COMPUTE_70;
- case CudaArch::SM_72:
- return CudaVirtualArch::COMPUTE_72;
- case CudaArch::SM_75:
- return CudaVirtualArch::COMPUTE_75;
- case CudaArch::GFX600:
- case CudaArch::GFX601:
- case CudaArch::GFX700:
- case CudaArch::GFX701:
- case CudaArch::GFX702:
- case CudaArch::GFX703:
- case CudaArch::GFX704:
- case CudaArch::GFX801:
- case CudaArch::GFX802:
- case CudaArch::GFX803:
- case CudaArch::GFX810:
- case CudaArch::GFX900:
- case CudaArch::GFX902:
- case CudaArch::GFX904:
- case CudaArch::GFX906:
- case CudaArch::GFX908:
- case CudaArch::GFX909:
- case CudaArch::GFX1010:
- case CudaArch::GFX1011:
- case CudaArch::GFX1012:
- return CudaVirtualArch::COMPUTE_AMDGCN;
- }
- llvm_unreachable("invalid enum");
+CudaArch StringToCudaArch(llvm::StringRef S) {
+ auto result = std::find_if(
+ std::begin(arch_names), std::end(arch_names),
+ [S](const CudaArchToStringMap &map) { return S == map.arch_name; });
+ if (result == std::end(arch_names))
+ return CudaArch::UNKNOWN;
+ return result->arch;
}
CudaVersion MinVersionForCudaArch(CudaArch A) {
- switch (A) {
- case CudaArch::LAST:
- break;
- case CudaArch::UNKNOWN:
+ if (A == CudaArch::UNKNOWN)
return CudaVersion::UNKNOWN;
+
+ // AMD GPUs do not depend on CUDA versions.
+ if (IsAMDGpuArch(A))
+ return CudaVersion::CUDA_70;
+
+ switch (A) {
case CudaArch::SM_20:
case CudaArch::SM_21:
case CudaArch::SM_30:
@@ -307,60 +147,30 @@ CudaVersion MinVersionForCudaArch(CudaArch A) {
return CudaVersion::CUDA_91;
case CudaArch::SM_75:
return CudaVersion::CUDA_100;
- case CudaArch::GFX600:
- case CudaArch::GFX601:
- case CudaArch::GFX700:
- case CudaArch::GFX701:
- case CudaArch::GFX702:
- case CudaArch::GFX703:
- case CudaArch::GFX704:
- case CudaArch::GFX801:
- case CudaArch::GFX802:
- case CudaArch::GFX803:
- case CudaArch::GFX810:
- case CudaArch::GFX900:
- case CudaArch::GFX902:
- case CudaArch::GFX904:
- case CudaArch::GFX906:
- case CudaArch::GFX908:
- case CudaArch::GFX909:
- case CudaArch::GFX1010:
- case CudaArch::GFX1011:
- case CudaArch::GFX1012:
- return CudaVersion::CUDA_70;
+ case CudaArch::SM_80:
+ return CudaVersion::CUDA_110;
+ default:
+ llvm_unreachable("invalid enum");
}
- llvm_unreachable("invalid enum");
}
CudaVersion MaxVersionForCudaArch(CudaArch A) {
+ // AMD GPUs do not depend on CUDA versions.
+ if (IsAMDGpuArch(A))
+ return CudaVersion::LATEST;
+
switch (A) {
case CudaArch::UNKNOWN:
return CudaVersion::UNKNOWN;
case CudaArch::SM_20:
case CudaArch::SM_21:
- case CudaArch::GFX600:
- case CudaArch::GFX601:
- case CudaArch::GFX700:
- case CudaArch::GFX701:
- case CudaArch::GFX702:
- case CudaArch::GFX703:
- case CudaArch::GFX704:
- case CudaArch::GFX801:
- case CudaArch::GFX802:
- case CudaArch::GFX803:
- case CudaArch::GFX810:
- case CudaArch::GFX900:
- case CudaArch::GFX902:
- case CudaArch::GFX1010:
- case CudaArch::GFX1011:
- case CudaArch::GFX1012:
return CudaVersion::CUDA_80;
default:
return CudaVersion::LATEST;
}
}
-static CudaVersion ToCudaVersion(llvm::VersionTuple Version) {
+CudaVersion ToCudaVersion(llvm::VersionTuple Version) {
int IVer =
Version.getMajor() * 10 + Version.getMinor().getValueOr(0);
switch(IVer) {
@@ -380,6 +190,10 @@ static CudaVersion ToCudaVersion(llvm::VersionTuple Version) {
return CudaVersion::CUDA_100;
case 101:
return CudaVersion::CUDA_101;
+ case 102:
+ return CudaVersion::CUDA_102;
+ case 110:
+ return CudaVersion::CUDA_110;
default:
return CudaVersion::UNKNOWN;
}
diff --git a/clang/lib/Basic/Diagnostic.cpp b/clang/lib/Basic/Diagnostic.cpp
index f258b37f2fa6..661eabf9bc7c 100644
--- a/clang/lib/Basic/Diagnostic.cpp
+++ b/clang/lib/Basic/Diagnostic.cpp
@@ -61,6 +61,12 @@ const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
return DB;
}
+const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
+ llvm::Error &&E) {
+ DB.AddString(toString(std::move(E)));
+ return DB;
+}
+
static void DummyArgToStringFn(DiagnosticsEngine::ArgumentKind AK, intptr_t QT,
StringRef Modifier, StringRef Argument,
ArrayRef<DiagnosticsEngine::ArgumentValue> PrevArgs,
diff --git a/clang/lib/Basic/DiagnosticIDs.cpp b/clang/lib/Basic/DiagnosticIDs.cpp
index e30e3753d193..8c7e63e06301 100644
--- a/clang/lib/Basic/DiagnosticIDs.cpp
+++ b/clang/lib/Basic/DiagnosticIDs.cpp
@@ -85,6 +85,7 @@ VALIDATE_DIAG_SIZE(LEX)
VALIDATE_DIAG_SIZE(PARSE)
VALIDATE_DIAG_SIZE(AST)
VALIDATE_DIAG_SIZE(COMMENT)
+VALIDATE_DIAG_SIZE(CROSSTU)
VALIDATE_DIAG_SIZE(SEMA)
VALIDATE_DIAG_SIZE(ANALYSIS)
VALIDATE_DIAG_SIZE(REFACTORING)
@@ -289,7 +290,7 @@ namespace clang {
unsigned getOrCreateDiagID(DiagnosticIDs::Level L, StringRef Message,
DiagnosticIDs &Diags) {
- DiagDesc D(L, Message);
+ DiagDesc D(L, std::string(Message));
// Check to see if it already exists.
std::map<DiagDesc, unsigned>::iterator I = DiagIDs.lower_bound(D);
if (I != DiagIDs.end() && I->first == D)
diff --git a/clang/lib/Basic/ExpressionTraits.cpp b/clang/lib/Basic/ExpressionTraits.cpp
new file mode 100644
index 000000000000..5fde1940038f
--- /dev/null
+++ b/clang/lib/Basic/ExpressionTraits.cpp
@@ -0,0 +1,36 @@
+//===--- ExpressionTraits.cpp - Expression Traits Support -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the expression traits support functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/ExpressionTraits.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+using namespace clang;
+
+static constexpr const char *ExpressionTraitNames[] = {
+#define EXPRESSION_TRAIT(Spelling, Name, Key) #Name,
+#include "clang/Basic/TokenKinds.def"
+};
+
+static constexpr const char *ExpressionTraitSpellings[] = {
+#define EXPRESSION_TRAIT(Spelling, Name, Key) #Spelling,
+#include "clang/Basic/TokenKinds.def"
+};
+
+const char *clang::getTraitName(ExpressionTrait T) {
+ assert(T <= ET_Last && "invalid enum value!");
+ return ExpressionTraitNames[T];
+}
+
+const char *clang::getTraitSpelling(ExpressionTrait T) {
+ assert(T <= ET_Last && "invalid enum value!");
+ return ExpressionTraitSpellings[T];
+}
diff --git a/clang/lib/Basic/FileManager.cpp b/clang/lib/Basic/FileManager.cpp
index 079a4bbfc82f..e92e9d5911c0 100644
--- a/clang/lib/Basic/FileManager.cpp
+++ b/clang/lib/Basic/FileManager.cpp
@@ -454,11 +454,12 @@ void FileManager::fillRealPathName(FileEntry *UFE, llvm::StringRef FileName) {
// misleading. We need to clean up the interface here.
makeAbsolutePath(AbsPath);
llvm::sys::path::remove_dots(AbsPath, /*remove_dot_dot=*/true);
- UFE->RealPathName = AbsPath.str();
+ UFE->RealPathName = std::string(AbsPath.str());
}
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
-FileManager::getBufferForFile(const FileEntry *Entry, bool isVolatile) {
+FileManager::getBufferForFile(const FileEntry *Entry, bool isVolatile,
+ bool RequiresNullTerminator) {
uint64_t FileSize = Entry->getSize();
// If there's a high enough chance that the file have changed since we
// got its size, force a stat before opening it.
@@ -468,28 +469,29 @@ FileManager::getBufferForFile(const FileEntry *Entry, bool isVolatile) {
StringRef Filename = Entry->getName();
// If the file is already open, use the open file descriptor.
if (Entry->File) {
- auto Result =
- Entry->File->getBuffer(Filename, FileSize,
- /*RequiresNullTerminator=*/true, isVolatile);
+ auto Result = Entry->File->getBuffer(Filename, FileSize,
+ RequiresNullTerminator, isVolatile);
Entry->closeFile();
return Result;
}
// Otherwise, open the file.
- return getBufferForFileImpl(Filename, FileSize, isVolatile);
+ return getBufferForFileImpl(Filename, FileSize, isVolatile,
+ RequiresNullTerminator);
}
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
FileManager::getBufferForFileImpl(StringRef Filename, int64_t FileSize,
- bool isVolatile) {
+ bool isVolatile,
+ bool RequiresNullTerminator) {
if (FileSystemOpts.WorkingDir.empty())
- return FS->getBufferForFile(Filename, FileSize,
- /*RequiresNullTerminator=*/true, isVolatile);
+ return FS->getBufferForFile(Filename, FileSize, RequiresNullTerminator,
+ isVolatile);
SmallString<128> FilePath(Filename);
FixupRelativePath(FilePath);
- return FS->getBufferForFile(FilePath, FileSize,
- /*RequiresNullTerminator=*/true, isVolatile);
+ return FS->getBufferForFile(FilePath, FileSize, RequiresNullTerminator,
+ isVolatile);
}
/// getStatValue - Get the 'stat' information for the specified path,
@@ -513,7 +515,7 @@ FileManager::getStatValue(StringRef Path, llvm::vfs::Status &Status,
StatCache.get(), *FS);
}
-std::error_code
+std::error_code
FileManager::getNoncachedStatValue(StringRef Path,
llvm::vfs::Status &Result) {
SmallString<128> FilePath(Path);
diff --git a/clang/lib/Basic/FixedPoint.cpp b/clang/lib/Basic/FixedPoint.cpp
index 05600dfc6d21..ed8b92c98fdb 100644
--- a/clang/lib/Basic/FixedPoint.cpp
+++ b/clang/lib/Basic/FixedPoint.cpp
@@ -173,6 +173,142 @@ APFixedPoint APFixedPoint::add(const APFixedPoint &Other,
return APFixedPoint(Result, CommonFXSema);
}
+APFixedPoint APFixedPoint::sub(const APFixedPoint &Other,
+ bool *Overflow) const {
+ auto CommonFXSema = Sema.getCommonSemantics(Other.getSemantics());
+ APFixedPoint ConvertedThis = convert(CommonFXSema);
+ APFixedPoint ConvertedOther = Other.convert(CommonFXSema);
+ llvm::APSInt ThisVal = ConvertedThis.getValue();
+ llvm::APSInt OtherVal = ConvertedOther.getValue();
+ bool Overflowed = false;
+
+ llvm::APSInt Result;
+ if (CommonFXSema.isSaturated()) {
+ Result = CommonFXSema.isSigned() ? ThisVal.ssub_sat(OtherVal)
+ : ThisVal.usub_sat(OtherVal);
+ } else {
+ Result = ThisVal.isSigned() ? ThisVal.ssub_ov(OtherVal, Overflowed)
+ : ThisVal.usub_ov(OtherVal, Overflowed);
+ }
+
+ if (Overflow)
+ *Overflow = Overflowed;
+
+ return APFixedPoint(Result, CommonFXSema);
+}
+
+APFixedPoint APFixedPoint::mul(const APFixedPoint &Other,
+ bool *Overflow) const {
+ auto CommonFXSema = Sema.getCommonSemantics(Other.getSemantics());
+ APFixedPoint ConvertedThis = convert(CommonFXSema);
+ APFixedPoint ConvertedOther = Other.convert(CommonFXSema);
+ llvm::APSInt ThisVal = ConvertedThis.getValue();
+ llvm::APSInt OtherVal = ConvertedOther.getValue();
+ bool Overflowed = false;
+
+ // Widen the LHS and RHS so we can perform a full multiplication.
+ unsigned Wide = CommonFXSema.getWidth() * 2;
+ if (CommonFXSema.isSigned()) {
+ ThisVal = ThisVal.sextOrSelf(Wide);
+ OtherVal = OtherVal.sextOrSelf(Wide);
+ } else {
+ ThisVal = ThisVal.zextOrSelf(Wide);
+ OtherVal = OtherVal.zextOrSelf(Wide);
+ }
+
+ // Perform the full multiplication and downscale to get the same scale.
+ //
+ // Note that the right shifts here perform an implicit downwards rounding.
+ // This rounding could discard bits that would technically place the result
+ // outside the representable range. We interpret the spec as allowing us to
+ // perform the rounding step first, avoiding the overflow case that would
+ // arise.
+ llvm::APSInt Result;
+ if (CommonFXSema.isSigned())
+ Result = ThisVal.smul_ov(OtherVal, Overflowed)
+ .ashr(CommonFXSema.getScale());
+ else
+ Result = ThisVal.umul_ov(OtherVal, Overflowed)
+ .lshr(CommonFXSema.getScale());
+ assert(!Overflowed && "Full multiplication cannot overflow!");
+ Result.setIsSigned(CommonFXSema.isSigned());
+
+ // If our result lies outside of the representative range of the common
+ // semantic, we either have overflow or saturation.
+ llvm::APSInt Max = APFixedPoint::getMax(CommonFXSema).getValue()
+ .extOrTrunc(Wide);
+ llvm::APSInt Min = APFixedPoint::getMin(CommonFXSema).getValue()
+ .extOrTrunc(Wide);
+ if (CommonFXSema.isSaturated()) {
+ if (Result < Min)
+ Result = Min;
+ else if (Result > Max)
+ Result = Max;
+ } else
+ Overflowed = Result < Min || Result > Max;
+
+ if (Overflow)
+ *Overflow = Overflowed;
+
+ return APFixedPoint(Result.sextOrTrunc(CommonFXSema.getWidth()),
+ CommonFXSema);
+}
+
+APFixedPoint APFixedPoint::div(const APFixedPoint &Other,
+ bool *Overflow) const {
+ auto CommonFXSema = Sema.getCommonSemantics(Other.getSemantics());
+ APFixedPoint ConvertedThis = convert(CommonFXSema);
+ APFixedPoint ConvertedOther = Other.convert(CommonFXSema);
+ llvm::APSInt ThisVal = ConvertedThis.getValue();
+ llvm::APSInt OtherVal = ConvertedOther.getValue();
+ bool Overflowed = false;
+
+ // Widen the LHS and RHS so we can perform a full division.
+ unsigned Wide = CommonFXSema.getWidth() * 2;
+ if (CommonFXSema.isSigned()) {
+ ThisVal = ThisVal.sextOrSelf(Wide);
+ OtherVal = OtherVal.sextOrSelf(Wide);
+ } else {
+ ThisVal = ThisVal.zextOrSelf(Wide);
+ OtherVal = OtherVal.zextOrSelf(Wide);
+ }
+
+ // Upscale to compensate for the loss of precision from division, and
+ // perform the full division.
+ ThisVal = ThisVal.shl(CommonFXSema.getScale());
+ llvm::APSInt Result;
+ if (CommonFXSema.isSigned()) {
+ llvm::APInt Rem;
+ llvm::APInt::sdivrem(ThisVal, OtherVal, Result, Rem);
+ // If the quotient is negative and the remainder is nonzero, round
+ // towards negative infinity by subtracting epsilon from the result.
+ if (ThisVal.isNegative() != OtherVal.isNegative() && !Rem.isNullValue())
+ Result = Result - 1;
+ } else
+ Result = ThisVal.udiv(OtherVal);
+ Result.setIsSigned(CommonFXSema.isSigned());
+
+ // If our result lies outside of the representative range of the common
+ // semantic, we either have overflow or saturation.
+ llvm::APSInt Max = APFixedPoint::getMax(CommonFXSema).getValue()
+ .extOrTrunc(Wide);
+ llvm::APSInt Min = APFixedPoint::getMin(CommonFXSema).getValue()
+ .extOrTrunc(Wide);
+ if (CommonFXSema.isSaturated()) {
+ if (Result < Min)
+ Result = Min;
+ else if (Result > Max)
+ Result = Max;
+ } else
+ Overflowed = Result < Min || Result > Max;
+
+ if (Overflow)
+ *Overflow = Overflowed;
+
+ return APFixedPoint(Result.sextOrTrunc(CommonFXSema.getWidth()),
+ CommonFXSema);
+}
+
void APFixedPoint::toString(llvm::SmallVectorImpl<char> &Str) const {
llvm::APSInt Val = getValue();
unsigned Scale = getScale();
diff --git a/clang/lib/Basic/IdentifierTable.cpp b/clang/lib/Basic/IdentifierTable.cpp
index 4aebea19924f..36b26d9b7c68 100644
--- a/clang/lib/Basic/IdentifierTable.cpp
+++ b/clang/lib/Basic/IdentifierTable.cpp
@@ -16,6 +16,7 @@
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/Specifiers.h"
+#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TokenKinds.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/FoldingSet.h"
@@ -32,6 +33,12 @@
using namespace clang;
+// A check to make sure the ObjCOrBuiltinID has sufficient room to store the
+// largest possible target/aux-target combination. If we exceed this, we likely
+// need to just change the ObjCOrBuiltinIDBits value in IdentifierTable.h.
+static_assert(2 * LargestBuiltinID < (2 << (ObjCOrBuiltinIDBits - 1)),
+ "Insufficient ObjCOrBuiltinID Bits");
+
//===----------------------------------------------------------------------===//
// IdentifierTable Implementation
//===----------------------------------------------------------------------===//
@@ -97,10 +104,10 @@ namespace {
KEYZVECTOR = 0x40000,
KEYCOROUTINES = 0x80000,
KEYMODULES = 0x100000,
- KEYCXX2A = 0x200000,
+ KEYCXX20 = 0x200000,
KEYOPENCLCXX = 0x400000,
KEYMSCOMPAT = 0x800000,
- KEYALLCXX = KEYCXX | KEYCXX11 | KEYCXX2A,
+ KEYALLCXX = KEYCXX | KEYCXX11 | KEYCXX20,
KEYALL = (0xffffff & ~KEYNOMS18 &
~KEYNOOPENCL) // KEYNOMS18 and KEYNOOPENCL are used to exclude.
};
@@ -122,7 +129,7 @@ static KeywordStatus getKeywordStatus(const LangOptions &LangOpts,
if (Flags == KEYALL) return KS_Enabled;
if (LangOpts.CPlusPlus && (Flags & KEYCXX)) return KS_Enabled;
if (LangOpts.CPlusPlus11 && (Flags & KEYCXX11)) return KS_Enabled;
- if (LangOpts.CPlusPlus2a && (Flags & KEYCXX2A)) return KS_Enabled;
+ if (LangOpts.CPlusPlus20 && (Flags & KEYCXX20)) return KS_Enabled;
if (LangOpts.C99 && (Flags & KEYC99)) return KS_Enabled;
if (LangOpts.GNUKeywords && (Flags & KEYGNU)) return KS_Extension;
if (LangOpts.MicrosoftExt && (Flags & KEYMS)) return KS_Extension;
@@ -142,10 +149,12 @@ static KeywordStatus getKeywordStatus(const LangOptions &LangOpts,
// We treat bridge casts as objective-C keywords so we can warn on them
// in non-arc mode.
if (LangOpts.ObjC && (Flags & KEYOBJC)) return KS_Enabled;
- if (LangOpts.ConceptsTS && (Flags & KEYCONCEPTS)) return KS_Enabled;
+ if (LangOpts.CPlusPlus20 && (Flags & KEYCONCEPTS)) return KS_Enabled;
if (LangOpts.Coroutines && (Flags & KEYCOROUTINES)) return KS_Enabled;
if (LangOpts.ModulesTS && (Flags & KEYMODULES)) return KS_Enabled;
if (LangOpts.CPlusPlus && (Flags & KEYALLCXX)) return KS_Future;
+ if (LangOpts.CPlusPlus && !LangOpts.CPlusPlus20 && (Flags & CHAR8SUPPORT))
+ return KS_Future;
return KS_Disabled;
}
@@ -257,7 +266,7 @@ bool IdentifierInfo::isCPlusPlusKeyword(const LangOptions &LangOpts) const {
LangOptions LangOptsNoCPP = LangOpts;
LangOptsNoCPP.CPlusPlus = false;
LangOptsNoCPP.CPlusPlus11 = false;
- LangOptsNoCPP.CPlusPlus2a = false;
+ LangOptsNoCPP.CPlusPlus20 = false;
return !isKeyword(LangOptsNoCPP);
}
@@ -463,7 +472,7 @@ std::string MultiKeywordSelector::getName() const {
OS << ':';
}
- return OS.str();
+ return std::string(OS.str());
}
std::string Selector::getAsString() const {
@@ -476,7 +485,7 @@ std::string Selector::getAsString() const {
if (getNumArgs() == 0) {
assert(II && "If the number of arguments is 0 then II is guaranteed to "
"not be null.");
- return II->getName();
+ return std::string(II->getName());
}
if (!II)
diff --git a/clang/lib/Basic/LangOptions.cpp b/clang/lib/Basic/LangOptions.cpp
index 516b1ff1b7e2..c08670c87fb6 100644
--- a/clang/lib/Basic/LangOptions.cpp
+++ b/clang/lib/Basic/LangOptions.cpp
@@ -24,7 +24,7 @@ void LangOptions::resetNonModularOptions() {
#define LANGOPT(Name, Bits, Default, Description)
#define BENIGN_LANGOPT(Name, Bits, Default, Description) Name = Default;
#define BENIGN_ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
- Name = Default;
+ Name = static_cast<unsigned>(Default);
#include "clang/Basic/LangOptions.def"
// These options do not affect AST generation.
@@ -47,3 +47,23 @@ VersionTuple LangOptions::getOpenCLVersionTuple() const {
const int Ver = OpenCLCPlusPlus ? OpenCLCPlusPlusVersion : OpenCLVersion;
return VersionTuple(Ver / 100, (Ver % 100) / 10);
}
+
+FPOptions FPOptions::defaultWithoutTrailingStorage(const LangOptions &LO) {
+ FPOptions result(LO);
+ return result;
+}
+
+LLVM_DUMP_METHOD void FPOptions::dump() {
+#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) \
+ llvm::errs() << "\n " #NAME " " << get##NAME();
+#include "clang/Basic/FPOptions.def"
+ llvm::errs() << "\n";
+}
+
+LLVM_DUMP_METHOD void FPOptionsOverride::dump() {
+#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) \
+ if (has##NAME##Override()) \
+ llvm::errs() << "\n " #NAME " Override is " << get##NAME##Override();
+#include "clang/Basic/FPOptions.def"
+ llvm::errs() << "\n";
+}
diff --git a/clang/lib/Basic/Module.cpp b/clang/lib/Basic/Module.cpp
index 541431dbbe7d..b3daaa3a4442 100644
--- a/clang/lib/Basic/Module.cpp
+++ b/clang/lib/Basic/Module.cpp
@@ -37,26 +37,21 @@ using namespace clang;
Module::Module(StringRef Name, SourceLocation DefinitionLoc, Module *Parent,
bool IsFramework, bool IsExplicit, unsigned VisibilityID)
: Name(Name), DefinitionLoc(DefinitionLoc), Parent(Parent),
- VisibilityID(VisibilityID), IsMissingRequirement(false),
+ VisibilityID(VisibilityID), IsUnimportable(false),
HasIncompatibleModuleFile(false), IsAvailable(true),
IsFromModuleFile(false), IsFramework(IsFramework), IsExplicit(IsExplicit),
IsSystem(false), IsExternC(false), IsInferred(false),
InferSubmodules(false), InferExplicitSubmodules(false),
InferExportWildcard(false), ConfigMacrosExhaustive(false),
NoUndeclaredIncludes(false), ModuleMapIsPrivate(false),
- NameVisibility(Hidden) {
+ HasUmbrellaDir(false), NameVisibility(Hidden) {
if (Parent) {
- if (!Parent->isAvailable())
- IsAvailable = false;
- if (Parent->IsSystem)
- IsSystem = true;
- if (Parent->IsExternC)
- IsExternC = true;
- if (Parent->NoUndeclaredIncludes)
- NoUndeclaredIncludes = true;
- if (Parent->ModuleMapIsPrivate)
- ModuleMapIsPrivate = true;
- IsMissingRequirement = Parent->IsMissingRequirement;
+ IsAvailable = Parent->isAvailable();
+ IsUnimportable = Parent->isUnimportable();
+ IsSystem = Parent->IsSystem;
+ IsExternC = Parent->IsExternC;
+ NoUndeclaredIncludes = Parent->NoUndeclaredIncludes;
+ ModuleMapIsPrivate = Parent->ModuleMapIsPrivate;
Parent->SubModuleIndex[Name] = Parent->SubModules.size();
Parent->SubModules.push_back(this);
@@ -132,25 +127,42 @@ static bool hasFeature(StringRef Feature, const LangOptions &LangOpts,
return HasFeature;
}
-bool Module::isAvailable(const LangOptions &LangOpts, const TargetInfo &Target,
- Requirement &Req,
- UnresolvedHeaderDirective &MissingHeader,
- Module *&ShadowingModule) const {
- if (IsAvailable)
- return true;
+bool Module::isUnimportable(const LangOptions &LangOpts,
+ const TargetInfo &Target, Requirement &Req,
+ Module *&ShadowingModule) const {
+ if (!IsUnimportable)
+ return false;
for (const Module *Current = this; Current; Current = Current->Parent) {
if (Current->ShadowingModule) {
ShadowingModule = Current->ShadowingModule;
- return false;
+ return true;
}
for (unsigned I = 0, N = Current->Requirements.size(); I != N; ++I) {
if (hasFeature(Current->Requirements[I].first, LangOpts, Target) !=
Current->Requirements[I].second) {
Req = Current->Requirements[I];
- return false;
+ return true;
}
}
+ }
+
+ llvm_unreachable("could not find a reason why module is unimportable");
+}
+
+bool Module::isAvailable(const LangOptions &LangOpts, const TargetInfo &Target,
+ Requirement &Req,
+ UnresolvedHeaderDirective &MissingHeader,
+ Module *&ShadowingModule) const {
+ if (IsAvailable)
+ return true;
+
+ if (isUnimportable(LangOpts, Target, Req, ShadowingModule))
+ return false;
+
+ // FIXME: All missing headers are listed on the top-level module. Should we
+ // just look there?
+ for (const Module *Current = this; Current; Current = Current->Parent) {
if (!Current->MissingHeaders.empty()) {
MissingHeader = Current->MissingHeaders.front();
return false;
@@ -239,7 +251,12 @@ Module::DirectoryName Module::getUmbrellaDir() const {
if (Header U = getUmbrellaHeader())
return {"", U.Entry->getDir()};
- return {UmbrellaAsWritten, Umbrella.dyn_cast<const DirectoryEntry *>()};
+ return {UmbrellaAsWritten, static_cast<const DirectoryEntry *>(Umbrella)};
+}
+
+void Module::addTopHeader(const FileEntry *File) {
+ assert(File);
+ TopHeaders.insert(File);
}
ArrayRef<const FileEntry *> Module::getTopHeaders(FileManager &FileMgr) {
@@ -276,18 +293,18 @@ bool Module::directlyUses(const Module *Requested) const {
void Module::addRequirement(StringRef Feature, bool RequiredState,
const LangOptions &LangOpts,
const TargetInfo &Target) {
- Requirements.push_back(Requirement(Feature, RequiredState));
+ Requirements.push_back(Requirement(std::string(Feature), RequiredState));
// If this feature is currently available, we're done.
if (hasFeature(Feature, LangOpts, Target) == RequiredState)
return;
- markUnavailable(/*MissingRequirement*/true);
+ markUnavailable(/*Unimportable*/true);
}
-void Module::markUnavailable(bool MissingRequirement) {
- auto needUpdate = [MissingRequirement](Module *M) {
- return M->IsAvailable || (!M->IsMissingRequirement && MissingRequirement);
+void Module::markUnavailable(bool Unimportable) {
+ auto needUpdate = [Unimportable](Module *M) {
+ return M->IsAvailable || (!M->IsUnimportable && Unimportable);
};
if (!needUpdate(this))
@@ -303,7 +320,7 @@ void Module::markUnavailable(bool MissingRequirement) {
continue;
Current->IsAvailable = false;
- Current->IsMissingRequirement |= MissingRequirement;
+ Current->IsUnimportable |= Unimportable;
for (submodule_iterator Sub = Current->submodule_begin(),
SubEnd = Current->submodule_end();
Sub != SubEnd; ++Sub) {
@@ -637,8 +654,8 @@ void VisibleModuleSet::setVisible(Module *M, SourceLocation Loc,
SmallVector<Module *, 16> Exports;
V.M->getExportedModules(Exports);
for (Module *E : Exports) {
- // Don't recurse to unavailable submodules.
- if (E->isAvailable())
+ // Don't import non-importable modules.
+ if (!E->isUnimportable())
VisitModule({E, &V});
}
@@ -653,3 +670,18 @@ void VisibleModuleSet::setVisible(Module *M, SourceLocation Loc,
};
VisitModule({M, nullptr});
}
+
+ASTSourceDescriptor::ASTSourceDescriptor(Module &M)
+ : Signature(M.Signature), ClangModule(&M) {
+ if (M.Directory)
+ Path = M.Directory->getName();
+ if (auto *File = M.getASTFile())
+ ASTFile = File->getName();
+}
+
+std::string ASTSourceDescriptor::getModuleName() const {
+ if (ClangModule)
+ return ClangModule->Name;
+ else
+ return std::string(PCHModuleName);
+}
diff --git a/clang/lib/Basic/OpenMPKinds.cpp b/clang/lib/Basic/OpenMPKinds.cpp
index 414ebb52c0c7..cae61ad4f2e3 100644
--- a/clang/lib/Basic/OpenMPKinds.cpp
+++ b/clang/lib/Basic/OpenMPKinds.cpp
@@ -20,94 +20,14 @@
using namespace clang;
using namespace llvm::omp;
-OpenMPContextSelectorSetKind
-clang::getOpenMPContextSelectorSet(llvm::StringRef Str) {
- return llvm::StringSwitch<OpenMPContextSelectorSetKind>(Str)
-#define OPENMP_CONTEXT_SELECTOR_SET(Name) .Case(#Name, OMP_CTX_SET_##Name)
-#include "clang/Basic/OpenMPKinds.def"
- .Default(OMP_CTX_SET_unknown);
-}
-
-llvm::StringRef
-clang::getOpenMPContextSelectorSetName(OpenMPContextSelectorSetKind Kind) {
- switch (Kind) {
- case OMP_CTX_SET_unknown:
- return "unknown";
-#define OPENMP_CONTEXT_SELECTOR_SET(Name) \
- case OMP_CTX_SET_##Name: \
- return #Name;
-#include "clang/Basic/OpenMPKinds.def"
- break;
- }
- llvm_unreachable("Invalid OpenMP context selector set kind");
-}
-
-OpenMPContextSelectorKind clang::getOpenMPContextSelector(llvm::StringRef Str) {
- return llvm::StringSwitch<OpenMPContextSelectorKind>(Str)
-#define OPENMP_CONTEXT_SELECTOR(Name) .Case(#Name, OMP_CTX_##Name)
-#include "clang/Basic/OpenMPKinds.def"
- .Default(OMP_CTX_unknown);
-}
-
-llvm::StringRef
-clang::getOpenMPContextSelectorName(OpenMPContextSelectorKind Kind) {
- switch (Kind) {
- case OMP_CTX_unknown:
- return "unknown";
-#define OPENMP_CONTEXT_SELECTOR(Name) \
- case OMP_CTX_##Name: \
- return #Name;
-#include "clang/Basic/OpenMPKinds.def"
- break;
- }
- llvm_unreachable("Invalid OpenMP context selector kind");
-}
-
-OpenMPClauseKind clang::getOpenMPClauseKind(StringRef Str) {
- // 'flush' clause cannot be specified explicitly, because this is an implicit
- // clause for 'flush' directive. If the 'flush' clause is explicitly specified
- // the Parser should generate a warning about extra tokens at the end of the
- // directive.
- if (Str == "flush")
- return OMPC_unknown;
- return llvm::StringSwitch<OpenMPClauseKind>(Str)
-#define OPENMP_CLAUSE(Name, Class) .Case(#Name, OMPC_##Name)
-#include "clang/Basic/OpenMPKinds.def"
- .Case("uniform", OMPC_uniform)
- .Case("device_type", OMPC_device_type)
- .Case("match", OMPC_match)
- .Default(OMPC_unknown);
-}
-
-const char *clang::getOpenMPClauseName(OpenMPClauseKind Kind) {
- assert(Kind <= OMPC_unknown);
- switch (Kind) {
- case OMPC_unknown:
- return "unknown";
-#define OPENMP_CLAUSE(Name, Class) \
- case OMPC_##Name: \
- return #Name;
-#include "clang/Basic/OpenMPKinds.def"
- case OMPC_uniform:
- return "uniform";
- case OMPC_threadprivate:
- return "threadprivate or thread local";
- case OMPC_device_type:
- return "device_type";
- case OMPC_match:
- return "match";
- }
- llvm_unreachable("Invalid OpenMP clause kind");
-}
-
unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
StringRef Str) {
switch (Kind) {
case OMPC_default:
- return llvm::StringSwitch<OpenMPDefaultClauseKind>(Str)
-#define OPENMP_DEFAULT_KIND(Name) .Case(#Name, OMPC_DEFAULT_##Name)
-#include "clang/Basic/OpenMPKinds.def"
- .Default(OMPC_DEFAULT_unknown);
+ return llvm::StringSwitch<unsigned>(Str)
+#define OMP_DEFAULT_KIND(Enum, Name) .Case(Name, unsigned(Enum))
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+ .Default(unsigned(llvm::omp::OMP_DEFAULT_unknown));
case OMPC_proc_bind:
return llvm::StringSwitch<unsigned>(Str)
#define OMP_PROC_BIND_KIND(Enum, Name, Value) .Case(Name, Value)
@@ -180,6 +100,26 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
#define OPENMP_LASTPRIVATE_KIND(Name) .Case(#Name, OMPC_LASTPRIVATE_##Name)
#include "clang/Basic/OpenMPKinds.def"
.Default(OMPC_LASTPRIVATE_unknown);
+ case OMPC_order:
+ return llvm::StringSwitch<OpenMPOrderClauseKind>(Str)
+#define OPENMP_ORDER_KIND(Name) .Case(#Name, OMPC_ORDER_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_ORDER_unknown);
+ case OMPC_update:
+ return llvm::StringSwitch<OpenMPDependClauseKind>(Str)
+#define OPENMP_DEPEND_KIND(Name) .Case(#Name, OMPC_DEPEND_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_DEPEND_unknown);
+ case OMPC_device:
+ return llvm::StringSwitch<OpenMPDeviceClauseModifier>(Str)
+#define OPENMP_DEVICE_MODIFIER(Name) .Case(#Name, OMPC_DEVICE_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_DEVICE_unknown);
+ case OMPC_reduction:
+ return llvm::StringSwitch<OpenMPReductionClauseModifier>(Str)
+#define OPENMP_REDUCTION_MODIFIER(Name) .Case(#Name, OMPC_REDUCTION_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_REDUCTION_unknown);
case OMPC_unknown:
case OMPC_threadprivate:
case OMPC_if:
@@ -193,7 +133,6 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
case OMPC_private:
case OMPC_firstprivate:
case OMPC_shared:
- case OMPC_reduction:
case OMPC_task_reduction:
case OMPC_in_reduction:
case OMPC_aligned:
@@ -204,12 +143,15 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
case OMPC_untied:
case OMPC_mergeable:
case OMPC_flush:
+ case OMPC_depobj:
case OMPC_read:
case OMPC_write:
- case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
- case OMPC_device:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed:
case OMPC_threads:
case OMPC_simd:
case OMPC_num_teams:
@@ -221,6 +163,7 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
case OMPC_hint:
case OMPC_uniform:
case OMPC_use_device_ptr:
+ case OMPC_use_device_addr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
@@ -228,6 +171,14 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
case OMPC_dynamic_allocators:
case OMPC_match:
case OMPC_nontemporal:
+ case OMPC_destroy:
+ case OMPC_detach:
+ case OMPC_inclusive:
+ case OMPC_exclusive:
+ case OMPC_uses_allocators:
+ case OMPC_affinity:
+ break;
+ default:
break;
}
llvm_unreachable("Invalid OpenMP simple clause kind");
@@ -237,13 +188,11 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
unsigned Type) {
switch (Kind) {
case OMPC_default:
- switch (Type) {
- case OMPC_DEFAULT_unknown:
- return "unknown";
-#define OPENMP_DEFAULT_KIND(Name) \
- case OMPC_DEFAULT_##Name: \
- return #Name;
-#include "clang/Basic/OpenMPKinds.def"
+ switch (llvm::omp::DefaultKind(Type)) {
+#define OMP_DEFAULT_KIND(Enum, Name) \
+ case Enum: \
+ return Name;
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
}
llvm_unreachable("Invalid OpenMP 'default' clause type");
case OMPC_proc_bind:
@@ -382,6 +331,46 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
#include "clang/Basic/OpenMPKinds.def"
}
llvm_unreachable("Invalid OpenMP 'lastprivate' clause type");
+ case OMPC_order:
+ switch (Type) {
+ case OMPC_ORDER_unknown:
+ return "unknown";
+#define OPENMP_ORDER_KIND(Name) \
+ case OMPC_ORDER_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("Invalid OpenMP 'order' clause type");
+ case OMPC_update:
+ switch (Type) {
+ case OMPC_DEPEND_unknown:
+ return "unknown";
+#define OPENMP_DEPEND_KIND(Name) \
+ case OMPC_DEPEND_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("Invalid OpenMP 'depend' clause type");
+ case OMPC_device:
+ switch (Type) {
+ case OMPC_DEVICE_unknown:
+ return "unknown";
+#define OPENMP_DEVICE_MODIFIER(Name) \
+ case OMPC_DEVICE_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("Invalid OpenMP 'device' clause modifier");
+ case OMPC_reduction:
+ switch (Type) {
+ case OMPC_REDUCTION_unknown:
+ return "unknown";
+#define OPENMP_REDUCTION_MODIFIER(Name) \
+ case OMPC_REDUCTION_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("Invalid OpenMP 'reduction' clause modifier");
case OMPC_unknown:
case OMPC_threadprivate:
case OMPC_if:
@@ -395,7 +384,6 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
case OMPC_private:
case OMPC_firstprivate:
case OMPC_shared:
- case OMPC_reduction:
case OMPC_task_reduction:
case OMPC_in_reduction:
case OMPC_aligned:
@@ -406,12 +394,15 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
case OMPC_untied:
case OMPC_mergeable:
case OMPC_flush:
+ case OMPC_depobj:
case OMPC_read:
case OMPC_write:
- case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
- case OMPC_device:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed:
case OMPC_threads:
case OMPC_simd:
case OMPC_num_teams:
@@ -423,6 +414,7 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
case OMPC_hint:
case OMPC_uniform:
case OMPC_use_device_ptr:
+ case OMPC_use_device_addr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
@@ -430,536 +422,17 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
case OMPC_dynamic_allocators:
case OMPC_match:
case OMPC_nontemporal:
+ case OMPC_destroy:
+ case OMPC_detach:
+ case OMPC_inclusive:
+ case OMPC_exclusive:
+ case OMPC_uses_allocators:
+ case OMPC_affinity:
break;
- }
- llvm_unreachable("Invalid OpenMP simple clause kind");
-}
-
-bool clang::isAllowedClauseForDirective(OpenMPDirectiveKind DKind,
- OpenMPClauseKind CKind,
- unsigned OpenMPVersion) {
- assert(unsigned(DKind) <= unsigned(OMPD_unknown));
- assert(CKind <= OMPC_unknown);
- // Nontemporal clause is not supported in OpenMP < 5.0.
- if (OpenMPVersion < 50 && CKind == OMPC_nontemporal)
- return false;
- switch (DKind) {
- case OMPD_parallel:
- switch (CKind) {
-#define OPENMP_PARALLEL_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_simd:
- if (OpenMPVersion < 50 && CKind == OMPC_if)
- return false;
- switch (CKind) {
-#define OPENMP_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_for:
- switch (CKind) {
-#define OPENMP_FOR_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_for_simd:
- if (OpenMPVersion < 50 && CKind == OMPC_if)
- return false;
- switch (CKind) {
-#define OPENMP_FOR_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_sections:
- switch (CKind) {
-#define OPENMP_SECTIONS_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_single:
- switch (CKind) {
-#define OPENMP_SINGLE_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_parallel_for:
- switch (CKind) {
-#define OPENMP_PARALLEL_FOR_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_parallel_for_simd:
- switch (CKind) {
-#define OPENMP_PARALLEL_FOR_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_parallel_master:
- switch (CKind) {
-#define OPENMP_PARALLEL_MASTER_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_parallel_sections:
- switch (CKind) {
-#define OPENMP_PARALLEL_SECTIONS_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_task:
- switch (CKind) {
-#define OPENMP_TASK_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_flush:
- return CKind == OMPC_flush;
- break;
- case OMPD_atomic:
- switch (CKind) {
-#define OPENMP_ATOMIC_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target:
- switch (CKind) {
-#define OPENMP_TARGET_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_requires:
- switch (CKind) {
-#define OPENMP_REQUIRES_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_data:
- switch (CKind) {
-#define OPENMP_TARGET_DATA_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_enter_data:
- switch (CKind) {
-#define OPENMP_TARGET_ENTER_DATA_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_exit_data:
- switch (CKind) {
-#define OPENMP_TARGET_EXIT_DATA_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_parallel:
- switch (CKind) {
-#define OPENMP_TARGET_PARALLEL_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_parallel_for:
- switch (CKind) {
-#define OPENMP_TARGET_PARALLEL_FOR_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_update:
- switch (CKind) {
-#define OPENMP_TARGET_UPDATE_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_teams:
- switch (CKind) {
-#define OPENMP_TEAMS_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_cancel:
- switch (CKind) {
-#define OPENMP_CANCEL_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_ordered:
- switch (CKind) {
-#define OPENMP_ORDERED_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_taskloop:
- switch (CKind) {
-#define OPENMP_TASKLOOP_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_taskloop_simd:
- switch (CKind) {
-#define OPENMP_TASKLOOP_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_master_taskloop:
- switch (CKind) {
-#define OPENMP_MASTER_TASKLOOP_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_master_taskloop_simd:
- switch (CKind) {
-#define OPENMP_MASTER_TASKLOOP_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_parallel_master_taskloop:
- switch (CKind) {
-#define OPENMP_PARALLEL_MASTER_TASKLOOP_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_parallel_master_taskloop_simd:
- switch (CKind) {
-#define OPENMP_PARALLEL_MASTER_TASKLOOP_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_critical:
- switch (CKind) {
-#define OPENMP_CRITICAL_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_distribute:
- switch (CKind) {
-#define OPENMP_DISTRIBUTE_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_distribute_parallel_for:
- switch (CKind) {
-#define OPENMP_DISTRIBUTE_PARALLEL_FOR_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_distribute_parallel_for_simd:
- switch (CKind) {
-#define OPENMP_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_distribute_simd:
- if (OpenMPVersion < 50 && CKind == OMPC_if)
- return false;
- switch (CKind) {
-#define OPENMP_DISTRIBUTE_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_parallel_for_simd:
- switch (CKind) {
-#define OPENMP_TARGET_PARALLEL_FOR_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_simd:
- switch (CKind) {
-#define OPENMP_TARGET_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_teams_distribute:
- switch (CKind) {
-#define OPENMP_TEAMS_DISTRIBUTE_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_teams_distribute_simd:
- if (OpenMPVersion < 50 && CKind == OMPC_if)
- return false;
- switch (CKind) {
-#define OPENMP_TEAMS_DISTRIBUTE_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_teams_distribute_parallel_for_simd:
- switch (CKind) {
-#define OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_teams_distribute_parallel_for:
- switch (CKind) {
-#define OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_teams:
- switch (CKind) {
-#define OPENMP_TARGET_TEAMS_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_teams_distribute:
- switch (CKind) {
-#define OPENMP_TARGET_TEAMS_DISTRIBUTE_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_teams_distribute_parallel_for:
- switch (CKind) {
-#define OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_teams_distribute_parallel_for_simd:
- switch (CKind) {
-#define OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_target_teams_distribute_simd:
- switch (CKind) {
-#define OPENMP_TARGET_TEAMS_DISTRIBUTE_SIMD_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_taskgroup:
- switch (CKind) {
-#define OPENMP_TASKGROUP_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_declare_mapper:
- switch (CKind) {
-#define OPENMP_DECLARE_MAPPER_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_allocate:
- switch (CKind) {
-#define OPENMP_ALLOCATE_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_declare_variant:
- switch (CKind) {
-#define OPENMP_DECLARE_VARIANT_CLAUSE(Name) \
- case OMPC_##Name: \
- return true;
-#include "clang/Basic/OpenMPKinds.def"
- default:
- break;
- }
- break;
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_unknown:
- case OMPD_threadprivate:
- case OMPD_section:
- case OMPD_master:
- case OMPD_taskyield:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_cancellation_point:
- case OMPD_declare_reduction:
- case OMPD_declare_simd:
+ default:
break;
}
- return false;
+ llvm_unreachable("Invalid OpenMP simple clause kind");
}
bool clang::isOpenMPLoopDirective(OpenMPDirectiveKind DKind) {
@@ -1111,7 +584,7 @@ bool clang::isOpenMPLoopBoundSharingDirective(OpenMPDirectiveKind Kind) {
void clang::getOpenMPCaptureRegions(
SmallVectorImpl<OpenMPDirectiveKind> &CaptureRegions,
OpenMPDirectiveKind DKind) {
- assert(DKind <= OMPD_unknown);
+ assert(unsigned(DKind) < llvm::omp::Directive_enumSize);
switch (DKind) {
case OMPD_parallel:
case OMPD_parallel_for:
@@ -1199,6 +672,8 @@ void clang::getOpenMPCaptureRegions(
case OMPD_cancellation_point:
case OMPD_cancel:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_declare_simd:
@@ -1206,8 +681,11 @@ void clang::getOpenMPCaptureRegions(
case OMPD_end_declare_target:
case OMPD_requires:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
llvm_unreachable("OpenMP Directive is not allowed");
case OMPD_unknown:
+ default:
llvm_unreachable("Unknown OpenMP directive");
}
}
diff --git a/clang/lib/Basic/SanitizerBlacklist.cpp b/clang/lib/Basic/SanitizerBlacklist.cpp
index 4f71349350fd..feb7cbda39b7 100644
--- a/clang/lib/Basic/SanitizerBlacklist.cpp
+++ b/clang/lib/Basic/SanitizerBlacklist.cpp
@@ -10,7 +10,12 @@
// sanitizers.
//
//===----------------------------------------------------------------------===//
+
#include "clang/Basic/SanitizerBlacklist.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SanitizerSpecialCaseList.h"
+#include "clang/Basic/Sanitizers.h"
+#include "clang/Basic/SourceManager.h"
using namespace clang;
@@ -20,6 +25,8 @@ SanitizerBlacklist::SanitizerBlacklist(
BlacklistPaths, SM.getFileManager().getVirtualFileSystem())),
SM(SM) {}
+SanitizerBlacklist::~SanitizerBlacklist() = default;
+
bool SanitizerBlacklist::isBlacklistedGlobal(SanitizerMask Mask,
StringRef GlobalName,
StringRef Category) const {
diff --git a/clang/lib/Basic/SourceManager.cpp b/clang/lib/Basic/SourceManager.cpp
index 73f2ae96d4a3..0a76c78cd44f 100644
--- a/clang/lib/Basic/SourceManager.cpp
+++ b/clang/lib/Basic/SourceManager.cpp
@@ -17,12 +17,12 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManagerInternals.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/Compiler.h"
@@ -389,6 +389,14 @@ void SourceManager::clearIDTables() {
createExpansionLoc(SourceLocation(), SourceLocation(), SourceLocation(), 1);
}
+bool SourceManager::isMainFile(FileEntryRef SourceFile) {
+ assert(MainFileID.isValid() && "expected initialized SourceManager");
+ auto FE = getFileEntryRefForID(MainFileID);
+ if (!FE)
+ return false;
+ return FE->getUID() == SourceFile.getUID();
+}
+
void SourceManager::initializeForReplay(const SourceManager &Old) {
assert(MainFileID.isInvalid() && "expected uninitialized SourceManager");
@@ -560,6 +568,70 @@ FileID SourceManager::getNextFileID(FileID FID) const {
// Methods to create new FileID's and macro expansions.
//===----------------------------------------------------------------------===//
+/// Create a new FileID that represents the specified file
+/// being \#included from the specified IncludePosition.
+///
+/// This translates NULL into standard input.
+FileID SourceManager::createFileID(const FileEntry *SourceFile,
+ SourceLocation IncludePos,
+ SrcMgr::CharacteristicKind FileCharacter,
+ int LoadedID, unsigned LoadedOffset) {
+ assert(SourceFile && "Null source file!");
+ const SrcMgr::ContentCache *IR =
+ getOrCreateContentCache(SourceFile, isSystem(FileCharacter));
+ assert(IR && "getOrCreateContentCache() cannot return NULL");
+ return createFileID(IR, SourceFile->getName(), IncludePos, FileCharacter,
+ LoadedID, LoadedOffset);
+}
+
+FileID SourceManager::createFileID(FileEntryRef SourceFile,
+ SourceLocation IncludePos,
+ SrcMgr::CharacteristicKind FileCharacter,
+ int LoadedID, unsigned LoadedOffset) {
+ const SrcMgr::ContentCache *IR = getOrCreateContentCache(
+ &SourceFile.getFileEntry(), isSystem(FileCharacter));
+ assert(IR && "getOrCreateContentCache() cannot return NULL");
+ return createFileID(IR, SourceFile.getName(), IncludePos, FileCharacter,
+ LoadedID, LoadedOffset);
+}
+
+/// Create a new FileID that represents the specified memory buffer.
+///
+/// This does no caching of the buffer and takes ownership of the
+/// MemoryBuffer, so only pass a MemoryBuffer to this once.
+FileID SourceManager::createFileID(std::unique_ptr<llvm::MemoryBuffer> Buffer,
+ SrcMgr::CharacteristicKind FileCharacter,
+ int LoadedID, unsigned LoadedOffset,
+ SourceLocation IncludeLoc) {
+ StringRef Name = Buffer->getBufferIdentifier();
+ return createFileID(
+ createMemBufferContentCache(Buffer.release(), /*DoNotFree*/ false),
+ Name, IncludeLoc, FileCharacter, LoadedID, LoadedOffset);
+}
+
+/// Create a new FileID that represents the specified memory buffer.
+///
+/// This does not take ownership of the MemoryBuffer. The memory buffer must
+/// outlive the SourceManager.
+FileID SourceManager::createFileID(UnownedTag, const llvm::MemoryBuffer *Buffer,
+ SrcMgr::CharacteristicKind FileCharacter,
+ int LoadedID, unsigned LoadedOffset,
+ SourceLocation IncludeLoc) {
+ return createFileID(createMemBufferContentCache(Buffer, /*DoNotFree*/ true),
+ Buffer->getBufferIdentifier(), IncludeLoc,
+ FileCharacter, LoadedID, LoadedOffset);
+}
+
+/// Get the FileID for \p SourceFile if it exists. Otherwise, create a
+/// new FileID for the \p SourceFile.
+FileID
+SourceManager::getOrCreateFileID(const FileEntry *SourceFile,
+ SrcMgr::CharacteristicKind FileCharacter) {
+ FileID ID = translateFile(SourceFile);
+ return ID.isValid() ? ID : createFileID(SourceFile, SourceLocation(),
+ FileCharacter);
+}
+
/// createFileID - Create a new FileID for the specified ContentCache and
/// include position. This works regardless of whether the ContentCache
/// corresponds to a file or some other input source.
@@ -577,13 +649,15 @@ FileID SourceManager::createFileID(const ContentCache *File, StringRef Filename,
SLocEntryLoaded[Index] = true;
return FileID::get(LoadedID);
}
+ unsigned FileSize = File->getSize();
+ if (!(NextLocalOffset + FileSize + 1 > NextLocalOffset &&
+ NextLocalOffset + FileSize + 1 <= CurrentLoadedOffset)) {
+ Diag.Report(IncludePos, diag::err_include_too_large);
+ return FileID();
+ }
LocalSLocEntryTable.push_back(
SLocEntry::get(NextLocalOffset,
FileInfo::get(IncludePos, File, FileCharacter, Filename)));
- unsigned FileSize = File->getSize();
- assert(NextLocalOffset + FileSize + 1 > NextLocalOffset &&
- NextLocalOffset + FileSize + 1 <= CurrentLoadedOffset &&
- "Ran out of source locations!");
// We do a +1 here because we want a SourceLocation that means "the end of the
// file", e.g. for the "no newline at the end of the file" diagnostic.
NextLocalOffset += FileSize + 1;
@@ -699,6 +773,18 @@ void SourceManager::setFileIsTransient(const FileEntry *File) {
const_cast<SrcMgr::ContentCache *>(CC)->IsTransient = true;
}
+Optional<FileEntryRef> SourceManager::getFileEntryRefForID(FileID FID) const {
+ bool Invalid = false;
+ const SrcMgr::SLocEntry &Entry = getSLocEntry(FID, &Invalid);
+ if (Invalid || !Entry.isFile())
+ return None;
+
+ const SrcMgr::ContentCache *Content = Entry.getFile().getContentCache();
+ if (!Content || !Content->OrigEntry)
+ return None;
+ return FileEntryRef(Entry.getFile().getName(), *Content->OrigEntry);
+}
+
StringRef SourceManager::getBufferData(FileID FID, bool *Invalid) const {
bool MyInvalid = false;
const SLocEntry &SLoc = getSLocEntry(FID, &MyInvalid);
@@ -775,11 +861,8 @@ FileID SourceManager::getFileIDLocal(unsigned SLocOffset) const {
--I;
if (I->getOffset() <= SLocOffset) {
FileID Res = FileID::get(int(I - LocalSLocEntryTable.begin()));
-
- // If this isn't an expansion, remember it. We have good locality across
- // FileID lookups.
- if (!I->isExpansion())
- LastFileIDLookup = Res;
+ // Remember it. We have good locality across FileID lookups.
+ LastFileIDLookup = Res;
NumLinearScans += NumProbes+1;
return Res;
}
@@ -796,11 +879,8 @@ FileID SourceManager::getFileIDLocal(unsigned SLocOffset) const {
unsigned LessIndex = 0;
NumProbes = 0;
while (true) {
- bool Invalid = false;
unsigned MiddleIndex = (GreaterIndex-LessIndex)/2+LessIndex;
- unsigned MidOffset = getLocalSLocEntry(MiddleIndex, &Invalid).getOffset();
- if (Invalid)
- return FileID::get(0);
+ unsigned MidOffset = getLocalSLocEntry(MiddleIndex).getOffset();
++NumProbes;
@@ -812,15 +892,12 @@ FileID SourceManager::getFileIDLocal(unsigned SLocOffset) const {
}
// If the middle index contains the value, succeed and return.
- // FIXME: This could be made faster by using a function that's aware of
- // being in the local area.
- if (isOffsetInFileID(FileID::get(MiddleIndex), SLocOffset)) {
+ if (MiddleIndex + 1 == LocalSLocEntryTable.size() ||
+ SLocOffset < getLocalSLocEntry(MiddleIndex + 1).getOffset()) {
FileID Res = FileID::get(MiddleIndex);
- // If this isn't a macro expansion, remember it. We have good locality
- // across FileID lookups.
- if (!LocalSLocEntryTable[MiddleIndex].isExpansion())
- LastFileIDLookup = Res;
+ // Remember it. We have good locality across FileID lookups.
+ LastFileIDLookup = Res;
NumBinaryProbes += NumProbes;
return Res;
}
@@ -858,9 +935,7 @@ FileID SourceManager::getFileIDLoaded(unsigned SLocOffset) const {
const SrcMgr::SLocEntry &E = getLoadedSLocEntry(I);
if (E.getOffset() <= SLocOffset) {
FileID Res = FileID::get(-int(I) - 2);
-
- if (!E.isExpansion())
- LastFileIDLookup = Res;
+ LastFileIDLookup = Res;
NumLinearScans += NumProbes + 1;
return Res;
}
@@ -893,8 +968,7 @@ FileID SourceManager::getFileIDLoaded(unsigned SLocOffset) const {
if (isOffsetInFileID(FileID::get(-int(MiddleIndex) - 2), SLocOffset)) {
FileID Res = FileID::get(-int(MiddleIndex) - 2);
- if (!E.isExpansion())
- LastFileIDLookup = Res;
+ LastFileIDLookup = Res;
NumBinaryProbes += NumProbes;
return Res;
}
@@ -990,6 +1064,13 @@ SourceLocation SourceManager::getImmediateSpellingLoc(SourceLocation Loc) const{
return Loc.getLocWithOffset(LocInfo.second);
}
+/// Return the filename of the file containing a SourceLocation.
+StringRef SourceManager::getFilename(SourceLocation SpellingLoc) const {
+ if (const FileEntry *F = getFileEntryForID(getFileID(SpellingLoc)))
+ return F->getName();
+ return StringRef();
+}
+
/// getImmediateExpansionRange - Loc is required to be an expansion location.
/// Return the start/end of the expansion information.
CharSourceRange
@@ -1602,11 +1683,7 @@ FileID SourceManager::translateFile(const FileEntry *SourceFile) const {
// The location we're looking for isn't in the main file; look
// through all of the local source locations.
for (unsigned I = 0, N = local_sloc_entry_size(); I != N; ++I) {
- bool Invalid = false;
- const SLocEntry &SLoc = getLocalSLocEntry(I, &Invalid);
- if (Invalid)
- return FileID();
-
+ const SLocEntry &SLoc = getLocalSLocEntry(I);
if (SLoc.isFile() && SLoc.getFile().getContentCache() &&
SLoc.getFile().getContentCache()->OrigEntry == SourceFile)
return FileID::get(I);
@@ -1715,15 +1792,23 @@ void SourceManager::computeMacroArgsCache(MacroArgsMap &MacroArgsCache,
return;
if (Entry.isFile()) {
SourceLocation IncludeLoc = Entry.getFile().getIncludeLoc();
- if (IncludeLoc.isInvalid())
+ bool IncludedInFID =
+ (IncludeLoc.isValid() && isInFileID(IncludeLoc, FID)) ||
+ // Predefined header doesn't have a valid include location in main
+ // file, but any files created by it should still be skipped when
+ // computing macro args expanded in the main file.
+ (FID == MainFileID && Entry.getFile().Filename == "<built-in>");
+ if (IncludedInFID) {
+ // Skip the files/macros of the #include'd file, we only care about
+ // macros that lexed macro arguments from our file.
+ if (Entry.getFile().NumCreatedFIDs)
+ ID += Entry.getFile().NumCreatedFIDs - 1 /*because of next ++ID*/;
continue;
- if (!isInFileID(IncludeLoc, FID))
- return; // No more files/macros that may be "contained" in this file.
-
- // Skip the files/macros of the #include'd file, we only care about macros
- // that lexed macro arguments from our file.
- if (Entry.getFile().NumCreatedFIDs)
- ID += Entry.getFile().NumCreatedFIDs - 1/*because of next ++ID*/;
+ } else if (IncludeLoc.isValid()) {
+      // If the file was included, but not from FID, there are no more
+      // files/macros that may be "contained" in this file.
+ return;
+ }
continue;
}
diff --git a/clang/lib/Basic/TargetInfo.cpp b/clang/lib/Basic/TargetInfo.cpp
index 3a21a19e1f19..eccdc21d724a 100644
--- a/clang/lib/Basic/TargetInfo.cpp
+++ b/clang/lib/Basic/TargetInfo.cpp
@@ -36,6 +36,8 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
HasLegalHalfType = false;
HasFloat128 = false;
HasFloat16 = false;
+ HasBFloat16 = false;
+ HasStrictFP = false;
PointerWidth = PointerAlign = 32;
BoolWidth = BoolAlign = 8;
IntWidth = IntAlign = 32;
@@ -113,6 +115,7 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
HasBuiltinMSVaList = false;
IsRenderScriptTarget = false;
HasAArch64SVETypes = false;
+ ARMCDECoprocMask = 0;
// Default to no types using fpret.
RealTypeUsesObjCFPRet = 0;
@@ -132,6 +135,8 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
// Default to an unknown platform name.
PlatformName = "unknown";
PlatformMinVersion = VersionTuple();
+
+ MaxOpenCLWorkGroupSize = 1024;
}
// Out of line virtual dtor for TargetInfo.
@@ -262,7 +267,8 @@ TargetInfo::IntType TargetInfo::getLeastIntTypeByWidth(unsigned BitWidth,
return NoInt;
}
-TargetInfo::RealType TargetInfo::getRealTypeByWidth(unsigned BitWidth) const {
+TargetInfo::RealType TargetInfo::getRealTypeByWidth(unsigned BitWidth,
+ bool ExplicitIEEE) const {
if (getFloatWidth() == BitWidth)
return Float;
if (getDoubleWidth() == BitWidth)
@@ -274,6 +280,10 @@ TargetInfo::RealType TargetInfo::getRealTypeByWidth(unsigned BitWidth) const {
return LongDouble;
break;
case 128:
+ // The caller explicitly asked for an IEEE compliant type but we still
+ // have to check if the target supports it.
+ if (ExplicitIEEE)
+ return hasFloat128Type() ? Float128 : NoFloat;
if (&getLongDoubleFormat() == &llvm::APFloat::PPCDoubleDouble() ||
&getLongDoubleFormat() == &llvm::APFloat::IEEEquad())
return LongDouble;
@@ -379,6 +389,20 @@ void TargetInfo::adjust(LangOptions &Opts) {
LongDoubleFormat = &llvm::APFloat::IEEEquad();
}
+ if (Opts.DoubleSize) {
+ if (Opts.DoubleSize == 32) {
+ DoubleWidth = 32;
+ LongDoubleWidth = 32;
+ DoubleFormat = &llvm::APFloat::IEEEsingle();
+ LongDoubleFormat = &llvm::APFloat::IEEEsingle();
+ } else if (Opts.DoubleSize == 64) {
+ DoubleWidth = 64;
+ LongDoubleWidth = 64;
+ DoubleFormat = &llvm::APFloat::IEEEdouble();
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+ }
+ }
+
if (Opts.LongDoubleSize) {
if (Opts.LongDoubleSize == DoubleWidth) {
LongDoubleWidth = DoubleWidth;
diff --git a/clang/lib/Basic/Targets.cpp b/clang/lib/Basic/Targets.cpp
index c063f8ca4472..6bbcafa27dfe 100644
--- a/clang/lib/Basic/Targets.cpp
+++ b/clang/lib/Basic/Targets.cpp
@@ -33,6 +33,7 @@
#include "Targets/Sparc.h"
#include "Targets/SystemZ.h"
#include "Targets/TCE.h"
+#include "Targets/VE.h"
#include "Targets/WebAssembly.h"
#include "Targets/X86.h"
#include "Targets/XCore.h"
@@ -117,6 +118,9 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new XCoreTargetInfo(Triple, Opts);
case llvm::Triple::hexagon:
+ if (os == llvm::Triple::Linux &&
+ Triple.getEnvironment() == llvm::Triple::Musl)
+ return new LinuxTargetInfo<HexagonTargetInfo>(Triple, Opts);
return new HexagonTargetInfo(Triple, Opts);
case llvm::Triple::lanai:
@@ -477,6 +481,8 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new OpenBSDI386TargetInfo(Triple, Opts);
case llvm::Triple::FreeBSD:
return new FreeBSDTargetInfo<X86_32TargetInfo>(Triple, Opts);
+ case llvm::Triple::Fuchsia:
+ return new FuchsiaTargetInfo<X86_32TargetInfo>(Triple, Opts);
case llvm::Triple::KFreeBSD:
return new KFreeBSDTargetInfo<X86_32TargetInfo>(Triple, Opts);
case llvm::Triple::Minix:
@@ -608,6 +614,9 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new LinuxTargetInfo<RenderScript32TargetInfo>(Triple, Opts);
case llvm::Triple::renderscript64:
return new LinuxTargetInfo<RenderScript64TargetInfo>(Triple, Opts);
+
+ case llvm::Triple::ve:
+ return new LinuxTargetInfo<VETargetInfo>(Triple, Opts);
}
}
} // namespace targets
diff --git a/clang/lib/Basic/Targets/AArch64.cpp b/clang/lib/Basic/Targets/AArch64.cpp
index cba3e3ada7ea..25c02cb888c1 100644
--- a/clang/lib/Basic/Targets/AArch64.cpp
+++ b/clang/lib/Basic/Targets/AArch64.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "AArch64.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
@@ -28,6 +29,10 @@ const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
{#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+#include "clang/Basic/BuiltinsSVE.def"
+
+#define BUILTIN(ID, TYPE, ATTRS) \
+ {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG) \
{#ID, TYPE, ATTRS, nullptr, LANG, nullptr},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE) \
@@ -65,6 +70,9 @@ AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
LongDoubleFormat = &llvm::APFloat::IEEEquad();
+ BFloat16Width = BFloat16Align = 16;
+ BFloat16Format = &llvm::APFloat::BFloat();
+
// Make __builtin_ms_va_list available.
HasBuiltinMSVaList = true;
@@ -117,15 +125,15 @@ bool AArch64TargetInfo::validateBranchProtection(StringRef Spec,
return false;
BPI.SignReturnAddr =
- llvm::StringSwitch<CodeGenOptions::SignReturnAddressScope>(PBP.Scope)
- .Case("non-leaf", CodeGenOptions::SignReturnAddressScope::NonLeaf)
- .Case("all", CodeGenOptions::SignReturnAddressScope::All)
- .Default(CodeGenOptions::SignReturnAddressScope::None);
+ llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
+ .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
+ .Case("all", LangOptions::SignReturnAddressScopeKind::All)
+ .Default(LangOptions::SignReturnAddressScopeKind::None);
if (PBP.Key == "a_key")
- BPI.SignKey = CodeGenOptions::SignReturnAddressKeyValue::AKey;
+ BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
else
- BPI.SignKey = CodeGenOptions::SignReturnAddressKeyValue::BKey;
+ BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;
BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
return true;
@@ -147,6 +155,7 @@ void AArch64TargetInfo::fillValidCPUList(
void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
MacroBuilder &Builder) const {
+ // FIXME: Armv8.1 makes __ARM_FEATURE_CRC32 mandatory. Handle it here.
Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
}
@@ -167,17 +176,26 @@ void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Also include the Armv8.3 defines
- // FIXME: Armv8.4 makes some extensions mandatory. Handle them here.
+ // FIXME: Armv8.4 makes __ARM_FEATURE_ATOMICS, defined in GCC, mandatory.
+ // Add and handle it here.
getTargetDefinesARMV83A(Opts, Builder);
}
void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Also include the Armv8.4 defines
- // FIXME: Armv8.5 makes some extensions mandatory. Handle them here.
getTargetDefinesARMV84A(Opts, Builder);
}
+void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Also include the Armv8.5 defines
+ // FIXME: Armv8.6 makes the following extensions mandatory:
+ // - __ARM_FEATURE_BF16
+ // - __ARM_FEATURE_MATMUL_INT8
+ // Handle them here.
+ getTargetDefinesARMV85A(Opts, Builder);
+}
void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
@@ -194,6 +212,13 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__LP64__");
}
+ std::string CodeModel = getTargetOpts().CodeModel;
+ if (CodeModel == "default")
+ CodeModel = "small";
+ for (char &c : CodeModel)
+ c = toupper(c);
+ Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");
+
// ACLE predefines. Many can only have one possible value on v8 AArch64.
Builder.defineMacro("__ARM_ACLE", "200");
Builder.defineMacro("__ARM_ARCH", "8");
@@ -235,6 +260,24 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__ARM_NEON_FP", "0xE");
}
+ if (FPU & SveMode)
+ Builder.defineMacro("__ARM_FEATURE_SVE", "1");
+
+ if (HasSVE2)
+ Builder.defineMacro("__ARM_FEATURE_SVE2", "1");
+
+ if (HasSVE2 && HasSVE2AES)
+ Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");
+
+ if (HasSVE2 && HasSVE2BitPerm)
+ Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");
+
+ if (HasSVE2 && HasSVE2SHA3)
+ Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");
+
+ if (HasSVE2 && HasSVE2SM4)
+ Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");
+
if (HasCRC)
Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
@@ -258,9 +301,53 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasTME)
Builder.defineMacro("__ARM_FEATURE_TME", "1");
+ if (HasMatMul)
+ Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");
+
+ if (HasBFloat16) {
+ Builder.defineMacro("__ARM_FEATURE_BF16", "1");
+ Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
+ Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
+ Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
+ }
+
+ if ((FPU & SveMode) && HasBFloat16) {
+ Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
+ }
+
+ if ((FPU & SveMode) && HasMatmulFP64)
+ Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");
+
+ if ((FPU & SveMode) && HasMatmulFP32)
+ Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");
+
+ if ((FPU & SveMode) && HasMatMul)
+ Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");
+
if ((FPU & NeonMode) && HasFP16FML)
Builder.defineMacro("__ARM_FEATURE_FP16FML", "1");
+ if (Opts.hasSignReturnAddress()) {
+ // Bitmask:
+ // 0: Protection using the A key
+ // 1: Protection using the B key
+ // 2: Protection including leaf functions
+ unsigned Value = 0;
+
+ if (Opts.isSignReturnAddressWithAKey())
+ Value |= (1 << 0);
+ else
+ Value |= (1 << 1);
+
+ if (Opts.isSignReturnAddressScopeAll())
+ Value |= (1 << 2);
+
+ Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
+ }
+
+ if (Opts.BranchTargetEnforcement)
+ Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");
+
switch (ArchKind) {
default:
break;
@@ -279,6 +366,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
case llvm::AArch64::ArchKind::ARMV8_5A:
getTargetDefinesARMV85A(Opts, Builder);
break;
+ case llvm::AArch64::ArchKind::ARMV8_6A:
+ getTargetDefinesARMV86A(Opts, Builder);
+ break;
}
// All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
@@ -296,7 +386,11 @@ ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
return Feature == "aarch64" || Feature == "arm64" || Feature == "arm" ||
(Feature == "neon" && (FPU & NeonMode)) ||
- (Feature == "sve" && (FPU & SveMode));
+ ((Feature == "sve" || Feature == "sve2" || Feature == "sve2-bitperm" ||
+ Feature == "sve2-aes" || Feature == "sve2-sha3" ||
+ Feature == "sve2-sm4" || Feature == "f64mm" || Feature == "f32mm" ||
+ Feature == "i8mm" || Feature == "bf16") &&
+ (FPU & SveMode));
}
bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
@@ -310,13 +404,62 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasFP16FML = false;
HasMTE = false;
HasTME = false;
+ HasMatMul = false;
+ HasBFloat16 = false;
+ HasSVE2 = false;
+ HasSVE2AES = false;
+ HasSVE2SHA3 = false;
+ HasSVE2SM4 = false;
+ HasSVE2BitPerm = false;
+ HasMatmulFP64 = false;
+ HasMatmulFP32 = false;
+
ArchKind = llvm::AArch64::ArchKind::ARMV8A;
for (const auto &Feature : Features) {
if (Feature == "+neon")
FPU |= NeonMode;
- if (Feature == "+sve")
+ if (Feature == "+sve") {
FPU |= SveMode;
+ HasFullFP16 = 1;
+ }
+ if (Feature == "+sve2") {
+ FPU |= SveMode;
+ HasFullFP16 = 1;
+ HasSVE2 = 1;
+ }
+ if (Feature == "+sve2-aes") {
+ FPU |= SveMode;
+ HasFullFP16 = 1;
+ HasSVE2 = 1;
+ HasSVE2AES = 1;
+ }
+ if (Feature == "+sve2-sha3") {
+ FPU |= SveMode;
+ HasFullFP16 = 1;
+ HasSVE2 = 1;
+ HasSVE2SHA3 = 1;
+ }
+ if (Feature == "+sve2-sm4") {
+ FPU |= SveMode;
+ HasFullFP16 = 1;
+ HasSVE2 = 1;
+ HasSVE2SM4 = 1;
+ }
+ if (Feature == "+sve2-bitperm") {
+ FPU |= SveMode;
+ HasFullFP16 = 1;
+ HasSVE2 = 1;
+ HasSVE2BitPerm = 1;
+ }
+ if (Feature == "+f32mm") {
+ FPU |= SveMode;
+ HasMatmulFP32 = true;
+ }
+ if (Feature == "+f64mm") {
+ FPU |= SveMode;
+ HasMatmulFP64 = true;
+ }
if (Feature == "+crc")
HasCRC = true;
if (Feature == "+crypto")
@@ -333,6 +476,8 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
ArchKind = llvm::AArch64::ArchKind::ARMV8_4A;
if (Feature == "+v8.5a")
ArchKind = llvm::AArch64::ArchKind::ARMV8_5A;
+ if (Feature == "+v8.6a")
+ ArchKind = llvm::AArch64::ArchKind::ARMV8_6A;
if (Feature == "+fullfp16")
HasFullFP16 = true;
if (Feature == "+dotprod")
@@ -343,6 +488,10 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasMTE = true;
if (Feature == "+tme")
HasTME = true;
+ if (Feature == "+i8mm")
+ HasMatMul = true;
+ if (Feature == "+bf16")
+ HasBFloat16 = true;
}
setDataLayout();
@@ -479,17 +628,29 @@ bool AArch64TargetInfo::validateAsmConstraint(
Info.setAllowsRegister();
return true;
case 'U':
+ if (Name[1] == 'p' && (Name[2] == 'l' || Name[2] == 'a')) {
+ // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7)
+ Info.setAllowsRegister();
+ Name += 2;
+ return true;
+ }
// Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
// Utf: A memory address suitable for ldp/stp in TF mode.
// Usa: An absolute symbolic address.
// Ush: The high part (bits 32:12) of a pc-relative symbolic address.
- llvm_unreachable("FIXME: Unimplemented support for U* constraints.");
+
+ // Better to return an error saying that it's an unrecognised constraint
+ // even if this is a valid constraint in gcc.
+ return false;
case 'z': // Zero register, wzr or xzr
Info.setAllowsRegister();
return true;
case 'x': // Floating point and SIMD registers (V0-V15)
Info.setAllowsRegister();
return true;
+ case 'y': // SVE registers (V0-V7)
+ Info.setAllowsRegister();
+ return true;
}
return false;
}
diff --git a/clang/lib/Basic/Targets/AArch64.h b/clang/lib/Basic/Targets/AArch64.h
index 5e78237743c9..d1982897d84e 100644
--- a/clang/lib/Basic/Targets/AArch64.h
+++ b/clang/lib/Basic/Targets/AArch64.h
@@ -36,6 +36,14 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
bool HasFP16FML;
bool HasMTE;
bool HasTME;
+ bool HasMatMul;
+ bool HasSVE2;
+ bool HasSVE2AES;
+ bool HasSVE2SHA3;
+ bool HasSVE2SM4;
+ bool HasSVE2BitPerm;
+ bool HasMatmulFP64;
+ bool HasMatmulFP32;
llvm::AArch64::ArchKind ArchKind;
@@ -70,6 +78,8 @@ public:
MacroBuilder &Builder) const;
void getTargetDefinesARMV85A(const LangOptions &Opts,
MacroBuilder &Builder) const;
+ void getTargetDefinesARMV86A(const LangOptions &Opts,
+ MacroBuilder &Builder) const;
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
@@ -87,6 +97,21 @@ public:
ArrayRef<const char *> getGCCRegNames() const override;
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
+
+ std::string convertConstraint(const char *&Constraint) const override {
+ std::string R;
+ switch (*Constraint) {
+ case 'U': // Three-character constraint; add "@3" hint for later parsing.
+ R = std::string("@3") + std::string(Constraint, 3);
+ Constraint += 2;
+ break;
+ default:
+ R = TargetInfo::convertConstraint(Constraint);
+ break;
+ }
+ return R;
+ }
+
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &Info) const override;
bool
@@ -101,7 +126,10 @@ public:
int getEHDataRegisterNumber(unsigned RegNo) const override;
+ const char *getBFloat16Mangling() const override { return "u6__bf16"; };
bool hasInt128Type() const override;
+
+ bool hasExtIntType() const override { return true; }
};
class LLVM_LIBRARY_VISIBILITY AArch64leTargetInfo : public AArch64TargetInfo {
diff --git a/clang/lib/Basic/Targets/AMDGPU.cpp b/clang/lib/Basic/Targets/AMDGPU.cpp
index 135ad3f97ce1..db7db8d36d03 100644
--- a/clang/lib/Basic/Targets/AMDGPU.cpp
+++ b/clang/lib/Basic/Targets/AMDGPU.cpp
@@ -17,6 +17,7 @@
#include "clang/Basic/MacroBuilder.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Frontend/OpenMP/OMPGridValues.h"
#include "llvm/IR/DataLayout.h"
using namespace clang;
@@ -124,7 +125,36 @@ const char *const AMDGPUTargetInfo::GCCRegNames[] = {
"s113", "s114", "s115", "s116", "s117", "s118", "s119", "s120", "s121",
"s122", "s123", "s124", "s125", "s126", "s127", "exec", "vcc", "scc",
"m0", "flat_scratch", "exec_lo", "exec_hi", "vcc_lo", "vcc_hi",
- "flat_scratch_lo", "flat_scratch_hi"
+ "flat_scratch_lo", "flat_scratch_hi",
+ "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7", "a8",
+ "a9", "a10", "a11", "a12", "a13", "a14", "a15", "a16", "a17",
+ "a18", "a19", "a20", "a21", "a22", "a23", "a24", "a25", "a26",
+ "a27", "a28", "a29", "a30", "a31", "a32", "a33", "a34", "a35",
+ "a36", "a37", "a38", "a39", "a40", "a41", "a42", "a43", "a44",
+ "a45", "a46", "a47", "a48", "a49", "a50", "a51", "a52", "a53",
+ "a54", "a55", "a56", "a57", "a58", "a59", "a60", "a61", "a62",
+ "a63", "a64", "a65", "a66", "a67", "a68", "a69", "a70", "a71",
+ "a72", "a73", "a74", "a75", "a76", "a77", "a78", "a79", "a80",
+ "a81", "a82", "a83", "a84", "a85", "a86", "a87", "a88", "a89",
+ "a90", "a91", "a92", "a93", "a94", "a95", "a96", "a97", "a98",
+ "a99", "a100", "a101", "a102", "a103", "a104", "a105", "a106", "a107",
+ "a108", "a109", "a110", "a111", "a112", "a113", "a114", "a115", "a116",
+ "a117", "a118", "a119", "a120", "a121", "a122", "a123", "a124", "a125",
+ "a126", "a127", "a128", "a129", "a130", "a131", "a132", "a133", "a134",
+ "a135", "a136", "a137", "a138", "a139", "a140", "a141", "a142", "a143",
+ "a144", "a145", "a146", "a147", "a148", "a149", "a150", "a151", "a152",
+ "a153", "a154", "a155", "a156", "a157", "a158", "a159", "a160", "a161",
+ "a162", "a163", "a164", "a165", "a166", "a167", "a168", "a169", "a170",
+ "a171", "a172", "a173", "a174", "a175", "a176", "a177", "a178", "a179",
+ "a180", "a181", "a182", "a183", "a184", "a185", "a186", "a187", "a188",
+ "a189", "a190", "a191", "a192", "a193", "a194", "a195", "a196", "a197",
+ "a198", "a199", "a200", "a201", "a202", "a203", "a204", "a205", "a206",
+ "a207", "a208", "a209", "a210", "a211", "a212", "a213", "a214", "a215",
+ "a216", "a217", "a218", "a219", "a220", "a221", "a222", "a223", "a224",
+ "a225", "a226", "a227", "a228", "a229", "a230", "a231", "a232", "a233",
+ "a234", "a235", "a236", "a237", "a238", "a239", "a240", "a241", "a242",
+ "a243", "a244", "a245", "a246", "a247", "a248", "a249", "a250", "a251",
+ "a252", "a253", "a254", "a255"
};
ArrayRef<const char *> AMDGPUTargetInfo::getGCCRegNames() const {
@@ -140,6 +170,22 @@ bool AMDGPUTargetInfo::initFeatureMap(
// XXX - What does the member GPU mean if device name string passed here?
if (isAMDGCN(getTriple())) {
switch (llvm::AMDGPU::parseArchAMDGCN(CPU)) {
+ case GK_GFX1030:
+ Features["ci-insts"] = true;
+ Features["dot1-insts"] = true;
+ Features["dot2-insts"] = true;
+ Features["dot5-insts"] = true;
+ Features["dot6-insts"] = true;
+ Features["dl-insts"] = true;
+ Features["flat-address-space"] = true;
+ Features["16-bit-insts"] = true;
+ Features["dpp"] = true;
+ Features["gfx8-insts"] = true;
+ Features["gfx9-insts"] = true;
+ Features["gfx10-insts"] = true;
+ Features["gfx10-3-insts"] = true;
+ Features["s-memrealtime"] = true;
+ break;
case GK_GFX1012:
case GK_GFX1011:
Features["dot1-insts"] = true;
@@ -163,6 +209,7 @@ bool AMDGPUTargetInfo::initFeatureMap(
Features["dot4-insts"] = true;
Features["dot5-insts"] = true;
Features["dot6-insts"] = true;
+ Features["mai-insts"] = true;
LLVM_FALLTHROUGH;
case GK_GFX906:
Features["dl-insts"] = true;
@@ -232,27 +279,6 @@ bool AMDGPUTargetInfo::initFeatureMap(
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeatureVec);
}
-void AMDGPUTargetInfo::adjustTargetOptions(const CodeGenOptions &CGOpts,
- TargetOptions &TargetOpts) const {
- bool hasFP32Denormals = false;
- bool hasFP64Denormals = false;
-
- for (auto &I : TargetOpts.FeaturesAsWritten) {
- if (I == "+fp32-denormals" || I == "-fp32-denormals")
- hasFP32Denormals = true;
- if (I == "+fp64-fp16-denormals" || I == "-fp64-fp16-denormals")
- hasFP64Denormals = true;
- }
- if (!hasFP32Denormals)
- TargetOpts.Features.push_back(
- (Twine(hasFastFMAF() && hasFullRateDenormalsF32() && !CGOpts.FlushDenorm
- ? '+' : '-') + Twine("fp32-denormals"))
- .str());
- // Always do not flush fp64 or fp16 denorms.
- if (!hasFP64Denormals && hasFP64())
- TargetOpts.Features.push_back("+fp64-fp16-denormals");
-}
-
void AMDGPUTargetInfo::fillValidCPUList(
SmallVectorImpl<StringRef> &Values) const {
if (isAMDGCN(getTriple()))
@@ -277,6 +303,7 @@ AMDGPUTargetInfo::AMDGPUTargetInfo(const llvm::Triple &Triple,
resetDataLayout(isAMDGCN(getTriple()) ? DataLayoutStringAMDGCN
: DataLayoutStringR600);
assert(DataLayout->getAllocaAddrSpace() == Private);
+ GridValues = llvm::omp::AMDGPUGpuGridValues;
setAddressSpaceMap(Triple.getOS() == llvm::Triple::Mesa3D ||
!isAMDGCN(Triple));
@@ -354,4 +381,17 @@ void AMDGPUTargetInfo::setAuxTarget(const TargetInfo *Aux) {
copyAuxTarget(Aux);
LongDoubleFormat = SaveLongDoubleFormat;
Float128Format = SaveFloat128Format;
+ // For certain builtin types supported on the host target, claim they are
+ // supported so that compilation of the host code passes during the
+ // device-side compilation.
+ // FIXME: As a side effect, we also accept `__float128` uses in the device
+ // code. To reject these builtin types supported in the host target but not
+ // in the device target, one approach would be to support a `device_builtin`
+ // attribute so that we could tell the device builtin types from the host
+ // ones. That would also solve the different representations of the same
+ // builtin type, such as `size_t` in the MSVC environment.
+ if (Aux->hasFloat128Type()) {
+ HasFloat128 = true;
+ Float128Format = DoubleFormat;
+ }
}
diff --git a/clang/lib/Basic/Targets/AMDGPU.h b/clang/lib/Basic/Targets/AMDGPU.h
index 456cb2ebb8b5..d0394492cad6 100644
--- a/clang/lib/Basic/Targets/AMDGPU.h
+++ b/clang/lib/Basic/Targets/AMDGPU.h
@@ -114,11 +114,14 @@ public:
/// Accepted register names: (n, m is unsigned integer, n < m)
/// v
/// s
+ /// a
/// {vn}, {v[n]}
/// {sn}, {s[n]}
+ /// {an}, {a[n]}
/// {S} , where S is a special register name
////{v[n:m]}
/// {s[n:m]}
+ /// {a[n:m]}
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &Info) const override {
static const ::llvm::StringSet<> SpecialRegs({
@@ -127,7 +130,30 @@ public:
"exec_hi", "tma_lo", "tma_hi", "tba_lo", "tba_hi",
});
+ switch (*Name) {
+ case 'I':
+ Info.setRequiresImmediate(-16, 64);
+ return true;
+ case 'J':
+ Info.setRequiresImmediate(-32768, 32767);
+ return true;
+ case 'A':
+ case 'B':
+ case 'C':
+ Info.setRequiresImmediate();
+ return true;
+ default:
+ break;
+ }
+
StringRef S(Name);
+
+ if (S == "DA" || S == "DB") {
+ Name++;
+ Info.setRequiresImmediate();
+ return true;
+ }
+
bool HasLeftParen = false;
if (S.front() == '{') {
HasLeftParen = true;
@@ -135,7 +161,7 @@ public:
}
if (S.empty())
return false;
- if (S.front() != 'v' && S.front() != 's') {
+ if (S.front() != 'v' && S.front() != 's' && S.front() != 'a') {
if (!HasLeftParen)
return false;
auto E = S.find('}');
@@ -153,7 +179,7 @@ public:
if (!HasLeftParen) {
if (!S.empty())
return false;
- // Found s or v.
+ // Found s, v or a.
Info.setAllowsRegister();
Name = S.data() - 1;
return true;
@@ -184,7 +210,8 @@ public:
S = S.drop_front();
if (!S.empty())
return false;
- // Found {vn}, {sn}, {v[n]}, {s[n]}, {v[n:m]}, or {s[n:m]}.
+ // Found {vn}, {sn}, {an}, {v[n]}, {s[n]}, {a[n]}, {v[n:m]}, {s[n:m]}
+ // or {a[n:m]}.
Info.setAllowsRegister();
Name = S.data() - 1;
return true;
@@ -194,6 +221,12 @@ public:
// the constraint. In practice, it won't be changed unless the
// constraint is longer than one character.
std::string convertConstraint(const char *&Constraint) const override {
+
+ StringRef S(Constraint);
+ if (S == "DA" || S == "DB") {
+ return std::string("^") + std::string(Constraint++, 2);
+ }
+
const char *Begin = Constraint;
TargetInfo::ConstraintInfo Info("", "");
if (validateAsmConstraint(Constraint, Info))
@@ -208,11 +241,10 @@ public:
StringRef CPU,
const std::vector<std::string> &FeatureVec) const override;
- void adjustTargetOptions(const CodeGenOptions &CGOpts,
- TargetOptions &TargetOpts) const override;
-
ArrayRef<Builtin::Info> getTargetBuiltins() const override;
+ bool useFP16ConversionIntrinsics() const override { return false; }
+
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
@@ -263,6 +295,7 @@ public:
Opts.support("cl_khr_int64_base_atomics");
Opts.support("cl_khr_int64_extended_atomics");
Opts.support("cl_khr_mipmap_image");
+ Opts.support("cl_khr_mipmap_image_writes");
Opts.support("cl_khr_subgroups");
Opts.support("cl_khr_3d_image_writes");
Opts.support("cl_amd_media_ops");
@@ -348,10 +381,14 @@ public:
// address space has value 0 but in private and local address space has
// value ~0.
uint64_t getNullPointerValue(LangAS AS) const override {
- return AS == LangAS::opencl_local ? ~0 : 0;
+ // FIXME: Also should handle region.
+ return (AS == LangAS::opencl_local || AS == LangAS::opencl_private)
+ ? ~0 : 0;
}
void setAuxTarget(const TargetInfo *Aux) override;
+
+ bool hasExtIntType() const override { return true; }
};
} // namespace targets
diff --git a/clang/lib/Basic/Targets/ARC.h b/clang/lib/Basic/Targets/ARC.h
index c43a39984edb..b314c42be1e9 100644
--- a/clang/lib/Basic/Targets/ARC.h
+++ b/clang/lib/Basic/Targets/ARC.h
@@ -65,6 +65,8 @@ public:
TargetInfo::ConstraintInfo &Info) const override {
return false;
}
+
+ bool hasExtIntType() const override { return true; }
};
} // namespace targets
diff --git a/clang/lib/Basic/Targets/ARM.cpp b/clang/lib/Basic/Targets/ARM.cpp
index be088e81cffe..21cfe0107bbb 100644
--- a/clang/lib/Basic/Targets/ARM.cpp
+++ b/clang/lib/Basic/Targets/ARM.cpp
@@ -25,6 +25,9 @@ void ARMTargetInfo::setABIAAPCS() {
IsAAPCS = true;
DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 64;
+ BFloat16Width = BFloat16Align = 16;
+ BFloat16Format = &llvm::APFloat::BFloat();
+
const llvm::Triple &T = getTriple();
bool IsNetBSD = T.isOSNetBSD();
@@ -74,6 +77,8 @@ void ARMTargetInfo::setABIAPCS(bool IsAAPCS16) {
DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 64;
else
DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 32;
+ BFloat16Width = BFloat16Align = 16;
+ BFloat16Format = &llvm::APFloat::BFloat();
WCharType = SignedInt;
@@ -107,7 +112,7 @@ void ARMTargetInfo::setArchInfo() {
StringRef ArchName = getTriple().getArchName();
ArchISA = llvm::ARM::parseArchISA(ArchName);
- CPU = llvm::ARM::getDefaultCPU(ArchName);
+ CPU = std::string(llvm::ARM::getDefaultCPU(ArchName));
llvm::ARM::ArchKind AK = llvm::ARM::parseArch(ArchName);
if (AK != llvm::ARM::ArchKind::INVALID)
ArchKind = AK;
@@ -154,6 +159,8 @@ bool ARMTargetInfo::hasMVEFloat() const {
return hasMVE() && (MVE & MVE_FP);
}
+bool ARMTargetInfo::hasCDE() const { return getARMCDECoprocMask() != 0; }
+
bool ARMTargetInfo::isThumb() const {
return ArchISA == llvm::ARM::ISAKind::THUMB;
}
@@ -199,6 +206,8 @@ StringRef ARMTargetInfo::getCPUAttr() const {
return "8_4A";
case llvm::ARM::ArchKind::ARMV8_5A:
return "8_5A";
+ case llvm::ARM::ArchKind::ARMV8_6A:
+ return "8_6A";
case llvm::ARM::ArchKind::ARMV8MBaseline:
return "8M_BASE";
case llvm::ARM::ArchKind::ARMV8MMainline:
@@ -310,7 +319,7 @@ ARMTargetInfo::ARMTargetInfo(const llvm::Triple &Triple,
// Maximum alignment for ARM NEON data types should be 64-bits (AAPCS)
// as well the default alignment
- if (IsAAPCS && (Triple.getEnvironment() != llvm::Triple::Android))
+ if (IsAAPCS && !Triple.isAndroid())
DefaultAlignForAttributeAligned = MaxVectorAlign = 64;
// Do force alignment of members that follow zero length bitfields. If
@@ -372,7 +381,7 @@ bool ARMTargetInfo::initFeatureMap(
llvm::ARM::getFPUFeatures(FPUKind, TargetFeatures);
// get default Extension features
- unsigned Extensions = llvm::ARM::getDefaultExtensions(CPU, Arch);
+ uint64_t Extensions = llvm::ARM::getDefaultExtensions(CPU, Arch);
llvm::ARM::getExtensionFeatures(Extensions, TargetFeatures);
for (auto Feature : TargetFeatures)
@@ -421,7 +430,10 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
// Note that SoftFloatABI is initialized in our constructor.
HWDiv = 0;
DotProd = 0;
+ HasMatMul = 0;
HasFloat16 = true;
+ ARMCDECoprocMask = 0;
+ HasBFloat16 = false;
// This does not diagnose illegal cases like having both
// "+vfpv2" and "+vfpv3" or having "+neon" and "-fp64".
@@ -480,14 +492,20 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
} else if (Feature == "+dotprod") {
DotProd = true;
} else if (Feature == "+mve") {
- DSP = 1;
MVE |= MVE_INT;
} else if (Feature == "+mve.fp") {
- DSP = 1;
HasLegalHalfType = true;
FPU |= FPARMV8;
MVE |= MVE_INT | MVE_FP;
HW_FP |= HW_FP_SP | HW_FP_HP;
+ } else if (Feature == "+i8mm") {
+ HasMatMul = 1;
+ } else if (Feature.size() == strlen("+cdecp0") && Feature >= "+cdecp0" &&
+ Feature <= "+cdecp7") {
+ unsigned Coproc = Feature.back() - '0';
+ ARMCDECoprocMask |= (1U << Coproc);
+ } else if (Feature == "+bf16") {
+ HasBFloat16 = true;
}
}
@@ -537,6 +555,10 @@ bool ARMTargetInfo::hasFeature(StringRef Feature) const {
.Default(false);
}
+bool ARMTargetInfo::hasBFloat16Type() const {
+ return HasBFloat16 && !SoftFloat;
+}
+
bool ARMTargetInfo::isValidCPUName(StringRef Name) const {
return Name == "generic" ||
llvm::ARM::parseCPUArch(Name) != llvm::ARM::ArchKind::INVALID;
@@ -760,6 +782,12 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__ARM_FEATURE_MVE", hasMVEFloat() ? "3" : "1");
}
+ if (hasCDE()) {
+ Builder.defineMacro("__ARM_FEATURE_CDE", "1");
+ Builder.defineMacro("__ARM_FEATURE_CDE_COPROC",
+ "0x" + Twine::utohexstr(getARMCDECoprocMask()));
+ }
+
Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
Twine(Opts.WCharSize ? Opts.WCharSize : 4));
@@ -807,6 +835,15 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
if (DotProd)
Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");
+ if (HasMatMul)
+ Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");
+
+ if (HasBFloat16) {
+ Builder.defineMacro("__ARM_FEATURE_BF16", "1");
+ Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
+ Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
+ }
+
switch (ArchKind) {
default:
break;
@@ -819,6 +856,7 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
case llvm::ARM::ArchKind::ARMV8_3A:
case llvm::ARM::ArchKind::ARMV8_4A:
case llvm::ARM::ArchKind::ARMV8_5A:
+ case llvm::ARM::ArchKind::ARMV8_6A:
getTargetDefinesARMV83A(Opts, Builder);
break;
}
diff --git a/clang/lib/Basic/Targets/ARM.h b/clang/lib/Basic/Targets/ARM.h
index 9696a4404589..1e80f74d0766 100644
--- a/clang/lib/Basic/Targets/ARM.h
+++ b/clang/lib/Basic/Targets/ARM.h
@@ -75,6 +75,7 @@ class LLVM_LIBRARY_VISIBILITY ARMTargetInfo : public TargetInfo {
unsigned DSP : 1;
unsigned Unaligned : 1;
unsigned DotProd : 1;
+ unsigned HasMatMul : 1;
enum {
LDREX_B = (1 << 0), /// byte (8-bit)
@@ -108,6 +109,7 @@ class LLVM_LIBRARY_VISIBILITY ARMTargetInfo : public TargetInfo {
bool supportsThumb2() const;
bool hasMVE() const;
bool hasMVEFloat() const;
+ bool hasCDE() const;
StringRef getCPUAttr() const;
StringRef getCPUProfile() const;
@@ -135,6 +137,8 @@ public:
bool hasFeature(StringRef Feature) const override;
+ bool hasBFloat16Type() const override;
+
bool isValidCPUName(StringRef Name) const override;
void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
@@ -180,6 +184,10 @@ public:
int getEHDataRegisterNumber(unsigned RegNo) const override;
bool hasSjLjLowering() const override;
+
+ bool hasExtIntType() const override { return true; }
+
+ const char *getBFloat16Mangling() const override { return "u6__bf16"; };
};
class LLVM_LIBRARY_VISIBILITY ARMleTargetInfo : public ARMTargetInfo {
diff --git a/clang/lib/Basic/Targets/AVR.cpp b/clang/lib/Basic/Targets/AVR.cpp
index d865676700b5..bb215b4114ac 100644
--- a/clang/lib/Basic/Targets/AVR.cpp
+++ b/clang/lib/Basic/Targets/AVR.cpp
@@ -300,6 +300,7 @@ void AVRTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("AVR");
Builder.defineMacro("__AVR");
Builder.defineMacro("__AVR__");
+ Builder.defineMacro("__ELF__");
if (!this->CPU.empty()) {
auto It = llvm::find_if(
diff --git a/clang/lib/Basic/Targets/BPF.h b/clang/lib/Basic/Targets/BPF.h
index b2f1831e960e..43e55dfbfb2b 100644
--- a/clang/lib/Basic/Targets/BPF.h
+++ b/clang/lib/Basic/Targets/BPF.h
@@ -35,9 +35,9 @@ public:
Int64Type = SignedLong;
RegParmMax = 5;
if (Triple.getArch() == llvm::Triple::bpfeb) {
- resetDataLayout("E-m:e-p:64:64-i64:64-n32:64-S128");
+ resetDataLayout("E-m:e-p:64:64-i64:64-i128:128-n32:64-S128");
} else {
- resetDataLayout("e-m:e-p:64:64-i64:64-n32:64-S128");
+ resetDataLayout("e-m:e-p:64:64-i64:64-i128:128-n32:64-S128");
}
MaxAtomicPromoteWidth = 64;
MaxAtomicInlineWidth = 64;
diff --git a/clang/lib/Basic/Targets/Hexagon.cpp b/clang/lib/Basic/Targets/Hexagon.cpp
index fcb94b93d69d..205601c359d0 100644
--- a/clang/lib/Basic/Targets/Hexagon.cpp
+++ b/clang/lib/Basic/Targets/Hexagon.cpp
@@ -24,6 +24,11 @@ void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__qdsp6__", "1");
Builder.defineMacro("__hexagon__", "1");
+ Builder.defineMacro("__ELF__");
+
+ // The macro __HVXDBL__ is deprecated.
+ bool DefineHvxDbl = false;
+
if (CPU == "hexagonv5") {
Builder.defineMacro("__HEXAGON_V5__");
Builder.defineMacro("__HEXAGON_ARCH__", "5");
@@ -37,19 +42,29 @@ void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__QDSP6_V55__");
Builder.defineMacro("__QDSP6_ARCH__", "55");
} else if (CPU == "hexagonv60") {
+ DefineHvxDbl = true;
Builder.defineMacro("__HEXAGON_V60__");
Builder.defineMacro("__HEXAGON_ARCH__", "60");
Builder.defineMacro("__QDSP6_V60__");
Builder.defineMacro("__QDSP6_ARCH__", "60");
} else if (CPU == "hexagonv62") {
+ DefineHvxDbl = true;
Builder.defineMacro("__HEXAGON_V62__");
Builder.defineMacro("__HEXAGON_ARCH__", "62");
} else if (CPU == "hexagonv65") {
+ DefineHvxDbl = true;
Builder.defineMacro("__HEXAGON_V65__");
Builder.defineMacro("__HEXAGON_ARCH__", "65");
} else if (CPU == "hexagonv66") {
+ DefineHvxDbl = true;
Builder.defineMacro("__HEXAGON_V66__");
Builder.defineMacro("__HEXAGON_ARCH__", "66");
+ } else if (CPU == "hexagonv67") {
+ Builder.defineMacro("__HEXAGON_V67__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "67");
+ } else if (CPU == "hexagonv67t") {
+ Builder.defineMacro("__HEXAGON_V67T__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "67");
}
if (hasFeature("hvx-length64b")) {
@@ -62,14 +77,29 @@ void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__HVX__");
Builder.defineMacro("__HVX_ARCH__", HVXVersion);
Builder.defineMacro("__HVX_LENGTH__", "128");
- // FIXME: This macro is deprecated.
- Builder.defineMacro("__HVXDBL__");
+ if (DefineHvxDbl)
+ Builder.defineMacro("__HVXDBL__");
+ }
+
+ if (hasFeature("audio")) {
+ Builder.defineMacro("__HEXAGON_AUDIO__");
}
+
+ std::string NumPhySlots = isTinyCore() ? "3" : "4";
+ Builder.defineMacro("__HEXAGON_PHYSICAL_SLOTS__", NumPhySlots);
}
bool HexagonTargetInfo::initFeatureMap(
llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
const std::vector<std::string> &FeaturesVec) const {
+ if (isTinyCore())
+ Features["audio"] = true;
+
+ StringRef CPUFeature = CPU;
+ CPUFeature.consume_front("hexagon");
+ CPUFeature.consume_back("t");
+ Features[CPUFeature] = true;
+
Features["long-calls"] = false;
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
@@ -91,6 +121,8 @@ bool HexagonTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
UseLongCalls = true;
else if (F == "-long-calls")
UseLongCalls = false;
+ else if (F == "+audio")
+ HasAudio = true;
}
return true;
}
@@ -125,6 +157,8 @@ const Builtin::Info HexagonTargetInfo::BuiltinInfo[] = {
{#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
{#ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr},
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, FEATURE},
#include "clang/Basic/BuiltinsHexagon.def"
};
@@ -139,6 +173,7 @@ bool HexagonTargetInfo::hasFeature(StringRef Feature) const {
.Case("hvx-length64b", HasHVX64B)
.Case("hvx-length128b", HasHVX128B)
.Case("long-calls", UseLongCalls)
+ .Case("audio", HasAudio)
.Default(false);
}
@@ -148,9 +183,10 @@ struct CPUSuffix {
};
static constexpr CPUSuffix Suffixes[] = {
- {{"hexagonv5"}, {"5"}}, {{"hexagonv55"}, {"55"}},
- {{"hexagonv60"}, {"60"}}, {{"hexagonv62"}, {"62"}},
- {{"hexagonv65"}, {"65"}}, {{"hexagonv66"}, {"66"}},
+ {{"hexagonv5"}, {"5"}}, {{"hexagonv55"}, {"55"}},
+ {{"hexagonv60"}, {"60"}}, {{"hexagonv62"}, {"62"}},
+ {{"hexagonv65"}, {"65"}}, {{"hexagonv66"}, {"66"}},
+ {{"hexagonv67"}, {"67"}}, {{"hexagonv67t"}, {"67t"}},
};
const char *HexagonTargetInfo::getHexagonCPUSuffix(StringRef Name) {
diff --git a/clang/lib/Basic/Targets/Hexagon.h b/clang/lib/Basic/Targets/Hexagon.h
index 25a78c181580..d6c7da5f1e40 100644
--- a/clang/lib/Basic/Targets/Hexagon.h
+++ b/clang/lib/Basic/Targets/Hexagon.h
@@ -32,6 +32,7 @@ class LLVM_LIBRARY_VISIBILITY HexagonTargetInfo : public TargetInfo {
bool HasHVX = false;
bool HasHVX64B = false;
bool HasHVX128B = false;
+ bool HasAudio = false;
bool UseLongCalls = false;
public:
@@ -56,6 +57,13 @@ public:
LargeArrayAlign = 64;
UseBitFieldTypeAlignment = true;
ZeroLengthBitfieldBoundary = 32;
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+
+ // These are the default values anyway, but explicitly make sure
+ // that the size of the boolean type is 8 bits. Bool vectors are used
+ // for modeling predicate registers in HVX, and the bool -> byte
+ // correspondence matches the HVX architecture.
+ BoolWidth = BoolAlign = 8;
}
ArrayRef<Builtin::Info> getTargetBuiltins() const override;
@@ -96,6 +104,8 @@ public:
DiagnosticsEngine &Diags) override;
BuiltinVaListKind getBuiltinVaListKind() const override {
+ if (getTriple().isMusl())
+ return TargetInfo::HexagonBuiltinVaList;
return TargetInfo::CharPtrBuiltinVaList;
}
@@ -123,6 +133,13 @@ public:
int getEHDataRegisterNumber(unsigned RegNo) const override {
return RegNo < 2 ? RegNo : -1;
}
+
+ bool isTinyCore() const {
+ // We can write stricter checks later.
+ return CPU.find('t') != std::string::npos;
+ }
+
+ bool hasExtIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
diff --git a/clang/lib/Basic/Targets/Lanai.h b/clang/lib/Basic/Targets/Lanai.h
index e119606384c7..9af5427b81c4 100644
--- a/clang/lib/Basic/Targets/Lanai.h
+++ b/clang/lib/Basic/Targets/Lanai.h
@@ -86,6 +86,8 @@ public:
}
const char *getClobbers() const override { return ""; }
+
+ bool hasExtIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
diff --git a/clang/lib/Basic/Targets/MSP430.h b/clang/lib/Basic/Targets/MSP430.h
index 620f12d2b8e3..9d42e4d4bb18 100644
--- a/clang/lib/Basic/Targets/MSP430.h
+++ b/clang/lib/Basic/Targets/MSP430.h
@@ -64,8 +64,14 @@ public:
ArrayRef<const char *> getGCCRegNames() const override;
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
- // No aliases.
- return None;
+ // Make r0 - r3 be recognized by llc (e.g., in a clobber list)
+ static const TargetInfo::GCCRegAlias GCCRegAliases[] = {
+ {{"r0"}, "pc"},
+ {{"r1"}, "sp"},
+ {{"r2"}, "sr"},
+ {{"r3"}, "cg"},
+ };
+ return llvm::makeArrayRef(GCCRegAliases);
}
bool validateAsmConstraint(const char *&Name,
diff --git a/clang/lib/Basic/Targets/Mips.h b/clang/lib/Basic/Targets/Mips.h
index 224ec0783edf..b475c03889a1 100644
--- a/clang/lib/Basic/Targets/Mips.h
+++ b/clang/lib/Basic/Targets/Mips.h
@@ -406,6 +406,7 @@ public:
unsigned getUnwindWordWidth() const override;
bool validateTarget(DiagnosticsEngine &Diags) const override;
+ bool hasExtIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
diff --git a/clang/lib/Basic/Targets/NVPTX.cpp b/clang/lib/Basic/Targets/NVPTX.cpp
index f69e9d84c701..18c3c8370331 100644
--- a/clang/lib/Basic/Targets/NVPTX.cpp
+++ b/clang/lib/Basic/Targets/NVPTX.cpp
@@ -16,6 +16,7 @@
#include "clang/Basic/MacroBuilder.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Frontend/OpenMP/OMPGridValues.h"
using namespace clang;
using namespace clang::targets;
@@ -44,6 +45,8 @@ NVPTXTargetInfo::NVPTXTargetInfo(const llvm::Triple &Triple,
if (!Feature.startswith("+ptx"))
continue;
PTXVersion = llvm::StringSwitch<unsigned>(Feature)
+ .Case("+ptx70", 70)
+ .Case("+ptx65", 65)
.Case("+ptx64", 64)
.Case("+ptx63", 63)
.Case("+ptx61", 61)
@@ -60,6 +63,7 @@ NVPTXTargetInfo::NVPTXTargetInfo(const llvm::Triple &Triple,
TLSSupported = false;
VLASupported = false;
AddrSpaceMap = &NVPTXAddrSpaceMap;
+ GridValues = llvm::omp::NVPTXGpuGridValues;
UseAddrSpaceMapMangling = true;
// Define available target features
@@ -196,6 +200,7 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
case CudaArch::GFX1010:
case CudaArch::GFX1011:
case CudaArch::GFX1012:
+ case CudaArch::GFX1030:
case CudaArch::LAST:
break;
case CudaArch::UNKNOWN:
@@ -231,6 +236,8 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
return "720";
case CudaArch::SM_75:
return "750";
+ case CudaArch::SM_80:
+ return "800";
}
llvm_unreachable("unhandled CudaArch");
}();
diff --git a/clang/lib/Basic/Targets/NVPTX.h b/clang/lib/Basic/Targets/NVPTX.h
index 63780789c474..f57a0f18efa3 100644
--- a/clang/lib/Basic/Targets/NVPTX.h
+++ b/clang/lib/Basic/Targets/NVPTX.h
@@ -160,6 +160,8 @@ public:
return HostTarget->checkCallingConvention(CC);
return CCCR_Warning;
}
+
+ bool hasExtIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
diff --git a/clang/lib/Basic/Targets/OSTargets.cpp b/clang/lib/Basic/Targets/OSTargets.cpp
index d4ffffc64ba8..15e475a31d64 100644
--- a/clang/lib/Basic/Targets/OSTargets.cpp
+++ b/clang/lib/Basic/Targets/OSTargets.cpp
@@ -25,7 +25,7 @@ void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
Builder.defineMacro("__APPLE_CC__", "6000");
Builder.defineMacro("__APPLE__");
Builder.defineMacro("__STDC_NO_THREADS__");
- Builder.defineMacro("OBJC_NEW_PROPERTIES");
+
// AddressSanitizer doesn't play well with source fortification, which is on
// by default on Darwin.
if (Opts.Sanitize.has(SanitizerKind::Address))
@@ -179,7 +179,7 @@ static void addVisualCDefines(const LangOptions &Opts, MacroBuilder &Builder) {
Builder.defineMacro("_HAS_CHAR16_T_LANGUAGE_SUPPORT", Twine(1));
if (Opts.isCompatibleWithMSVC(LangOptions::MSVC2015)) {
- if (Opts.CPlusPlus2a)
+ if (Opts.CPlusPlus20)
Builder.defineMacro("_MSVC_LANG", "201705L");
else if (Opts.CPlusPlus17)
Builder.defineMacro("_MSVC_LANG", "201703L");
diff --git a/clang/lib/Basic/Targets/OSTargets.h b/clang/lib/Basic/Targets/OSTargets.h
index 70fac030bc5d..cfa362bef1b1 100644
--- a/clang/lib/Basic/Targets/OSTargets.h
+++ b/clang/lib/Basic/Targets/OSTargets.h
@@ -87,7 +87,7 @@ protected:
public:
DarwinTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: OSTargetInfo<Target>(Triple, Opts) {
- // By default, no TLS, and we whitelist permitted architecture/OS
+ // By default, no TLS, and we list permitted architecture/OS
// combinations.
this->TLSSupported = false;
@@ -706,6 +706,8 @@ protected:
public:
AIXTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: OSTargetInfo<Target>(Triple, Opts) {
+ this->TheCXXABI.set(TargetCXXABI::XL);
+
if (this->PointerWidth == 64) {
this->WCharType = this->UnsignedInt;
} else {
@@ -819,7 +821,7 @@ class LLVM_LIBRARY_VISIBILITY WebAssemblyOSTargetInfo
: public OSTargetInfo<Target> {
protected:
void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
- MacroBuilder &Builder) const {
+ MacroBuilder &Builder) const override {
// A common platform macro.
if (Opts.POSIXThreads)
Builder.defineMacro("_REENTRANT");
diff --git a/clang/lib/Basic/Targets/PNaCl.h b/clang/lib/Basic/Targets/PNaCl.h
index ab4abf9fc567..d5bfc369583f 100644
--- a/clang/lib/Basic/Targets/PNaCl.h
+++ b/clang/lib/Basic/Targets/PNaCl.h
@@ -68,6 +68,8 @@ public:
}
const char *getClobbers() const override { return ""; }
+
+ bool hasExtIntType() const override { return true; }
};
// We attempt to use PNaCl (le32) frontend and Mips32EL backend.
diff --git a/clang/lib/Basic/Targets/PPC.cpp b/clang/lib/Basic/Targets/PPC.cpp
index 1877d4a5ef70..f0de2bf070ea 100644
--- a/clang/lib/Basic/Targets/PPC.cpp
+++ b/clang/lib/Basic/Targets/PPC.cpp
@@ -54,6 +54,10 @@ bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasFloat128 = true;
} else if (Feature == "+power9-vector") {
HasP9Vector = true;
+ } else if (Feature == "+power10-vector") {
+ HasP10Vector = true;
+ } else if (Feature == "+pcrelative-memops") {
+ HasPCRelativeMemops = true;
} else if (Feature == "+spe") {
HasSPE = true;
LongDoubleWidth = LongDoubleAlign = 64;
@@ -151,6 +155,8 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("_ARCH_PWR8");
if (ArchDefs & ArchDefinePwr9)
Builder.defineMacro("_ARCH_PWR9");
+ if (ArchDefs & ArchDefinePwr10)
+ Builder.defineMacro("_ARCH_PWR10");
if (ArchDefs & ArchDefineA2)
Builder.defineMacro("_ARCH_A2");
if (ArchDefs & ArchDefineA2q) {
@@ -189,6 +195,8 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__FLOAT128__");
if (HasP9Vector)
Builder.defineMacro("__POWER9_VECTOR__");
+ if (HasP10Vector)
+ Builder.defineMacro("__POWER10_VECTOR__");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
@@ -223,38 +231,32 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
// - direct-move
// - float128
// - power9-vector
+// - power10-vector
// then go ahead and error since the customer has expressed an incompatible
// set of options.
static bool ppcUserFeaturesCheck(DiagnosticsEngine &Diags,
const std::vector<std::string> &FeaturesVec) {
- if (llvm::find(FeaturesVec, "-vsx") != FeaturesVec.end()) {
- if (llvm::find(FeaturesVec, "+power8-vector") != FeaturesVec.end()) {
- Diags.Report(diag::err_opt_not_valid_with_opt) << "-mpower8-vector"
- << "-mno-vsx";
- return false;
- }
-
- if (llvm::find(FeaturesVec, "+direct-move") != FeaturesVec.end()) {
- Diags.Report(diag::err_opt_not_valid_with_opt) << "-mdirect-move"
- << "-mno-vsx";
- return false;
- }
+ // vsx was not explicitly turned off.
+ if (llvm::find(FeaturesVec, "-vsx") == FeaturesVec.end())
+ return true;
- if (llvm::find(FeaturesVec, "+float128") != FeaturesVec.end()) {
- Diags.Report(diag::err_opt_not_valid_with_opt) << "-mfloat128"
- << "-mno-vsx";
- return false;
+ auto FindVSXSubfeature = [&](StringRef Feature, StringRef Option) {
+ if (llvm::find(FeaturesVec, Feature) != FeaturesVec.end()) {
+ Diags.Report(diag::err_opt_not_valid_with_opt) << Option << "-mno-vsx";
+ return true;
}
+ return false;
+ };
- if (llvm::find(FeaturesVec, "+power9-vector") != FeaturesVec.end()) {
- Diags.Report(diag::err_opt_not_valid_with_opt) << "-mpower9-vector"
- << "-mno-vsx";
- return false;
- }
- }
+ bool Found = FindVSXSubfeature("+power8-vector", "-mpower8-vector");
+ Found |= FindVSXSubfeature("+direct-move", "-mdirect-move");
+ Found |= FindVSXSubfeature("+float128", "-mfloat128");
+ Found |= FindVSXSubfeature("+power9-vector", "-mpower9-vector");
+ Found |= FindVSXSubfeature("+power10-vector", "-mpower10-vector");
- return true;
+ // Return false if any vsx subfeature was found.
+ return !Found;
}
bool PPCTargetInfo::initFeatureMap(
@@ -321,10 +323,17 @@ bool PPCTargetInfo::initFeatureMap(
.Case("e500", true)
.Default(false);
- // Future CPU should include all of the features of Power 9 as well as any
+ // Power10 includes all the same features as Power9 plus any features specific
+ // to the Power10 core.
+ if (CPU == "pwr10" || CPU == "power10") {
+ initFeatureMap(Features, Diags, "pwr9", FeaturesVec);
+ addP10SpecificFeatures(Features);
+ }
+
+ // Future CPU should include all of the features of Power 10 as well as any
// additional features (yet to be determined) specific to it.
if (CPU == "future") {
- initFeatureMap(Features, Diags, "pwr9", FeaturesVec);
+ initFeatureMap(Features, Diags, "pwr10", FeaturesVec);
addFutureSpecificFeatures(Features);
}
@@ -341,6 +350,15 @@ bool PPCTargetInfo::initFeatureMap(
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
+// Add any Power10 specific features.
+void PPCTargetInfo::addP10SpecificFeatures(
+ llvm::StringMap<bool> &Features) const {
+ Features["htm"] = false; // HTM was removed for P10.
+ Features["power10-vector"] = true;
+ Features["pcrelative-memops"] = true;
+ return;
+}
+
// Add features specific to the "Future" CPU.
void PPCTargetInfo::addFutureSpecificFeatures(
llvm::StringMap<bool> &Features) const {
@@ -361,6 +379,8 @@ bool PPCTargetInfo::hasFeature(StringRef Feature) const {
.Case("extdiv", HasExtDiv)
.Case("float128", HasFloat128)
.Case("power9-vector", HasP9Vector)
+ .Case("power10-vector", HasP10Vector)
+ .Case("pcrelative-memops", HasPCRelativeMemops)
.Case("spe", HasSPE)
.Default(false);
}
@@ -375,22 +395,34 @@ void PPCTargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
.Case("direct-move", true)
.Case("power8-vector", true)
.Case("power9-vector", true)
+ .Case("power10-vector", true)
.Case("float128", true)
.Default(false);
if (FeatureHasVSX)
Features["vsx"] = Features["altivec"] = true;
if (Name == "power9-vector")
Features["power8-vector"] = true;
- Features[Name] = true;
+ else if (Name == "power10-vector")
+ Features["power8-vector"] = Features["power9-vector"] = true;
+ if (Name == "pcrel")
+ Features["pcrelative-memops"] = true;
+ else
+ Features[Name] = true;
} else {
// If we're disabling altivec or vsx go ahead and disable all of the vsx
// features.
if ((Name == "altivec") || (Name == "vsx"))
Features["vsx"] = Features["direct-move"] = Features["power8-vector"] =
- Features["float128"] = Features["power9-vector"] = false;
+ Features["float128"] = Features["power9-vector"] =
+ Features["power10-vector"] = false;
if (Name == "power8-vector")
- Features["power9-vector"] = false;
- Features[Name] = false;
+ Features["power9-vector"] = Features["power10-vector"] = false;
+ else if (Name == "power9-vector")
+ Features["power10-vector"] = false;
+ if (Name == "pcrel")
+ Features["pcrelative-memops"] = false;
+ else
+ Features[Name] = false;
}
}
@@ -471,18 +503,17 @@ ArrayRef<TargetInfo::AddlRegName> PPCTargetInfo::getGCCAddlRegNames() const {
}
static constexpr llvm::StringLiteral ValidCPUNames[] = {
- {"generic"}, {"440"}, {"450"}, {"601"}, {"602"},
- {"603"}, {"603e"}, {"603ev"}, {"604"}, {"604e"},
- {"620"}, {"630"}, {"g3"}, {"7400"}, {"g4"},
- {"7450"}, {"g4+"}, {"750"}, {"8548"}, {"970"},
- {"g5"}, {"a2"}, {"a2q"}, {"e500"}, {"e500mc"},
- {"e5500"}, {"power3"}, {"pwr3"}, {"power4"}, {"pwr4"},
- {"power5"}, {"pwr5"}, {"power5x"}, {"pwr5x"}, {"power6"},
- {"pwr6"}, {"power6x"}, {"pwr6x"}, {"power7"}, {"pwr7"},
- {"power8"}, {"pwr8"}, {"power9"}, {"pwr9"}, {"powerpc"},
- {"ppc"}, {"powerpc64"}, {"ppc64"}, {"powerpc64le"}, {"ppc64le"},
- {"future"}
-};
+ {"generic"}, {"440"}, {"450"}, {"601"}, {"602"},
+ {"603"}, {"603e"}, {"603ev"}, {"604"}, {"604e"},
+ {"620"}, {"630"}, {"g3"}, {"7400"}, {"g4"},
+ {"7450"}, {"g4+"}, {"750"}, {"8548"}, {"970"},
+ {"g5"}, {"a2"}, {"a2q"}, {"e500"}, {"e500mc"},
+ {"e5500"}, {"power3"}, {"pwr3"}, {"power4"}, {"pwr4"},
+ {"power5"}, {"pwr5"}, {"power5x"}, {"pwr5x"}, {"power6"},
+ {"pwr6"}, {"power6x"}, {"pwr6x"}, {"power7"}, {"pwr7"},
+ {"power8"}, {"pwr8"}, {"power9"}, {"pwr9"}, {"power10"},
+ {"pwr10"}, {"powerpc"}, {"ppc"}, {"powerpc64"}, {"ppc64"},
+ {"powerpc64le"}, {"ppc64le"}, {"future"}};
bool PPCTargetInfo::isValidCPUName(StringRef Name) const {
return llvm::find(ValidCPUNames, Name) != std::end(ValidCPUNames);
diff --git a/clang/lib/Basic/Targets/PPC.h b/clang/lib/Basic/Targets/PPC.h
index 270aa7ff9181..858059bacb86 100644
--- a/clang/lib/Basic/Targets/PPC.h
+++ b/clang/lib/Basic/Targets/PPC.h
@@ -43,13 +43,13 @@ class LLVM_LIBRARY_VISIBILITY PPCTargetInfo : public TargetInfo {
ArchDefinePwr7 = 1 << 11,
ArchDefinePwr8 = 1 << 12,
ArchDefinePwr9 = 1 << 13,
- ArchDefineFuture = 1 << 14,
- ArchDefineA2 = 1 << 15,
- ArchDefineA2q = 1 << 16,
- ArchDefineE500 = 1 << 17
+ ArchDefinePwr10 = 1 << 14,
+ ArchDefineFuture = 1 << 15,
+ ArchDefineA2 = 1 << 16,
+ ArchDefineA2q = 1 << 17,
+ ArchDefineE500 = 1 << 18
} ArchDefineTypes;
-
ArchDefineTypes ArchDefs = ArchDefineNone;
static const Builtin::Info BuiltinInfo[];
static const char *const GCCRegNames[];
@@ -69,6 +69,8 @@ class LLVM_LIBRARY_VISIBILITY PPCTargetInfo : public TargetInfo {
bool HasExtDiv = false;
bool HasP9Vector = false;
bool HasSPE = false;
+ bool HasP10Vector = false;
+ bool HasPCRelativeMemops = false;
protected:
std::string ABI;
@@ -119,20 +121,20 @@ public:
.Case("a2q", ArchDefineName | ArchDefineA2 | ArchDefineA2q)
.Cases("power3", "pwr3", ArchDefinePpcgr)
.Cases("power4", "pwr4",
- ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
+ ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
.Cases("power5", "pwr5",
- ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
- ArchDefinePpcsq)
+ ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
.Cases("power5x", "pwr5x",
- ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 |
- ArchDefinePpcgr | ArchDefinePpcsq)
+ ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 |
+ ArchDefinePpcgr | ArchDefinePpcsq)
.Cases("power6", "pwr6",
- ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 |
- ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
+ ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 |
+ ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
.Cases("power6x", "pwr6x",
- ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x |
- ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
- ArchDefinePpcsq)
+ ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x |
+ ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
.Cases("power7", "pwr7",
ArchDefinePwr7 | ArchDefinePwr6 | ArchDefinePwr5x |
ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
@@ -146,11 +148,16 @@ public:
ArchDefinePwr9 | ArchDefinePwr8 | ArchDefinePwr7 |
ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 |
ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Cases("power10", "pwr10",
+ ArchDefinePwr10 | ArchDefinePwr9 | ArchDefinePwr8 |
+ ArchDefinePwr7 | ArchDefinePwr6 | ArchDefinePwr5x |
+ ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
.Case("future",
- ArchDefineFuture | ArchDefinePwr9 | ArchDefinePwr8 |
- ArchDefinePwr7 | ArchDefinePwr6 | ArchDefinePwr5x |
- ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
- ArchDefinePpcsq)
+ ArchDefineFuture | ArchDefinePwr10 | ArchDefinePwr9 |
+ ArchDefinePwr8 | ArchDefinePwr7 | ArchDefinePwr6 |
+ ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 |
+ ArchDefinePpcgr | ArchDefinePpcsq)
.Cases("8548", "e500", ArchDefineE500)
.Default(ArchDefineNone);
}
@@ -171,6 +178,7 @@ public:
StringRef CPU,
const std::vector<std::string> &FeaturesVec) const override;
+ void addP10SpecificFeatures(llvm::StringMap<bool> &Features) const;
void addFutureSpecificFeatures(llvm::StringMap<bool> &Features) const;
bool handleTargetFeatures(std::vector<std::string> &Features,
@@ -276,11 +284,12 @@ public:
break;
case 'Q': // Memory operand that is an offset from a register (it is
// usually better to use `m' or `es' in asm statements)
+ Info.setAllowsRegister();
+ LLVM_FALLTHROUGH;
case 'Z': // Memory operand that is an indexed or indirect from a
// register (it is usually better to use `m' or `es' in
// asm statements)
Info.setAllowsMemory();
- Info.setAllowsRegister();
break;
case 'R': // AIX TOC entry
case 'a': // Address operand that is an indexed or indirect from a
@@ -332,13 +341,22 @@ public:
: "u9__ieee128";
}
const char *getFloat128Mangling() const override { return "u9__ieee128"; }
+
+ bool hasExtIntType() const override { return true; }
+
+ bool isSPRegName(StringRef RegName) const override {
+ return RegName.equals("r1") || RegName.equals("x1");
+ }
};
class LLVM_LIBRARY_VISIBILITY PPC32TargetInfo : public PPCTargetInfo {
public:
PPC32TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: PPCTargetInfo(Triple, Opts) {
- resetDataLayout("E-m:e-p:32:32-i64:64-n32");
+ if (Triple.isOSAIX())
+ resetDataLayout("E-m:a-p:32:32-i64:64-n32");
+ else
+ resetDataLayout("E-m:e-p:32:32-i64:64-n32");
switch (getTriple().getOS()) {
case llvm::Triple::Linux:
@@ -384,7 +402,11 @@ public:
IntMaxType = SignedLong;
Int64Type = SignedLong;
- if ((Triple.getArch() == llvm::Triple::ppc64le)) {
+ if (Triple.isOSAIX()) {
+ // TODO: Set appropriate ABI for AIX platform.
+ resetDataLayout("E-m:a-i64:64-n32:64");
+ SuitableAlign = 64;
+ } else if ((Triple.getArch() == llvm::Triple::ppc64le)) {
resetDataLayout("e-m:e-i64:64-n32:64");
ABI = "elfv2";
} else {
@@ -392,9 +414,6 @@ public:
ABI = "elfv1";
}
- if (Triple.getOS() == llvm::Triple::AIX)
- SuitableAlign = 64;
-
if (Triple.isOSFreeBSD() || Triple.getOS() == llvm::Triple::AIX ||
Triple.isMusl()) {
LongDoubleWidth = LongDoubleAlign = 64;
diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp
index ab8272c034fd..522776437cd2 100644
--- a/clang/lib/Basic/Targets/RISCV.cpp
+++ b/clang/lib/Basic/Targets/RISCV.cpp
@@ -125,6 +125,9 @@ void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasC)
Builder.defineMacro("__riscv_compressed");
+
+ if (HasB)
+ Builder.defineMacro("__riscv_bitmanip");
}
/// Return true if has this feature, need to sync with handleTargetFeatures.
@@ -139,6 +142,7 @@ bool RISCVTargetInfo::hasFeature(StringRef Feature) const {
.Case("f", HasF)
.Case("d", HasD)
.Case("c", HasC)
+ .Case("experimental-b", HasB)
.Default(false);
}
@@ -156,6 +160,8 @@ bool RISCVTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasD = true;
else if (Feature == "+c")
HasC = true;
+ else if (Feature == "+experimental-b")
+ HasB = true;
}
return true;
diff --git a/clang/lib/Basic/Targets/RISCV.h b/clang/lib/Basic/Targets/RISCV.h
index 9118494a87ab..73652b409e9c 100644
--- a/clang/lib/Basic/Targets/RISCV.h
+++ b/clang/lib/Basic/Targets/RISCV.h
@@ -30,11 +30,12 @@ protected:
bool HasF;
bool HasD;
bool HasC;
+ bool HasB;
public:
RISCVTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
: TargetInfo(Triple), HasM(false), HasA(false), HasF(false),
- HasD(false), HasC(false) {
+ HasD(false), HasC(false), HasB(false) {
LongDoubleWidth = 128;
LongDoubleAlign = 128;
LongDoubleFormat = &llvm::APFloat::IEEEquad();
@@ -75,6 +76,8 @@ public:
bool handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) override;
+
+ bool hasExtIntType() const override { return true; }
};
class LLVM_LIBRARY_VISIBILITY RISCV32TargetInfo : public RISCVTargetInfo {
public:
diff --git a/clang/lib/Basic/Targets/SPIR.cpp b/clang/lib/Basic/Targets/SPIR.cpp
index a9b815d13bc1..9b7aab85314a 100644
--- a/clang/lib/Basic/Targets/SPIR.cpp
+++ b/clang/lib/Basic/Targets/SPIR.cpp
@@ -23,10 +23,12 @@ void SPIRTargetInfo::getTargetDefines(const LangOptions &Opts,
void SPIR32TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
+ SPIRTargetInfo::getTargetDefines(Opts, Builder);
DefineStd(Builder, "SPIR32", Opts);
}
void SPIR64TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
+ SPIRTargetInfo::getTargetDefines(Opts, Builder);
DefineStd(Builder, "SPIR64", Opts);
}
diff --git a/clang/lib/Basic/Targets/SPIR.h b/clang/lib/Basic/Targets/SPIR.h
index 279d1866a428..f625d4980e29 100644
--- a/clang/lib/Basic/Targets/SPIR.h
+++ b/clang/lib/Basic/Targets/SPIR.h
@@ -100,6 +100,8 @@ public:
// for SPIR since it is a generic target.
getSupportedOpenCLOpts().supportAll();
}
+
+ bool hasExtIntType() const override { return true; }
};
class LLVM_LIBRARY_VISIBILITY SPIR32TargetInfo : public SPIRTargetInfo {
public:
diff --git a/clang/lib/Basic/Targets/Sparc.h b/clang/lib/Basic/Targets/Sparc.h
index 1f799565e99b..d24cf15d7cd6 100644
--- a/clang/lib/Basic/Targets/Sparc.h
+++ b/clang/lib/Basic/Targets/Sparc.h
@@ -176,6 +176,7 @@ public:
MacroBuilder &Builder) const override;
bool hasSjLjLowering() const override { return true; }
+ bool hasExtIntType() const override { return true; }
};
// SPARCV8el is the 32-bit little-endian mode selected by Triple::sparcel.
@@ -227,6 +228,8 @@ public:
return false;
return getCPUGeneration(CPU) == CG_V9;
}
+
+ bool hasExtIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
diff --git a/clang/lib/Basic/Targets/SystemZ.h b/clang/lib/Basic/Targets/SystemZ.h
index e751806f4747..d7869e3754a8 100644
--- a/clang/lib/Basic/Targets/SystemZ.h
+++ b/clang/lib/Basic/Targets/SystemZ.h
@@ -29,11 +29,12 @@ class LLVM_LIBRARY_VISIBILITY SystemZTargetInfo : public TargetInfo {
int ISARevision;
bool HasTransactionalExecution;
bool HasVector;
+ bool SoftFloat;
public:
SystemZTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
: TargetInfo(Triple), CPU("z10"), ISARevision(8),
- HasTransactionalExecution(false), HasVector(false) {
+ HasTransactionalExecution(false), HasVector(false), SoftFloat(false) {
IntMaxType = SignedLong;
Int64Type = SignedLong;
TLSSupported = true;
@@ -47,6 +48,7 @@ public:
MinGlobalAlign = 16;
resetDataLayout("E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64");
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ HasStrictFP = true;
}
void getTargetDefines(const LangOptions &Opts,
@@ -63,6 +65,10 @@ public:
ArrayRef<TargetInfo::AddlRegName> getGCCAddlRegNames() const override;
+ bool isSPRegName(StringRef RegName) const override {
+ return RegName.equals("r15");
+ }
+
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &info) const override;
@@ -109,12 +115,17 @@ public:
DiagnosticsEngine &Diags) override {
HasTransactionalExecution = false;
HasVector = false;
+ SoftFloat = false;
for (const auto &Feature : Features) {
if (Feature == "+transactional-execution")
HasTransactionalExecution = true;
else if (Feature == "+vector")
HasVector = true;
+ else if (Feature == "+soft-float")
+ SoftFloat = true;
}
+ HasVector &= !SoftFloat;
+
// If we use the vector ABI, vector types are 64-bit aligned.
if (HasVector) {
MaxVectorAlign = 64;
@@ -144,6 +155,8 @@ public:
}
const char *getLongDoubleMangling() const override { return "g"; }
+
+ bool hasExtIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
diff --git a/clang/lib/Basic/Targets/VE.cpp b/clang/lib/Basic/Targets/VE.cpp
new file mode 100644
index 000000000000..22223654e8ad
--- /dev/null
+++ b/clang/lib/Basic/Targets/VE.cpp
@@ -0,0 +1,39 @@
+//===--- VE.cpp - Implement VE target feature support ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements VE TargetInfo objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "VE.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/MacroBuilder.h"
+#include "clang/Basic/TargetBuiltins.h"
+
+using namespace clang;
+using namespace clang::targets;
+
+void VETargetInfo::getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ Builder.defineMacro("_LP64", "1");
+ Builder.defineMacro("unix", "1");
+ Builder.defineMacro("__unix__", "1");
+ Builder.defineMacro("__linux__", "1");
+ Builder.defineMacro("__ve", "1");
+ Builder.defineMacro("__ve__", "1");
+ Builder.defineMacro("__STDC_HOSTED__", "1");
+ Builder.defineMacro("__STDC__", "1");
+ Builder.defineMacro("__NEC__", "1");
+ // FIXME: define __FAST_MATH__ 1 if -ffast-math is enabled
+ // FIXME: define __OPTIMIZE__ n if -On is enabled
+ // FIXME: define __VECTOR__ n 1 if automatic vectorization is enabled
+}
+
+ArrayRef<Builtin::Info> VETargetInfo::getTargetBuiltins() const {
+ return ArrayRef<Builtin::Info>();
+}
diff --git a/clang/lib/Basic/Targets/VE.h b/clang/lib/Basic/Targets/VE.h
new file mode 100644
index 000000000000..f863a0af0acb
--- /dev/null
+++ b/clang/lib/Basic/Targets/VE.h
@@ -0,0 +1,170 @@
+//===--- VE.h - Declare VE target feature support ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares VE TargetInfo objects.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_VE_H
+#define LLVM_CLANG_LIB_BASIC_TARGETS_VE_H
+
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TargetOptions.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/Compiler.h"
+
+namespace clang {
+namespace targets {
+
+class LLVM_LIBRARY_VISIBILITY VETargetInfo : public TargetInfo {
+ static const Builtin::Info BuiltinInfo[];
+
+public:
+ VETargetInfo(const llvm::Triple &Triple, const TargetOptions &)
+ : TargetInfo(Triple) {
+ NoAsmVariants = true;
+ LongDoubleWidth = 128;
+ LongDoubleAlign = 128;
+ LongDoubleFormat = &llvm::APFloat::IEEEquad();
+ DoubleAlign = LongLongAlign = 64;
+ SuitableAlign = 64;
+ LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
+ SizeType = UnsignedLong;
+ PtrDiffType = SignedLong;
+ IntPtrType = SignedLong;
+ IntMaxType = SignedLong;
+ Int64Type = SignedLong;
+ RegParmMax = 8;
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+
+ WCharType = UnsignedInt;
+ WIntType = UnsignedInt;
+ UseZeroLengthBitfieldAlignment = true;
+ resetDataLayout("e-m:e-i64:64-n32:64-S128");
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override;
+
+ bool hasSjLjLowering() const override {
+ // TODO
+ return false;
+ }
+
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override;
+
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::VoidPtrBuiltinVaList;
+ }
+
+ CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
+ switch (CC) {
+ default:
+ return CCCR_Warning;
+ case CC_C:
+ return CCCR_OK;
+ }
+ }
+
+ const char *getClobbers() const override { return ""; }
+
+ ArrayRef<const char *> getGCCRegNames() const override {
+ static const char *const GCCRegNames[] = {
+ // Regular registers
+ "sx0", "sx1", "sx2", "sx3", "sx4", "sx5", "sx6", "sx7",
+ "sx8", "sx9", "sx10", "sx11", "sx12", "sx13", "sx14", "sx15",
+ "sx16", "sx17", "sx18", "sx19", "sx20", "sx21", "sx22", "sx23",
+ "sx24", "sx25", "sx26", "sx27", "sx28", "sx29", "sx30", "sx31",
+ "sx32", "sx33", "sx34", "sx35", "sx36", "sx37", "sx38", "sx39",
+ "sx40", "sx41", "sx42", "sx43", "sx44", "sx45", "sx46", "sx47",
+ "sx48", "sx49", "sx50", "sx51", "sx52", "sx53", "sx54", "sx55",
+ "sx56", "sx57", "sx58", "sx59", "sx60", "sx61", "sx62", "sx63",
+ };
+ return llvm::makeArrayRef(GCCRegNames);
+ }
+
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ static const TargetInfo::GCCRegAlias GCCRegAliases[] = {
+ {{"s0"}, "sx0"},
+ {{"s1"}, "sx1"},
+ {{"s2"}, "sx2"},
+ {{"s3"}, "sx3"},
+ {{"s4"}, "sx4"},
+ {{"s5"}, "sx5"},
+ {{"s6"}, "sx6"},
+ {{"s7"}, "sx7"},
+ {{"s8", "sl"}, "sx8"},
+ {{"s9", "fp"}, "sx9"},
+ {{"s10", "lr"}, "sx10"},
+ {{"s11", "sp"}, "sx11"},
+ {{"s12", "outer"}, "sx12"},
+ {{"s13"}, "sx13"},
+ {{"s14", "tp"}, "sx14"},
+ {{"s15", "got"}, "sx15"},
+ {{"s16", "plt"}, "sx16"},
+ {{"s17", "info"}, "sx17"},
+ {{"s18"}, "sx18"},
+ {{"s19"}, "sx19"},
+ {{"s20"}, "sx20"},
+ {{"s21"}, "sx21"},
+ {{"s22"}, "sx22"},
+ {{"s23"}, "sx23"},
+ {{"s24"}, "sx24"},
+ {{"s25"}, "sx25"},
+ {{"s26"}, "sx26"},
+ {{"s27"}, "sx27"},
+ {{"s28"}, "sx28"},
+ {{"s29"}, "sx29"},
+ {{"s30"}, "sx30"},
+ {{"s31"}, "sx31"},
+ {{"s32"}, "sx32"},
+ {{"s33"}, "sx33"},
+ {{"s34"}, "sx34"},
+ {{"s35"}, "sx35"},
+ {{"s36"}, "sx36"},
+ {{"s37"}, "sx37"},
+ {{"s38"}, "sx38"},
+ {{"s39"}, "sx39"},
+ {{"s40"}, "sx40"},
+ {{"s41"}, "sx41"},
+ {{"s42"}, "sx42"},
+ {{"s43"}, "sx43"},
+ {{"s44"}, "sx44"},
+ {{"s45"}, "sx45"},
+ {{"s46"}, "sx46"},
+ {{"s47"}, "sx47"},
+ {{"s48"}, "sx48"},
+ {{"s49"}, "sx49"},
+ {{"s50"}, "sx50"},
+ {{"s51"}, "sx51"},
+ {{"s52"}, "sx52"},
+ {{"s53"}, "sx53"},
+ {{"s54"}, "sx54"},
+ {{"s55"}, "sx55"},
+ {{"s56"}, "sx56"},
+ {{"s57"}, "sx57"},
+ {{"s58"}, "sx58"},
+ {{"s59"}, "sx59"},
+ {{"s60"}, "sx60"},
+ {{"s61"}, "sx61"},
+ {{"s62"}, "sx62"},
+ {{"s63"}, "sx63"},
+ };
+ return llvm::makeArrayRef(GCCRegAliases);
+ }
+
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const override {
+ return false;
+ }
+
+ bool allowsLargerPreferedTypeAlignment() const override { return false; }
+};
+} // namespace targets
+} // namespace clang
+#endif // LLVM_CLANG_LIB_BASIC_TARGETS_VE_H
diff --git a/clang/lib/Basic/Targets/WebAssembly.cpp b/clang/lib/Basic/Targets/WebAssembly.cpp
index b16442b99b62..6746768090f5 100644
--- a/clang/lib/Basic/Targets/WebAssembly.cpp
+++ b/clang/lib/Basic/Targets/WebAssembly.cpp
@@ -33,6 +33,16 @@ const Builtin::Info WebAssemblyTargetInfo::BuiltinInfo[] = {
static constexpr llvm::StringLiteral ValidCPUNames[] = {
{"mvp"}, {"bleeding-edge"}, {"generic"}};
+StringRef WebAssemblyTargetInfo::getABI() const { return ABI; }
+
+bool WebAssemblyTargetInfo::setABI(const std::string &Name) {
+ if (Name != "mvp" && Name != "experimental-mv")
+ return false;
+
+ ABI = Name;
+ return true;
+}
+
bool WebAssemblyTargetInfo::hasFeature(StringRef Feature) const {
return llvm::StringSwitch<bool>(Feature)
.Case("simd128", SIMDLevel >= SIMD128)
@@ -45,6 +55,7 @@ bool WebAssemblyTargetInfo::hasFeature(StringRef Feature) const {
.Case("mutable-globals", HasMutableGlobals)
.Case("multivalue", HasMultivalue)
.Case("tail-call", HasTailCall)
+ .Case("reference-types", HasReferenceTypes)
.Default(false);
}
@@ -80,6 +91,8 @@ void WebAssemblyTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__wasm_multivalue__");
if (HasTailCall)
Builder.defineMacro("__wasm_tail_call__");
+ if (HasReferenceTypes)
+ Builder.defineMacro("__wasm_reference_types__");
}
void WebAssemblyTargetInfo::setSIMDLevel(llvm::StringMap<bool> &Features,
@@ -102,8 +115,10 @@ bool WebAssemblyTargetInfo::initFeatureMap(
if (CPU == "bleeding-edge") {
Features["nontrapping-fptoint"] = true;
Features["sign-ext"] = true;
+ Features["bulk-memory"] = true;
Features["atomics"] = true;
Features["mutable-globals"] = true;
+ Features["tail-call"] = true;
setSIMDLevel(Features, SIMD128);
}
// Other targets do not consider user-configured features here, but while we
@@ -126,6 +141,8 @@ bool WebAssemblyTargetInfo::initFeatureMap(
Features["multivalue"] = true;
if (HasTailCall)
Features["tail-call"] = true;
+ if (HasReferenceTypes)
+ Features["reference-types"] = true;
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
@@ -213,6 +230,14 @@ bool WebAssemblyTargetInfo::handleTargetFeatures(
HasTailCall = false;
continue;
}
+ if (Feature == "+reference-types") {
+ HasReferenceTypes = true;
+ continue;
+ }
+ if (Feature == "-reference-types") {
+ HasReferenceTypes = false;
+ continue;
+ }
Diags.Report(diag::err_opt_not_valid_with_opt)
<< Feature << "-target-feature";
diff --git a/clang/lib/Basic/Targets/WebAssembly.h b/clang/lib/Basic/Targets/WebAssembly.h
index 9665156b143f..77a2fe9ae117 100644
--- a/clang/lib/Basic/Targets/WebAssembly.h
+++ b/clang/lib/Basic/Targets/WebAssembly.h
@@ -38,6 +38,9 @@ class LLVM_LIBRARY_VISIBILITY WebAssemblyTargetInfo : public TargetInfo {
bool HasMutableGlobals = false;
bool HasMultivalue = false;
bool HasTailCall = false;
+ bool HasReferenceTypes = false;
+
+ std::string ABI;
public:
explicit WebAssemblyTargetInfo(const llvm::Triple &T, const TargetOptions &)
@@ -58,6 +61,9 @@ public:
IntPtrType = SignedLong;
}
+ StringRef getABI() const override;
+ bool setABI(const std::string &Name) override;
+
protected:
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
@@ -114,7 +120,22 @@ private:
? (IsSigned ? SignedLongLong : UnsignedLongLong)
: TargetInfo::getLeastIntTypeByWidth(BitWidth, IsSigned);
}
+
+ CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
+ switch (CC) {
+ case CC_C:
+ case CC_Swift:
+ return CCCR_OK;
+ default:
+ return CCCR_Warning;
+ }
+ }
+
+ bool hasExtIntType() const override { return true; }
+
+ bool hasProtectedVisibility() const override { return false; }
};
+
class LLVM_LIBRARY_VISIBILITY WebAssembly32TargetInfo
: public WebAssemblyTargetInfo {
public:
diff --git a/clang/lib/Basic/Targets/X86.cpp b/clang/lib/Basic/Targets/X86.cpp
index d099d3742f0b..543f232d2459 100644
--- a/clang/lib/Basic/Targets/X86.cpp
+++ b/clang/lib/Basic/Targets/X86.cpp
@@ -17,7 +17,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/X86TargetParser.h"
namespace clang {
namespace targets {
@@ -62,6 +62,7 @@ static const char *const GCCRegNames[] = {
"cr0", "cr2", "cr3", "cr4", "cr8",
"dr0", "dr1", "dr2", "dr3", "dr6", "dr7",
"bnd0", "bnd1", "bnd2", "bnd3",
+ "tmm0", "tmm1", "tmm2", "tmm3", "tmm4", "tmm5", "tmm6", "tmm7",
};
const TargetInfo::AddlRegName AddlRegNames[] = {
@@ -107,339 +108,15 @@ bool X86TargetInfo::initFeatureMap(
// FIXME: This *really* should not be here.
// X86_64 always has SSE2.
if (getTriple().getArch() == llvm::Triple::x86_64)
- setFeatureEnabledImpl(Features, "sse2", true);
+ setFeatureEnabled(Features, "sse2", true);
- const CPUKind Kind = getCPUKind(CPU);
+ using namespace llvm::X86;
- // Enable X87 for all X86 processors but Lakemont.
- if (Kind != CK_Lakemont)
- setFeatureEnabledImpl(Features, "x87", true);
+ SmallVector<StringRef, 16> CPUFeatures;
+ getFeaturesForCPU(CPU, CPUFeatures);
+ for (auto &F : CPUFeatures)
+ setFeatureEnabled(Features, F, true);
- // Enable cmpxchg8 for i586 and greater CPUs. Include generic for backwards
- // compatibility.
- if (Kind >= CK_i586 || Kind == CK_Generic)
- setFeatureEnabledImpl(Features, "cx8", true);
-
- switch (Kind) {
- case CK_Generic:
- case CK_i386:
- case CK_i486:
- case CK_i586:
- case CK_Pentium:
- case CK_PentiumPro:
- case CK_i686:
- case CK_Lakemont:
- break;
-
- case CK_Cooperlake:
- // CPX inherits all CLX features plus AVX512BF16
- setFeatureEnabledImpl(Features, "avx512bf16", true);
- LLVM_FALLTHROUGH;
- case CK_Cascadelake:
- // CLX inherits all SKX features plus AVX512VNNI
- setFeatureEnabledImpl(Features, "avx512vnni", true);
- LLVM_FALLTHROUGH;
- case CK_SkylakeServer:
- setFeatureEnabledImpl(Features, "avx512f", true);
- setFeatureEnabledImpl(Features, "avx512cd", true);
- setFeatureEnabledImpl(Features, "avx512dq", true);
- setFeatureEnabledImpl(Features, "avx512bw", true);
- setFeatureEnabledImpl(Features, "avx512vl", true);
- setFeatureEnabledImpl(Features, "clwb", true);
- setFeatureEnabledImpl(Features, "pku", true);
- // SkylakeServer cores inherits all SKL features, except SGX
- goto SkylakeCommon;
-
- case CK_Tigerlake:
- setFeatureEnabledImpl(Features, "avx512vp2intersect", true);
- setFeatureEnabledImpl(Features, "movdiri", true);
- setFeatureEnabledImpl(Features, "movdir64b", true);
- setFeatureEnabledImpl(Features, "shstk", true);
- // Tigerlake cores inherits IcelakeClient, except pconfig and wbnoinvd
- goto IcelakeCommon;
-
- case CK_IcelakeServer:
- setFeatureEnabledImpl(Features, "pconfig", true);
- setFeatureEnabledImpl(Features, "wbnoinvd", true);
- LLVM_FALLTHROUGH;
- case CK_IcelakeClient:
-IcelakeCommon:
- setFeatureEnabledImpl(Features, "vaes", true);
- setFeatureEnabledImpl(Features, "gfni", true);
- setFeatureEnabledImpl(Features, "vpclmulqdq", true);
- setFeatureEnabledImpl(Features, "avx512bitalg", true);
- setFeatureEnabledImpl(Features, "avx512vbmi2", true);
- setFeatureEnabledImpl(Features, "avx512vnni", true);
- setFeatureEnabledImpl(Features, "avx512vpopcntdq", true);
- setFeatureEnabledImpl(Features, "rdpid", true);
- setFeatureEnabledImpl(Features, "clwb", true);
- LLVM_FALLTHROUGH;
- case CK_Cannonlake:
- setFeatureEnabledImpl(Features, "avx512f", true);
- setFeatureEnabledImpl(Features, "avx512cd", true);
- setFeatureEnabledImpl(Features, "avx512dq", true);
- setFeatureEnabledImpl(Features, "avx512bw", true);
- setFeatureEnabledImpl(Features, "avx512vl", true);
- setFeatureEnabledImpl(Features, "avx512ifma", true);
- setFeatureEnabledImpl(Features, "avx512vbmi", true);
- setFeatureEnabledImpl(Features, "pku", true);
- setFeatureEnabledImpl(Features, "sha", true);
- LLVM_FALLTHROUGH;
- case CK_SkylakeClient:
- setFeatureEnabledImpl(Features, "sgx", true);
- // SkylakeServer cores inherits all SKL features, except SGX
-SkylakeCommon:
- setFeatureEnabledImpl(Features, "xsavec", true);
- setFeatureEnabledImpl(Features, "xsaves", true);
- setFeatureEnabledImpl(Features, "clflushopt", true);
- setFeatureEnabledImpl(Features, "aes", true);
- LLVM_FALLTHROUGH;
- case CK_Broadwell:
- setFeatureEnabledImpl(Features, "rdseed", true);
- setFeatureEnabledImpl(Features, "adx", true);
- setFeatureEnabledImpl(Features, "prfchw", true);
- LLVM_FALLTHROUGH;
- case CK_Haswell:
- setFeatureEnabledImpl(Features, "avx2", true);
- setFeatureEnabledImpl(Features, "lzcnt", true);
- setFeatureEnabledImpl(Features, "bmi", true);
- setFeatureEnabledImpl(Features, "bmi2", true);
- setFeatureEnabledImpl(Features, "fma", true);
- setFeatureEnabledImpl(Features, "invpcid", true);
- setFeatureEnabledImpl(Features, "movbe", true);
- LLVM_FALLTHROUGH;
- case CK_IvyBridge:
- setFeatureEnabledImpl(Features, "rdrnd", true);
- setFeatureEnabledImpl(Features, "f16c", true);
- setFeatureEnabledImpl(Features, "fsgsbase", true);
- LLVM_FALLTHROUGH;
- case CK_SandyBridge:
- setFeatureEnabledImpl(Features, "avx", true);
- setFeatureEnabledImpl(Features, "xsave", true);
- setFeatureEnabledImpl(Features, "xsaveopt", true);
- LLVM_FALLTHROUGH;
- case CK_Westmere:
- setFeatureEnabledImpl(Features, "pclmul", true);
- LLVM_FALLTHROUGH;
- case CK_Nehalem:
- setFeatureEnabledImpl(Features, "sse4.2", true);
- LLVM_FALLTHROUGH;
- case CK_Penryn:
- setFeatureEnabledImpl(Features, "sse4.1", true);
- LLVM_FALLTHROUGH;
- case CK_Core2:
- setFeatureEnabledImpl(Features, "ssse3", true);
- setFeatureEnabledImpl(Features, "sahf", true);
- LLVM_FALLTHROUGH;
- case CK_Nocona:
- setFeatureEnabledImpl(Features, "cx16", true);
- LLVM_FALLTHROUGH;
- case CK_Yonah:
- case CK_Prescott:
- setFeatureEnabledImpl(Features, "sse3", true);
- LLVM_FALLTHROUGH;
- case CK_PentiumM:
- case CK_Pentium4:
- case CK_x86_64:
- setFeatureEnabledImpl(Features, "sse2", true);
- LLVM_FALLTHROUGH;
- case CK_Pentium3:
- case CK_C3_2:
- setFeatureEnabledImpl(Features, "sse", true);
- LLVM_FALLTHROUGH;
- case CK_Pentium2:
- setFeatureEnabledImpl(Features, "fxsr", true);
- LLVM_FALLTHROUGH;
- case CK_PentiumMMX:
- case CK_K6:
- case CK_WinChipC6:
- setFeatureEnabledImpl(Features, "mmx", true);
- break;
-
- case CK_Tremont:
- setFeatureEnabledImpl(Features, "cldemote", true);
- setFeatureEnabledImpl(Features, "movdiri", true);
- setFeatureEnabledImpl(Features, "movdir64b", true);
- setFeatureEnabledImpl(Features, "gfni", true);
- setFeatureEnabledImpl(Features, "waitpkg", true);
- LLVM_FALLTHROUGH;
- case CK_GoldmontPlus:
- setFeatureEnabledImpl(Features, "ptwrite", true);
- setFeatureEnabledImpl(Features, "rdpid", true);
- setFeatureEnabledImpl(Features, "sgx", true);
- LLVM_FALLTHROUGH;
- case CK_Goldmont:
- setFeatureEnabledImpl(Features, "sha", true);
- setFeatureEnabledImpl(Features, "rdseed", true);
- setFeatureEnabledImpl(Features, "xsave", true);
- setFeatureEnabledImpl(Features, "xsaveopt", true);
- setFeatureEnabledImpl(Features, "xsavec", true);
- setFeatureEnabledImpl(Features, "xsaves", true);
- setFeatureEnabledImpl(Features, "clflushopt", true);
- setFeatureEnabledImpl(Features, "fsgsbase", true);
- setFeatureEnabledImpl(Features, "aes", true);
- LLVM_FALLTHROUGH;
- case CK_Silvermont:
- setFeatureEnabledImpl(Features, "rdrnd", true);
- setFeatureEnabledImpl(Features, "pclmul", true);
- setFeatureEnabledImpl(Features, "sse4.2", true);
- setFeatureEnabledImpl(Features, "prfchw", true);
- LLVM_FALLTHROUGH;
- case CK_Bonnell:
- setFeatureEnabledImpl(Features, "movbe", true);
- setFeatureEnabledImpl(Features, "ssse3", true);
- setFeatureEnabledImpl(Features, "fxsr", true);
- setFeatureEnabledImpl(Features, "cx16", true);
- setFeatureEnabledImpl(Features, "sahf", true);
- setFeatureEnabledImpl(Features, "mmx", true);
- break;
-
- case CK_KNM:
- // TODO: Add avx5124fmaps/avx5124vnniw.
- setFeatureEnabledImpl(Features, "avx512vpopcntdq", true);
- LLVM_FALLTHROUGH;
- case CK_KNL:
- setFeatureEnabledImpl(Features, "avx512f", true);
- setFeatureEnabledImpl(Features, "avx512cd", true);
- setFeatureEnabledImpl(Features, "avx512er", true);
- setFeatureEnabledImpl(Features, "avx512pf", true);
- setFeatureEnabledImpl(Features, "prfchw", true);
- setFeatureEnabledImpl(Features, "prefetchwt1", true);
- setFeatureEnabledImpl(Features, "fxsr", true);
- setFeatureEnabledImpl(Features, "rdseed", true);
- setFeatureEnabledImpl(Features, "adx", true);
- setFeatureEnabledImpl(Features, "lzcnt", true);
- setFeatureEnabledImpl(Features, "bmi", true);
- setFeatureEnabledImpl(Features, "bmi2", true);
- setFeatureEnabledImpl(Features, "fma", true);
- setFeatureEnabledImpl(Features, "rdrnd", true);
- setFeatureEnabledImpl(Features, "f16c", true);
- setFeatureEnabledImpl(Features, "fsgsbase", true);
- setFeatureEnabledImpl(Features, "aes", true);
- setFeatureEnabledImpl(Features, "pclmul", true);
- setFeatureEnabledImpl(Features, "cx16", true);
- setFeatureEnabledImpl(Features, "xsaveopt", true);
- setFeatureEnabledImpl(Features, "xsave", true);
- setFeatureEnabledImpl(Features, "movbe", true);
- setFeatureEnabledImpl(Features, "sahf", true);
- setFeatureEnabledImpl(Features, "mmx", true);
- break;
-
- case CK_K6_2:
- case CK_K6_3:
- case CK_WinChip2:
- case CK_C3:
- setFeatureEnabledImpl(Features, "3dnow", true);
- break;
-
- case CK_AMDFAM10:
- setFeatureEnabledImpl(Features, "sse4a", true);
- setFeatureEnabledImpl(Features, "lzcnt", true);
- setFeatureEnabledImpl(Features, "popcnt", true);
- setFeatureEnabledImpl(Features, "sahf", true);
- LLVM_FALLTHROUGH;
- case CK_K8SSE3:
- setFeatureEnabledImpl(Features, "sse3", true);
- LLVM_FALLTHROUGH;
- case CK_K8:
- setFeatureEnabledImpl(Features, "sse2", true);
- LLVM_FALLTHROUGH;
- case CK_AthlonXP:
- setFeatureEnabledImpl(Features, "sse", true);
- setFeatureEnabledImpl(Features, "fxsr", true);
- LLVM_FALLTHROUGH;
- case CK_Athlon:
- case CK_Geode:
- setFeatureEnabledImpl(Features, "3dnowa", true);
- break;
-
- case CK_BTVER2:
- setFeatureEnabledImpl(Features, "avx", true);
- setFeatureEnabledImpl(Features, "aes", true);
- setFeatureEnabledImpl(Features, "pclmul", true);
- setFeatureEnabledImpl(Features, "bmi", true);
- setFeatureEnabledImpl(Features, "f16c", true);
- setFeatureEnabledImpl(Features, "xsaveopt", true);
- setFeatureEnabledImpl(Features, "movbe", true);
- LLVM_FALLTHROUGH;
- case CK_BTVER1:
- setFeatureEnabledImpl(Features, "ssse3", true);
- setFeatureEnabledImpl(Features, "sse4a", true);
- setFeatureEnabledImpl(Features, "lzcnt", true);
- setFeatureEnabledImpl(Features, "popcnt", true);
- setFeatureEnabledImpl(Features, "prfchw", true);
- setFeatureEnabledImpl(Features, "cx16", true);
- setFeatureEnabledImpl(Features, "fxsr", true);
- setFeatureEnabledImpl(Features, "sahf", true);
- setFeatureEnabledImpl(Features, "mmx", true);
- break;
-
- case CK_ZNVER2:
- setFeatureEnabledImpl(Features, "clwb", true);
- setFeatureEnabledImpl(Features, "rdpid", true);
- setFeatureEnabledImpl(Features, "wbnoinvd", true);
- LLVM_FALLTHROUGH;
- case CK_ZNVER1:
- setFeatureEnabledImpl(Features, "adx", true);
- setFeatureEnabledImpl(Features, "aes", true);
- setFeatureEnabledImpl(Features, "avx2", true);
- setFeatureEnabledImpl(Features, "bmi", true);
- setFeatureEnabledImpl(Features, "bmi2", true);
- setFeatureEnabledImpl(Features, "clflushopt", true);
- setFeatureEnabledImpl(Features, "clzero", true);
- setFeatureEnabledImpl(Features, "cx16", true);
- setFeatureEnabledImpl(Features, "f16c", true);
- setFeatureEnabledImpl(Features, "fma", true);
- setFeatureEnabledImpl(Features, "fsgsbase", true);
- setFeatureEnabledImpl(Features, "fxsr", true);
- setFeatureEnabledImpl(Features, "lzcnt", true);
- setFeatureEnabledImpl(Features, "mmx", true);
- setFeatureEnabledImpl(Features, "mwaitx", true);
- setFeatureEnabledImpl(Features, "movbe", true);
- setFeatureEnabledImpl(Features, "pclmul", true);
- setFeatureEnabledImpl(Features, "popcnt", true);
- setFeatureEnabledImpl(Features, "prfchw", true);
- setFeatureEnabledImpl(Features, "rdrnd", true);
- setFeatureEnabledImpl(Features, "rdseed", true);
- setFeatureEnabledImpl(Features, "sahf", true);
- setFeatureEnabledImpl(Features, "sha", true);
- setFeatureEnabledImpl(Features, "sse4a", true);
- setFeatureEnabledImpl(Features, "xsave", true);
- setFeatureEnabledImpl(Features, "xsavec", true);
- setFeatureEnabledImpl(Features, "xsaveopt", true);
- setFeatureEnabledImpl(Features, "xsaves", true);
- break;
-
- case CK_BDVER4:
- setFeatureEnabledImpl(Features, "avx2", true);
- setFeatureEnabledImpl(Features, "bmi2", true);
- setFeatureEnabledImpl(Features, "mwaitx", true);
- LLVM_FALLTHROUGH;
- case CK_BDVER3:
- setFeatureEnabledImpl(Features, "fsgsbase", true);
- setFeatureEnabledImpl(Features, "xsaveopt", true);
- LLVM_FALLTHROUGH;
- case CK_BDVER2:
- setFeatureEnabledImpl(Features, "bmi", true);
- setFeatureEnabledImpl(Features, "fma", true);
- setFeatureEnabledImpl(Features, "f16c", true);
- setFeatureEnabledImpl(Features, "tbm", true);
- LLVM_FALLTHROUGH;
- case CK_BDVER1:
- // xop implies avx, sse4a and fma4.
- setFeatureEnabledImpl(Features, "xop", true);
- setFeatureEnabledImpl(Features, "lwp", true);
- setFeatureEnabledImpl(Features, "lzcnt", true);
- setFeatureEnabledImpl(Features, "aes", true);
- setFeatureEnabledImpl(Features, "pclmul", true);
- setFeatureEnabledImpl(Features, "prfchw", true);
- setFeatureEnabledImpl(Features, "cx16", true);
- setFeatureEnabledImpl(Features, "fxsr", true);
- setFeatureEnabledImpl(Features, "xsave", true);
- setFeatureEnabledImpl(Features, "sahf", true);
- setFeatureEnabledImpl(Features, "mmx", true);
- break;
- }
if (!TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec))
return false;
@@ -452,12 +129,6 @@ SkylakeCommon:
llvm::find(FeaturesVec, "-popcnt") == FeaturesVec.end())
Features["popcnt"] = true;
- // Enable prfchw if 3DNow! is enabled and prfchw is not explicitly disabled.
- I = Features.find("3dnow");
- if (I != Features.end() && I->getValue() &&
- llvm::find(FeaturesVec, "-prfchw") == FeaturesVec.end())
- Features["prfchw"] = true;
-
// Additionally, if SSE is enabled and mmx is not explicitly disabled,
// then enable MMX.
I = Features.find("sse");
@@ -465,264 +136,34 @@ SkylakeCommon:
llvm::find(FeaturesVec, "-mmx") == FeaturesVec.end())
Features["mmx"] = true;
- return true;
-}
-
-void X86TargetInfo::setSSELevel(llvm::StringMap<bool> &Features,
- X86SSEEnum Level, bool Enabled) {
- if (Enabled) {
- switch (Level) {
- case AVX512F:
- Features["avx512f"] = true;
- Features["fma"] = true;
- Features["f16c"] = true;
- LLVM_FALLTHROUGH;
- case AVX2:
- Features["avx2"] = true;
- LLVM_FALLTHROUGH;
- case AVX:
- Features["avx"] = true;
- Features["xsave"] = true;
- LLVM_FALLTHROUGH;
- case SSE42:
- Features["sse4.2"] = true;
- LLVM_FALLTHROUGH;
- case SSE41:
- Features["sse4.1"] = true;
- LLVM_FALLTHROUGH;
- case SSSE3:
- Features["ssse3"] = true;
- LLVM_FALLTHROUGH;
- case SSE3:
- Features["sse3"] = true;
- LLVM_FALLTHROUGH;
- case SSE2:
- Features["sse2"] = true;
- LLVM_FALLTHROUGH;
- case SSE1:
- Features["sse"] = true;
- LLVM_FALLTHROUGH;
- case NoSSE:
- break;
- }
- return;
- }
-
- switch (Level) {
- case NoSSE:
- case SSE1:
- Features["sse"] = false;
- LLVM_FALLTHROUGH;
- case SSE2:
- Features["sse2"] = Features["pclmul"] = Features["aes"] = false;
- Features["sha"] = Features["gfni"] = false;
- LLVM_FALLTHROUGH;
- case SSE3:
- Features["sse3"] = false;
- setXOPLevel(Features, NoXOP, false);
- LLVM_FALLTHROUGH;
- case SSSE3:
- Features["ssse3"] = false;
- LLVM_FALLTHROUGH;
- case SSE41:
- Features["sse4.1"] = false;
- LLVM_FALLTHROUGH;
- case SSE42:
- Features["sse4.2"] = false;
- LLVM_FALLTHROUGH;
- case AVX:
- Features["fma"] = Features["avx"] = Features["f16c"] = false;
- Features["xsave"] = Features["xsaveopt"] = Features["vaes"] = false;
- Features["vpclmulqdq"] = false;
- setXOPLevel(Features, FMA4, false);
- LLVM_FALLTHROUGH;
- case AVX2:
- Features["avx2"] = false;
- LLVM_FALLTHROUGH;
- case AVX512F:
- Features["avx512f"] = Features["avx512cd"] = Features["avx512er"] = false;
- Features["avx512pf"] = Features["avx512dq"] = Features["avx512bw"] = false;
- Features["avx512vl"] = Features["avx512vbmi"] = false;
- Features["avx512ifma"] = Features["avx512vpopcntdq"] = false;
- Features["avx512bitalg"] = Features["avx512vnni"] = false;
- Features["avx512vbmi2"] = Features["avx512bf16"] = false;
- Features["avx512vp2intersect"] = false;
- break;
- }
-}
-
-void X86TargetInfo::setMMXLevel(llvm::StringMap<bool> &Features,
- MMX3DNowEnum Level, bool Enabled) {
- if (Enabled) {
- switch (Level) {
- case AMD3DNowAthlon:
- Features["3dnowa"] = true;
- LLVM_FALLTHROUGH;
- case AMD3DNow:
- Features["3dnow"] = true;
- LLVM_FALLTHROUGH;
- case MMX:
- Features["mmx"] = true;
- LLVM_FALLTHROUGH;
- case NoMMX3DNow:
- break;
- }
- return;
- }
-
- switch (Level) {
- case NoMMX3DNow:
- case MMX:
- Features["mmx"] = false;
- LLVM_FALLTHROUGH;
- case AMD3DNow:
- Features["3dnow"] = false;
- LLVM_FALLTHROUGH;
- case AMD3DNowAthlon:
- Features["3dnowa"] = false;
- break;
- }
-}
-
-void X86TargetInfo::setXOPLevel(llvm::StringMap<bool> &Features, XOPEnum Level,
- bool Enabled) {
- if (Enabled) {
- switch (Level) {
- case XOP:
- Features["xop"] = true;
- LLVM_FALLTHROUGH;
- case FMA4:
- Features["fma4"] = true;
- setSSELevel(Features, AVX, true);
- LLVM_FALLTHROUGH;
- case SSE4A:
- Features["sse4a"] = true;
- setSSELevel(Features, SSE3, true);
- LLVM_FALLTHROUGH;
- case NoXOP:
- break;
- }
- return;
- }
+ // Enable xsave if avx is enabled and xsave is not explicitly disabled.
+ I = Features.find("avx");
+ if (I != Features.end() && I->getValue() &&
+ llvm::find(FeaturesVec, "-xsave") == FeaturesVec.end())
+ Features["xsave"] = true;
- switch (Level) {
- case NoXOP:
- case SSE4A:
- Features["sse4a"] = false;
- LLVM_FALLTHROUGH;
- case FMA4:
- Features["fma4"] = false;
- LLVM_FALLTHROUGH;
- case XOP:
- Features["xop"] = false;
- break;
- }
+ return true;
}
-void X86TargetInfo::setFeatureEnabledImpl(llvm::StringMap<bool> &Features,
- StringRef Name, bool Enabled) {
- // This is a bit of a hack to deal with the sse4 target feature when used
- // as part of the target attribute. We handle sse4 correctly everywhere
- // else. See below for more information on how we handle the sse4 options.
- if (Name != "sse4")
- Features[Name] = Enabled;
-
- if (Name == "mmx") {
- setMMXLevel(Features, MMX, Enabled);
- } else if (Name == "sse") {
- setSSELevel(Features, SSE1, Enabled);
- } else if (Name == "sse2") {
- setSSELevel(Features, SSE2, Enabled);
- } else if (Name == "sse3") {
- setSSELevel(Features, SSE3, Enabled);
- } else if (Name == "ssse3") {
- setSSELevel(Features, SSSE3, Enabled);
- } else if (Name == "sse4.2") {
- setSSELevel(Features, SSE42, Enabled);
- } else if (Name == "sse4.1") {
- setSSELevel(Features, SSE41, Enabled);
- } else if (Name == "3dnow") {
- setMMXLevel(Features, AMD3DNow, Enabled);
- } else if (Name == "3dnowa") {
- setMMXLevel(Features, AMD3DNowAthlon, Enabled);
- } else if (Name == "aes") {
- if (Enabled)
- setSSELevel(Features, SSE2, Enabled);
- else
- Features["vaes"] = false;
- } else if (Name == "vaes") {
- if (Enabled) {
- setSSELevel(Features, AVX, Enabled);
- Features["aes"] = true;
- }
- } else if (Name == "pclmul") {
- if (Enabled)
- setSSELevel(Features, SSE2, Enabled);
- else
- Features["vpclmulqdq"] = false;
- } else if (Name == "vpclmulqdq") {
- if (Enabled) {
- setSSELevel(Features, AVX, Enabled);
- Features["pclmul"] = true;
- }
- } else if (Name == "gfni") {
- if (Enabled)
- setSSELevel(Features, SSE2, Enabled);
- } else if (Name == "avx") {
- setSSELevel(Features, AVX, Enabled);
- } else if (Name == "avx2") {
- setSSELevel(Features, AVX2, Enabled);
- } else if (Name == "avx512f") {
- setSSELevel(Features, AVX512F, Enabled);
- } else if (Name.startswith("avx512")) {
- if (Enabled)
- setSSELevel(Features, AVX512F, Enabled);
- // Enable BWI instruction if certain features are being enabled.
- if ((Name == "avx512vbmi" || Name == "avx512vbmi2" ||
- Name == "avx512bitalg" || Name == "avx512bf16") && Enabled)
- Features["avx512bw"] = true;
- // Also disable some features if BWI is being disabled.
- if (Name == "avx512bw" && !Enabled) {
- Features["avx512vbmi"] = false;
- Features["avx512vbmi2"] = false;
- Features["avx512bitalg"] = false;
- Features["avx512bf16"] = false;
- }
- } else if (Name == "fma") {
- if (Enabled)
- setSSELevel(Features, AVX, Enabled);
- else
- setSSELevel(Features, AVX512F, Enabled);
- } else if (Name == "fma4") {
- setXOPLevel(Features, FMA4, Enabled);
- } else if (Name == "xop") {
- setXOPLevel(Features, XOP, Enabled);
- } else if (Name == "sse4a") {
- setXOPLevel(Features, SSE4A, Enabled);
- } else if (Name == "f16c") {
- if (Enabled)
- setSSELevel(Features, AVX, Enabled);
- else
- setSSELevel(Features, AVX512F, Enabled);
- } else if (Name == "sha") {
- if (Enabled)
- setSSELevel(Features, SSE2, Enabled);
- } else if (Name == "sse4") {
+void X86TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
+ StringRef Name, bool Enabled) const {
+ if (Name == "sse4") {
// We can get here via the __target__ attribute since that's not controlled
// via the -msse4/-mno-sse4 command line alias. Handle this the same way
// here - turn on the sse4.2 if enabled, turn off the sse4.1 level if
// disabled.
if (Enabled)
- setSSELevel(Features, SSE42, Enabled);
+ Name = "sse4.2";
else
- setSSELevel(Features, SSE41, Enabled);
- } else if (Name == "xsave") {
- if (!Enabled)
- Features["xsaveopt"] = false;
- } else if (Name == "xsaveopt" || Name == "xsavec" || Name == "xsaves") {
- if (Enabled)
- Features["xsave"] = true;
+ Name = "sse4.1";
}
+
+ Features[Name] = Enabled;
+
+ SmallVector<StringRef, 8> ImpliedFeatures;
+ llvm::X86::getImpliedFeatures(Name, Enabled, ImpliedFeatures);
+ for (const auto &F : ImpliedFeatures)
+ Features[F] = Enabled;
}
/// handleTargetFeatures - Perform initialization based on the user
@@ -857,6 +298,16 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasINVPCID = true;
} else if (Feature == "+enqcmd") {
HasENQCMD = true;
+ } else if (Feature == "+amx-bf16") {
+ HasAMXBF16 = true;
+ } else if (Feature == "+amx-int8") {
+ HasAMXINT8 = true;
+ } else if (Feature == "+amx-tile") {
+ HasAMXTILE = true;
+ } else if (Feature == "+serialize") {
+ HasSERIALIZE = true;
+ } else if (Feature == "+tsxldtrk") {
+ HasTSXLDTRK = true;
}
X86SSEEnum Level = llvm::StringSwitch<X86SSEEnum>(Feature)
@@ -911,7 +362,7 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
std::string CodeModel = getTargetOpts().CodeModel;
if (CodeModel == "default")
CodeModel = "small";
- Builder.defineMacro("__code_model_" + CodeModel + "_");
+ Builder.defineMacro("__code_model_" + CodeModel + "__");
// Target identification.
if (getTriple().getArch() == llvm::Triple::x86_64) {
@@ -935,8 +386,9 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
// Subtarget options.
// FIXME: We are hard-coding the tune parameters based on the CPU, but they
// truly should be based on -mtune options.
+ using namespace llvm::X86;
switch (CPU) {
- case CK_Generic:
+ case CK_None:
break;
case CK_i386:
// The rest are coming from the i386 define above.
@@ -1247,6 +699,16 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__INVPCID__");
if (HasENQCMD)
Builder.defineMacro("__ENQCMD__");
+ if (HasAMXTILE)
+ Builder.defineMacro("__AMXTILE__");
+ if (HasAMXINT8)
+ Builder.defineMacro("__AMXINT8__");
+ if (HasAMXBF16)
+ Builder.defineMacro("__AMXBF16__");
+ if (HasSERIALIZE)
+ Builder.defineMacro("__SERIALIZE__");
+ if (HasTSXLDTRK)
+ Builder.defineMacro("__TSXLDTRK__");
// Each case falls through to the previous one here.
switch (SSELevel) {
@@ -1319,7 +781,7 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
break;
}
- if (CPU >= CK_i486 || CPU == CK_Generic) {
+ if (CPU >= CK_i486 || CPU == CK_None) {
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
@@ -1339,6 +801,9 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("3dnowa", true)
.Case("adx", true)
.Case("aes", true)
+ .Case("amx-bf16", true)
+ .Case("amx-int8", true)
+ .Case("amx-tile", true)
.Case("avx", true)
.Case("avx2", true)
.Case("avx512f", true)
@@ -1390,6 +855,7 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("rdseed", true)
.Case("rtm", true)
.Case("sahf", true)
+ .Case("serialize", true)
.Case("sgx", true)
.Case("sha", true)
.Case("shstk", true)
@@ -1402,6 +868,7 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("sse4.2", true)
.Case("sse4a", true)
.Case("tbm", true)
+ .Case("tsxldtrk", true)
.Case("vaes", true)
.Case("vpclmulqdq", true)
.Case("wbnoinvd", true)
@@ -1419,6 +886,9 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
return llvm::StringSwitch<bool>(Feature)
.Case("adx", HasADX)
.Case("aes", HasAES)
+ .Case("amx-bf16", HasAMXBF16)
+ .Case("amx-int8", HasAMXINT8)
+ .Case("amx-tile", HasAMXTILE)
.Case("avx", SSELevel >= AVX)
.Case("avx2", SSELevel >= AVX2)
.Case("avx512f", SSELevel >= AVX512F)
@@ -1474,6 +944,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("retpoline-external-thunk", HasRetpolineExternalThunk)
.Case("rtm", HasRTM)
.Case("sahf", HasLAHFSAHF)
+ .Case("serialize", HasSERIALIZE)
.Case("sgx", HasSGX)
.Case("sha", HasSHA)
.Case("shstk", HasSHSTK)
@@ -1485,6 +956,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("sse4.2", SSELevel >= SSE42)
.Case("sse4a", XOPLevel >= SSE4A)
.Case("tbm", HasTBM)
+ .Case("tsxldtrk", HasTSXLDTRK)
.Case("vaes", HasVAES)
.Case("vpclmulqdq", HasVPCLMULQDQ)
.Case("wbnoinvd", HasWBNOINVD)
@@ -1507,14 +979,14 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
// X86TargetInfo::hasFeature for a somewhat comprehensive list).
bool X86TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
return llvm::StringSwitch<bool>(FeatureStr)
-#define X86_FEATURE_COMPAT(VAL, ENUM, STR) .Case(STR, true)
+#define X86_FEATURE_COMPAT(ENUM, STR) .Case(STR, true)
#include "llvm/Support/X86TargetParser.def"
.Default(false);
}
static llvm::X86::ProcessorFeatures getFeature(StringRef Name) {
return llvm::StringSwitch<llvm::X86::ProcessorFeatures>(Name)
-#define X86_FEATURE_COMPAT(VAL, ENUM, STR) .Case(STR, llvm::X86::ENUM)
+#define X86_FEATURE_COMPAT(ENUM, STR) .Case(STR, llvm::X86::FEATURE_##ENUM)
#include "llvm/Support/X86TargetParser.def"
;
// Note, this function should only be used after ensuring the value is
@@ -1539,17 +1011,11 @@ static unsigned getFeaturePriority(llvm::X86::ProcessorFeatures Feat) {
unsigned X86TargetInfo::multiVersionSortPriority(StringRef Name) const {
// Valid CPUs have a 'key feature' that compares just better than its key
// feature.
- CPUKind Kind = getCPUKind(Name);
- if (Kind != CK_Generic) {
- switch (Kind) {
- default:
- llvm_unreachable(
- "CPU Type without a key feature used in 'target' attribute");
-#define PROC_WITH_FEAT(ENUM, STR, IS64, KEY_FEAT) \
- case CK_##ENUM: \
- return (getFeaturePriority(llvm::X86::KEY_FEAT) << 1) + 1;
-#include "clang/Basic/X86Target.def"
- }
+ using namespace llvm::X86;
+ CPUKind Kind = parseArchX86(Name);
+ if (Kind != CK_None) {
+ ProcessorFeatures KeyFeature = getKeyFeature(Kind);
+ return (getFeaturePriority(KeyFeature) << 1) + 1;
}
// Now we know we have a feature, so get its priority and shift it a few so
@@ -1596,10 +1062,9 @@ void X86TargetInfo::getCPUSpecificCPUDispatchFeatures(
bool X86TargetInfo::validateCpuIs(StringRef FeatureStr) const {
return llvm::StringSwitch<bool>(FeatureStr)
#define X86_VENDOR(ENUM, STRING) .Case(STRING, true)
-#define X86_CPU_TYPE_COMPAT_WITH_ALIAS(ARCHNAME, ENUM, STR, ALIAS) \
- .Cases(STR, ALIAS, true)
-#define X86_CPU_TYPE_COMPAT(ARCHNAME, ENUM, STR) .Case(STR, true)
-#define X86_CPU_SUBTYPE_COMPAT(ARCHNAME, ENUM, STR) .Case(STR, true)
+#define X86_CPU_TYPE_ALIAS(ENUM, ALIAS) .Case(ALIAS, true)
+#define X86_CPU_TYPE(ENUM, STR) .Case(STR, true)
+#define X86_CPU_SUBTYPE(ENUM, STR) .Case(STR, true)
#include "llvm/Support/X86TargetParser.def"
.Default(false);
}
@@ -1679,8 +1144,7 @@ bool X86TargetInfo::validateAsmConstraint(
switch (*Name) {
default:
return false;
- case 'z':
- case '0': // First SSE register.
+ case 'z': // First SSE register.
case '2':
case 't': // Any SSE register, when SSE2 is enabled.
case 'i': // Any SSE register, when SSE2 and inter-unit moves enabled.
@@ -1731,6 +1195,121 @@ bool X86TargetInfo::validateAsmConstraint(
}
}
+// Below is based on the following information:
+// +------------------------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+
+// | Processor Name | Cache Line Size (Bytes) | Source |
+// +------------------------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+
+// | i386 | 64 | https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf |
+// | i486                               | 16                      | "four doublewords" (doubleword = 32 bits, 4 * 32 bits = 128 bits = 16 bytes) https://en.wikichip.org/w/images/d/d3/i486_MICROPROCESSOR_HARDWARE_REFERENCE_MANUAL_%281990%29.pdf and http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.126.4216&rep=rep1&type=pdf (page 29) |
+// | i586/Pentium MMX | 32 | https://www.7-cpu.com/cpu/P-MMX.html |
+// | i686/Pentium | 32 | https://www.7-cpu.com/cpu/P6.html |
+// | Netburst/Pentium4 | 64 | https://www.7-cpu.com/cpu/P4-180.html |
+// | Atom | 64 | https://www.7-cpu.com/cpu/Atom.html |
+// | Westmere | 64 | https://en.wikichip.org/wiki/intel/microarchitectures/sandy_bridge_(client) "Cache Architecture" |
+// | Sandy Bridge | 64 | https://en.wikipedia.org/wiki/Sandy_Bridge and https://www.7-cpu.com/cpu/SandyBridge.html |
+// | Ivy Bridge | 64 | https://blog.stuffedcow.net/2013/01/ivb-cache-replacement/ and https://www.7-cpu.com/cpu/IvyBridge.html |
+// | Haswell | 64 | https://www.7-cpu.com/cpu/Haswell.html |
+// | Broadwell                          | 64                      | https://www.7-cpu.com/cpu/Broadwell.html                                                                                                                     |
+// | Skylake (including skylake-avx512) | 64 | https://www.nas.nasa.gov/hecc/support/kb/skylake-processors_550.html "Cache Hierarchy" |
+// | Cascade Lake | 64 | https://www.nas.nasa.gov/hecc/support/kb/cascade-lake-processors_579.html "Cache Hierarchy" |
+// | Skylake | 64 | https://en.wikichip.org/wiki/intel/microarchitectures/kaby_lake "Memory Hierarchy" |
+// | Ice Lake | 64 | https://www.7-cpu.com/cpu/Ice_Lake.html |
+// | Knights Landing | 64 | https://software.intel.com/en-us/articles/intel-xeon-phi-processor-7200-family-memory-management-optimizations "The Intel® Xeon Phi™ Processor Architecture" |
+// | Knights Mill | 64 | https://software.intel.com/sites/default/files/managed/9e/bc/64-ia-32-architectures-optimization-manual.pdf?countrylabel=Colombia "2.5.5.2 L1 DCache " |
+// +------------------------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+
+Optional<unsigned> X86TargetInfo::getCPUCacheLineSize() const {
+ using namespace llvm::X86;
+ switch (CPU) {
+ // i386
+ case CK_i386:
+ // i486
+ case CK_i486:
+ case CK_WinChipC6:
+ case CK_WinChip2:
+ case CK_C3:
+ // Lakemont
+ case CK_Lakemont:
+ return 16;
+
+ // i586
+ case CK_i586:
+ case CK_Pentium:
+ case CK_PentiumMMX:
+ // i686
+ case CK_PentiumPro:
+ case CK_i686:
+ case CK_Pentium2:
+ case CK_Pentium3:
+ case CK_PentiumM:
+ case CK_C3_2:
+ // K6
+ case CK_K6:
+ case CK_K6_2:
+ case CK_K6_3:
+ // Geode
+ case CK_Geode:
+ return 32;
+
+ // Netburst
+ case CK_Pentium4:
+ case CK_Prescott:
+ case CK_Nocona:
+ // Atom
+ case CK_Bonnell:
+ case CK_Silvermont:
+ case CK_Goldmont:
+ case CK_GoldmontPlus:
+ case CK_Tremont:
+
+ case CK_Westmere:
+ case CK_SandyBridge:
+ case CK_IvyBridge:
+ case CK_Haswell:
+ case CK_Broadwell:
+ case CK_SkylakeClient:
+ case CK_SkylakeServer:
+ case CK_Cascadelake:
+ case CK_Nehalem:
+ case CK_Cooperlake:
+ case CK_Cannonlake:
+ case CK_Tigerlake:
+ case CK_IcelakeClient:
+ case CK_IcelakeServer:
+ case CK_KNL:
+ case CK_KNM:
+ // K7
+ case CK_Athlon:
+ case CK_AthlonXP:
+ // K8
+ case CK_K8:
+ case CK_K8SSE3:
+ case CK_AMDFAM10:
+ // Bobcat
+ case CK_BTVER1:
+ case CK_BTVER2:
+ // Bulldozer
+ case CK_BDVER1:
+ case CK_BDVER2:
+ case CK_BDVER3:
+ case CK_BDVER4:
+ // Zen
+ case CK_ZNVER1:
+ case CK_ZNVER2:
+ // Deprecated
+ case CK_x86_64:
+ case CK_Yonah:
+ case CK_Penryn:
+ case CK_Core2:
+ return 64;
+
+ // The following currently have unknown cache line sizes (but they are probably all 64):
+ // Core
+ case CK_None:
+ return None;
+ }
+ llvm_unreachable("Unknown CPU kind");
+}
+
bool X86TargetInfo::validateOutputSize(const llvm::StringMap<bool> &FeatureMap,
StringRef Constraint,
unsigned Size) const {
@@ -1771,9 +1350,14 @@ bool X86TargetInfo::validateOperandSize(const llvm::StringMap<bool> &FeatureMap,
case 'k':
return Size <= 64;
case 'z':
- case '0':
- // XMM0
- if (FeatureMap.lookup("sse"))
+ // XMM0/YMM/ZMM0
+ if (FeatureMap.lookup("avx512f"))
+ // ZMM0 can be used if target supports AVX512F.
+ return Size <= 512U;
+ else if (FeatureMap.lookup("avx"))
+ // YMM0 can be used if target supports AVX.
+ return Size <= 256U;
+ else if (FeatureMap.lookup("sse"))
return Size <= 128U;
return false;
case 'i':
@@ -1784,7 +1368,7 @@ bool X86TargetInfo::validateOperandSize(const llvm::StringMap<bool> &FeatureMap,
return false;
break;
}
- LLVM_FALLTHROUGH;
+ break;
case 'v':
case 'x':
if (FeatureMap.lookup("avx512f"))
@@ -1839,7 +1423,6 @@ std::string X86TargetInfo::convertConstraint(const char *&Constraint) const {
case 'i':
case 't':
case 'z':
- case '0':
case '2':
// "^" hints llvm that this is a 2 letter constraint.
// "Constraint++" is used to promote the string iterator
@@ -1852,38 +1435,9 @@ std::string X86TargetInfo::convertConstraint(const char *&Constraint) const {
}
}
-bool X86TargetInfo::checkCPUKind(CPUKind Kind) const {
- // Perform any per-CPU checks necessary to determine if this CPU is
- // acceptable.
- switch (Kind) {
- case CK_Generic:
- // No processor selected!
- return false;
-#define PROC(ENUM, STRING, IS64BIT) \
- case CK_##ENUM: \
- return IS64BIT || getTriple().getArch() == llvm::Triple::x86;
-#include "clang/Basic/X86Target.def"
- }
- llvm_unreachable("Unhandled CPU kind");
-}
-
void X86TargetInfo::fillValidCPUList(SmallVectorImpl<StringRef> &Values) const {
-#define PROC(ENUM, STRING, IS64BIT) \
- if (IS64BIT || getTriple().getArch() == llvm::Triple::x86) \
- Values.emplace_back(STRING);
- // For aliases we need to lookup the CPUKind to check get the 64-bit ness.
-#define PROC_ALIAS(ENUM, ALIAS) \
- if (checkCPUKind(CK_##ENUM)) \
- Values.emplace_back(ALIAS);
-#include "clang/Basic/X86Target.def"
-}
-
-X86TargetInfo::CPUKind X86TargetInfo::getCPUKind(StringRef CPU) const {
- return llvm::StringSwitch<CPUKind>(CPU)
-#define PROC(ENUM, STRING, IS64BIT) .Case(STRING, CK_##ENUM)
-#define PROC_ALIAS(ENUM, ALIAS) .Case(ALIAS, CK_##ENUM)
-#include "clang/Basic/X86Target.def"
- .Default(CK_Generic);
+ bool Only64Bit = getTriple().getArch() != llvm::Triple::x86;
+ llvm::X86::fillValidCPUArchList(Values, Only64Bit);
}
ArrayRef<const char *> X86TargetInfo::getGCCRegNames() const {
diff --git a/clang/lib/Basic/Targets/X86.h b/clang/lib/Basic/Targets/X86.h
index 5b5e284e5141..72a01d2514c2 100644
--- a/clang/lib/Basic/Targets/X86.h
+++ b/clang/lib/Basic/Targets/X86.h
@@ -18,6 +18,7 @@
#include "clang/Basic/TargetOptions.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/X86TargetParser.h"
namespace clang {
namespace targets {
@@ -124,21 +125,14 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
bool HasPTWRITE = false;
bool HasINVPCID = false;
bool HasENQCMD = false;
+ bool HasAMXTILE = false;
+ bool HasAMXINT8 = false;
+ bool HasAMXBF16 = false;
+ bool HasSERIALIZE = false;
+ bool HasTSXLDTRK = false;
protected:
- /// Enumeration of all of the X86 CPUs supported by Clang.
- ///
- /// Each enumeration represents a particular CPU supported by Clang. These
- /// loosely correspond to the options passed to '-march' or '-mtune' flags.
- enum CPUKind {
- CK_Generic,
-#define PROC(ENUM, STRING, IS64BIT) CK_##ENUM,
-#include "clang/Basic/X86Target.def"
- } CPU = CK_Generic;
-
- bool checkCPUKind(CPUKind Kind) const;
-
- CPUKind getCPUKind(StringRef CPU) const;
+ llvm::X86::CPUKind CPU = llvm::X86::CK_None;
enum FPMathKind { FP_Default, FP_SSE, FP_387 } FPMath = FP_Default;
@@ -147,6 +141,7 @@ public:
: TargetInfo(Triple) {
LongDoubleFormat = &llvm::APFloat::x87DoubleExtended();
AddrSpaceMap = &X86AddrSpaceMap;
+ HasStrictFP = true;
}
const char *getLongDoubleMangling() const override {
@@ -166,6 +161,10 @@ public:
ArrayRef<TargetInfo::AddlRegName> getGCCAddlRegNames() const override;
+ bool isSPRegName(StringRef RegName) const override {
+ return RegName.equals("esp") || RegName.equals("rsp");
+ }
+
bool validateCpuSupports(StringRef Name) const override;
bool validateCpuIs(StringRef Name) const override;
@@ -178,6 +177,8 @@ public:
StringRef Name,
llvm::SmallVectorImpl<StringRef> &Features) const override;
+ Optional<unsigned> getCPUCacheLineSize() const override;
+
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &info) const override;
@@ -262,24 +263,8 @@ public:
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
- static void setSSELevel(llvm::StringMap<bool> &Features, X86SSEEnum Level,
- bool Enabled);
-
- static void setMMXLevel(llvm::StringMap<bool> &Features, MMX3DNowEnum Level,
- bool Enabled);
-
- static void setXOPLevel(llvm::StringMap<bool> &Features, XOPEnum Level,
- bool Enabled);
-
void setFeatureEnabled(llvm::StringMap<bool> &Features, StringRef Name,
- bool Enabled) const override {
- setFeatureEnabledImpl(Features, Name, Enabled);
- }
-
- // This exists purely to cut down on the number of virtual calls in
- // initFeatureMap which calls this repeatedly.
- static void setFeatureEnabledImpl(llvm::StringMap<bool> &Features,
- StringRef Name, bool Enabled);
+ bool Enabled) const final;
bool
initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
@@ -288,7 +273,7 @@ public:
bool isValidFeatureName(StringRef Name) const override;
- bool hasFeature(StringRef Feature) const override;
+ bool hasFeature(StringRef Feature) const final;
bool handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) override;
@@ -305,13 +290,16 @@ public:
}
bool isValidCPUName(StringRef Name) const override {
- return checkCPUKind(getCPUKind(Name));
+ bool Only64Bit = getTriple().getArch() != llvm::Triple::x86;
+ return llvm::X86::parseArchX86(Name, Only64Bit) != llvm::X86::CK_None;
}
void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
bool setCPU(const std::string &Name) override {
- return checkCPUKind(CPU = getCPUKind(Name));
+ bool Only64Bit = getTriple().getArch() != llvm::Triple::x86;
+ CPU = llvm::X86::parseArchX86(Name, Only64Bit);
+ return CPU != llvm::X86::CK_None;
}
unsigned multiVersionSortPriority(StringRef Name) const override;
@@ -427,6 +415,8 @@ public:
}
ArrayRef<Builtin::Info> getTargetBuiltins() const override;
+
+ bool hasExtIntType() const override { return true; }
};
class LLVM_LIBRARY_VISIBILITY NetBSDI386TargetInfo
@@ -729,6 +719,8 @@ public:
}
ArrayRef<Builtin::Info> getTargetBuiltins() const override;
+
+ bool hasExtIntType() const override { return true; }
};
// x86-64 Windows target
diff --git a/clang/lib/Basic/Targets/XCore.h b/clang/lib/Basic/Targets/XCore.h
index c94f93a99bca..c33766751aa1 100644
--- a/clang/lib/Basic/Targets/XCore.h
+++ b/clang/lib/Basic/Targets/XCore.h
@@ -75,6 +75,8 @@ public:
}
bool allowsLargerPreferedTypeAlignment() const override { return false; }
+
+ bool hasExtIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
diff --git a/clang/lib/Basic/TypeTraits.cpp b/clang/lib/Basic/TypeTraits.cpp
new file mode 100644
index 000000000000..3b723afff70b
--- /dev/null
+++ b/clang/lib/Basic/TypeTraits.cpp
@@ -0,0 +1,86 @@
+//===--- TypeTraits.cpp - Type Traits Support -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the type traits support functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/TypeTraits.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+using namespace clang;
+
+static constexpr const char *TypeTraitNames[] = {
+#define TYPE_TRAIT_1(Spelling, Name, Key) #Name,
+#include "clang/Basic/TokenKinds.def"
+#define TYPE_TRAIT_2(Spelling, Name, Key) #Name,
+#include "clang/Basic/TokenKinds.def"
+#define TYPE_TRAIT_N(Spelling, Name, Key) #Name,
+#include "clang/Basic/TokenKinds.def"
+};
+
+static constexpr const char *TypeTraitSpellings[] = {
+#define TYPE_TRAIT_1(Spelling, Name, Key) #Spelling,
+#include "clang/Basic/TokenKinds.def"
+#define TYPE_TRAIT_2(Spelling, Name, Key) #Spelling,
+#include "clang/Basic/TokenKinds.def"
+#define TYPE_TRAIT_N(Spelling, Name, Key) #Spelling,
+#include "clang/Basic/TokenKinds.def"
+};
+
+static constexpr const char *ArrayTypeTraitNames[] = {
+#define ARRAY_TYPE_TRAIT(Spelling, Name, Key) #Name,
+#include "clang/Basic/TokenKinds.def"
+};
+
+static constexpr const char *ArrayTypeTraitSpellings[] = {
+#define ARRAY_TYPE_TRAIT(Spelling, Name, Key) #Spelling,
+#include "clang/Basic/TokenKinds.def"
+};
+
+static constexpr const char *UnaryExprOrTypeTraitNames[] = {
+#define UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) #Name,
+#define CXX11_UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) #Name,
+#include "clang/Basic/TokenKinds.def"
+};
+
+static constexpr const char *UnaryExprOrTypeTraitSpellings[] = {
+#define UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) #Spelling,
+#define CXX11_UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) #Spelling,
+#include "clang/Basic/TokenKinds.def"
+};
+
+const char *clang::getTraitName(TypeTrait T) {
+ assert(T <= TT_Last && "invalid enum value!");
+ return TypeTraitNames[T];
+}
+
+const char *clang::getTraitName(ArrayTypeTrait T) {
+ assert(T <= ATT_Last && "invalid enum value!");
+ return ArrayTypeTraitNames[T];
+}
+
+const char *clang::getTraitName(UnaryExprOrTypeTrait T) {
+ assert(T <= UETT_Last && "invalid enum value!");
+ return UnaryExprOrTypeTraitNames[T];
+}
+
+const char *clang::getTraitSpelling(TypeTrait T) {
+ assert(T <= TT_Last && "invalid enum value!");
+ return TypeTraitSpellings[T];
+}
+
+const char *clang::getTraitSpelling(ArrayTypeTrait T) {
+ assert(T <= ATT_Last && "invalid enum value!");
+ return ArrayTypeTraitSpellings[T];
+}
+
+const char *clang::getTraitSpelling(UnaryExprOrTypeTrait T) {
+ assert(T <= UETT_Last && "invalid enum value!");
+ return UnaryExprOrTypeTraitSpellings[T];
+}
diff --git a/clang/lib/Basic/Version.cpp b/clang/lib/Basic/Version.cpp
index c69d13b2f689..286107cab9d7 100644
--- a/clang/lib/Basic/Version.cpp
+++ b/clang/lib/Basic/Version.cpp
@@ -28,46 +28,19 @@ std::string getClangRepositoryPath() {
return CLANG_REPOSITORY_STRING;
#else
#ifdef CLANG_REPOSITORY
- StringRef URL(CLANG_REPOSITORY);
+ return CLANG_REPOSITORY;
#else
- StringRef URL("");
+ return "";
#endif
-
- // If the CLANG_REPOSITORY is empty, try to use the SVN keyword. This helps us
- // pick up a tag in an SVN export, for example.
- StringRef SVNRepository("$URL$");
- if (URL.empty()) {
- URL = SVNRepository.slice(SVNRepository.find(':'),
- SVNRepository.find("/lib/Basic"));
- }
-
- // Strip off version from a build from an integration branch.
- URL = URL.slice(0, URL.find("/src/tools/clang"));
-
- // Trim path prefix off, assuming path came from standard cfe path.
- size_t Start = URL.find("cfe/");
- if (Start != StringRef::npos)
- URL = URL.substr(Start + 4);
-
- return URL;
#endif
}
std::string getLLVMRepositoryPath() {
#ifdef LLVM_REPOSITORY
- StringRef URL(LLVM_REPOSITORY);
+ return LLVM_REPOSITORY;
#else
- StringRef URL("");
+ return "";
#endif
-
- // Trim path prefix off, assuming path came from standard llvm path.
- // Leave "llvm/" prefix to distinguish the following llvm revision from the
- // clang revision.
- size_t Start = URL.find("llvm/");
- if (Start != StringRef::npos)
- URL = URL.substr(Start);
-
- return URL;
}
std::string getClangRevision() {
@@ -124,8 +97,12 @@ std::string getClangToolFullVersion(StringRef ToolName) {
#ifdef CLANG_VENDOR
OS << CLANG_VENDOR;
#endif
- OS << ToolName << " version " CLANG_VERSION_STRING " "
- << getClangFullRepositoryVersion();
+ OS << ToolName << " version " CLANG_VERSION_STRING;
+
+ std::string repo = getClangFullRepositoryVersion();
+ if (!repo.empty()) {
+ OS << " " << repo;
+ }
return OS.str();
}
@@ -138,7 +115,13 @@ std::string getClangFullCPPVersion() {
#ifdef CLANG_VENDOR
OS << CLANG_VENDOR;
#endif
- OS << "Clang " CLANG_VERSION_STRING " " << getClangFullRepositoryVersion();
+ OS << "Clang " CLANG_VERSION_STRING;
+
+ std::string repo = getClangFullRepositoryVersion();
+ if (!repo.empty()) {
+ OS << " " << repo;
+ }
+
return OS.str();
}
diff --git a/clang/lib/Basic/Warnings.cpp b/clang/lib/Basic/Warnings.cpp
index 88ef2eaa6589..2c909d9510d4 100644
--- a/clang/lib/Basic/Warnings.cpp
+++ b/clang/lib/Basic/Warnings.cpp
@@ -36,8 +36,9 @@ static void EmitUnknownDiagWarning(DiagnosticsEngine &Diags,
StringRef Opt) {
StringRef Suggestion = DiagnosticIDs::getNearestOption(Flavor, Opt);
Diags.Report(diag::warn_unknown_diag_option)
- << (Flavor == diag::Flavor::WarningOrError ? 0 : 1) << (Prefix.str() += Opt)
- << !Suggestion.empty() << (Prefix.str() += Suggestion);
+ << (Flavor == diag::Flavor::WarningOrError ? 0 : 1)
+ << (Prefix.str() += std::string(Opt)) << !Suggestion.empty()
+ << (Prefix.str() += std::string(Suggestion));
}
void clang::ProcessWarningOptions(DiagnosticsEngine &Diags,
diff --git a/clang/lib/Basic/XRayInstr.cpp b/clang/lib/Basic/XRayInstr.cpp
index ef2470f67200..79052e05860e 100644
--- a/clang/lib/Basic/XRayInstr.cpp
+++ b/clang/lib/Basic/XRayInstr.cpp
@@ -16,13 +16,17 @@
namespace clang {
XRayInstrMask parseXRayInstrValue(StringRef Value) {
- XRayInstrMask ParsedKind = llvm::StringSwitch<XRayInstrMask>(Value)
- .Case("all", XRayInstrKind::All)
- .Case("custom", XRayInstrKind::Custom)
- .Case("function", XRayInstrKind::Function)
- .Case("typed", XRayInstrKind::Typed)
- .Case("none", XRayInstrKind::None)
- .Default(XRayInstrKind::None);
+ XRayInstrMask ParsedKind =
+ llvm::StringSwitch<XRayInstrMask>(Value)
+ .Case("all", XRayInstrKind::All)
+ .Case("custom", XRayInstrKind::Custom)
+ .Case("function",
+ XRayInstrKind::FunctionEntry | XRayInstrKind::FunctionExit)
+ .Case("function-entry", XRayInstrKind::FunctionEntry)
+ .Case("function-exit", XRayInstrKind::FunctionExit)
+ .Case("typed", XRayInstrKind::Typed)
+ .Case("none", XRayInstrKind::None)
+ .Default(XRayInstrKind::None);
return ParsedKind;
}
diff --git a/clang/lib/Basic/XRayLists.cpp b/clang/lib/Basic/XRayLists.cpp
index 222a28f79cc5..6d34617d4795 100644
--- a/clang/lib/Basic/XRayLists.cpp
+++ b/clang/lib/Basic/XRayLists.cpp
@@ -9,7 +9,11 @@
// User-provided filters for always/never XRay instrumenting certain functions.
//
//===----------------------------------------------------------------------===//
+
#include "clang/Basic/XRayLists.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/SpecialCaseList.h"
using namespace clang;
@@ -25,6 +29,8 @@ XRayFunctionFilter::XRayFunctionFilter(
AttrListPaths, SM.getFileManager().getVirtualFileSystem())),
SM(SM) {}
+XRayFunctionFilter::~XRayFunctionFilter() = default;
+
XRayFunctionFilter::ImbueAttribute
XRayFunctionFilter::shouldImbueFunction(StringRef FunctionName) const {
// First apply the always instrument list, than if it isn't an "always" see
diff --git a/clang/lib/CodeGen/ABIInfo.h b/clang/lib/CodeGen/ABIInfo.h
index 0c3a076da0b5..bb40dace8a84 100644
--- a/clang/lib/CodeGen/ABIInfo.h
+++ b/clang/lib/CodeGen/ABIInfo.h
@@ -60,6 +60,8 @@ namespace swiftcall {
virtual bool supportsSwift() const { return false; }
+ virtual bool allowBFloatArgsAndRet() const { return false; }
+
CodeGen::CGCXXABI &getCXXABI() const;
ASTContext &getContext() const;
llvm::LLVMContext &getVMContext() const;
@@ -102,6 +104,10 @@ namespace swiftcall {
bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
uint64_t &Members) const;
+ // Implement the Type::IsPromotableIntegerType for ABI specific needs. The
+ // only difference is that this considers _ExtInt as well.
+ bool isPromotableIntegerTypeForABI(QualType Ty) const;
+
/// A convenience method to return an indirect ABIArgInfo with an
/// expected alignment equal to the ABI alignment of the given type.
CodeGen::ABIArgInfo
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index 0bfcab88a3a9..dce0940670a2 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -18,6 +18,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/Analysis/StackSafetyAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Bitcode/BitcodeReader.h"
@@ -31,6 +32,7 @@
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/IR/PassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/LTO/LTOBackend.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -45,12 +47,18 @@
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/Timer.h"
+#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Coroutines.h"
+#include "llvm/Transforms/Coroutines/CoroCleanup.h"
+#include "llvm/Transforms/Coroutines/CoroEarly.h"
+#include "llvm/Transforms/Coroutines/CoroElide.h"
+#include "llvm/Transforms/Coroutines/CoroSplit.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
+#include "llvm/Transforms/IPO/LowerTypeTests.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/IPO/ThinLTOBitcodeWriter.h"
#include "llvm/Transforms/InstCombine/InstCombine.h"
@@ -71,6 +79,7 @@
#include "llvm/Transforms/Utils/EntryExitInstrumenter.h"
#include "llvm/Transforms/Utils/NameAnonGlobals.h"
#include "llvm/Transforms/Utils/SymbolRewriter.h"
+#include "llvm/Transforms/Utils/UniqueInternalLinkageNames.h"
#include <memory>
using namespace clang;
using namespace llvm;
@@ -216,6 +225,7 @@ getSancovOptsFromCGOpts(const CodeGenOptions &CGOpts) {
Opts.TracePCGuard = CGOpts.SanitizeCoverageTracePCGuard;
Opts.NoPrune = CGOpts.SanitizeCoverageNoPrune;
Opts.Inline8bitCounters = CGOpts.SanitizeCoverageInline8bitCounters;
+ Opts.InlineBoolFlag = CGOpts.SanitizeCoverageInlineBoolFlag;
Opts.PCTable = CGOpts.SanitizeCoveragePCTable;
Opts.StackDepth = CGOpts.SanitizeCoverageStackDepth;
return Opts;
@@ -227,7 +237,9 @@ static void addSanitizerCoveragePass(const PassManagerBuilder &Builder,
static_cast<const PassManagerBuilderWrapper &>(Builder);
const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
auto Opts = getSancovOptsFromCGOpts(CGOpts);
- PM.add(createModuleSanitizerCoverageLegacyPassPass(Opts));
+ PM.add(createModuleSanitizerCoverageLegacyPassPass(
+ Opts, CGOpts.SanitizeCoverageAllowlistFiles,
+ CGOpts.SanitizeCoverageBlocklistFiles));
}
// Check if ASan should use GC-friendly instrumentation for globals.
@@ -350,7 +362,7 @@ static TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple,
break;
case CodeGenOptions::MASSV:
TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::MASSV);
- break;
+ break;
case CodeGenOptions::SVML:
TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::SVML);
break;
@@ -413,7 +425,8 @@ static CodeGenFileType getCodeGenFileType(BackendAction Action) {
}
}
-static void initTargetOptions(llvm::TargetOptions &Options,
+static void initTargetOptions(DiagnosticsEngine &Diags,
+ llvm::TargetOptions &Options,
const CodeGenOptions &CodeGenOpts,
const clang::TargetOptions &TargetOpts,
const LangOptions &LangOpts,
@@ -436,15 +449,15 @@ static void initTargetOptions(llvm::TargetOptions &Options,
// Set FP fusion mode.
switch (LangOpts.getDefaultFPContractMode()) {
- case LangOptions::FPC_Off:
+ case LangOptions::FPM_Off:
// Preserve any contraction performed by the front-end. (Strict performs
// splitting of the muladd intrinsic in the backend.)
Options.AllowFPOpFusion = llvm::FPOpFusion::Standard;
break;
- case LangOptions::FPC_On:
+ case LangOptions::FPM_On:
Options.AllowFPOpFusion = llvm::FPOpFusion::Standard;
break;
- case LangOptions::FPC_Fast:
+ case LangOptions::FPM_Fast:
Options.AllowFPOpFusion = llvm::FPOpFusion::Fast;
break;
}
@@ -466,22 +479,44 @@ static void initTargetOptions(llvm::TargetOptions &Options,
if (LangOpts.WasmExceptions)
Options.ExceptionModel = llvm::ExceptionHandling::Wasm;
- Options.NoInfsFPMath = CodeGenOpts.NoInfsFPMath;
- Options.NoNaNsFPMath = CodeGenOpts.NoNaNsFPMath;
+ Options.NoInfsFPMath = LangOpts.NoHonorInfs;
+ Options.NoNaNsFPMath = LangOpts.NoHonorNaNs;
Options.NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
- Options.UnsafeFPMath = CodeGenOpts.UnsafeFPMath;
+ Options.UnsafeFPMath = LangOpts.UnsafeFPMath;
Options.StackAlignmentOverride = CodeGenOpts.StackAlignment;
+
+ Options.BBSections =
+ llvm::StringSwitch<llvm::BasicBlockSection>(CodeGenOpts.BBSections)
+ .Case("all", llvm::BasicBlockSection::All)
+ .Case("labels", llvm::BasicBlockSection::Labels)
+ .StartsWith("list=", llvm::BasicBlockSection::List)
+ .Case("none", llvm::BasicBlockSection::None)
+ .Default(llvm::BasicBlockSection::None);
+
+ if (Options.BBSections == llvm::BasicBlockSection::List) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> MBOrErr =
+ MemoryBuffer::getFile(CodeGenOpts.BBSections.substr(5));
+ if (!MBOrErr)
+ Diags.Report(diag::err_fe_unable_to_load_basic_block_sections_file)
+ << MBOrErr.getError().message();
+ else
+ Options.BBSectionsFuncListBuf = std::move(*MBOrErr);
+ }
+
Options.FunctionSections = CodeGenOpts.FunctionSections;
Options.DataSections = CodeGenOpts.DataSections;
Options.UniqueSectionNames = CodeGenOpts.UniqueSectionNames;
+ Options.UniqueBasicBlockSectionNames =
+ CodeGenOpts.UniqueBasicBlockSectionNames;
Options.TLSSize = CodeGenOpts.TLSSize;
Options.EmulatedTLS = CodeGenOpts.EmulatedTLS;
Options.ExplicitEmulatedTLS = CodeGenOpts.ExplicitEmulatedTLS;
Options.DebuggerTuning = CodeGenOpts.getDebuggerTuning();
Options.EmitStackSizeSection = CodeGenOpts.StackSizeSection;
Options.EmitAddrsig = CodeGenOpts.Addrsig;
- Options.EnableDebugEntryValues = CodeGenOpts.EnableDebugEntryValues;
Options.ForceDwarfFrameSection = CodeGenOpts.ForceDwarfFrameSection;
+ Options.EmitCallSiteInfo = CodeGenOpts.EmitCallSiteInfo;
+ Options.XRayOmitFunctionIndex = CodeGenOpts.XRayOmitFunctionIndex;
Options.MCOptions.SplitDwarfFile = CodeGenOpts.SplitDwarfFile;
Options.MCOptions.MCRelaxAll = CodeGenOpts.RelaxAll;
@@ -502,6 +537,8 @@ static void initTargetOptions(llvm::TargetOptions &Options,
Entry.Group == frontend::IncludeDirGroup::System))
Options.MCOptions.IASSearchPaths.push_back(
Entry.IgnoreSysRoot ? Entry.Path : HSOpts.Sysroot + Entry.Path);
+ Options.MCOptions.Argv0 = CodeGenOpts.Argv0;
+ Options.MCOptions.CommandLineArgs = CodeGenOpts.CommandLineArgs;
}
static Optional<GCOVOptions> getGCOVOptions(const CodeGenOptions &CodeGenOpts) {
if (CodeGenOpts.DisableGCov)
@@ -514,12 +551,9 @@ static Optional<GCOVOptions> getGCOVOptions(const CodeGenOptions &CodeGenOpts) {
Options.EmitNotes = CodeGenOpts.EmitGcovNotes;
Options.EmitData = CodeGenOpts.EmitGcovArcs;
llvm::copy(CodeGenOpts.CoverageVersion, std::begin(Options.Version));
- Options.UseCfgChecksum = CodeGenOpts.CoverageExtraChecksum;
Options.NoRedZone = CodeGenOpts.DisableRedZone;
- Options.FunctionNamesInData = !CodeGenOpts.CoverageNoFunctionNamesInData;
Options.Filter = CodeGenOpts.ProfileFilterFiles;
Options.Exclude = CodeGenOpts.ProfileExcludeFiles;
- Options.ExitBlockBeforeBody = CodeGenOpts.CoverageExitBlockBeforeBody;
return Options;
}
@@ -553,13 +587,24 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
std::unique_ptr<TargetLibraryInfoImpl> TLII(
createTLII(TargetTriple, CodeGenOpts));
+ // If we reached here with a non-empty index file name, then the index file
+ // was empty and we are not performing ThinLTO backend compilation (used in
+ // testing in a distributed build environment). Drop any type test
+ // assume sequences inserted for whole program vtables so that codegen doesn't
+ // complain.
+ if (!CodeGenOpts.ThinLTOIndexFile.empty())
+ MPM.add(createLowerTypeTestsPass(/*ExportSummary=*/nullptr,
+ /*ImportSummary=*/nullptr,
+ /*DropTypeTests=*/true));
+
PassManagerBuilderWrapper PMBuilder(TargetTriple, CodeGenOpts, LangOpts);
// At O0 and O1 we only run the always inliner which is more efficient. At
// higher optimization levels we run the normal inliner.
if (CodeGenOpts.OptimizationLevel <= 1) {
- bool InsertLifetimeIntrinsics = (CodeGenOpts.OptimizationLevel != 0 &&
- !CodeGenOpts.DisableLifetimeMarkers);
+ bool InsertLifetimeIntrinsics = ((CodeGenOpts.OptimizationLevel != 0 &&
+ !CodeGenOpts.DisableLifetimeMarkers) ||
+ LangOpts.Coroutines);
PMBuilder.Inliner = createAlwaysInlinerLegacyPass(InsertLifetimeIntrinsics);
} else {
// We do not want to inline hot callsites for SamplePGO module-summary build
@@ -575,6 +620,9 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
PMBuilder.SizeLevel = CodeGenOpts.OptimizeSize;
PMBuilder.SLPVectorize = CodeGenOpts.VectorizeSLP;
PMBuilder.LoopVectorize = CodeGenOpts.VectorizeLoop;
+ // Only enable CGProfilePass when using integrated assembler, since
+ // non-integrated assemblers don't recognize .cgprofile section.
+ PMBuilder.CallGraphProfile = !CodeGenOpts.DisableIntegratedAS;
PMBuilder.DisableUnrollLoops = !CodeGenOpts.UnrollLoops;
// Loop interleaving in the loop vectorizer has historically been set to be
@@ -689,6 +737,12 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
if (!CodeGenOpts.RewriteMapFiles.empty())
addSymbolRewriterPass(CodeGenOpts, &MPM);
+ // Add UniqueInternalLinkageNames Pass which renames internal linkage symbols
+ // with unique names.
+ if (CodeGenOpts.UniqueInternalLinkageNames) {
+ MPM.add(createUniqueInternalLinkageNamesPass());
+ }
+
if (Optional<GCOVOptions> Options = getGCOVOptions(CodeGenOpts)) {
MPM.add(createGCOVProfilerPass(*Options));
if (CodeGenOpts.getDebugInfo() == codegenoptions::NoDebugInfo)
@@ -718,7 +772,7 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
if (!CodeGenOpts.InstrProfileOutput.empty())
PMBuilder.PGOInstrGen = CodeGenOpts.InstrProfileOutput;
else
- PMBuilder.PGOInstrGen = DefaultProfileGenName;
+ PMBuilder.PGOInstrGen = std::string(DefaultProfileGenName);
}
if (CodeGenOpts.hasProfileIRUse()) {
PMBuilder.PGOInstrUse = CodeGenOpts.ProfileInstrumentUsePath;
@@ -766,7 +820,7 @@ void EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
CodeGenOpt::Level OptLevel = getCGOptLevel(CodeGenOpts);
llvm::TargetOptions Options;
- initTargetOptions(Options, CodeGenOpts, TargetOpts, LangOpts, HSOpts);
+ initTargetOptions(Diags, Options, CodeGenOpts, TargetOpts, LangOpts, HSOpts);
TM.reset(TheTarget->createTargetMachine(Triple, TargetOpts.CPU, FeaturesStr,
Options, RM, CM, OptLevel));
}
@@ -924,7 +978,7 @@ static PassBuilder::OptimizationLevel mapToLevel(const CodeGenOptions &Opts) {
llvm_unreachable("Invalid optimization level!");
case 1:
- return PassBuilder::O1;
+ return PassBuilder::OptimizationLevel::O1;
case 2:
switch (Opts.OptimizeSize) {
@@ -932,24 +986,49 @@ static PassBuilder::OptimizationLevel mapToLevel(const CodeGenOptions &Opts) {
llvm_unreachable("Invalid optimization level for size!");
case 0:
- return PassBuilder::O2;
+ return PassBuilder::OptimizationLevel::O2;
case 1:
- return PassBuilder::Os;
+ return PassBuilder::OptimizationLevel::Os;
case 2:
- return PassBuilder::Oz;
+ return PassBuilder::OptimizationLevel::Oz;
}
case 3:
- return PassBuilder::O3;
+ return PassBuilder::OptimizationLevel::O3;
}
}
+static void addCoroutinePassesAtO0(ModulePassManager &MPM,
+ const LangOptions &LangOpts,
+ const CodeGenOptions &CodeGenOpts) {
+ if (!LangOpts.Coroutines)
+ return;
+
+ MPM.addPass(createModuleToFunctionPassAdaptor(CoroEarlyPass()));
+
+ CGSCCPassManager CGPM(CodeGenOpts.DebugPassManager);
+ CGPM.addPass(CoroSplitPass());
+ CGPM.addPass(createCGSCCToFunctionPassAdaptor(CoroElidePass()));
+ MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));
+
+ MPM.addPass(createModuleToFunctionPassAdaptor(CoroCleanupPass()));
+}
+
static void addSanitizersAtO0(ModulePassManager &MPM,
const Triple &TargetTriple,
const LangOptions &LangOpts,
const CodeGenOptions &CodeGenOpts) {
+ if (CodeGenOpts.SanitizeCoverageType ||
+ CodeGenOpts.SanitizeCoverageIndirectCalls ||
+ CodeGenOpts.SanitizeCoverageTraceCmp) {
+ auto SancovOpts = getSancovOptsFromCGOpts(CodeGenOpts);
+ MPM.addPass(ModuleSanitizerCoveragePass(
+ SancovOpts, CodeGenOpts.SanitizeCoverageAllowlistFiles,
+ CodeGenOpts.SanitizeCoverageBlocklistFiles));
+ }
+
auto ASanPass = [&](SanitizerMask Mask, bool CompileKernel) {
MPM.addPass(RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
bool Recover = CodeGenOpts.SanitizeRecover.has(Mask);
@@ -970,8 +1049,11 @@ static void addSanitizersAtO0(ModulePassManager &MPM,
}
if (LangOpts.Sanitize.has(SanitizerKind::Memory)) {
- MPM.addPass(MemorySanitizerPass({}));
- MPM.addPass(createModuleToFunctionPassAdaptor(MemorySanitizerPass({})));
+ bool Recover = CodeGenOpts.SanitizeRecover.has(SanitizerKind::Memory);
+ int TrackOrigins = CodeGenOpts.SanitizeMemoryTrackOrigins;
+ MPM.addPass(MemorySanitizerPass({TrackOrigins, Recover, false}));
+ MPM.addPass(createModuleToFunctionPassAdaptor(
+ MemorySanitizerPass({TrackOrigins, Recover, false})));
}
if (LangOpts.Sanitize.has(SanitizerKind::KernelMemory)) {
@@ -1013,7 +1095,7 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
if (CodeGenOpts.hasProfileIRInstr())
// -fprofile-generate.
PGOOpt = PGOOptions(CodeGenOpts.InstrProfileOutput.empty()
- ? DefaultProfileGenName
+ ? std::string(DefaultProfileGenName)
: CodeGenOpts.InstrProfileOutput,
"", "", PGOOptions::IRInstr, PGOOptions::NoCSAction,
CodeGenOpts.DebugInfoForProfiling);
@@ -1046,13 +1128,13 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
"Cannot run CSProfileGen pass with ProfileGen or SampleUse "
" pass");
PGOOpt->CSProfileGenFile = CodeGenOpts.InstrProfileOutput.empty()
- ? DefaultProfileGenName
+ ? std::string(DefaultProfileGenName)
: CodeGenOpts.InstrProfileOutput;
PGOOpt->CSAction = PGOOptions::CSIRInstr;
} else
PGOOpt = PGOOptions("",
CodeGenOpts.InstrProfileOutput.empty()
- ? DefaultProfileGenName
+ ? std::string(DefaultProfileGenName)
: CodeGenOpts.InstrProfileOutput,
"", PGOOptions::NoAction, PGOOptions::CSIRInstr,
CodeGenOpts.DebugInfoForProfiling);
@@ -1065,6 +1147,10 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
PTO.LoopInterleaving = CodeGenOpts.UnrollLoops;
PTO.LoopVectorization = CodeGenOpts.VectorizeLoop;
PTO.SLPVectorization = CodeGenOpts.VectorizeSLP;
+ // Only enable CGProfilePass when using integrated assembler, since
+ // non-integrated assemblers don't recognize .cgprofile section.
+ PTO.CallGraphProfile = !CodeGenOpts.DisableIntegratedAS;
+ PTO.Coroutines = LangOpts.Coroutines;
PassInstrumentationCallbacks PIC;
StandardInstrumentations SI;
@@ -1114,6 +1200,15 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
bool IsLTO = CodeGenOpts.PrepareForLTO;
if (CodeGenOpts.OptimizationLevel == 0) {
+ // If we reached here with a non-empty index file name, then the index
+ // file was empty and we are not performing ThinLTO backend compilation
+ // (used in testing in a distributed build environment). Drop any type
+ // test assume sequences inserted for whole program vtables so that
+ // codegen doesn't complain.
+ if (!CodeGenOpts.ThinLTOIndexFile.empty())
+ MPM.addPass(LowerTypeTestsPass(/*ExportSummary=*/nullptr,
+ /*ImportSummary=*/nullptr,
+ /*DropTypeTests=*/true));
if (Optional<GCOVOptions> Options = getGCOVOptions(CodeGenOpts))
MPM.addPass(GCOVProfilerPass(*Options));
if (Optional<InstrProfOptions> Options =
@@ -1124,7 +1219,10 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
// which is just that always inlining occurs. Further, disable generating
// lifetime intrinsics to avoid enabling further optimizations during
// code generation.
- MPM.addPass(AlwaysInlinerPass(/*InsertLifetimeIntrinsics=*/false));
+ // However, we need to insert lifetime intrinsics to avoid invalid access
+ // caused by multithreaded coroutines.
+ MPM.addPass(
+ AlwaysInlinerPass(/*InsertLifetimeIntrinsics=*/LangOpts.Coroutines));
// At -O0, we can still do PGO. Add all the requested passes for
// instrumentation PGO, if requested.
@@ -1140,6 +1238,12 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
if (LangOpts.Sanitize.has(SanitizerKind::LocalBounds))
MPM.addPass(createModuleToFunctionPassAdaptor(BoundsCheckingPass()));
+ // Add UniqueInternalLinkageNames Pass which renames internal linkage
+ // symbols with unique names.
+ if (CodeGenOpts.UniqueInternalLinkageNames) {
+ MPM.addPass(UniqueInternalLinkageNamesPass());
+ }
+
// Lastly, add semantically necessary passes for LTO.
if (IsLTO || IsThinLTO) {
MPM.addPass(CanonicalizeAliasesPass());
@@ -1150,6 +1254,18 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
// configure the pipeline.
PassBuilder::OptimizationLevel Level = mapToLevel(CodeGenOpts);
+ // If we reached here with a non-empty index file name, then the index
+ // file was empty and we are not performing ThinLTO backend compilation
+ // (used in testing in a distributed build environment). Drop any type
+ // test assume sequences inserted for whole program vtables so that
+ // codegen doesn't complain.
+ if (!CodeGenOpts.ThinLTOIndexFile.empty())
+ PB.registerPipelineStartEPCallback([](ModulePassManager &MPM) {
+ MPM.addPass(LowerTypeTestsPass(/*ExportSummary=*/nullptr,
+ /*ImportSummary=*/nullptr,
+ /*DropTypeTests=*/true));
+ });
+
PB.registerPipelineStartEPCallback([](ModulePassManager &MPM) {
MPM.addPass(createModuleToFunctionPassAdaptor(
EntryExitInstrumenterPass(/*PostInlining=*/false)));
@@ -1157,50 +1273,60 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
// Register callbacks to schedule sanitizer passes at the appropriate part of
// the pipeline.
- // FIXME: either handle asan/the remaining sanitizers or error out
if (LangOpts.Sanitize.has(SanitizerKind::LocalBounds))
PB.registerScalarOptimizerLateEPCallback(
[](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
FPM.addPass(BoundsCheckingPass());
});
+
+ if (CodeGenOpts.SanitizeCoverageType ||
+ CodeGenOpts.SanitizeCoverageIndirectCalls ||
+ CodeGenOpts.SanitizeCoverageTraceCmp) {
+ PB.registerOptimizerLastEPCallback(
+ [this](ModulePassManager &MPM,
+ PassBuilder::OptimizationLevel Level) {
+ auto SancovOpts = getSancovOptsFromCGOpts(CodeGenOpts);
+ MPM.addPass(ModuleSanitizerCoveragePass(
+ SancovOpts, CodeGenOpts.SanitizeCoverageAllowlistFiles,
+ CodeGenOpts.SanitizeCoverageBlocklistFiles));
+ });
+ }
+
if (LangOpts.Sanitize.has(SanitizerKind::Memory)) {
- PB.registerPipelineStartEPCallback([](ModulePassManager &MPM) {
- MPM.addPass(MemorySanitizerPass({}));
- });
+ int TrackOrigins = CodeGenOpts.SanitizeMemoryTrackOrigins;
+ bool Recover = CodeGenOpts.SanitizeRecover.has(SanitizerKind::Memory);
PB.registerOptimizerLastEPCallback(
- [](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
- FPM.addPass(MemorySanitizerPass({}));
+ [TrackOrigins, Recover](ModulePassManager &MPM,
+ PassBuilder::OptimizationLevel Level) {
+ MPM.addPass(MemorySanitizerPass({TrackOrigins, Recover, false}));
+ MPM.addPass(createModuleToFunctionPassAdaptor(
+ MemorySanitizerPass({TrackOrigins, Recover, false})));
});
}
if (LangOpts.Sanitize.has(SanitizerKind::Thread)) {
- PB.registerPipelineStartEPCallback(
- [](ModulePassManager &MPM) { MPM.addPass(ThreadSanitizerPass()); });
PB.registerOptimizerLastEPCallback(
- [](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
- FPM.addPass(ThreadSanitizerPass());
+ [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ MPM.addPass(ThreadSanitizerPass());
+ MPM.addPass(
+ createModuleToFunctionPassAdaptor(ThreadSanitizerPass()));
});
}
if (LangOpts.Sanitize.has(SanitizerKind::Address)) {
- PB.registerPipelineStartEPCallback([&](ModulePassManager &MPM) {
- MPM.addPass(
- RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
- });
bool Recover = CodeGenOpts.SanitizeRecover.has(SanitizerKind::Address);
bool UseAfterScope = CodeGenOpts.SanitizeAddressUseAfterScope;
- PB.registerOptimizerLastEPCallback(
- [Recover, UseAfterScope](FunctionPassManager &FPM,
- PassBuilder::OptimizationLevel Level) {
- FPM.addPass(AddressSanitizerPass(
- /*CompileKernel=*/false, Recover, UseAfterScope));
- });
bool ModuleUseAfterScope = asanUseGlobalsGC(TargetTriple, CodeGenOpts);
bool UseOdrIndicator = CodeGenOpts.SanitizeAddressUseOdrIndicator;
- PB.registerPipelineStartEPCallback(
- [Recover, ModuleUseAfterScope,
- UseOdrIndicator](ModulePassManager &MPM) {
+ PB.registerOptimizerLastEPCallback(
+ [Recover, UseAfterScope, ModuleUseAfterScope, UseOdrIndicator](
+ ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ MPM.addPass(
+ RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
MPM.addPass(ModuleAddressSanitizerPass(
/*CompileKernel=*/false, Recover, ModuleUseAfterScope,
UseOdrIndicator));
+ MPM.addPass(
+ createModuleToFunctionPassAdaptor(AddressSanitizerPass(
+ /*CompileKernel=*/false, Recover, UseAfterScope)));
});
}
if (Optional<GCOVOptions> Options = getGCOVOptions(CodeGenOpts))
@@ -1213,6 +1339,12 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
MPM.addPass(InstrProfiling(*Options, false));
});
+ // Add UniqueInternalLinkageNames Pass which renames internal linkage
+ // symbols with unique names.
+ if (CodeGenOpts.UniqueInternalLinkageNames) {
+ MPM.addPass(UniqueInternalLinkageNamesPass());
+ }
+
if (IsThinLTO) {
MPM = PB.buildThinLTOPreLinkDefaultPipeline(
Level, CodeGenOpts.DebugPassManager);
@@ -1229,13 +1361,6 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
}
}
- if (CodeGenOpts.SanitizeCoverageType ||
- CodeGenOpts.SanitizeCoverageIndirectCalls ||
- CodeGenOpts.SanitizeCoverageTraceCmp) {
- auto SancovOpts = getSancovOptsFromCGOpts(CodeGenOpts);
- MPM.addPass(ModuleSanitizerCoveragePass(SancovOpts));
- }
-
if (LangOpts.Sanitize.has(SanitizerKind::HWAddress)) {
bool Recover = CodeGenOpts.SanitizeRecover.has(SanitizerKind::HWAddress);
MPM.addPass(HWAddressSanitizerPass(
@@ -1247,6 +1372,7 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
}
if (CodeGenOpts.OptimizationLevel == 0) {
+ addCoroutinePassesAtO0(MPM, LangOpts, CodeGenOpts);
addSanitizersAtO0(MPM, TargetTriple, LangOpts, CodeGenOpts);
}
}
@@ -1358,15 +1484,12 @@ BitcodeModule *clang::FindThinLTOModule(MutableArrayRef<BitcodeModule> BMs) {
return nullptr;
}
-static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
- const HeaderSearchOptions &HeaderOpts,
- const CodeGenOptions &CGOpts,
- const clang::TargetOptions &TOpts,
- const LangOptions &LOpts,
- std::unique_ptr<raw_pwrite_stream> OS,
- std::string SampleProfile,
- std::string ProfileRemapping,
- BackendAction Action) {
+static void runThinLTOBackend(
+ DiagnosticsEngine &Diags, ModuleSummaryIndex *CombinedIndex, Module *M,
+ const HeaderSearchOptions &HeaderOpts, const CodeGenOptions &CGOpts,
+ const clang::TargetOptions &TOpts, const LangOptions &LOpts,
+ std::unique_ptr<raw_pwrite_stream> OS, std::string SampleProfile,
+ std::string ProfileRemapping, BackendAction Action) {
StringMap<DenseMap<GlobalValue::GUID, GlobalValueSummary *>>
ModuleToDefinedGVSummaries;
CombinedIndex->collectDefinedGVSummariesPerModule(ModuleToDefinedGVSummaries);
@@ -1436,7 +1559,7 @@ static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
Conf.RelocModel = CGOpts.RelocationModel;
Conf.CGOptLevel = getCGOptLevel(CGOpts);
Conf.OptLevel = CGOpts.OptimizationLevel;
- initTargetOptions(Conf.Options, CGOpts, TOpts, LOpts, HeaderOpts);
+ initTargetOptions(Diags, Conf.Options, CGOpts, TOpts, LOpts, HeaderOpts);
Conf.SampleProfile = std::move(SampleProfile);
Conf.PTO.LoopUnrolling = CGOpts.UnrollLoops;
// For historical reasons, loop interleaving is set to mirror setting for loop
@@ -1444,6 +1567,9 @@ static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
Conf.PTO.LoopInterleaving = CGOpts.UnrollLoops;
Conf.PTO.LoopVectorization = CGOpts.VectorizeLoop;
Conf.PTO.SLPVectorization = CGOpts.VectorizeSLP;
+ // Only enable CGProfilePass when using integrated assembler, since
+ // non-integrated assemblers don't recognize .cgprofile section.
+ Conf.PTO.CallGraphProfile = !CGOpts.DisableIntegratedAS;
// Context sensitive profile.
if (CGOpts.hasProfileCSIRInstr()) {
@@ -1525,8 +1651,8 @@ void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
// of an error).
if (CombinedIndex) {
if (!CombinedIndex->skipModuleByDistributedBackend()) {
- runThinLTOBackend(CombinedIndex.get(), M, HeaderOpts, CGOpts, TOpts,
- LOpts, std::move(OS), CGOpts.SampleProfileFile,
+ runThinLTOBackend(Diags, CombinedIndex.get(), M, HeaderOpts, CGOpts,
+ TOpts, LOpts, std::move(OS), CGOpts.SampleProfileFile,
CGOpts.ProfileRemappingFile, Action);
return;
}
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index 149982d82790..a58450ddd4c5 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -119,8 +119,9 @@ namespace {
ValueTy = lvalue.getType();
ValueSizeInBits = C.getTypeSize(ValueTy);
AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
- lvalue.getType(), lvalue.getExtVectorAddress()
- .getElementType()->getVectorNumElements());
+ lvalue.getType(), cast<llvm::VectorType>(
+ lvalue.getExtVectorAddress().getElementType())
+ ->getNumElements());
AtomicSizeInBits = C.getTypeSize(AtomicTy);
AtomicAlign = ValueAlign = lvalue.getAlignment();
LVal = lvalue;
@@ -1826,7 +1827,7 @@ void AtomicInfo::EmitAtomicUpdateOp(
auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
// Do the atomic load.
- auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
+ auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
// For non-simple lvalues perform compare-and-swap procedure.
auto *ContBB = CGF.createBasicBlock("atomic_cont");
auto *ExitBB = CGF.createBasicBlock("atomic_exit");
@@ -1908,7 +1909,7 @@ void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
// Do the atomic load.
- auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
+ auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
// For non-simple lvalues perform compare-and-swap procedure.
auto *ContBB = CGF.createBasicBlock("atomic_cont");
auto *ExitBB = CGF.createBasicBlock("atomic_exit");
@@ -2018,6 +2019,10 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
intValue, addr.getElementType(), /*isSigned=*/false);
llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
+ if (AO == llvm::AtomicOrdering::Acquire)
+ AO = llvm::AtomicOrdering::Monotonic;
+ else if (AO == llvm::AtomicOrdering::AcquireRelease)
+ AO = llvm::AtomicOrdering::Release;
// Initializations don't need to be atomic.
if (!isInit)
store->setAtomic(AO);
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp
index 11f54d1f7fb2..615b78235041 100644
--- a/clang/lib/CodeGen/CGBlocks.cpp
+++ b/clang/lib/CodeGen/CGBlocks.cpp
@@ -36,7 +36,7 @@ CGBlockInfo::CGBlockInfo(const BlockDecl *block, StringRef name)
: Name(name), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
HasCXXObject(false), UsesStret(false), HasCapturedVariableLayout(false),
CapturesNonExternalType(false), LocalAddress(Address::invalid()),
- StructureType(nullptr), Block(block), DominatingIP(nullptr) {
+ StructureType(nullptr), Block(block) {
// Skip asm prefix, if any. 'name' is usually taken directly from
// the mangled name of the enclosing function.
@@ -775,151 +775,23 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
}
-/// Enter the scope of a block. This should be run at the entrance to
-/// a full-expression so that the block's cleanups are pushed at the
-/// right place in the stack.
-static void enterBlockScope(CodeGenFunction &CGF, BlockDecl *block) {
- assert(CGF.HaveInsertPoint());
-
- // Allocate the block info and place it at the head of the list.
- CGBlockInfo &blockInfo =
- *new CGBlockInfo(block, CGF.CurFn->getName());
- blockInfo.NextBlockInfo = CGF.FirstBlockInfo;
- CGF.FirstBlockInfo = &blockInfo;
-
- // Compute information about the layout, etc., of this block,
- // pushing cleanups as necessary.
- computeBlockInfo(CGF.CGM, &CGF, blockInfo);
-
- // Nothing else to do if it can be global.
- if (blockInfo.CanBeGlobal) return;
-
- // Make the allocation for the block.
- blockInfo.LocalAddress = CGF.CreateTempAlloca(blockInfo.StructureType,
- blockInfo.BlockAlign, "block");
-
- // If there are cleanups to emit, enter them (but inactive).
- if (!blockInfo.NeedsCopyDispose) return;
-
- // Walk through the captures (in order) and find the ones not
- // captured by constant.
- for (const auto &CI : block->captures()) {
- // Ignore __block captures; there's nothing special in the
- // on-stack block that we need to do for them.
- if (CI.isByRef()) continue;
-
- // Ignore variables that are constant-captured.
- const VarDecl *variable = CI.getVariable();
- CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
- if (capture.isConstant()) continue;
-
- // Ignore objects that aren't destructed.
- QualType VT = getCaptureFieldType(CGF, CI);
- QualType::DestructionKind dtorKind = VT.isDestructedType();
- if (dtorKind == QualType::DK_none) continue;
-
- CodeGenFunction::Destroyer *destroyer;
-
- // Block captures count as local values and have imprecise semantics.
- // They also can't be arrays, so need to worry about that.
- //
- // For const-qualified captures, emit clang.arc.use to ensure the captured
- // object doesn't get released while we are still depending on its validity
- // within the block.
- if (VT.isConstQualified() &&
- VT.getObjCLifetime() == Qualifiers::OCL_Strong &&
- CGF.CGM.getCodeGenOpts().OptimizationLevel != 0) {
- assert(CGF.CGM.getLangOpts().ObjCAutoRefCount &&
- "expected ObjC ARC to be enabled");
- destroyer = CodeGenFunction::emitARCIntrinsicUse;
- } else if (dtorKind == QualType::DK_objc_strong_lifetime) {
- destroyer = CodeGenFunction::destroyARCStrongImprecise;
- } else {
- destroyer = CGF.getDestroyer(dtorKind);
- }
-
- // GEP down to the address.
- Address addr =
- CGF.Builder.CreateStructGEP(blockInfo.LocalAddress, capture.getIndex());
-
- // We can use that GEP as the dominating IP.
- if (!blockInfo.DominatingIP)
- blockInfo.DominatingIP = cast<llvm::Instruction>(addr.getPointer());
-
- CleanupKind cleanupKind = InactiveNormalCleanup;
- bool useArrayEHCleanup = CGF.needsEHCleanup(dtorKind);
- if (useArrayEHCleanup)
- cleanupKind = InactiveNormalAndEHCleanup;
-
- CGF.pushDestroy(cleanupKind, addr, VT,
- destroyer, useArrayEHCleanup);
-
- // Remember where that cleanup was.
- capture.setCleanup(CGF.EHStack.stable_begin());
- }
-}
-
-/// Enter a full-expression with a non-trivial number of objects to
-/// clean up. This is in this file because, at the moment, the only
-/// kind of cleanup object is a BlockDecl*.
-void CodeGenFunction::enterNonTrivialFullExpression(const FullExpr *E) {
- if (const auto EWC = dyn_cast<ExprWithCleanups>(E)) {
- assert(EWC->getNumObjects() != 0);
- for (const ExprWithCleanups::CleanupObject &C : EWC->getObjects())
- enterBlockScope(*this, C);
- }
-}
-
-/// Find the layout for the given block in a linked list and remove it.
-static CGBlockInfo *findAndRemoveBlockInfo(CGBlockInfo **head,
- const BlockDecl *block) {
- while (true) {
- assert(head && *head);
- CGBlockInfo *cur = *head;
-
- // If this is the block we're looking for, splice it out of the list.
- if (cur->getBlockDecl() == block) {
- *head = cur->NextBlockInfo;
- return cur;
- }
-
- head = &cur->NextBlockInfo;
- }
-}
-
-/// Destroy a chain of block layouts.
-void CodeGenFunction::destroyBlockInfos(CGBlockInfo *head) {
- assert(head && "destroying an empty chain");
- do {
- CGBlockInfo *cur = head;
- head = cur->NextBlockInfo;
- delete cur;
- } while (head != nullptr);
-}
-
/// Emit a block literal expression in the current function.
llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
// If the block has no captures, we won't have a pre-computed
// layout for it.
- if (!blockExpr->getBlockDecl()->hasCaptures()) {
+ if (!blockExpr->getBlockDecl()->hasCaptures())
// The block literal is emitted as a global variable, and the block invoke
// function has to be extracted from its initializer.
- if (llvm::Constant *Block = CGM.getAddrOfGlobalBlockIfEmitted(blockExpr)) {
+ if (llvm::Constant *Block = CGM.getAddrOfGlobalBlockIfEmitted(blockExpr))
return Block;
- }
- CGBlockInfo blockInfo(blockExpr->getBlockDecl(), CurFn->getName());
- computeBlockInfo(CGM, this, blockInfo);
- blockInfo.BlockExpression = blockExpr;
- return EmitBlockLiteral(blockInfo);
- }
-
- // Find the block info for this block and take ownership of it.
- std::unique_ptr<CGBlockInfo> blockInfo;
- blockInfo.reset(findAndRemoveBlockInfo(&FirstBlockInfo,
- blockExpr->getBlockDecl()));
- blockInfo->BlockExpression = blockExpr;
- return EmitBlockLiteral(*blockInfo);
+ CGBlockInfo blockInfo(blockExpr->getBlockDecl(), CurFn->getName());
+ computeBlockInfo(CGM, this, blockInfo);
+ blockInfo.BlockExpression = blockExpr;
+ if (!blockInfo.CanBeGlobal)
+ blockInfo.LocalAddress = CreateTempAlloca(blockInfo.StructureType,
+ blockInfo.BlockAlign, "block");
+ return EmitBlockLiteral(blockInfo);
}
llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
@@ -1161,12 +1033,64 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
/*captured by init*/ false);
}
- // Activate the cleanup if layout pushed one.
- if (!CI.isByRef()) {
- EHScopeStack::stable_iterator cleanup = capture.getCleanup();
- if (cleanup.isValid())
- ActivateCleanupBlock(cleanup, blockInfo.DominatingIP);
+ // Push a cleanup for the capture if necessary.
+ if (!blockInfo.NeedsCopyDispose)
+ continue;
+
+ // Ignore __block captures; there's nothing special in the on-stack block
+ // that we need to do for them.
+ if (CI.isByRef())
+ continue;
+
+ // Ignore objects that aren't destructed.
+ QualType::DestructionKind dtorKind = type.isDestructedType();
+ if (dtorKind == QualType::DK_none)
+ continue;
+
+ CodeGenFunction::Destroyer *destroyer;
+
+ // Block captures count as local values and have imprecise semantics.
+ // They also can't be arrays, so need to worry about that.
+ //
+ // For const-qualified captures, emit clang.arc.use to ensure the captured
+ // object doesn't get released while we are still depending on its validity
+ // within the block.
+ if (type.isConstQualified() &&
+ type.getObjCLifetime() == Qualifiers::OCL_Strong &&
+ CGM.getCodeGenOpts().OptimizationLevel != 0) {
+ assert(CGM.getLangOpts().ObjCAutoRefCount &&
+ "expected ObjC ARC to be enabled");
+ destroyer = emitARCIntrinsicUse;
+ } else if (dtorKind == QualType::DK_objc_strong_lifetime) {
+ destroyer = destroyARCStrongImprecise;
+ } else {
+ destroyer = getDestroyer(dtorKind);
}
+
+ CleanupKind cleanupKind = NormalCleanup;
+ bool useArrayEHCleanup = needsEHCleanup(dtorKind);
+ if (useArrayEHCleanup)
+ cleanupKind = NormalAndEHCleanup;
+
+ // Extend the lifetime of the capture to the end of the scope enclosing the
+ // block expression except when the block decl is in the list of RetExpr's
+ // cleanup objects, in which case its lifetime ends after the full
+ // expression.
+ auto IsBlockDeclInRetExpr = [&]() {
+ auto *EWC = llvm::dyn_cast_or_null<ExprWithCleanups>(RetExpr);
+ if (EWC)
+ for (auto &C : EWC->getObjects())
+ if (auto *BD = C.dyn_cast<BlockDecl *>())
+ if (BD == blockDecl)
+ return true;
+ return false;
+ };
+
+ if (IsBlockDeclInRetExpr())
+ pushDestroy(cleanupKind, blockField, type, destroyer, useArrayEHCleanup);
+ else
+ pushLifetimeExtendedDestroy(cleanupKind, blockField, type, destroyer,
+ useArrayEHCleanup);
}
// Cast to the converted block-pointer type, which happens (somewhat
@@ -1449,7 +1373,8 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
llvm::IRBuilder<> b(llvm::BasicBlock::Create(CGM.getLLVMContext(), "entry",
Init));
b.CreateAlignedStore(CGM.getNSConcreteGlobalBlock(),
- b.CreateStructGEP(literal, 0), CGM.getPointerAlign().getQuantity());
+ b.CreateStructGEP(literal, 0),
+ CGM.getPointerAlign().getAsAlign());
b.CreateRetVoid();
// We can't use the normal LLVM global initialisation array, because we
// need to specify that this runs early in library initialisation.
@@ -2031,11 +1956,13 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
FunctionDecl *FD = FunctionDecl::Create(
C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
FunctionTy, nullptr, SC_Static, false, false);
-
setBlockHelperAttributesVisibility(blockInfo.CapturesNonExternalType, Fn, FI,
CGM);
+ // This is necessary to avoid inheriting the previous line number.
+ FD->setImplicit();
StartFunction(FD, ReturnTy, Fn, FI, args);
- ApplyDebugLocation NL{*this, blockInfo.getBlockExpr()->getBeginLoc()};
+ auto AL = ApplyDebugLocation::CreateArtificial(*this);
+
llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
Address src = GetAddrOfLocalVar(&SrcDecl);
@@ -2226,10 +2153,12 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
setBlockHelperAttributesVisibility(blockInfo.CapturesNonExternalType, Fn, FI,
CGM);
+ // This is necessary to avoid inheriting the previous line number.
+ FD->setImplicit();
StartFunction(FD, ReturnTy, Fn, FI, args);
markAsIgnoreThreadCheckingAtRuntime(Fn);
- ApplyDebugLocation NL{*this, blockInfo.getBlockExpr()->getBeginLoc()};
+ auto AL = ApplyDebugLocation::CreateArtificial(*this);
llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
diff --git a/clang/lib/CodeGen/CGBlocks.h b/clang/lib/CodeGen/CGBlocks.h
index c4bfde666154..698ecd3d926a 100644
--- a/clang/lib/CodeGen/CGBlocks.h
+++ b/clang/lib/CodeGen/CGBlocks.h
@@ -257,10 +257,6 @@ public:
// This could be zero if no forced alignment is required.
CharUnits BlockHeaderForcedGapSize;
- /// An instruction which dominates the full-expression that the
- /// block is inside.
- llvm::Instruction *DominatingIP;
-
/// The next block in the block-info chain. Invalid if this block
/// info is not part of the CGF's block-info chain, which is true
/// if it corresponds to a global block or a block whose expression
diff --git a/clang/lib/CodeGen/CGBuilder.h b/clang/lib/CodeGen/CGBuilder.h
index 107c9275431c..38e96c0f4ee6 100644
--- a/clang/lib/CodeGen/CGBuilder.h
+++ b/clang/lib/CodeGen/CGBuilder.h
@@ -22,16 +22,15 @@ class CodeGenFunction;
/// This is an IRBuilder insertion helper that forwards to
/// CodeGenFunction::InsertHelper, which adds necessary metadata to
/// instructions.
-class CGBuilderInserter : protected llvm::IRBuilderDefaultInserter {
+class CGBuilderInserter final : public llvm::IRBuilderDefaultInserter {
public:
CGBuilderInserter() = default;
explicit CGBuilderInserter(CodeGenFunction *CGF) : CGF(CGF) {}
-protected:
/// This forwards to CodeGenFunction::InsertHelper.
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
llvm::BasicBlock *BB,
- llvm::BasicBlock::iterator InsertPt) const;
+ llvm::BasicBlock::iterator InsertPt) const override;
private:
CodeGenFunction *CGF = nullptr;
};
@@ -68,38 +67,34 @@ public:
// take an alignment.
llvm::LoadInst *CreateLoad(Address Addr, const llvm::Twine &Name = "") {
return CreateAlignedLoad(Addr.getPointer(),
- Addr.getAlignment().getQuantity(),
- Name);
+ Addr.getAlignment().getAsAlign(), Name);
}
llvm::LoadInst *CreateLoad(Address Addr, const char *Name) {
// This overload is required to prevent string literals from
// ending up in the IsVolatile overload.
return CreateAlignedLoad(Addr.getPointer(),
- Addr.getAlignment().getQuantity(),
- Name);
+ Addr.getAlignment().getAsAlign(), Name);
}
llvm::LoadInst *CreateLoad(Address Addr, bool IsVolatile,
const llvm::Twine &Name = "") {
- return CreateAlignedLoad(Addr.getPointer(),
- Addr.getAlignment().getQuantity(),
- IsVolatile,
- Name);
+ return CreateAlignedLoad(
+ Addr.getPointer(), Addr.getAlignment().getAsAlign(), IsVolatile, Name);
}
using CGBuilderBaseTy::CreateAlignedLoad;
llvm::LoadInst *CreateAlignedLoad(llvm::Value *Addr, CharUnits Align,
const llvm::Twine &Name = "") {
- return CreateAlignedLoad(Addr, Align.getQuantity(), Name);
+ return CreateAlignedLoad(Addr, Align.getAsAlign(), Name);
}
llvm::LoadInst *CreateAlignedLoad(llvm::Value *Addr, CharUnits Align,
const char *Name) {
- return CreateAlignedLoad(Addr, Align.getQuantity(), Name);
+ return CreateAlignedLoad(Addr, Align.getAsAlign(), Name);
}
llvm::LoadInst *CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr,
CharUnits Align,
const llvm::Twine &Name = "") {
assert(Addr->getType()->getPointerElementType() == Ty);
- return CreateAlignedLoad(Addr, Align.getQuantity(), Name);
+ return CreateAlignedLoad(Addr, Align.getAsAlign(), Name);
}
// Note that we intentionally hide the CreateStore APIs that don't
@@ -113,7 +108,7 @@ public:
using CGBuilderBaseTy::CreateAlignedStore;
llvm::StoreInst *CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr,
CharUnits Align, bool IsVolatile = false) {
- return CreateAlignedStore(Val, Addr, Align.getQuantity(), IsVolatile);
+ return CreateAlignedStore(Val, Addr, Align.getAsAlign(), IsVolatile);
}
// FIXME: these "default-aligned" APIs should be removed,
@@ -284,6 +279,13 @@ public:
IsVolatile);
}
+ using CGBuilderBaseTy::CreateMemCpyInline;
+ llvm::CallInst *CreateMemCpyInline(Address Dest, Address Src, uint64_t Size) {
+ return CreateMemCpyInline(
+ Dest.getPointer(), Dest.getAlignment().getAsAlign(), Src.getPointer(),
+ Src.getAlignment().getAsAlign(), getInt64(Size));
+ }
+
using CGBuilderBaseTy::CreateMemMove;
llvm::CallInst *CreateMemMove(Address Dest, Address Src, llvm::Value *Size,
bool IsVolatile = false) {
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 09fd3087b494..8994b939093e 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -28,6 +28,7 @@
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
@@ -43,9 +44,10 @@
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/MatrixBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ScopedPrinter.h"
-#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/X86TargetParser.h"
#include <sstream>
using namespace clang;
@@ -74,6 +76,8 @@ static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
break;
}
}
+ if (CGF.CGM.stopAutoInit())
+ return;
CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
}
@@ -215,8 +219,9 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
if (Invert)
- Result = CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
- llvm::ConstantInt::get(IntType, -1));
+ Result =
+ CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
+ llvm::ConstantInt::getAllOnesValue(IntType));
Result = EmitFromInt(CGF, Result, T, ValueType);
return RValue::get(Result);
}
@@ -411,6 +416,25 @@ static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
}
}
+// Emit an intrinsic where all operands are of the same type as the result.
+// Depending on mode, this may be a constrained floating-point intrinsic.
+static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
+ unsigned IntrinsicID,
+ unsigned ConstrainedIntrinsicID,
+ llvm::Type *Ty,
+ ArrayRef<Value *> Args) {
+ Function *F;
+ if (CGF.Builder.getIsFPConstrained())
+ F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Ty);
+ else
+ F = CGF.CGM.getIntrinsic(IntrinsicID, Ty);
+
+ if (CGF.Builder.getIsFPConstrained())
+ return CGF.Builder.CreateConstrainedFPCall(F, Args);
+ else
+ return CGF.Builder.CreateCall(F, Args);
+}
+
// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type.
static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
@@ -566,7 +590,9 @@ static WidthAndSignedness
getIntegerWidthAndSignedness(const clang::ASTContext &context,
const clang::QualType Type) {
assert(Type->isIntegerType() && "Given type is not an integer.");
- unsigned Width = Type->isBooleanType() ? 1 : context.getTypeInfo(Type).Width;
+ unsigned Width = Type->isBooleanType() ? 1
+ : Type->isExtIntType() ? context.getIntWidth(Type)
+ : context.getTypeInfo(Type).Width;
bool Signed = Type->isSignedIntegerType();
return {Width, Signed};
}
@@ -1251,6 +1277,8 @@ llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
FunctionDecl *FD = FunctionDecl::Create(
Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
FuncionTy, nullptr, SC_PrivateExtern, false, false);
+ // Avoid generating debug location info for the function.
+ FD->setImplicit();
StartFunction(FD, ReturnTy, Fn, FI, Args);
@@ -1320,14 +1348,42 @@ RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
} else if (const Expr *TheExpr = Item.getExpr()) {
ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
- // Check if this is a retainable type.
- if (TheExpr->getType()->isObjCRetainableType()) {
+ // If a temporary object that requires destruction after the full
+ // expression is passed, push a lifetime-extended cleanup to extend its
+ // lifetime to the end of the enclosing block scope.
+ auto LifetimeExtendObject = [&](const Expr *E) {
+ E = E->IgnoreParenCasts();
+ // Extend lifetimes of objects returned by function calls and message
+ // sends.
+
+ // FIXME: We should do this in other cases in which temporaries are
+ // created including arguments of non-ARC types (e.g., C++
+ // temporaries).
+ if (isa<CallExpr>(E) || isa<ObjCMessageExpr>(E))
+ return true;
+ return false;
+ };
+
+ if (TheExpr->getType()->isObjCRetainableType() &&
+ getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
"Only scalar can be a ObjC retainable type");
- // Check if the object is constant, if not, save it in
- // RetainableOperands.
- if (!isa<Constant>(ArgVal))
- RetainableOperands.push_back(ArgVal);
+ if (!isa<Constant>(ArgVal)) {
+ CleanupKind Cleanup = getARCCleanupKind();
+ QualType Ty = TheExpr->getType();
+ Address Alloca = Address::invalid();
+ Address Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
+ ArgVal = EmitARCRetain(Ty, ArgVal);
+ Builder.CreateStore(ArgVal, Addr);
+ pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
+ CodeGenFunction::destroyARCStrongPrecise,
+ Cleanup & EHCleanup);
+
+ // Push a clang.arc.use call to ensure ARC optimizer knows that the
+ // argument has to be alive.
+ if (CGM.getCodeGenOpts().OptimizationLevel != 0)
+ pushCleanupAfterFullExpr<CallObjCArcUse>(Cleanup, ArgVal);
+ }
}
} else {
ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
@@ -1349,18 +1405,6 @@ RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
Layout, BufAddr.getAlignment());
EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);
-
- // Push a clang.arc.use cleanup for each object in RetainableOperands. The
- // cleanup will cause the use to appear after the final log call, keeping
- // the object valid while it’s held in the log buffer. Note that if there’s
- // a release cleanup on the object, it will already be active; since
- // cleanups are emitted in reverse order, the use will occur before the
- // object is released.
- if (!RetainableOperands.empty() && getLangOpts().ObjCAutoRefCount &&
- CGM.getCodeGenOpts().OptimizationLevel != 0)
- for (llvm::Value *Object : RetainableOperands)
- pushFullExprCleanup<CallObjCArcUse>(getARCCleanupKind(), Object);
-
return RValue::get(BufAddr.getPointer());
}
@@ -1521,8 +1565,7 @@ static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType,
// We check whether we are in a recursive type
if (CanonicalType->isRecordType()) {
- Value *TmpRes =
- dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1);
+ TmpRes = dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1);
Res = CGF.Builder.CreateAdd(TmpRes, Res);
continue;
}
@@ -1629,7 +1672,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_ceilf:
case Builtin::BI__builtin_ceilf16:
case Builtin::BI__builtin_ceill:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::ceil,
Intrinsic::experimental_constrained_ceil));
@@ -1650,7 +1693,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_cosf:
case Builtin::BI__builtin_cosf16:
case Builtin::BI__builtin_cosl:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::cos,
Intrinsic::experimental_constrained_cos));
@@ -1661,7 +1704,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_expf:
case Builtin::BI__builtin_expf16:
case Builtin::BI__builtin_expl:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::exp,
Intrinsic::experimental_constrained_exp));
@@ -1672,7 +1715,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_exp2f:
case Builtin::BI__builtin_exp2f16:
case Builtin::BI__builtin_exp2l:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::exp2,
Intrinsic::experimental_constrained_exp2));
@@ -1693,7 +1736,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_floorf:
case Builtin::BI__builtin_floorf16:
case Builtin::BI__builtin_floorl:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::floor,
Intrinsic::experimental_constrained_floor));
@@ -1704,7 +1747,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_fmaf:
case Builtin::BI__builtin_fmaf16:
case Builtin::BI__builtin_fmal:
- return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::fma,
Intrinsic::experimental_constrained_fma));
@@ -1715,7 +1758,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_fmaxf:
case Builtin::BI__builtin_fmaxf16:
case Builtin::BI__builtin_fmaxl:
- return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::maxnum,
Intrinsic::experimental_constrained_maxnum));
@@ -1726,7 +1769,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_fminf:
case Builtin::BI__builtin_fminf16:
case Builtin::BI__builtin_fminl:
- return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::minnum,
Intrinsic::experimental_constrained_minnum));
@@ -1751,7 +1794,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_logf:
case Builtin::BI__builtin_logf16:
case Builtin::BI__builtin_logl:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::log,
Intrinsic::experimental_constrained_log));
@@ -1762,7 +1805,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_log10f:
case Builtin::BI__builtin_log10f16:
case Builtin::BI__builtin_log10l:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::log10,
Intrinsic::experimental_constrained_log10));
@@ -1773,7 +1816,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_log2f:
case Builtin::BI__builtin_log2f16:
case Builtin::BI__builtin_log2l:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::log2,
Intrinsic::experimental_constrained_log2));
@@ -1783,7 +1826,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_nearbyint:
case Builtin::BI__builtin_nearbyintf:
case Builtin::BI__builtin_nearbyintl:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::nearbyint,
Intrinsic::experimental_constrained_nearbyint));
@@ -1794,7 +1837,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_powf:
case Builtin::BI__builtin_powf16:
case Builtin::BI__builtin_powl:
- return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::pow,
Intrinsic::experimental_constrained_pow));
@@ -1805,7 +1848,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_rintf:
case Builtin::BI__builtin_rintf16:
case Builtin::BI__builtin_rintl:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::rint,
Intrinsic::experimental_constrained_rint));
@@ -1816,7 +1859,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_roundf:
case Builtin::BI__builtin_roundf16:
case Builtin::BI__builtin_roundl:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::round,
Intrinsic::experimental_constrained_round));
@@ -1827,7 +1870,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_sinf:
case Builtin::BI__builtin_sinf16:
case Builtin::BI__builtin_sinl:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::sin,
Intrinsic::experimental_constrained_sin));
@@ -1838,7 +1881,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_sqrtf:
case Builtin::BI__builtin_sqrtf16:
case Builtin::BI__builtin_sqrtl:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::sqrt,
Intrinsic::experimental_constrained_sqrt));
@@ -1849,7 +1892,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_truncf:
case Builtin::BI__builtin_truncf16:
case Builtin::BI__builtin_truncl:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::trunc,
Intrinsic::experimental_constrained_trunc));
@@ -2152,6 +2195,33 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
return RValue::get(Result);
}
+ case Builtin::BI__builtin_expect_with_probability: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ArgType = ArgValue->getType();
+
+ Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
+ llvm::APFloat Probability(0.0);
+ const Expr *ProbArg = E->getArg(2);
+ bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext());
+ assert(EvalSucceed && "probability should be able to evaluate as float");
+ (void)EvalSucceed;
+ bool LoseInfo = false;
+ Probability.convert(llvm::APFloat::IEEEdouble(),
+ llvm::RoundingMode::Dynamic, &LoseInfo);
+ llvm::Type *Ty = ConvertType(ProbArg->getType());
+ Constant *Confidence = ConstantFP::get(Ty, Probability);
+ // Don't generate llvm.expect.with.probability on -O0 as the backend
+ // won't use it for anything.
+ // Note, we still IRGen ExpectedValue because it could have side-effects.
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0)
+ return RValue::get(ArgValue);
+
+ Function *FnExpect =
+ CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType);
+ Value *Result = Builder.CreateCall(
+ FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval");
+ return RValue::get(Result);
+ }
case Builtin::BI__builtin_assume_aligned: {
const Expr *Ptr = E->getArg(0);
Value *PtrValue = EmitScalarExpr(Ptr);
@@ -2164,7 +2234,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
llvm::Value::MaximumAlignment);
- EmitAlignmentAssumption(PtrValue, Ptr,
+ emitAlignmentAssumption(PtrValue, Ptr,
/*The expr loc is sufficient.*/ SourceLocation(),
AlignmentCI, OffsetValue);
return RValue::get(PtrValue);
@@ -2336,6 +2406,53 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
+ case Builtin::BI__builtin_matrix_transpose: {
+ const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
+ Value *MatValue = EmitScalarExpr(E->getArg(0));
+ MatrixBuilder<CGBuilderTy> MB(Builder);
+ Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
+ MatrixTy->getNumColumns());
+ return RValue::get(Result);
+ }
+
+ case Builtin::BI__builtin_matrix_column_major_load: {
+ MatrixBuilder<CGBuilderTy> MB(Builder);
+ // Emit everything that isn't dependent on the first parameter type
+ Value *Stride = EmitScalarExpr(E->getArg(3));
+ const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
+ auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>();
+ assert(PtrTy && "arg0 must be of pointer type");
+ bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
+
+ Address Src = EmitPointerWithAlignment(E->getArg(0));
+ EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(),
+ E->getArg(0)->getExprLoc(), FD, 0);
+ Value *Result = MB.CreateColumnMajorLoad(
+ Src.getPointer(), Align(Src.getAlignment().getQuantity()), Stride,
+ IsVolatile, ResultTy->getNumRows(), ResultTy->getNumColumns(),
+ "matrix");
+ return RValue::get(Result);
+ }
+
+ case Builtin::BI__builtin_matrix_column_major_store: {
+ MatrixBuilder<CGBuilderTy> MB(Builder);
+ Value *Matrix = EmitScalarExpr(E->getArg(0));
+ Address Dst = EmitPointerWithAlignment(E->getArg(1));
+ Value *Stride = EmitScalarExpr(E->getArg(2));
+
+ const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
+ auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>();
+ assert(PtrTy && "arg1 must be of pointer type");
+ bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
+
+ EmitNonNullArgCheck(RValue::get(Dst.getPointer()), E->getArg(1)->getType(),
+ E->getArg(1)->getExprLoc(), FD, 0);
+ Value *Result = MB.CreateColumnMajorStore(
+ Matrix, Dst.getPointer(), Align(Dst.getAlignment().getQuantity()),
+ Stride, IsVolatile, MatrixTy->getNumRows(), MatrixTy->getNumColumns());
+ return RValue::get(Result);
+ }
+
case Builtin::BIfinite:
case Builtin::BI__finite:
case Builtin::BIfinitef:
@@ -2518,6 +2635,19 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(Dest.getPointer());
}
+ case Builtin::BI__builtin_memcpy_inline: {
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
+ Address Src = EmitPointerWithAlignment(E->getArg(1));
+ uint64_t Size =
+ E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
+ EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
+ E->getArg(0)->getExprLoc(), FD, 0);
+ EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
+ E->getArg(1)->getExprLoc(), FD, 1);
+ Builder.CreateMemCpyInline(Dest, Src, Size);
+ return RValue::get(nullptr);
+ }
+
case Builtin::BI__builtin_char_memchr:
BuiltinID = Builtin::BI__builtin_memchr;
break;
@@ -3222,6 +3352,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
ConvertType(E->getType())));
}
+ case Builtin::BI__warn_memset_zero_len:
+ return RValue::getIgnored();
case Builtin::BI__annotation: {
// Re-encode each wide string to UTF8 and make an MDString.
SmallVector<Metadata *, 1> Strings;
@@ -3928,7 +4060,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
auto *V =
Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
Builder.CreateAlignedStore(
- V, GEP, CGM.getDataLayout().getPrefTypeAlignment(SizeTy));
+ V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
}
return std::tie(ElemPtr, TmpSize, TmpPtr);
};
@@ -3947,19 +4079,17 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// Create a vector of the arguments, as well as a constant value to
// express to the runtime the number of variadic arguments.
- std::vector<llvm::Value *> Args = {
- Queue, Flags, Range,
- Kernel, Block, ConstantInt::get(IntTy, NumArgs - 4),
- ElemPtr};
- std::vector<llvm::Type *> ArgTys = {
+ llvm::Value *const Args[] = {Queue, Flags,
+ Range, Kernel,
+ Block, ConstantInt::get(IntTy, NumArgs - 4),
+ ElemPtr};
+ llvm::Type *const ArgTys[] = {
QueueTy, IntTy, RangeTy, GenericVoidPtrTy,
GenericVoidPtrTy, IntTy, ElemPtr->getType()};
- llvm::FunctionType *FTy = llvm::FunctionType::get(
- Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
- auto Call =
- RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
- llvm::ArrayRef<llvm::Value *>(Args)));
+ llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
+ auto Call = RValue::get(
+ Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
if (TmpSize)
EmitLifetimeEnd(TmpSize, TmpPtr);
return Call;
@@ -4115,6 +4245,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BIprintf:
if (getTarget().getTriple().isNVPTX())
return EmitNVPTXDevicePrintfCallExpr(E, ReturnValue);
+ if (getTarget().getTriple().getArch() == Triple::amdgcn &&
+ getLangOpts().HIP)
+ return EmitAMDGPUDevicePrintfCallExpr(E, ReturnValue);
break;
case Builtin::BI__builtin_canonicalize:
case Builtin::BI__builtin_canonicalizef:
@@ -4427,35 +4560,41 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
NeonTypeFlags TypeFlags,
- bool HasLegalHalfType=true,
- bool V1Ty=false) {
+ bool HasLegalHalfType = true,
+ bool V1Ty = false,
+ bool AllowBFloatArgsAndRet = true) {
int IsQuad = TypeFlags.isQuad();
switch (TypeFlags.getEltType()) {
case NeonTypeFlags::Int8:
case NeonTypeFlags::Poly8:
- return llvm::VectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
case NeonTypeFlags::Int16:
case NeonTypeFlags::Poly16:
- return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
+ case NeonTypeFlags::BFloat16:
+ if (AllowBFloatArgsAndRet)
+ return llvm::FixedVectorType::get(CGF->BFloatTy, V1Ty ? 1 : (4 << IsQuad));
+ else
+ return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
case NeonTypeFlags::Float16:
if (HasLegalHalfType)
- return llvm::VectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
else
- return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
case NeonTypeFlags::Int32:
- return llvm::VectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
case NeonTypeFlags::Int64:
case NeonTypeFlags::Poly64:
- return llvm::VectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
case NeonTypeFlags::Poly128:
// FIXME: i128 and f128 doesn't get fully support in Clang and llvm.
// There is a lot of i128 and f128 API missing.
// so we use v16i8 to represent poly128 and get pattern matched.
- return llvm::VectorType::get(CGF->Int8Ty, 16);
+ return llvm::FixedVectorType::get(CGF->Int8Ty, 16);
case NeonTypeFlags::Float32:
- return llvm::VectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
case NeonTypeFlags::Float64:
- return llvm::VectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
}
llvm_unreachable("Unknown vector element type!");
}
@@ -4465,34 +4604,46 @@ static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF,
int IsQuad = IntTypeFlags.isQuad();
switch (IntTypeFlags.getEltType()) {
case NeonTypeFlags::Int16:
- return llvm::VectorType::get(CGF->HalfTy, (4 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->HalfTy, (4 << IsQuad));
case NeonTypeFlags::Int32:
- return llvm::VectorType::get(CGF->FloatTy, (2 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->FloatTy, (2 << IsQuad));
case NeonTypeFlags::Int64:
- return llvm::VectorType::get(CGF->DoubleTy, (1 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->DoubleTy, (1 << IsQuad));
default:
llvm_unreachable("Type can't be converted to floating-point!");
}
}
-Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
- unsigned nElts = V->getType()->getVectorNumElements();
- Value* SV = llvm::ConstantVector::getSplat(nElts, C);
+Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C,
+ const ElementCount &Count) {
+ Value *SV = llvm::ConstantVector::getSplat(Count, C);
return Builder.CreateShuffleVector(V, V, SV, "lane");
}
+Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
+ ElementCount EC = cast<llvm::VectorType>(V->getType())->getElementCount();
+ return EmitNeonSplat(V, C, EC);
+}
+
Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
const char *name,
unsigned shift, bool rightshift) {
unsigned j = 0;
for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
- ai != ae; ++ai, ++j)
+ ai != ae; ++ai, ++j) {
+ if (F->isConstrainedFPIntrinsic())
+ if (ai->getType()->isMetadataTy())
+ continue;
if (shift > 0 && shift == j)
Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
else
Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
+ }
- return Builder.CreateCall(F, Ops, name);
+ if (F->isConstrainedFPIntrinsic())
+ return Builder.CreateConstrainedFPCall(F, Ops, name);
+ else
+ return Builder.CreateCall(F, Ops, name);
}
Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
@@ -4556,17 +4707,17 @@ enum {
};
namespace {
-struct NeonIntrinsicInfo {
+struct ARMVectorIntrinsicInfo {
const char *NameHint;
unsigned BuiltinID;
unsigned LLVMIntrinsic;
unsigned AltLLVMIntrinsic;
- unsigned TypeModifier;
+ uint64_t TypeModifier;
bool operator<(unsigned RHSBuiltinID) const {
return BuiltinID < RHSBuiltinID;
}
- bool operator<(const NeonIntrinsicInfo &TE) const {
+ bool operator<(const ARMVectorIntrinsicInfo &TE) const {
return BuiltinID < TE.BuiltinID;
}
};
@@ -4584,7 +4735,12 @@ struct NeonIntrinsicInfo {
Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
TypeModifier }
-static const NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
+static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
+ NEONMAP1(__a32_vcvt_bf16_v, arm_neon_vcvtfp2bf, 0),
+ NEONMAP0(splat_lane_v),
+ NEONMAP0(splat_laneq_v),
+ NEONMAP0(splatq_lane_v),
+ NEONMAP0(splatq_laneq_v),
NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
NEONMAP1(vabs_v, arm_neon_vabs, 0),
@@ -4594,6 +4750,11 @@ static const NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP1(vaeseq_v, arm_neon_aese, 0),
NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0),
+ NEONMAP1(vbfdot_v, arm_neon_bfdot, 0),
+ NEONMAP1(vbfdotq_v, arm_neon_bfdot, 0),
+ NEONMAP1(vbfmlalbq_v, arm_neon_bfmlalb, 0),
+ NEONMAP1(vbfmlaltq_v, arm_neon_bfmlalt, 0),
+ NEONMAP1(vbfmmlaq_v, arm_neon_bfmmla, 0),
NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
NEONMAP1(vcadd_rot270_v, arm_neon_vcadd_rot270, Add1ArgType),
@@ -4654,6 +4815,7 @@ static const NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
+ NEONMAP1(vcvth_bf16_f32, arm_neon_vcvtbfp2bf, 0),
NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
@@ -4752,6 +4914,7 @@ static const NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vmmlaq_v, arm_neon_ummla, arm_neon_smmla, 0),
NEONMAP0(vmovl_v),
NEONMAP0(vmovn_v),
NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
@@ -4859,13 +5022,21 @@ static const NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP0(vtrnq_v),
NEONMAP0(vtst_v),
NEONMAP0(vtstq_v),
+ NEONMAP1(vusdot_v, arm_neon_usdot, 0),
+ NEONMAP1(vusdotq_v, arm_neon_usdot, 0),
+ NEONMAP1(vusmmlaq_v, arm_neon_usmmla, 0),
NEONMAP0(vuzp_v),
NEONMAP0(vuzpq_v),
NEONMAP0(vzip_v),
NEONMAP0(vzipq_v)
};
-static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
+static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
+ NEONMAP1(__a64_vcvtq_low_bf16_v, aarch64_neon_bfcvtn, 0),
+ NEONMAP0(splat_lane_v),
+ NEONMAP0(splat_laneq_v),
+ NEONMAP0(splatq_lane_v),
+ NEONMAP0(splatq_laneq_v),
NEONMAP1(vabs_v, aarch64_neon_abs, 0),
NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
NEONMAP0(vaddhn_v),
@@ -4873,6 +5044,11 @@ static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
+ NEONMAP1(vbfdot_v, aarch64_neon_bfdot, 0),
+ NEONMAP1(vbfdotq_v, aarch64_neon_bfdot, 0),
+ NEONMAP1(vbfmlalbq_v, aarch64_neon_bfmlalb, 0),
+ NEONMAP1(vbfmlaltq_v, aarch64_neon_bfmlalt, 0),
+ NEONMAP1(vbfmmlaq_v, aarch64_neon_bfmmla, 0),
NEONMAP1(vcadd_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
NEONMAP1(vcadd_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType),
NEONMAP1(vcaddq_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
@@ -4916,6 +5092,7 @@ static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP0(vcvtq_f16_v),
NEONMAP0(vcvtq_f32_v),
+ NEONMAP1(vcvtq_high_bf16_v, aarch64_neon_bfcvtn2, 0),
NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
@@ -4950,6 +5127,7 @@ static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0),
NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0),
NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0),
+ NEONMAP2(vmmlaq_v, aarch64_neon_ummla, aarch64_neon_smmla, 0),
NEONMAP0(vmovl_v),
NEONMAP0(vmovn_v),
NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
@@ -4964,14 +5142,22 @@ static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
+ NEONMAP1(vqdmulh_lane_v, aarch64_neon_sqdmulh_lane, 0),
+ NEONMAP1(vqdmulh_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
+ NEONMAP1(vqdmulhq_lane_v, aarch64_neon_sqdmulh_lane, 0),
+ NEONMAP1(vqdmulhq_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
+ NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0),
+ NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
+ NEONMAP1(vqrdmulhq_lane_v, aarch64_neon_sqrdmulh_lane, 0),
+ NEONMAP1(vqrdmulhq_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
@@ -5024,9 +5210,12 @@ static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP0(vsubhn_v),
NEONMAP0(vtst_v),
NEONMAP0(vtstq_v),
+ NEONMAP1(vusdot_v, aarch64_neon_usdot, 0),
+ NEONMAP1(vusdotq_v, aarch64_neon_usdot, 0),
+ NEONMAP1(vusmmlaq_v, aarch64_neon_usmmla, 0),
};
-static const NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = {
+static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
@@ -5059,6 +5248,7 @@ static const NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = {
NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_bf16_f32, aarch64_neon_bfcvt, 0),
NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
@@ -5256,24 +5446,42 @@ static const NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = {
#undef NEONMAP1
#undef NEONMAP2
+#define SVEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
+ { \
+ #NameBase, SVE::BI__builtin_sve_##NameBase, Intrinsic::LLVMIntrinsic, 0, \
+ TypeModifier \
+ }
+
+#define SVEMAP2(NameBase, TypeModifier) \
+ { #NameBase, SVE::BI__builtin_sve_##NameBase, 0, 0, TypeModifier }
+static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = {
+#define GET_SVE_LLVM_INTRINSIC_MAP
+#include "clang/Basic/arm_sve_builtin_cg.inc"
+#undef GET_SVE_LLVM_INTRINSIC_MAP
+};
+
+#undef SVEMAP1
+#undef SVEMAP2
+
static bool NEONSIMDIntrinsicsProvenSorted = false;
static bool AArch64SIMDIntrinsicsProvenSorted = false;
static bool AArch64SISDIntrinsicsProvenSorted = false;
+static bool AArch64SVEIntrinsicsProvenSorted = false;
-
-static const NeonIntrinsicInfo *
-findNeonIntrinsicInMap(ArrayRef<NeonIntrinsicInfo> IntrinsicMap,
- unsigned BuiltinID, bool &MapProvenSorted) {
+static const ARMVectorIntrinsicInfo *
+findARMVectorIntrinsicInMap(ArrayRef<ARMVectorIntrinsicInfo> IntrinsicMap,
+ unsigned BuiltinID, bool &MapProvenSorted) {
#ifndef NDEBUG
if (!MapProvenSorted) {
- assert(std::is_sorted(std::begin(IntrinsicMap), std::end(IntrinsicMap)));
+ assert(llvm::is_sorted(IntrinsicMap));
MapProvenSorted = true;
}
#endif
- const NeonIntrinsicInfo *Builtin = llvm::lower_bound(IntrinsicMap, BuiltinID);
+ const ARMVectorIntrinsicInfo *Builtin =
+ llvm::lower_bound(IntrinsicMap, BuiltinID);
if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
return Builtin;
@@ -5296,7 +5504,7 @@ Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
if (Modifier & AddRetType) {
llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
if (Modifier & VectorizeRetType)
- Ty = llvm::VectorType::get(
+ Ty = llvm::FixedVectorType::get(
Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);
Tys.push_back(Ty);
@@ -5305,7 +5513,7 @@ Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
// Arguments.
if (Modifier & VectorizeArgTypes) {
int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
- ArgType = llvm::VectorType::get(ArgType, Elts);
+ ArgType = llvm::FixedVectorType::get(ArgType, Elts);
}
if (Modifier & (Add1ArgType | Add2ArgTypes))
@@ -5320,10 +5528,9 @@ Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
return CGM.getIntrinsic(IntrinsicID, Tys);
}
-static Value *EmitCommonNeonSISDBuiltinExpr(CodeGenFunction &CGF,
- const NeonIntrinsicInfo &SISDInfo,
- SmallVectorImpl<Value *> &Ops,
- const CallExpr *E) {
+static Value *EmitCommonNeonSISDBuiltinExpr(
+ CodeGenFunction &CGF, const ARMVectorIntrinsicInfo &SISDInfo,
+ SmallVectorImpl<Value *> &Ops, const CallExpr *E) {
unsigned BuiltinID = SISDInfo.BuiltinID;
unsigned int Int = SISDInfo.LLVMIntrinsic;
unsigned Modifier = SISDInfo.TypeModifier;
@@ -5368,8 +5575,8 @@ static Value *EmitCommonNeonSISDBuiltinExpr(CodeGenFunction &CGF,
assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
// The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
// it before inserting.
- Ops[j] =
- CGF.Builder.CreateTruncOrBitCast(Ops[j], ArgTy->getVectorElementType());
+ Ops[j] = CGF.Builder.CreateTruncOrBitCast(
+ Ops[j], cast<llvm::VectorType>(ArgTy)->getElementType());
Ops[j] =
CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
}
@@ -5399,8 +5606,11 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
bool Usgn = Type.isUnsigned();
bool Quad = Type.isQuad();
const bool HasLegalHalfType = getTarget().hasLegalHalfType();
+ const bool AllowBFloatArgsAndRet =
+ getTargetHooks().getABIInfo().allowBFloatArgsAndRet();
- llvm::VectorType *VTy = GetNeonType(this, Type, HasLegalHalfType);
+ llvm::VectorType *VTy = GetNeonType(this, Type, HasLegalHalfType, false,
+ AllowBFloatArgsAndRet);
llvm::Type *Ty = VTy;
if (!Ty)
return nullptr;
@@ -5415,6 +5625,19 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
switch (BuiltinID) {
default: break;
+ case NEON::BI__builtin_neon_splat_lane_v:
+ case NEON::BI__builtin_neon_splat_laneq_v:
+ case NEON::BI__builtin_neon_splatq_lane_v:
+ case NEON::BI__builtin_neon_splatq_laneq_v: {
+ auto NumElements = VTy->getElementCount();
+ if (BuiltinID == NEON::BI__builtin_neon_splatq_lane_v)
+ NumElements = NumElements * 2;
+ if (BuiltinID == NEON::BI__builtin_neon_splat_laneq_v)
+ NumElements = NumElements / 2;
+
+ Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
+ return EmitNeonSplat(Ops[0], cast<ConstantInt>(Ops[1]), NumElements);
+ }
case NEON::BI__builtin_neon_vpadd_v:
case NEON::BI__builtin_neon_vpaddq_v:
// We don't allow fp/int overloading of intrinsics.
@@ -5467,7 +5690,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Ty = HalfTy;
break;
}
- llvm::Type *VecFlt = llvm::VectorType::get(Ty, VTy->getNumElements());
+ auto *VecFlt = llvm::FixedVectorType::get(Ty, VTy->getNumElements());
llvm::Type *Tys[] = { VTy, VecFlt };
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
return EmitNeonCall(F, Ops, NameHint);
@@ -5614,7 +5837,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vext_v:
case NEON::BI__builtin_neon_vextq_v: {
int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
- SmallVector<uint32_t, 16> Indices;
+ SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
Indices.push_back(i+CV);
@@ -5624,13 +5847,14 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
}
case NEON::BI__builtin_neon_vfma_v:
case NEON::BI__builtin_neon_vfmaq_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
// NEON intrinsic puts accumulator first, unlike the LLVM fma.
- return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
+ return emitCallMaybeConstrainedFPBuiltin(
+ *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
+ {Ops[1], Ops[2], Ops[0]});
}
case NEON::BI__builtin_neon_vld1_v:
case NEON::BI__builtin_neon_vld1q_v: {
@@ -5644,7 +5868,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vld1q_x3_v:
case NEON::BI__builtin_neon_vld1_x4_v:
case NEON::BI__builtin_neon_vld1q_x4_v: {
- llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
+ llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
llvm::Type *Tys[2] = { VTy, PTy };
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
@@ -5726,8 +5950,8 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
llvm::Type *EltTy =
llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
- llvm::Type *NarrowTy =
- llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
+ auto *NarrowTy =
+ llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
llvm::Type *Tys[2] = { Ty, NarrowTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
}
@@ -5736,8 +5960,8 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
// The source operand type has twice as many elements of half the size.
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
- llvm::Type *NarrowTy =
- llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
+ auto *NarrowTy =
+ llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
llvm::Type *Tys[2] = { Ty, NarrowTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
}
@@ -5749,6 +5973,29 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Ops.resize(2);
return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint);
}
+ case NEON::BI__builtin_neon_vqdmulhq_lane_v:
+ case NEON::BI__builtin_neon_vqdmulh_lane_v:
+ case NEON::BI__builtin_neon_vqrdmulhq_lane_v:
+ case NEON::BI__builtin_neon_vqrdmulh_lane_v: {
+ auto *RTy = cast<llvm::VectorType>(Ty);
+ if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v ||
+ BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v)
+ RTy = llvm::FixedVectorType::get(RTy->getElementType(),
+ RTy->getNumElements() * 2);
+ llvm::Type *Tys[2] = {
+ RTy, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
+ /*isQuad*/ false))};
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
+ }
+ case NEON::BI__builtin_neon_vqdmulhq_laneq_v:
+ case NEON::BI__builtin_neon_vqdmulh_laneq_v:
+ case NEON::BI__builtin_neon_vqrdmulhq_laneq_v:
+ case NEON::BI__builtin_neon_vqrdmulh_laneq_v: {
+ llvm::Type *Tys[2] = {
+ Ty, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
+ /*isQuad*/ true))};
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
+ }
case NEON::BI__builtin_neon_vqshl_n_v:
case NEON::BI__builtin_neon_vqshlq_n_v:
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
@@ -5765,7 +6012,9 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
case NEON::BI__builtin_neon_vrndi_v:
case NEON::BI__builtin_neon_vrndiq_v:
- Int = Intrinsic::nearbyint;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_nearbyint
+ : Intrinsic::nearbyint;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
case NEON::BI__builtin_neon_vrshr_n_v:
case NEON::BI__builtin_neon_vrshrq_n_v:
@@ -5823,7 +6072,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vst1q_x3_v:
case NEON::BI__builtin_neon_vst1_x4_v:
case NEON::BI__builtin_neon_vst1q_x4_v: {
- llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
+ llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
// TODO: Currently in AArch32 mode the pointer operand comes first, whereas
// in AArch64 it comes last. We may want to stick to one or another.
if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
@@ -5860,7 +6109,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
- SmallVector<uint32_t, 16> Indices;
+ SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
Indices.push_back(i+vi);
Indices.push_back(i+e+vi);
@@ -5888,7 +6137,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
- SmallVector<uint32_t, 16> Indices;
+ SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
Indices.push_back(2*i+vi);
@@ -5906,7 +6155,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
- SmallVector<uint32_t, 16> Indices;
+ SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
Indices.push_back((i + vi*e) >> 1);
Indices.push_back(((i + vi*e) >> 1)+e);
@@ -5919,40 +6168,91 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
}
case NEON::BI__builtin_neon_vdot_v:
case NEON::BI__builtin_neon_vdotq_v: {
- llvm::Type *InputTy =
- llvm::VectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
+ auto *InputTy =
+ llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
llvm::Type *Tys[2] = { Ty, InputTy };
Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
}
case NEON::BI__builtin_neon_vfmlal_low_v:
case NEON::BI__builtin_neon_vfmlalq_low_v: {
- llvm::Type *InputTy =
- llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
+ auto *InputTy =
+ llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
llvm::Type *Tys[2] = { Ty, InputTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low");
}
case NEON::BI__builtin_neon_vfmlsl_low_v:
case NEON::BI__builtin_neon_vfmlslq_low_v: {
- llvm::Type *InputTy =
- llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
+ auto *InputTy =
+ llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
llvm::Type *Tys[2] = { Ty, InputTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low");
}
case NEON::BI__builtin_neon_vfmlal_high_v:
case NEON::BI__builtin_neon_vfmlalq_high_v: {
- llvm::Type *InputTy =
- llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
+ auto *InputTy =
+ llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
llvm::Type *Tys[2] = { Ty, InputTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high");
}
case NEON::BI__builtin_neon_vfmlsl_high_v:
case NEON::BI__builtin_neon_vfmlslq_high_v: {
- llvm::Type *InputTy =
- llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
+ auto *InputTy =
+ llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
llvm::Type *Tys[2] = { Ty, InputTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high");
}
+ case NEON::BI__builtin_neon_vmmlaq_v: {
+ auto *InputTy =
+ llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
+ llvm::Type *Tys[2] = { Ty, InputTy };
+ Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmmla");
+ }
+ case NEON::BI__builtin_neon_vusmmlaq_v: {
+ auto *InputTy =
+ llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
+ llvm::Type *Tys[2] = { Ty, InputTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusmmla");
+ }
+ case NEON::BI__builtin_neon_vusdot_v:
+ case NEON::BI__builtin_neon_vusdotq_v: {
+ auto *InputTy =
+ llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
+ llvm::Type *Tys[2] = { Ty, InputTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusdot");
+ }
+ case NEON::BI__builtin_neon_vbfdot_v:
+ case NEON::BI__builtin_neon_vbfdotq_v: {
+ llvm::Type *InputTy =
+ llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
+ llvm::Type *Tys[2] = { Ty, InputTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfdot");
+ }
+ case NEON::BI__builtin_neon_vbfmmlaq_v: {
+ llvm::Type *InputTy =
+ llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
+ llvm::Type *Tys[2] = { Ty, InputTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfmmla");
+ }
+ case NEON::BI__builtin_neon_vbfmlalbq_v: {
+ llvm::Type *InputTy =
+ llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
+ llvm::Type *Tys[2] = { Ty, InputTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfmlalb");
+ }
+ case NEON::BI__builtin_neon_vbfmlaltq_v: {
+ llvm::Type *InputTy =
+ llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
+ llvm::Type *Tys[2] = { Ty, InputTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfmlalt");
+ }
+ case NEON::BI__builtin_neon___a32_vcvt_bf16_v: {
+ llvm::Type *Tys[1] = { Ty };
+ Function *F = CGM.getIntrinsic(Int, Tys);
+ return EmitNeonCall(F, Ops, "vcvtfp2bf");
+ }
+
}
assert(Int && "Expected valid intrinsic number");
@@ -5997,7 +6297,7 @@ static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
TblOps.push_back(ExtOp);
// Build a vector containing sequential number like (0, 1, 2, ..., 15)
- SmallVector<uint32_t, 16> Indices;
+ SmallVector<int, 16> Indices;
llvm::VectorType *TblTy = cast<llvm::VectorType>(Ops[0]->getType());
for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
Indices.push_back(2*i);
@@ -6061,6 +6361,12 @@ Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
llvm::ConstantInt::get(Int32Ty, Value));
}
+enum SpecialRegisterAccessKind {
+ NormalRead,
+ VolatileRead,
+ Write,
+};
+
// Generates the IR for the read/write special register builtin,
// ValueType is the type of the value that is to be written or read,
// RegisterType is the type of the register being written to or read from.
@@ -6068,7 +6374,7 @@ static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
const CallExpr *E,
llvm::Type *RegisterType,
llvm::Type *ValueType,
- bool IsRead,
+ SpecialRegisterAccessKind AccessKind,
StringRef SysReg = "") {
// write and register intrinsics only support 32 and 64 bit operations.
assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64))
@@ -6093,8 +6399,12 @@ static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
&& "Can't fit 64-bit value in 32-bit register");
- if (IsRead) {
- llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
+ if (AccessKind != Write) {
+ assert(AccessKind == NormalRead || AccessKind == VolatileRead);
+ llvm::Function *F = CGM.getIntrinsic(
+ AccessKind == VolatileRead ? llvm::Intrinsic::read_volatile_register
+ : llvm::Intrinsic::read_register,
+ Types);
llvm::Value *Call = Builder.CreateCall(F, Metadata);
if (MixedTypes)
@@ -6132,21 +6442,27 @@ static bool HasExtraNeonArgument(unsigned BuiltinID) {
default: break;
case NEON::BI__builtin_neon_vget_lane_i8:
case NEON::BI__builtin_neon_vget_lane_i16:
+ case NEON::BI__builtin_neon_vget_lane_bf16:
case NEON::BI__builtin_neon_vget_lane_i32:
case NEON::BI__builtin_neon_vget_lane_i64:
case NEON::BI__builtin_neon_vget_lane_f32:
case NEON::BI__builtin_neon_vgetq_lane_i8:
case NEON::BI__builtin_neon_vgetq_lane_i16:
+ case NEON::BI__builtin_neon_vgetq_lane_bf16:
case NEON::BI__builtin_neon_vgetq_lane_i32:
case NEON::BI__builtin_neon_vgetq_lane_i64:
case NEON::BI__builtin_neon_vgetq_lane_f32:
+ case NEON::BI__builtin_neon_vduph_lane_bf16:
+ case NEON::BI__builtin_neon_vduph_laneq_bf16:
case NEON::BI__builtin_neon_vset_lane_i8:
case NEON::BI__builtin_neon_vset_lane_i16:
+ case NEON::BI__builtin_neon_vset_lane_bf16:
case NEON::BI__builtin_neon_vset_lane_i32:
case NEON::BI__builtin_neon_vset_lane_i64:
case NEON::BI__builtin_neon_vset_lane_f32:
case NEON::BI__builtin_neon_vsetq_lane_i8:
case NEON::BI__builtin_neon_vsetq_lane_i16:
+ case NEON::BI__builtin_neon_vsetq_lane_bf16:
case NEON::BI__builtin_neon_vsetq_lane_i32:
case NEON::BI__builtin_neon_vsetq_lane_i64:
case NEON::BI__builtin_neon_vsetq_lane_f32:
@@ -6154,6 +6470,7 @@ static bool HasExtraNeonArgument(unsigned BuiltinID) {
case NEON::BI__builtin_neon_vsha1cq_u32:
case NEON::BI__builtin_neon_vsha1pq_u32:
case NEON::BI__builtin_neon_vsha1mq_u32:
+ case NEON::BI__builtin_neon_vcvth_bf16_f32:
case clang::ARM::BI_MoveToCoprocessor:
case clang::ARM::BI_MoveToCoprocessor2:
return false;
@@ -6466,9 +6783,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
BuiltinID == ARM::BI__builtin_arm_wsr64 ||
BuiltinID == ARM::BI__builtin_arm_wsrp) {
- bool IsRead = BuiltinID == ARM::BI__builtin_arm_rsr ||
- BuiltinID == ARM::BI__builtin_arm_rsr64 ||
- BuiltinID == ARM::BI__builtin_arm_rsrp;
+ SpecialRegisterAccessKind AccessKind = Write;
+ if (BuiltinID == ARM::BI__builtin_arm_rsr ||
+ BuiltinID == ARM::BI__builtin_arm_rsr64 ||
+ BuiltinID == ARM::BI__builtin_arm_rsrp)
+ AccessKind = VolatileRead;
bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp ||
BuiltinID == ARM::BI__builtin_arm_wsrp;
@@ -6487,12 +6806,16 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
ValueType = RegisterType = Int32Ty;
}
- return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
+ return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
+ AccessKind);
}
// Deal with MVE builtins
if (Value *Result = EmitARMMVEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
return Result;
+ // Handle CDE builtins
+ if (Value *Result = EmitARMCDEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
+ return Result;
// Find out if any arguments are required to be integer constant
// expressions.
@@ -6589,12 +6912,16 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vget_lane_i16:
case NEON::BI__builtin_neon_vget_lane_i32:
case NEON::BI__builtin_neon_vget_lane_i64:
+ case NEON::BI__builtin_neon_vget_lane_bf16:
case NEON::BI__builtin_neon_vget_lane_f32:
case NEON::BI__builtin_neon_vgetq_lane_i8:
case NEON::BI__builtin_neon_vgetq_lane_i16:
case NEON::BI__builtin_neon_vgetq_lane_i32:
case NEON::BI__builtin_neon_vgetq_lane_i64:
+ case NEON::BI__builtin_neon_vgetq_lane_bf16:
case NEON::BI__builtin_neon_vgetq_lane_f32:
+ case NEON::BI__builtin_neon_vduph_lane_bf16:
+ case NEON::BI__builtin_neon_vduph_laneq_bf16:
return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
case NEON::BI__builtin_neon_vrndns_f32: {
@@ -6607,11 +6934,13 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vset_lane_i16:
case NEON::BI__builtin_neon_vset_lane_i32:
case NEON::BI__builtin_neon_vset_lane_i64:
+ case NEON::BI__builtin_neon_vset_lane_bf16:
case NEON::BI__builtin_neon_vset_lane_f32:
case NEON::BI__builtin_neon_vsetq_lane_i8:
case NEON::BI__builtin_neon_vsetq_lane_i16:
case NEON::BI__builtin_neon_vsetq_lane_i32:
case NEON::BI__builtin_neon_vsetq_lane_i64:
+ case NEON::BI__builtin_neon_vsetq_lane_bf16:
case NEON::BI__builtin_neon_vsetq_lane_f32:
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
@@ -6628,6 +6957,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
"vsha1h");
+ case NEON::BI__builtin_neon_vcvth_bf16_f32: {
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vcvtbfp2bf), Ops,
+ "vcvtbfp2bf");
+ }
+
// The ARM _MoveToCoprocessor builtins put the input register value as
// the first argument, but the LLVM intrinsic expects it as the third one.
case ARM::BI_MoveToCoprocessor:
@@ -6807,7 +7141,9 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
bool rightShift = false;
llvm::VectorType *VTy = GetNeonType(this, Type,
- getTarget().hasLegalHalfType());
+ getTarget().hasLegalHalfType(),
+ false,
+ getTarget().hasBFloat16Type());
llvm::Type *Ty = VTy;
if (!Ty)
return nullptr;
@@ -6815,7 +7151,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// Many NEON builtins have identical semantics and uses in ARM and
// AArch64. Emit these in a single function.
auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap);
- const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
+ const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
if (Builtin)
return EmitCommonNeonBuiltinExpr(
@@ -6831,19 +7167,18 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
if (VTy->getElementType()->isIntegerTy(64)) {
// Extract the other lane.
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- uint32_t Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
+ int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
// Load the value as a one-element vector.
- Ty = llvm::VectorType::get(VTy->getElementType(), 1);
+ Ty = llvm::FixedVectorType::get(VTy->getElementType(), 1);
llvm::Type *Tys[] = {Ty, Int8PtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys);
Value *Align = getAlignmentValue32(PtrOp0);
Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
// Combine them.
- uint32_t Indices[] = {1 - Lane, Lane};
- SV = llvm::ConstantDataVector::get(getLLVMContext(), Indices);
- return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane");
+ int Indices[] = {1 - Lane, Lane};
+ return Builder.CreateShuffleVector(Ops[1], Ld, Indices, "vld1q_lane");
}
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vld1_lane_v: {
@@ -6966,8 +7301,9 @@ static llvm::Value *MVEImmediateShr(CGBuilderTy &Builder, llvm::Value *V,
// equal to the lane size. In LLVM IR, an LShr with that parameter would be
// undefined behavior, but in MVE it's legal, so we must convert it to code
// that is not undefined in IR.
- unsigned LaneBits =
- V->getType()->getVectorElementType()->getPrimitiveSizeInBits();
+ unsigned LaneBits = cast<llvm::VectorType>(V->getType())
+ ->getElementType()
+ ->getPrimitiveSizeInBits();
if (Shift == LaneBits) {
// An unsigned shift of the full lane size always generates zero, so we can
// simply emit a zero vector. A signed shift of the full lane size does the
@@ -6988,6 +7324,86 @@ static llvm::Value *ARMMVEVectorSplat(CGBuilderTy &Builder, llvm::Value *V) {
return Builder.CreateVectorSplat(Elements, V);
}
+static llvm::Value *ARMMVEVectorReinterpret(CGBuilderTy &Builder,
+ CodeGenFunction *CGF,
+ llvm::Value *V,
+ llvm::Type *DestType) {
+ // Convert one MVE vector type into another by reinterpreting its in-register
+ // format.
+ //
+ // Little-endian, this is identical to a bitcast (which reinterprets the
+ // memory format). But big-endian, they're not necessarily the same, because
+ // the register and memory formats map to each other differently depending on
+ // the lane size.
+ //
+ // We generate a bitcast whenever we can (if we're little-endian, or if the
+ // lane sizes are the same anyway). Otherwise we fall back to an IR intrinsic
+ // that performs the different kind of reinterpretation.
+ if (CGF->getTarget().isBigEndian() &&
+ V->getType()->getScalarSizeInBits() != DestType->getScalarSizeInBits()) {
+ return Builder.CreateCall(
+ CGF->CGM.getIntrinsic(Intrinsic::arm_mve_vreinterpretq,
+ {DestType, V->getType()}),
+ V);
+ } else {
+ return Builder.CreateBitCast(V, DestType);
+ }
+}
+
+static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd) {
+ // Make a shufflevector that extracts every other element of a vector (evens
+ // or odds, as desired).
+ SmallVector<int, 16> Indices;
+ unsigned InputElements =
+ cast<llvm::VectorType>(V->getType())->getNumElements();
+ for (unsigned i = 0; i < InputElements; i += 2)
+ Indices.push_back(i + Odd);
+ return Builder.CreateShuffleVector(V, llvm::UndefValue::get(V->getType()),
+ Indices);
+}
+
+static llvm::Value *VectorZip(CGBuilderTy &Builder, llvm::Value *V0,
+ llvm::Value *V1) {
+ // Make a shufflevector that interleaves two vectors element by element.
+ assert(V0->getType() == V1->getType() && "Can't zip different vector types");
+ SmallVector<int, 16> Indices;
+ unsigned InputElements =
+ cast<llvm::VectorType>(V0->getType())->getNumElements();
+ for (unsigned i = 0; i < InputElements; i++) {
+ Indices.push_back(i);
+ Indices.push_back(i + InputElements);
+ }
+ return Builder.CreateShuffleVector(V0, V1, Indices);
+}
+
+template<unsigned HighBit, unsigned OtherBits>
+static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) {
+ // MVE-specific helper function to make a vector splat of a constant such as
+ // UINT_MAX or INT_MIN, in which all bits below the highest one are equal.
+ llvm::Type *T = cast<llvm::VectorType>(VT)->getElementType();
+ unsigned LaneBits = T->getPrimitiveSizeInBits();
+ uint32_t Value = HighBit << (LaneBits - 1);
+ if (OtherBits)
+ Value |= (1UL << (LaneBits - 1)) - 1;
+ llvm::Value *Lane = llvm::ConstantInt::get(T, Value);
+ return ARMMVEVectorSplat(Builder, Lane);
+}
+
+static llvm::Value *ARMMVEVectorElementReverse(CGBuilderTy &Builder,
+ llvm::Value *V,
+ unsigned ReverseWidth) {
+ // MVE-specific helper function which reverses the elements of a
+ // vector within every (ReverseWidth)-bit collection of lanes.
+ SmallVector<int, 16> Indices;
+ unsigned LaneSize = V->getType()->getScalarSizeInBits();
+ unsigned Elements = 128 / LaneSize;
+ unsigned Mask = ReverseWidth / LaneSize - 1;
+ for (unsigned i = 0; i < Elements; i++)
+ Indices.push_back(i ^ Mask);
+ return Builder.CreateShuffleVector(V, llvm::UndefValue::get(V->getType()),
+ Indices);
+}
+
Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
const CallExpr *E,
ReturnValueSlot ReturnValue,
@@ -7089,6 +7505,17 @@ Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
llvm_unreachable("unknown custom codegen type.");
}
+Value *CodeGenFunction::EmitARMCDEBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E,
+ ReturnValueSlot ReturnValue,
+ llvm::Triple::ArchType Arch) {
+ switch (BuiltinID) {
+ default:
+ return nullptr;
+#include "clang/Basic/arm_cde_builtin_cg.inc"
+ }
+}
+
static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
const CallExpr *E,
SmallVectorImpl<Value *> &Ops,
@@ -7238,7 +7665,7 @@ static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID
}
Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
- llvm::Type *VTy = llvm::VectorType::get(Int16Ty, 4);
+ auto *VTy = llvm::FixedVectorType::get(Int16Ty, 4);
Op = Builder.CreateBitCast(Op, Int16Ty);
Value *V = UndefValue::get(VTy);
llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
@@ -7246,9 +7673,840 @@ Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
return Op;
}
+/// SVEBuiltinMemEltTy - Returns the memory element type for this memory
+/// access builtin. Only required if it can't be inferred from the base pointer
+/// operand.
+llvm::Type *CodeGenFunction::SVEBuiltinMemEltTy(SVETypeFlags TypeFlags) {
+ switch (TypeFlags.getMemEltType()) {
+ case SVETypeFlags::MemEltTyDefault:
+ return getEltType(TypeFlags);
+ case SVETypeFlags::MemEltTyInt8:
+ return Builder.getInt8Ty();
+ case SVETypeFlags::MemEltTyInt16:
+ return Builder.getInt16Ty();
+ case SVETypeFlags::MemEltTyInt32:
+ return Builder.getInt32Ty();
+ case SVETypeFlags::MemEltTyInt64:
+ return Builder.getInt64Ty();
+ }
+ llvm_unreachable("Unknown MemEltType");
+}
+
+llvm::Type *CodeGenFunction::getEltType(SVETypeFlags TypeFlags) {
+ switch (TypeFlags.getEltType()) {
+ default:
+ llvm_unreachable("Invalid SVETypeFlag!");
+
+ case SVETypeFlags::EltTyInt8:
+ return Builder.getInt8Ty();
+ case SVETypeFlags::EltTyInt16:
+ return Builder.getInt16Ty();
+ case SVETypeFlags::EltTyInt32:
+ return Builder.getInt32Ty();
+ case SVETypeFlags::EltTyInt64:
+ return Builder.getInt64Ty();
+
+ case SVETypeFlags::EltTyFloat16:
+ return Builder.getHalfTy();
+ case SVETypeFlags::EltTyFloat32:
+ return Builder.getFloatTy();
+ case SVETypeFlags::EltTyFloat64:
+ return Builder.getDoubleTy();
+
+ case SVETypeFlags::EltTyBFloat16:
+ return Builder.getBFloatTy();
+
+ case SVETypeFlags::EltTyBool8:
+ case SVETypeFlags::EltTyBool16:
+ case SVETypeFlags::EltTyBool32:
+ case SVETypeFlags::EltTyBool64:
+ return Builder.getInt1Ty();
+ }
+}
+
+// Return the llvm predicate vector type corresponding to the specified element
+// TypeFlags.
+llvm::ScalableVectorType *
+CodeGenFunction::getSVEPredType(SVETypeFlags TypeFlags) {
+ switch (TypeFlags.getEltType()) {
+ default: llvm_unreachable("Unhandled SVETypeFlag!");
+
+ case SVETypeFlags::EltTyInt8:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
+ case SVETypeFlags::EltTyInt16:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
+ case SVETypeFlags::EltTyInt32:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
+ case SVETypeFlags::EltTyInt64:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
+
+ case SVETypeFlags::EltTyBFloat16:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
+ case SVETypeFlags::EltTyFloat16:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
+ case SVETypeFlags::EltTyFloat32:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
+ case SVETypeFlags::EltTyFloat64:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
+
+ case SVETypeFlags::EltTyBool8:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
+ case SVETypeFlags::EltTyBool16:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
+ case SVETypeFlags::EltTyBool32:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
+ case SVETypeFlags::EltTyBool64:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
+ }
+}
+
+// Return the llvm vector type corresponding to the specified element TypeFlags.
+llvm::ScalableVectorType *
+CodeGenFunction::getSVEType(const SVETypeFlags &TypeFlags) {
+ switch (TypeFlags.getEltType()) {
+ default:
+ llvm_unreachable("Invalid SVETypeFlag!");
+
+ case SVETypeFlags::EltTyInt8:
+ return llvm::ScalableVectorType::get(Builder.getInt8Ty(), 16);
+ case SVETypeFlags::EltTyInt16:
+ return llvm::ScalableVectorType::get(Builder.getInt16Ty(), 8);
+ case SVETypeFlags::EltTyInt32:
+ return llvm::ScalableVectorType::get(Builder.getInt32Ty(), 4);
+ case SVETypeFlags::EltTyInt64:
+ return llvm::ScalableVectorType::get(Builder.getInt64Ty(), 2);
+
+ case SVETypeFlags::EltTyFloat16:
+ return llvm::ScalableVectorType::get(Builder.getHalfTy(), 8);
+ case SVETypeFlags::EltTyBFloat16:
+ return llvm::ScalableVectorType::get(Builder.getBFloatTy(), 8);
+ case SVETypeFlags::EltTyFloat32:
+ return llvm::ScalableVectorType::get(Builder.getFloatTy(), 4);
+ case SVETypeFlags::EltTyFloat64:
+ return llvm::ScalableVectorType::get(Builder.getDoubleTy(), 2);
+
+ case SVETypeFlags::EltTyBool8:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
+ case SVETypeFlags::EltTyBool16:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
+ case SVETypeFlags::EltTyBool32:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
+ case SVETypeFlags::EltTyBool64:
+ return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
+ }
+}
+
+llvm::Value *CodeGenFunction::EmitSVEAllTruePred(SVETypeFlags TypeFlags) {
+ Function *Ptrue =
+ CGM.getIntrinsic(Intrinsic::aarch64_sve_ptrue, getSVEPredType(TypeFlags));
+ return Builder.CreateCall(Ptrue, {Builder.getInt32(/*SV_ALL*/ 31)});
+}
+
+constexpr unsigned SVEBitsPerBlock = 128;
+
+static llvm::ScalableVectorType *getSVEVectorForElementType(llvm::Type *EltTy) {
+ unsigned NumElts = SVEBitsPerBlock / EltTy->getScalarSizeInBits();
+ return llvm::ScalableVectorType::get(EltTy, NumElts);
+}
+
+// Reinterpret the input predicate so that it can be used to correctly isolate
+// the elements of the specified datatype.
+Value *CodeGenFunction::EmitSVEPredicateCast(Value *Pred,
+ llvm::ScalableVectorType *VTy) {
+ auto *RTy = llvm::VectorType::get(IntegerType::get(getLLVMContext(), 1), VTy);
+ if (Pred->getType() == RTy)
+ return Pred;
+
+ unsigned IntID;
+ llvm::Type *IntrinsicTy;
+ switch (VTy->getMinNumElements()) {
+ default:
+ llvm_unreachable("unsupported element count!");
+ case 2:
+ case 4:
+ case 8:
+ IntID = Intrinsic::aarch64_sve_convert_from_svbool;
+ IntrinsicTy = RTy;
+ break;
+ case 16:
+ IntID = Intrinsic::aarch64_sve_convert_to_svbool;
+ IntrinsicTy = Pred->getType();
+ break;
+ }
+
+ Function *F = CGM.getIntrinsic(IntID, IntrinsicTy);
+ Value *C = Builder.CreateCall(F, Pred);
+ assert(C->getType() == RTy && "Unexpected return type!");
+ return C;
+}
+
+Value *CodeGenFunction::EmitSVEGatherLoad(SVETypeFlags TypeFlags,
+ SmallVectorImpl<Value *> &Ops,
+ unsigned IntID) {
+ auto *ResultTy = getSVEType(TypeFlags);
+ auto *OverloadedTy =
+ llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), ResultTy);
+
+ // At the ACLE level there's only one predicate type, svbool_t, which is
+ // mapped to <n x 16 x i1>. However, this might be incompatible with the
+ // actual type being loaded. For example, when loading doubles (i64) the
+ // predicated should be <n x 2 x i1> instead. At the IR level the type of
+ // the predicate and the data being loaded must match. Cast accordingly.
+ Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
+
+ Function *F = nullptr;
+ if (Ops[1]->getType()->isVectorTy())
+ // This is the "vector base, scalar offset" case. In order to uniquely
+ // map this built-in to an LLVM IR intrinsic, we need both the return type
+ // and the type of the vector base.
+ F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[1]->getType()});
+ else
+ // This is the "scalar base, vector offset case". The type of the offset
+ // is encoded in the name of the intrinsic. We only need to specify the
+ // return type in order to uniquely map this built-in to an LLVM IR
+ // intrinsic.
+ F = CGM.getIntrinsic(IntID, OverloadedTy);
+
+ // Pass 0 when the offset is missing. This can only be applied when using
+ // the "vector base" addressing mode for which ACLE allows no offset. The
+ // corresponding LLVM IR always requires an offset.
+ if (Ops.size() == 2) {
+ assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
+ Ops.push_back(ConstantInt::get(Int64Ty, 0));
+ }
+
+ // For "vector base, scalar index" scale the index so that it becomes a
+ // scalar offset.
+ if (!TypeFlags.isByteIndexed() && Ops[1]->getType()->isVectorTy()) {
+ unsigned BytesPerElt =
+ OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
+ Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
+ Ops[2] = Builder.CreateMul(Ops[2], Scale);
+ }
+
+ Value *Call = Builder.CreateCall(F, Ops);
+
+ // The following sext/zext is only needed when ResultTy != OverloadedTy. In
+ // other cases it's folded into a nop.
+ return TypeFlags.isZExtReturn() ? Builder.CreateZExt(Call, ResultTy)
+ : Builder.CreateSExt(Call, ResultTy);
+}
+
+Value *CodeGenFunction::EmitSVEScatterStore(SVETypeFlags TypeFlags,
+ SmallVectorImpl<Value *> &Ops,
+ unsigned IntID) {
+ auto *SrcDataTy = getSVEType(TypeFlags);
+ auto *OverloadedTy =
+ llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), SrcDataTy);
+
+ // In ACLE the source data is passed in the last argument, whereas in LLVM IR
+ // it's the first argument. Move it accordingly.
+ Ops.insert(Ops.begin(), Ops.pop_back_val());
+
+ Function *F = nullptr;
+ if (Ops[2]->getType()->isVectorTy())
+ // This is the "vector base, scalar offset" case. In order to uniquely
+ // map this built-in to an LLVM IR intrinsic, we need both the return type
+ // and the type of the vector base.
+ F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[2]->getType()});
+ else
+ // This is the "scalar base, vector offset case". The type of the offset
+ // is encoded in the name of the intrinsic. We only need to specify the
+ // return type in order to uniquely map this built-in to an LLVM IR
+ // intrinsic.
+ F = CGM.getIntrinsic(IntID, OverloadedTy);
+
+ // Pass 0 when the offset is missing. This can only be applied when using
+ // the "vector base" addressing mode for which ACLE allows no offset. The
+ // corresponding LLVM IR always requires an offset.
+ if (Ops.size() == 3) {
+ assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
+ Ops.push_back(ConstantInt::get(Int64Ty, 0));
+ }
+
+ // Truncation is needed when SrcDataTy != OverloadedTy. In other cases it's
+ // folded into a nop.
+ Ops[0] = Builder.CreateTrunc(Ops[0], OverloadedTy);
+
+ // At the ACLE level there's only one predicate type, svbool_t, which is
+ // mapped to <n x 16 x i1>. However, this might be incompatible with the
+ // actual type being stored. For example, when storing doubles (i64) the
+ // predicated should be <n x 2 x i1> instead. At the IR level the type of
+ // the predicate and the data being stored must match. Cast accordingly.
+ Ops[1] = EmitSVEPredicateCast(Ops[1], OverloadedTy);
+
+ // For "vector base, scalar index" scale the index so that it becomes a
+ // scalar offset.
+ if (!TypeFlags.isByteIndexed() && Ops[2]->getType()->isVectorTy()) {
+ unsigned BytesPerElt =
+ OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
+ Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
+ Ops[3] = Builder.CreateMul(Ops[3], Scale);
+ }
+
+ return Builder.CreateCall(F, Ops);
+}
+
+Value *CodeGenFunction::EmitSVEGatherPrefetch(SVETypeFlags TypeFlags,
+ SmallVectorImpl<Value *> &Ops,
+ unsigned IntID) {
+ // The gather prefetches are overloaded on the vector input - this can either
+ // be the vector of base addresses or vector of offsets.
+ auto *OverloadedTy = dyn_cast<llvm::ScalableVectorType>(Ops[1]->getType());
+ if (!OverloadedTy)
+ OverloadedTy = cast<llvm::ScalableVectorType>(Ops[2]->getType());
+
+ // Cast the predicate from svbool_t to the right number of elements.
+ Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
+
+ // vector + imm addressing modes
+ if (Ops[1]->getType()->isVectorTy()) {
+ if (Ops.size() == 3) {
+ // Pass 0 for 'vector+imm' when the index is omitted.
+ Ops.push_back(ConstantInt::get(Int64Ty, 0));
+
+ // The sv_prfop is the last operand in the builtin and IR intrinsic.
+ std::swap(Ops[2], Ops[3]);
+ } else {
+ // Index needs to be passed as scaled offset.
+ llvm::Type *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
+ unsigned BytesPerElt = MemEltTy->getPrimitiveSizeInBits() / 8;
+ Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
+ Ops[2] = Builder.CreateMul(Ops[2], Scale);
+ }
+ }
+
+ Function *F = CGM.getIntrinsic(IntID, OverloadedTy);
+ return Builder.CreateCall(F, Ops);
+}
+
+Value *CodeGenFunction::EmitSVEStructLoad(SVETypeFlags TypeFlags,
+ SmallVectorImpl<Value*> &Ops,
+ unsigned IntID) {
+ llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
+ auto VecPtrTy = llvm::PointerType::getUnqual(VTy);
+ auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType());
+
+ unsigned N;
+ switch (IntID) {
+ case Intrinsic::aarch64_sve_ld2:
+ N = 2;
+ break;
+ case Intrinsic::aarch64_sve_ld3:
+ N = 3;
+ break;
+ case Intrinsic::aarch64_sve_ld4:
+ N = 4;
+ break;
+ default:
+ llvm_unreachable("unknown intrinsic!");
+ }
+ auto RetTy = llvm::VectorType::get(VTy->getElementType(),
+ VTy->getElementCount() * N);
+
+ Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
+ Value *BasePtr= Builder.CreateBitCast(Ops[1], VecPtrTy);
+ Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
+ BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
+ BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy);
+
+ Function *F = CGM.getIntrinsic(IntID, {RetTy, Predicate->getType()});
+ return Builder.CreateCall(F, { Predicate, BasePtr });
+}
+
+Value *CodeGenFunction::EmitSVEStructStore(SVETypeFlags TypeFlags,
+ SmallVectorImpl<Value*> &Ops,
+ unsigned IntID) {
+ llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
+ auto VecPtrTy = llvm::PointerType::getUnqual(VTy);
+ auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType());
+
+ unsigned N;
+ switch (IntID) {
+ case Intrinsic::aarch64_sve_st2:
+ N = 2;
+ break;
+ case Intrinsic::aarch64_sve_st3:
+ N = 3;
+ break;
+ case Intrinsic::aarch64_sve_st4:
+ N = 4;
+ break;
+ default:
+ llvm_unreachable("unknown intrinsic!");
+ }
+ auto TupleTy =
+ llvm::VectorType::get(VTy->getElementType(), VTy->getElementCount() * N);
+
+ Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
+ Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy);
+ Value *Offset = Ops.size() > 3 ? Ops[2] : Builder.getInt32(0);
+ Value *Val = Ops.back();
+ BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
+ BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy);
+
+ // The llvm.aarch64.sve.st2/3/4 intrinsics take legal part vectors, so we
+ // need to break up the tuple vector.
+ SmallVector<llvm::Value*, 5> Operands;
+ Function *FExtr =
+ CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
+ for (unsigned I = 0; I < N; ++I)
+ Operands.push_back(Builder.CreateCall(FExtr, {Val, Builder.getInt32(I)}));
+ Operands.append({Predicate, BasePtr});
+
+ Function *F = CGM.getIntrinsic(IntID, { VTy });
+ return Builder.CreateCall(F, Operands);
+}
+
+// SVE2's svpmullb and svpmullt builtins are similar to the svpmullb_pair and
+// svpmullt_pair intrinsics, with the exception that their results are bitcast
+// to a wider type.
+Value *CodeGenFunction::EmitSVEPMull(SVETypeFlags TypeFlags,
+ SmallVectorImpl<Value *> &Ops,
+ unsigned BuiltinID) {
+ // Splat scalar operand to vector (intrinsics with _n infix)
+ if (TypeFlags.hasSplatOperand()) {
+ unsigned OpNo = TypeFlags.getSplatOperand();
+ Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
+ }
+
+ // The pair-wise function has a narrower overloaded type.
+ Function *F = CGM.getIntrinsic(BuiltinID, Ops[0]->getType());
+ Value *Call = Builder.CreateCall(F, {Ops[0], Ops[1]});
+
+ // Now bitcast to the wider result type.
+ llvm::ScalableVectorType *Ty = getSVEType(TypeFlags);
+ return EmitSVEReinterpret(Call, Ty);
+}
+
+Value *CodeGenFunction::EmitSVEMovl(SVETypeFlags TypeFlags,
+ ArrayRef<Value *> Ops, unsigned BuiltinID) {
+ llvm::Type *OverloadedTy = getSVEType(TypeFlags);
+ Function *F = CGM.getIntrinsic(BuiltinID, OverloadedTy);
+ return Builder.CreateCall(F, {Ops[0], Builder.getInt32(0)});
+}
+
+Value *CodeGenFunction::EmitSVEPrefetchLoad(SVETypeFlags TypeFlags,
+ SmallVectorImpl<Value *> &Ops,
+ unsigned BuiltinID) {
+ auto *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
+ auto *VectorTy = getSVEVectorForElementType(MemEltTy);
+ auto *MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
+
+ Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
+ Value *BasePtr = Ops[1];
+
+ // Implement the index operand if not omitted.
+ if (Ops.size() > 3) {
+ BasePtr = Builder.CreateBitCast(BasePtr, MemoryTy->getPointerTo());
+ BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);
+ }
+
+ // Prefetch intriniscs always expect an i8*
+ BasePtr = Builder.CreateBitCast(BasePtr, llvm::PointerType::getUnqual(Int8Ty));
+ Value *PrfOp = Ops.back();
+
+ Function *F = CGM.getIntrinsic(BuiltinID, Predicate->getType());
+ return Builder.CreateCall(F, {Predicate, BasePtr, PrfOp});
+}
+
+Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E,
+ llvm::Type *ReturnTy,
+ SmallVectorImpl<Value *> &Ops,
+ unsigned BuiltinID,
+ bool IsZExtReturn) {
+ QualType LangPTy = E->getArg(1)->getType();
+ llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
+ LangPTy->getAs<PointerType>()->getPointeeType());
+
+ // The vector type that is returned may be different from the
+ // eventual type loaded from memory.
+ auto VectorTy = cast<llvm::ScalableVectorType>(ReturnTy);
+ auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
+
+ Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
+ Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
+ Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
+ BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset);
+
+ BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
+ Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
+ Value *Load = Builder.CreateCall(F, {Predicate, BasePtr});
+
+ return IsZExtReturn ? Builder.CreateZExt(Load, VectorTy)
+ : Builder.CreateSExt(Load, VectorTy);
+}
+
+Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
+ SmallVectorImpl<Value *> &Ops,
+ unsigned BuiltinID) {
+ QualType LangPTy = E->getArg(1)->getType();
+ llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
+ LangPTy->getAs<PointerType>()->getPointeeType());
+
+ // The vector type that is stored may be different from the
+ // eventual type stored to memory.
+ auto VectorTy = cast<llvm::ScalableVectorType>(Ops.back()->getType());
+ auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
+
+ Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
+ Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
+ Value *Offset = Ops.size() == 4 ? Ops[2] : Builder.getInt32(0);
+ BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset);
+
+ // Last value is always the data
+ llvm::Value *Val = Builder.CreateTrunc(Ops.back(), MemoryTy);
+
+ BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
+ Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
+ return Builder.CreateCall(F, {Val, Predicate, BasePtr});
+}
+
+// Limit the usage of scalable llvm IR generated by the ACLE by using the
+// sve dup.x intrinsic instead of IRBuilder::CreateVectorSplat.
+Value *CodeGenFunction::EmitSVEDupX(Value *Scalar, llvm::Type *Ty) {
+ auto F = CGM.getIntrinsic(Intrinsic::aarch64_sve_dup_x, Ty);
+ return Builder.CreateCall(F, Scalar);
+}
+
+Value *CodeGenFunction::EmitSVEDupX(Value* Scalar) {
+ return EmitSVEDupX(Scalar, getSVEVectorForElementType(Scalar->getType()));
+}
+
+Value *CodeGenFunction::EmitSVEReinterpret(Value *Val, llvm::Type *Ty) {
+ // FIXME: For big endian this needs an additional REV, or needs a separate
+ // intrinsic that is code-generated as a no-op, because the LLVM bitcast
+ // instruction is defined as 'bitwise' equivalent from memory point of
+ // view (when storing/reloading), whereas the svreinterpret builtin
+ // implements bitwise equivalent cast from register point of view.
+ // LLVM CodeGen for a bitcast must add an explicit REV for big-endian.
+ return Builder.CreateBitCast(Val, Ty);
+}
+
+static void InsertExplicitZeroOperand(CGBuilderTy &Builder, llvm::Type *Ty,
+ SmallVectorImpl<Value *> &Ops) {
+ auto *SplatZero = Constant::getNullValue(Ty);
+ Ops.insert(Ops.begin(), SplatZero);
+}
+
+static void InsertExplicitUndefOperand(CGBuilderTy &Builder, llvm::Type *Ty,
+ SmallVectorImpl<Value *> &Ops) {
+ auto *SplatUndef = UndefValue::get(Ty);
+ Ops.insert(Ops.begin(), SplatUndef);
+}
+
+// Compute the list of overload types used to mangle the name of the LLVM
+// intrinsic for an SVE builtin, driven by the overload kind recorded in
+// \p TypeFlags. Returns an empty list for non-overloaded intrinsics, and
+// falls through to the single default SVE type otherwise.
+SmallVector<llvm::Type *, 2> CodeGenFunction::getSVEOverloadTypes(
+ SVETypeFlags TypeFlags, llvm::Type *ResultType, ArrayRef<Value *> Ops) {
+ if (TypeFlags.isOverloadNone())
+ return {};
+
+ llvm::Type *DefaultType = getSVEType(TypeFlags);
+
+ // While-style compares overload on the result predicate and the scalar
+ // operand type.
+ if (TypeFlags.isOverloadWhile())
+ return {DefaultType, Ops[1]->getType()};
+
+ if (TypeFlags.isOverloadWhileRW())
+ return {getSVEPredType(TypeFlags), Ops[0]->getType()};
+
+ // Conversions and tuple-set overload on first-operand and last-operand
+ // types.
+ if (TypeFlags.isOverloadCvt() || TypeFlags.isTupleSet())
+ return {Ops[0]->getType(), Ops.back()->getType()};
+
+ if (TypeFlags.isTupleCreate() || TypeFlags.isTupleGet())
+ return {ResultType, Ops[0]->getType()};
+
+ assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads");
+ return {DefaultType};
+}
+
+// Emit LLVM IR for an AArch64 SVE ACLE builtin call.
+//
+// Dispatch order: svreinterpret builtins become bitcasts up front; builtins
+// whose SVETypeFlags mark them as loads/stores/gathers/scatters/prefetches/
+// struct accesses go to dedicated helpers; builtins with a direct LLVM
+// intrinsic mapping take the generic table-driven path; the remaining few
+// are expanded case-by-case in the trailing switch.
+Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ // Find out if any arguments are required to be integer constant expressions.
+ unsigned ICEArguments = 0;
+ ASTContext::GetBuiltinTypeError Error;
+ getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
+ assert(Error == ASTContext::GE_None && "Should not codegen an error");
+
+ llvm::Type *Ty = ConvertType(E->getType());
+ // svreinterpret builtins are pure register-view bitcasts; handle them
+ // before the generic argument emission below.
+ if (BuiltinID >= SVE::BI__builtin_sve_reinterpret_s8_s8 &&
+ BuiltinID <= SVE::BI__builtin_sve_reinterpret_f64_f64) {
+ Value *Val = EmitScalarExpr(E->getArg(0));
+ return EmitSVEReinterpret(Val, Ty);
+ }
+
+ llvm::SmallVector<Value *, 4> Ops;
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
+ if ((ICEArguments & (1 << i)) == 0)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ else {
+ // If this is required to be a constant, constant fold it so that we know
+ // that the generated intrinsic gets a ConstantInt.
+ llvm::APSInt Result;
+ if (!E->getArg(i)->isIntegerConstantExpr(Result, getContext()))
+ llvm_unreachable("Expected argument to be a constant");
+
+ // Immediates for SVE llvm intrinsics are always 32bit. We can safely
+ // truncate because the immediate has been range checked and no valid
+ // immediate requires more than a handful of bits.
+ Result = Result.extOrTrunc(32);
+ Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
+ }
+ }
+
+ // Look the builtin up in the SVE intrinsic map; its TypeModifier flags
+ // drive all of the dispatching below.
+ auto *Builtin = findARMVectorIntrinsicInMap(AArch64SVEIntrinsicMap, BuiltinID,
+ AArch64SVEIntrinsicsProvenSorted);
+ SVETypeFlags TypeFlags(Builtin->TypeModifier);
+ if (TypeFlags.isLoad())
+ return EmitSVEMaskedLoad(E, Ty, Ops, Builtin->LLVMIntrinsic,
+ TypeFlags.isZExtReturn());
+ else if (TypeFlags.isStore())
+ return EmitSVEMaskedStore(E, Ops, Builtin->LLVMIntrinsic);
+ else if (TypeFlags.isGatherLoad())
+ return EmitSVEGatherLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
+ else if (TypeFlags.isScatterStore())
+ return EmitSVEScatterStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
+ else if (TypeFlags.isPrefetch())
+ return EmitSVEPrefetchLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
+ else if (TypeFlags.isGatherPrefetch())
+ return EmitSVEGatherPrefetch(TypeFlags, Ops, Builtin->LLVMIntrinsic);
+ else if (TypeFlags.isStructLoad())
+ return EmitSVEStructLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
+ else if (TypeFlags.isStructStore())
+ return EmitSVEStructStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
+ else if (TypeFlags.isUndef())
+ return UndefValue::get(Ty);
+ else if (Builtin->LLVMIntrinsic != 0) {
+ // Generic path: massage the operand list per the flags, then call the
+ // mapped LLVM intrinsic directly.
+ if (TypeFlags.getMergeType() == SVETypeFlags::MergeZeroExp)
+ InsertExplicitZeroOperand(Builder, Ty, Ops);
+
+ if (TypeFlags.getMergeType() == SVETypeFlags::MergeAnyExp)
+ InsertExplicitUndefOperand(Builder, Ty, Ops);
+
+ // Some ACLE builtins leave out the argument to specify the predicate
+ // pattern, which is expected to be expanded to an SV_ALL pattern.
+ if (TypeFlags.isAppendSVALL())
+ Ops.push_back(Builder.getInt32(/*SV_ALL*/ 31));
+ if (TypeFlags.isInsertOp1SVALL())
+ Ops.insert(&Ops[1], Builder.getInt32(/*SV_ALL*/ 31));
+
+ // Predicates must match the main datatype.
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType()))
+ if (PredTy->getElementType()->isIntegerTy(1))
+ Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags));
+
+ // Splat scalar operand to vector (intrinsics with _n infix)
+ if (TypeFlags.hasSplatOperand()) {
+ unsigned OpNo = TypeFlags.getSplatOperand();
+ Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
+ }
+
+ if (TypeFlags.isReverseCompare())
+ std::swap(Ops[1], Ops[2]);
+
+ if (TypeFlags.isReverseUSDOT())
+ std::swap(Ops[1], Ops[2]);
+
+ // Predicated intrinsics with _z suffix need a select w/ zeroinitializer.
+ if (TypeFlags.getMergeType() == SVETypeFlags::MergeZero) {
+ llvm::Type *OpndTy = Ops[1]->getType();
+ auto *SplatZero = Constant::getNullValue(OpndTy);
+ Function *Sel = CGM.getIntrinsic(Intrinsic::aarch64_sve_sel, OpndTy);
+ Ops[1] = Builder.CreateCall(Sel, {Ops[0], Ops[1], SplatZero});
+ }
+
+ Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic,
+ getSVEOverloadTypes(TypeFlags, Ty, Ops));
+ Value *Call = Builder.CreateCall(F, Ops);
+
+ // Predicate results must be converted to svbool_t.
+ if (auto PredTy = dyn_cast<llvm::VectorType>(Call->getType()))
+ if (PredTy->getScalarType()->isIntegerTy(1))
+ Call = EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
+
+ return Call;
+ }
+
+ // Builtins with no direct LLVM intrinsic mapping are expanded one by one.
+ switch (BuiltinID) {
+ default:
+ return nullptr;
+
+ case SVE::BI__builtin_sve_svmov_b_z: {
+ // svmov_b_z(pg, op) <=> svand_b_z(pg, op, op)
+ SVETypeFlags TypeFlags(Builtin->TypeModifier);
+ llvm::Type* OverloadedTy = getSVEType(TypeFlags);
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_and_z, OverloadedTy);
+ return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[1]});
+ }
+
+ case SVE::BI__builtin_sve_svnot_b_z: {
+ // svnot_b_z(pg, op) <=> sveor_b_z(pg, op, pg)
+ SVETypeFlags TypeFlags(Builtin->TypeModifier);
+ llvm::Type* OverloadedTy = getSVEType(TypeFlags);
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_eor_z, OverloadedTy);
+ return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[0]});
+ }
+
+ case SVE::BI__builtin_sve_svmovlb_u16:
+ case SVE::BI__builtin_sve_svmovlb_u32:
+ case SVE::BI__builtin_sve_svmovlb_u64:
+ return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllb);
+
+ case SVE::BI__builtin_sve_svmovlb_s16:
+ case SVE::BI__builtin_sve_svmovlb_s32:
+ case SVE::BI__builtin_sve_svmovlb_s64:
+ return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllb);
+
+ case SVE::BI__builtin_sve_svmovlt_u16:
+ case SVE::BI__builtin_sve_svmovlt_u32:
+ case SVE::BI__builtin_sve_svmovlt_u64:
+ return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllt);
+
+ case SVE::BI__builtin_sve_svmovlt_s16:
+ case SVE::BI__builtin_sve_svmovlt_s32:
+ case SVE::BI__builtin_sve_svmovlt_s64:
+ return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllt);
+
+ case SVE::BI__builtin_sve_svpmullt_u16:
+ case SVE::BI__builtin_sve_svpmullt_u64:
+ case SVE::BI__builtin_sve_svpmullt_n_u16:
+ case SVE::BI__builtin_sve_svpmullt_n_u64:
+ return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullt_pair);
+
+ case SVE::BI__builtin_sve_svpmullb_u16:
+ case SVE::BI__builtin_sve_svpmullb_u64:
+ case SVE::BI__builtin_sve_svpmullb_n_u16:
+ case SVE::BI__builtin_sve_svpmullb_n_u64:
+ return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullb_pair);
+
+ case SVE::BI__builtin_sve_svdup_n_b8:
+ case SVE::BI__builtin_sve_svdup_n_b16:
+ case SVE::BI__builtin_sve_svdup_n_b32:
+ case SVE::BI__builtin_sve_svdup_n_b64: {
+ // Splat (operand != 0) across all predicate lanes, then cast to svbool_t.
+ Value *CmpNE =
+ Builder.CreateICmpNE(Ops[0], Constant::getNullValue(Ops[0]->getType()));
+ llvm::ScalableVectorType *OverloadedTy = getSVEType(TypeFlags);
+ Value *Dup = EmitSVEDupX(CmpNE, OverloadedTy);
+ return EmitSVEPredicateCast(Dup, cast<llvm::ScalableVectorType>(Ty));
+ }
+
+ case SVE::BI__builtin_sve_svdupq_n_b8:
+ case SVE::BI__builtin_sve_svdupq_n_b16:
+ case SVE::BI__builtin_sve_svdupq_n_b32:
+ case SVE::BI__builtin_sve_svdupq_n_b64:
+ case SVE::BI__builtin_sve_svdupq_n_u8:
+ case SVE::BI__builtin_sve_svdupq_n_s8:
+ case SVE::BI__builtin_sve_svdupq_n_u64:
+ case SVE::BI__builtin_sve_svdupq_n_f64:
+ case SVE::BI__builtin_sve_svdupq_n_s64:
+ case SVE::BI__builtin_sve_svdupq_n_u16:
+ case SVE::BI__builtin_sve_svdupq_n_f16:
+ case SVE::BI__builtin_sve_svdupq_n_bf16:
+ case SVE::BI__builtin_sve_svdupq_n_s16:
+ case SVE::BI__builtin_sve_svdupq_n_u32:
+ case SVE::BI__builtin_sve_svdupq_n_f32:
+ case SVE::BI__builtin_sve_svdupq_n_s32: {
+ // These builtins are implemented by storing each element to an array and using
+ // ld1rq to materialize a vector.
+ unsigned NumOpnds = Ops.size();
+
+ bool IsBoolTy =
+ cast<llvm::VectorType>(Ty)->getElementType()->isIntegerTy(1);
+
+ // For svdupq_n_b* the element type of is an integer of type 128/numelts,
+ // so that the compare can use the width that is natural for the expected
+ // number of predicate lanes.
+ llvm::Type *EltTy = Ops[0]->getType();
+ if (IsBoolTy)
+ EltTy = IntegerType::get(getLLVMContext(), SVEBitsPerBlock / NumOpnds);
+
+ Address Alloca = CreateTempAlloca(llvm::ArrayType::get(EltTy, NumOpnds),
+ CharUnits::fromQuantity(16));
+ for (unsigned I = 0; I < NumOpnds; ++I)
+ Builder.CreateDefaultAlignedStore(
+ IsBoolTy ? Builder.CreateZExt(Ops[I], EltTy) : Ops[I],
+ Builder.CreateGEP(Alloca.getPointer(),
+ {Builder.getInt64(0), Builder.getInt64(I)}));
+
+ SVETypeFlags TypeFlags(Builtin->TypeModifier);
+ Value *Pred = EmitSVEAllTruePred(TypeFlags);
+
+ llvm::Type *OverloadedTy = getSVEVectorForElementType(EltTy);
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_ld1rq, OverloadedTy);
+ Value *Alloca0 = Builder.CreateGEP(
+ Alloca.getPointer(), {Builder.getInt64(0), Builder.getInt64(0)});
+ Value *LD1RQ = Builder.CreateCall(F, {Pred, Alloca0});
+
+ if (!IsBoolTy)
+ return LD1RQ;
+
+ // For svdupq_n_b* we need to add an additional 'cmpne' with '0'.
+ F = CGM.getIntrinsic(NumOpnds == 2 ? Intrinsic::aarch64_sve_cmpne
+ : Intrinsic::aarch64_sve_cmpne_wide,
+ OverloadedTy);
+ Value *Call =
+ Builder.CreateCall(F, {Pred, LD1RQ, EmitSVEDupX(Builder.getInt64(0))});
+ return EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
+ }
+
+ case SVE::BI__builtin_sve_svpfalse_b:
+ return ConstantInt::getFalse(Ty);
+
+ case SVE::BI__builtin_sve_svlen_bf16:
+ case SVE::BI__builtin_sve_svlen_f16:
+ case SVE::BI__builtin_sve_svlen_f32:
+ case SVE::BI__builtin_sve_svlen_f64:
+ case SVE::BI__builtin_sve_svlen_s8:
+ case SVE::BI__builtin_sve_svlen_s16:
+ case SVE::BI__builtin_sve_svlen_s32:
+ case SVE::BI__builtin_sve_svlen_s64:
+ case SVE::BI__builtin_sve_svlen_u8:
+ case SVE::BI__builtin_sve_svlen_u16:
+ case SVE::BI__builtin_sve_svlen_u32:
+ case SVE::BI__builtin_sve_svlen_u64: {
+ // svlen = (minimum element count) * vscale.
+ SVETypeFlags TF(Builtin->TypeModifier);
+ auto VTy = cast<llvm::VectorType>(getSVEType(TF));
+ auto NumEls = llvm::ConstantInt::get(Ty, VTy->getElementCount().Min);
+
+ Function *F = CGM.getIntrinsic(Intrinsic::vscale, Ty);
+ return Builder.CreateMul(NumEls, Builder.CreateCall(F));
+ }
+
+ case SVE::BI__builtin_sve_svtbl2_u8:
+ case SVE::BI__builtin_sve_svtbl2_s8:
+ case SVE::BI__builtin_sve_svtbl2_u16:
+ case SVE::BI__builtin_sve_svtbl2_s16:
+ case SVE::BI__builtin_sve_svtbl2_u32:
+ case SVE::BI__builtin_sve_svtbl2_s32:
+ case SVE::BI__builtin_sve_svtbl2_u64:
+ case SVE::BI__builtin_sve_svtbl2_s64:
+ case SVE::BI__builtin_sve_svtbl2_f16:
+ case SVE::BI__builtin_sve_svtbl2_bf16:
+ case SVE::BI__builtin_sve_svtbl2_f32:
+ case SVE::BI__builtin_sve_svtbl2_f64: {
+ // Unpack the two-vector tuple argument, then emit the tbl2 intrinsic.
+ SVETypeFlags TF(Builtin->TypeModifier);
+ auto VTy = cast<llvm::VectorType>(getSVEType(TF));
+ auto TupleTy = llvm::VectorType::get(VTy->getElementType(),
+ VTy->getElementCount() * 2);
+ Function *FExtr =
+ CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
+ Value *V0 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(0)});
+ Value *V1 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(1)});
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_tbl2, VTy);
+ return Builder.CreateCall(F, {V0, V1, Ops[1]});
+ }
+ }
+
+ // Unreachable: every case in the switch above (including default) returns.
+ return nullptr;
+}
+
Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
const CallExpr *E,
llvm::Triple::ArchType Arch) {
+ if (BuiltinID >= AArch64::FirstSVEBuiltin &&
+ BuiltinID <= AArch64::LastSVEBuiltin)
+ return EmitAArch64SVEBuiltinExpr(BuiltinID, E);
+
unsigned HintID = static_cast<unsigned>(-1);
switch (BuiltinID) {
default: break;
@@ -7589,9 +8847,11 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
BuiltinID == AArch64::BI__builtin_arm_wsrp) {
- bool IsRead = BuiltinID == AArch64::BI__builtin_arm_rsr ||
- BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
- BuiltinID == AArch64::BI__builtin_arm_rsrp;
+ SpecialRegisterAccessKind AccessKind = Write;
+ if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
+ BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
+ BuiltinID == AArch64::BI__builtin_arm_rsrp)
+ AccessKind = VolatileRead;
bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp ||
BuiltinID == AArch64::BI__builtin_arm_wsrp;
@@ -7609,7 +8869,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
ValueType = Int32Ty;
}
- return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
+ return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
+ AccessKind);
}
if (BuiltinID == AArch64::BI_ReadStatusReg ||
@@ -7665,7 +8926,27 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
assert(Error == ASTContext::GE_None && "Should not codegen an error");
llvm::SmallVector<Value*, 4> Ops;
+ Address PtrOp0 = Address::invalid();
for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
+ if (i == 0) {
+ switch (BuiltinID) {
+ case NEON::BI__builtin_neon_vld1_v:
+ case NEON::BI__builtin_neon_vld1q_v:
+ case NEON::BI__builtin_neon_vld1_dup_v:
+ case NEON::BI__builtin_neon_vld1q_dup_v:
+ case NEON::BI__builtin_neon_vld1_lane_v:
+ case NEON::BI__builtin_neon_vld1q_lane_v:
+ case NEON::BI__builtin_neon_vst1_v:
+ case NEON::BI__builtin_neon_vst1q_v:
+ case NEON::BI__builtin_neon_vst1_lane_v:
+ case NEON::BI__builtin_neon_vst1q_lane_v:
+ // Get the alignment for the argument in addition to the value;
+ // we'll use it later.
+ PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
+ Ops.push_back(PtrOp0.getPointer());
+ continue;
+ }
+ }
if ((ICEArguments & (1 << i)) == 0) {
Ops.push_back(EmitScalarExpr(E->getArg(i)));
} else {
@@ -7680,7 +8961,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
- const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
+ const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
if (Builtin) {
@@ -7896,7 +9177,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
}
case NEON::BI__builtin_neon_vpaddd_s64: {
- llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
+ auto *Ty = llvm::FixedVectorType::get(Int64Ty, 2);
Value *Vec = EmitScalarExpr(E->getArg(0));
// The vector is v2f64, so make sure it's bitcast to that.
Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
@@ -7908,8 +9189,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateAdd(Op0, Op1, "vpaddd");
}
case NEON::BI__builtin_neon_vpaddd_f64: {
- llvm::Type *Ty =
- llvm::VectorType::get(DoubleTy, 2);
+ auto *Ty = llvm::FixedVectorType::get(DoubleTy, 2);
Value *Vec = EmitScalarExpr(E->getArg(0));
// The vector is v2f64, so make sure it's bitcast to that.
Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
@@ -7921,8 +9201,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateFAdd(Op0, Op1, "vpaddd");
}
case NEON::BI__builtin_neon_vpadds_f32: {
- llvm::Type *Ty =
- llvm::VectorType::get(FloatTy, 2);
+ auto *Ty = llvm::FixedVectorType::get(FloatTy, 2);
Value *Vec = EmitScalarExpr(E->getArg(0));
// The vector is v2f32, so make sure it's bitcast to that.
Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
@@ -8085,97 +9364,107 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vset_lane_i16:
case NEON::BI__builtin_neon_vset_lane_i32:
case NEON::BI__builtin_neon_vset_lane_i64:
+ case NEON::BI__builtin_neon_vset_lane_bf16:
case NEON::BI__builtin_neon_vset_lane_f32:
case NEON::BI__builtin_neon_vsetq_lane_i8:
case NEON::BI__builtin_neon_vsetq_lane_i16:
case NEON::BI__builtin_neon_vsetq_lane_i32:
case NEON::BI__builtin_neon_vsetq_lane_i64:
+ case NEON::BI__builtin_neon_vsetq_lane_bf16:
case NEON::BI__builtin_neon_vsetq_lane_f32:
Ops.push_back(EmitScalarExpr(E->getArg(2)));
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
case NEON::BI__builtin_neon_vset_lane_f64:
// The vector type needs a cast for the v1f64 variant.
- Ops[1] = Builder.CreateBitCast(Ops[1],
- llvm::VectorType::get(DoubleTy, 1));
+ Ops[1] =
+ Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 1));
Ops.push_back(EmitScalarExpr(E->getArg(2)));
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
case NEON::BI__builtin_neon_vsetq_lane_f64:
// The vector type needs a cast for the v2f64 variant.
- Ops[1] = Builder.CreateBitCast(Ops[1],
- llvm::VectorType::get(DoubleTy, 2));
+ Ops[1] =
+ Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 2));
Ops.push_back(EmitScalarExpr(E->getArg(2)));
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
case NEON::BI__builtin_neon_vget_lane_i8:
case NEON::BI__builtin_neon_vdupb_lane_i8:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int8Ty, 8));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 8));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vgetq_lane_i8:
case NEON::BI__builtin_neon_vdupb_laneq_i8:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int8Ty, 16));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 16));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_i16:
case NEON::BI__builtin_neon_vduph_lane_i16:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int16Ty, 4));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 4));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vgetq_lane_i16:
case NEON::BI__builtin_neon_vduph_laneq_i16:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int16Ty, 8));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 8));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_i32:
case NEON::BI__builtin_neon_vdups_lane_i32:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 2));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vdups_lane_f32:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(FloatTy, 2));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vdups_lane");
case NEON::BI__builtin_neon_vgetq_lane_i32:
case NEON::BI__builtin_neon_vdups_laneq_i32:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_i64:
case NEON::BI__builtin_neon_vdupd_lane_i64:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 1));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 1));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vdupd_lane_f64:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(DoubleTy, 1));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vdupd_lane");
case NEON::BI__builtin_neon_vgetq_lane_i64:
case NEON::BI__builtin_neon_vdupd_laneq_i64:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_f32:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(FloatTy, 2));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vget_lane_f64:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(DoubleTy, 1));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vgetq_lane_f32:
case NEON::BI__builtin_neon_vdups_laneq_f32:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(FloatTy, 4));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 4));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vgetq_lane_f64:
case NEON::BI__builtin_neon_vdupd_laneq_f64:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(DoubleTy, 2));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vaddh_f16:
@@ -8190,18 +9479,20 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vdivh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(1)));
return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
- case NEON::BI__builtin_neon_vfmah_f16: {
- Function *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy);
+ case NEON::BI__builtin_neon_vfmah_f16:
// NEON intrinsic puts accumulator first, unlike the LLVM fma.
- return Builder.CreateCall(F,
- {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
- }
+ return emitCallMaybeConstrainedFPBuiltin(
+ *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
+ {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
case NEON::BI__builtin_neon_vfmsh_f16: {
- Function *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy);
+ // FIXME: This should be an fneg instruction:
Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy);
Value* Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh");
+
// NEON intrinsic puts accumulator first, unlike the LLVM fma.
- return Builder.CreateCall(F, {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]});
+ return emitCallMaybeConstrainedFPBuiltin(
+ *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
+ {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]});
}
case NEON::BI__builtin_neon_vaddd_s64:
case NEON::BI__builtin_neon_vaddd_u64:
@@ -8214,7 +9505,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
SmallVector<Value *, 2> ProductOps;
ProductOps.push_back(vectorWrapScalar16(Ops[1]));
ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
- llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
+ auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
ProductOps, "vqdmlXl");
Constant *CI = ConstantInt::get(SizeTy, 0);
@@ -8311,7 +9602,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
SmallVector<Value *, 2> ProductOps;
ProductOps.push_back(vectorWrapScalar16(Ops[1]));
ProductOps.push_back(vectorWrapScalar16(Ops[2]));
- llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
+ auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
ProductOps, "vqdmlXl");
Constant *CI = ConstantInt::get(SizeTy, 0);
@@ -8358,10 +9649,14 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
: Intrinsic::aarch64_neon_sqsub;
return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
}
+ case NEON::BI__builtin_neon_vget_lane_bf16:
+ case NEON::BI__builtin_neon_vduph_lane_bf16:
case NEON::BI__builtin_neon_vduph_lane_f16: {
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
}
+ case NEON::BI__builtin_neon_vgetq_lane_bf16:
+ case NEON::BI__builtin_neon_vduph_laneq_bf16:
case NEON::BI__builtin_neon_vduph_laneq_f16: {
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
@@ -8520,8 +9815,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
// Not all intrinsics handled by the common case work for AArch64 yet, so only
// defer to common code if it's been added to our special map.
- Builtin = findNeonIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
- AArch64SIMDIntrinsicsProvenSorted);
+ Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
+ AArch64SIMDIntrinsicsProvenSorted);
if (Builtin)
return EmitCommonNeonBuiltinExpr(
@@ -8559,16 +9854,18 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[2] = Addend;
// Now adjust things to handle the lane access.
- llvm::Type *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v ?
- llvm::VectorType::get(VTy->getElementType(), VTy->getNumElements() / 2) :
- VTy;
+ auto *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v
+ ? llvm::FixedVectorType::get(VTy->getElementType(),
+ VTy->getNumElements() / 2)
+ : VTy;
llvm::Constant *cst = cast<Constant>(Ops[3]);
- Value *SV = llvm::ConstantVector::getSplat(VTy->getNumElements(), cst);
+ Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), cst);
Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
Ops.pop_back();
- Int = Intrinsic::fma;
+ Int = Builder.getIsFPConstrained() ? Intrinsic::experimental_constrained_fma
+ : Intrinsic::fma;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
}
case NEON::BI__builtin_neon_vfma_laneq_v: {
@@ -8581,31 +9878,35 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
NeonTypeFlags(NeonTypeFlags::Float64, false, true));
Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
- Function *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
- Value *Result = Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
+ Value *Result;
+ Result = emitCallMaybeConstrainedFPBuiltin(
+ *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma,
+ DoubleTy, {Ops[1], Ops[2], Ops[0]});
return Builder.CreateBitCast(Result, Ty);
}
- Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(),
- VTy->getNumElements() * 2);
+ auto *STy = llvm::FixedVectorType::get(VTy->getElementType(),
+ VTy->getNumElements() * 2);
Ops[2] = Builder.CreateBitCast(Ops[2], STy);
- Value* SV = llvm::ConstantVector::getSplat(VTy->getNumElements(),
+ Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(),
cast<ConstantInt>(Ops[3]));
Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
- return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
+ return emitCallMaybeConstrainedFPBuiltin(
+ *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
+ {Ops[2], Ops[1], Ops[0]});
}
case NEON::BI__builtin_neon_vfmaq_laneq_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
- return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
+ return emitCallMaybeConstrainedFPBuiltin(
+ *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
+ {Ops[2], Ops[1], Ops[0]});
}
case NEON::BI__builtin_neon_vfmah_lane_f16:
case NEON::BI__builtin_neon_vfmas_lane_f32:
@@ -8615,9 +9916,10 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vfmad_laneq_f64: {
Ops.push_back(EmitScalarExpr(E->getArg(3)));
llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
- Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
- return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
+ return emitCallMaybeConstrainedFPBuiltin(
+ *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
+ {Ops[1], Ops[2], Ops[0]});
}
case NEON::BI__builtin_neon_vmull_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
@@ -8657,8 +9959,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
unsigned ArgElts = VTy->getNumElements();
llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
unsigned BitWidth = EltTy->getBitWidth();
- llvm::Type *ArgTy = llvm::VectorType::get(
- llvm::IntegerType::get(getLLVMContext(), BitWidth/2), 2*ArgElts);
+ auto *ArgTy = llvm::FixedVectorType::get(
+ llvm::IntegerType::get(getLLVMContext(), BitWidth / 2), 2 * ArgElts);
llvm::Type* Tys[2] = { VTy, ArgTy };
Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
SmallVector<llvm::Value*, 1> TmpOps;
@@ -8726,27 +10028,37 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
case NEON::BI__builtin_neon_vrndah_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::round;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_round
+ : Intrinsic::round;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda");
}
case NEON::BI__builtin_neon_vrnda_v:
case NEON::BI__builtin_neon_vrndaq_v: {
- Int = Intrinsic::round;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_round
+ : Intrinsic::round;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
}
case NEON::BI__builtin_neon_vrndih_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::nearbyint;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_nearbyint
+ : Intrinsic::nearbyint;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi");
}
case NEON::BI__builtin_neon_vrndmh_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::floor;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_floor
+ : Intrinsic::floor;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm");
}
case NEON::BI__builtin_neon_vrndm_v:
case NEON::BI__builtin_neon_vrndmq_v: {
- Int = Intrinsic::floor;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_floor
+ : Intrinsic::floor;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
}
case NEON::BI__builtin_neon_vrndnh_f16: {
@@ -8766,32 +10078,44 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vrndph_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::ceil;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_ceil
+ : Intrinsic::ceil;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp");
}
case NEON::BI__builtin_neon_vrndp_v:
case NEON::BI__builtin_neon_vrndpq_v: {
- Int = Intrinsic::ceil;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_ceil
+ : Intrinsic::ceil;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
}
case NEON::BI__builtin_neon_vrndxh_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::rint;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_rint
+ : Intrinsic::rint;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx");
}
case NEON::BI__builtin_neon_vrndx_v:
case NEON::BI__builtin_neon_vrndxq_v: {
- Int = Intrinsic::rint;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_rint
+ : Intrinsic::rint;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
}
case NEON::BI__builtin_neon_vrndh_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::trunc;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_trunc
+ : Intrinsic::trunc;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
}
case NEON::BI__builtin_neon_vrnd_v:
case NEON::BI__builtin_neon_vrndq_v: {
- Int = Intrinsic::trunc;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_trunc
+ : Intrinsic::trunc;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
}
case NEON::BI__builtin_neon_vcvt_f64_v:
@@ -8942,12 +10266,16 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vsqrth_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::sqrt;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_sqrt
+ : Intrinsic::sqrt;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
}
case NEON::BI__builtin_neon_vsqrt_v:
case NEON::BI__builtin_neon_vsqrtq_v: {
- Int = Intrinsic::sqrt;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_sqrt
+ : Intrinsic::sqrt;
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
}
@@ -8963,7 +10291,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddv_s8: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
@@ -8975,7 +10303,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddv_s16: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 4);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
@@ -8987,7 +10315,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddvq_s8: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 16);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
@@ -8999,7 +10327,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddvq_s16: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
@@ -9008,7 +10336,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmaxv_u8: {
Int = Intrinsic::aarch64_neon_umaxv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
@@ -9017,7 +10345,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmaxv_u16: {
Int = Intrinsic::aarch64_neon_umaxv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 4);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
@@ -9026,7 +10354,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmaxvq_u8: {
Int = Intrinsic::aarch64_neon_umaxv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 16);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
@@ -9035,7 +10363,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmaxvq_u16: {
Int = Intrinsic::aarch64_neon_umaxv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
@@ -9044,7 +10372,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmaxv_s8: {
Int = Intrinsic::aarch64_neon_smaxv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
@@ -9053,7 +10381,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmaxv_s16: {
Int = Intrinsic::aarch64_neon_smaxv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 4);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
@@ -9062,7 +10390,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmaxvq_s8: {
Int = Intrinsic::aarch64_neon_smaxv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 16);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
@@ -9071,7 +10399,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmaxvq_s16: {
Int = Intrinsic::aarch64_neon_smaxv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
@@ -9080,7 +10408,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmaxv_f16: {
Int = Intrinsic::aarch64_neon_fmaxv;
Ty = HalfTy;
- VTy = llvm::VectorType::get(HalfTy, 4);
+ VTy = llvm::FixedVectorType::get(HalfTy, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
@@ -9089,7 +10417,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmaxvq_f16: {
Int = Intrinsic::aarch64_neon_fmaxv;
Ty = HalfTy;
- VTy = llvm::VectorType::get(HalfTy, 8);
+ VTy = llvm::FixedVectorType::get(HalfTy, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
@@ -9098,7 +10426,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminv_u8: {
Int = Intrinsic::aarch64_neon_uminv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
@@ -9107,7 +10435,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminv_u16: {
Int = Intrinsic::aarch64_neon_uminv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 4);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
@@ -9116,7 +10444,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminvq_u8: {
Int = Intrinsic::aarch64_neon_uminv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 16);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
@@ -9125,7 +10453,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminvq_u16: {
Int = Intrinsic::aarch64_neon_uminv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
@@ -9134,7 +10462,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminv_s8: {
Int = Intrinsic::aarch64_neon_sminv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
@@ -9143,7 +10471,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminv_s16: {
Int = Intrinsic::aarch64_neon_sminv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 4);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
@@ -9152,7 +10480,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminvq_s8: {
Int = Intrinsic::aarch64_neon_sminv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 16);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
@@ -9161,7 +10489,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminvq_s16: {
Int = Intrinsic::aarch64_neon_sminv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
@@ -9170,7 +10498,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminv_f16: {
Int = Intrinsic::aarch64_neon_fminv;
Ty = HalfTy;
- VTy = llvm::VectorType::get(HalfTy, 4);
+ VTy = llvm::FixedVectorType::get(HalfTy, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
@@ -9179,7 +10507,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminvq_f16: {
Int = Intrinsic::aarch64_neon_fminv;
Ty = HalfTy;
- VTy = llvm::VectorType::get(HalfTy, 8);
+ VTy = llvm::FixedVectorType::get(HalfTy, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
@@ -9188,7 +10516,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmaxnmv_f16: {
Int = Intrinsic::aarch64_neon_fmaxnmv;
Ty = HalfTy;
- VTy = llvm::VectorType::get(HalfTy, 4);
+ VTy = llvm::FixedVectorType::get(HalfTy, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
@@ -9197,7 +10525,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmaxnmvq_f16: {
Int = Intrinsic::aarch64_neon_fmaxnmv;
Ty = HalfTy;
- VTy = llvm::VectorType::get(HalfTy, 8);
+ VTy = llvm::FixedVectorType::get(HalfTy, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
@@ -9206,7 +10534,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminnmv_f16: {
Int = Intrinsic::aarch64_neon_fminnmv;
Ty = HalfTy;
- VTy = llvm::VectorType::get(HalfTy, 4);
+ VTy = llvm::FixedVectorType::get(HalfTy, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
@@ -9215,7 +10543,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminnmvq_f16: {
Int = Intrinsic::aarch64_neon_fminnmv;
Ty = HalfTy;
- VTy = llvm::VectorType::get(HalfTy, 8);
+ VTy = llvm::FixedVectorType::get(HalfTy, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
@@ -9229,7 +10557,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddlv_u8: {
Int = Intrinsic::aarch64_neon_uaddlv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
@@ -9238,7 +10566,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddlv_u16: {
Int = Intrinsic::aarch64_neon_uaddlv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 4);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
@@ -9246,7 +10574,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddlvq_u8: {
Int = Intrinsic::aarch64_neon_uaddlv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 16);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
@@ -9255,7 +10583,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddlvq_u16: {
Int = Intrinsic::aarch64_neon_uaddlv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
@@ -9263,7 +10591,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddlv_s8: {
Int = Intrinsic::aarch64_neon_saddlv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
@@ -9272,7 +10600,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddlv_s16: {
Int = Intrinsic::aarch64_neon_saddlv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 4);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
@@ -9280,7 +10608,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddlvq_s8: {
Int = Intrinsic::aarch64_neon_saddlv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 16);
+ VTy = llvm::FixedVectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
@@ -9289,7 +10617,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddlvq_s16: {
Int = Intrinsic::aarch64_neon_saddlv;
Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 8);
+ VTy = llvm::FixedVectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
@@ -9325,24 +10653,20 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vld1_v:
case NEON::BI__builtin_neon_vld1q_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
- auto Alignment = CharUnits::fromQuantity(
- BuiltinID == NEON::BI__builtin_neon_vld1_v ? 8 : 16);
- return Builder.CreateAlignedLoad(VTy, Ops[0], Alignment);
+ return Builder.CreateAlignedLoad(VTy, Ops[0], PtrOp0.getAlignment());
}
case NEON::BI__builtin_neon_vst1_v:
case NEON::BI__builtin_neon_vst1q_v:
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
- return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
+ return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
case NEON::BI__builtin_neon_vld1_lane_v:
case NEON::BI__builtin_neon_vld1q_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- auto Alignment = CharUnits::fromQuantity(
- BuiltinID == NEON::BI__builtin_neon_vld1_lane_v ? 8 : 16);
- Ops[0] =
- Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], Alignment);
+ Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
+ PtrOp0.getAlignment());
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
}
case NEON::BI__builtin_neon_vld1_dup_v:
@@ -9350,10 +10674,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Value *V = UndefValue::get(Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- auto Alignment = CharUnits::fromQuantity(
- BuiltinID == NEON::BI__builtin_neon_vld1_dup_v ? 8 : 16);
- Ops[0] =
- Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], Alignment);
+ Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
+ PtrOp0.getAlignment());
llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
return EmitNeonSplat(Ops[0], CI);
@@ -9363,8 +10685,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- return Builder.CreateDefaultAlignedStore(Ops[1],
- Builder.CreateBitCast(Ops[0], Ty));
+ return Builder.CreateAlignedStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty),
+ PtrOp0.getAlignment());
case NEON::BI__builtin_neon_vld2_v:
case NEON::BI__builtin_neon_vld2q_v: {
llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
@@ -9538,7 +10860,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
- SmallVector<uint32_t, 16> Indices;
+ SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
Indices.push_back(i+vi);
Indices.push_back(i+e+vi);
@@ -9557,7 +10879,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
- SmallVector<uint32_t, 16> Indices;
+ SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
Indices.push_back(2*i+vi);
@@ -9575,7 +10897,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
- SmallVector<uint32_t, 16> Indices;
+ SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
Indices.push_back((i + vi*e) >> 1);
Indices.push_back(((i + vi*e) >> 1)+e);
@@ -9633,33 +10955,103 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
- assert(BuiltinID == BPF::BI__builtin_preserve_field_info &&
- "unexpected ARM builtin");
+ assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
+ BuiltinID == BPF::BI__builtin_btf_type_id) &&
+ "unexpected BPF builtin");
- const Expr *Arg = E->getArg(0);
- bool IsBitField = Arg->IgnoreParens()->getObjectKind() == OK_BitField;
+ switch (BuiltinID) {
+ default:
+ llvm_unreachable("Unexpected BPF builtin");
+ case BPF::BI__builtin_preserve_field_info: {
+ const Expr *Arg = E->getArg(0);
+ bool IsBitField = Arg->IgnoreParens()->getObjectKind() == OK_BitField;
- if (!getDebugInfo()) {
- CGM.Error(E->getExprLoc(), "using builtin_preserve_field_info() without -g");
- return IsBitField ? EmitLValue(Arg).getBitFieldPointer()
- : EmitLValue(Arg).getPointer(*this);
- }
+ if (!getDebugInfo()) {
+ CGM.Error(E->getExprLoc(),
+ "using __builtin_preserve_field_info() without -g");
+ return IsBitField ? EmitLValue(Arg).getBitFieldPointer()
+ : EmitLValue(Arg).getPointer(*this);
+ }
- // Enable underlying preserve_*_access_index() generation.
- bool OldIsInPreservedAIRegion = IsInPreservedAIRegion;
- IsInPreservedAIRegion = true;
- Value *FieldAddr = IsBitField ? EmitLValue(Arg).getBitFieldPointer()
- : EmitLValue(Arg).getPointer(*this);
- IsInPreservedAIRegion = OldIsInPreservedAIRegion;
+ // Enable underlying preserve_*_access_index() generation.
+ bool OldIsInPreservedAIRegion = IsInPreservedAIRegion;
+ IsInPreservedAIRegion = true;
+ Value *FieldAddr = IsBitField ? EmitLValue(Arg).getBitFieldPointer()
+ : EmitLValue(Arg).getPointer(*this);
+ IsInPreservedAIRegion = OldIsInPreservedAIRegion;
+
+ ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
+ Value *InfoKind = ConstantInt::get(Int64Ty, C->getSExtValue());
+
+ // Built the IR for the preserve_field_info intrinsic.
+ llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getDeclaration(
+ &CGM.getModule(), llvm::Intrinsic::bpf_preserve_field_info,
+ {FieldAddr->getType()});
+ return Builder.CreateCall(FnGetFieldInfo, {FieldAddr, InfoKind});
+ }
+ case BPF::BI__builtin_btf_type_id: {
+ Value *FieldVal = nullptr;
+
+ // The LValue cannot be converted Value in order to be used as the function
+ // parameter. If it is a structure, it is the "alloca" result of the LValue
+ // (a pointer) is used in the parameter. If it is a simple type,
+ // the value will be loaded from its corresponding "alloca" and used as
+ // the parameter. In our case, let us just get a pointer of the LValue
+ // since we do not really use the parameter. The purpose of parameter
+ // is to prevent the generated IR llvm.bpf.btf.type.id intrinsic call,
+ // which carries metadata, from being changed.
+ bool IsLValue = E->getArg(0)->isLValue();
+ if (IsLValue)
+ FieldVal = EmitLValue(E->getArg(0)).getPointer(*this);
+ else
+ FieldVal = EmitScalarExpr(E->getArg(0));
- ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
- Value *InfoKind = ConstantInt::get(Int64Ty, C->getSExtValue());
+ if (!getDebugInfo()) {
+ CGM.Error(E->getExprLoc(), "using __builtin_btf_type_id() without -g");
+ return nullptr;
+ }
- // Built the IR for the preserve_field_info intrinsic.
- llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getDeclaration(
- &CGM.getModule(), llvm::Intrinsic::bpf_preserve_field_info,
- {FieldAddr->getType()});
- return Builder.CreateCall(FnGetFieldInfo, {FieldAddr, InfoKind});
+ // Generate debuginfo type for the first argument.
+ llvm::DIType *DbgInfo =
+ getDebugInfo()->getOrCreateStandaloneType(E->getArg(0)->getType(),
+ E->getArg(0)->getExprLoc());
+
+ ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
+ Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
+
+ // Built the IR for the btf_type_id intrinsic.
+ //
+ // In the above, we converted LValue argument to a pointer to LValue.
+ // For example, the following
+ // int v;
+ // C1: __builtin_btf_type_id(v, flag);
+ // will be converted to
+ // L1: llvm.bpf.btf.type.id(&v, flag)
+ // This makes it hard to differentiate from
+ // C2: __builtin_btf_type_id(&v, flag);
+ // to
+ // L2: llvm.bpf.btf.type.id(&v, flag)
+ //
+ // If both C1 and C2 are present in the code, the llvm may later
+ // on do CSE on L1 and L2, which will result in incorrect tagged types.
+ //
+ // The C1->L1 transformation only happens if the argument of
+ // __builtin_btf_type_id() is a LValue. So Let us put whether
+ // the argument is an LValue or not into generated IR. This should
+ // prevent potential CSE from causing debuginfo type loss.
+ //
+ // The generated IR intrinsics will hence look like
+ // L1: llvm.bpf.btf.type.id(&v, 1, flag) !di_type_for_{v};
+ // L2: llvm.bpf.btf.type.id(&v, 0, flag) !di_type_for_{&v};
+ Constant *CV = ConstantInt::get(IntTy, IsLValue);
+ llvm::Function *FnBtfTypeId = llvm::Intrinsic::getDeclaration(
+ &CGM.getModule(), llvm::Intrinsic::bpf_btf_type_id,
+ {FieldVal->getType(), CV->getType()});
+ CallInst *Fn = Builder.CreateCall(FnBtfTypeId, {FieldVal, CV, FlagValue});
+ Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
+ return Fn;
+ }
+ }
}
llvm::Value *CodeGenFunction::
@@ -9679,8 +11071,8 @@ BuildVector(ArrayRef<llvm::Value*> Ops) {
}
// Otherwise, insertelement the values to build the vector.
- Value *Result =
- llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));
+ Value *Result = llvm::UndefValue::get(
+ llvm::FixedVectorType::get(Ops[0]->getType(), Ops.size()));
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
@@ -9692,14 +11084,15 @@ BuildVector(ArrayRef<llvm::Value*> Ops) {
static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
unsigned NumElts) {
- llvm::VectorType *MaskTy = llvm::VectorType::get(CGF.Builder.getInt1Ty(),
- cast<IntegerType>(Mask->getType())->getBitWidth());
+ auto *MaskTy = llvm::FixedVectorType::get(
+ CGF.Builder.getInt1Ty(),
+ cast<IntegerType>(Mask->getType())->getBitWidth());
Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy);
// If we have less than 8 elements, then the starting mask was an i8 and
// we need to extract down to the right number of elements.
if (NumElts < 8) {
- uint32_t Indices[4];
+ int Indices[4];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i;
MaskVec = CGF.Builder.CreateShuffleVector(MaskVec, MaskVec,
@@ -9709,42 +11102,40 @@ static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
return MaskVec;
}
-static Value *EmitX86MaskedStore(CodeGenFunction &CGF,
- ArrayRef<Value *> Ops,
- unsigned Align) {
+static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
+ Align Alignment) {
// Cast the pointer to right type.
Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
- Value *MaskVec = getMaskVecValue(CGF, Ops[2],
- Ops[1]->getType()->getVectorNumElements());
+ Value *MaskVec = getMaskVecValue(
+ CGF, Ops[2], cast<llvm::VectorType>(Ops[1]->getType())->getNumElements());
- return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Align, MaskVec);
+ return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec);
}
-static Value *EmitX86MaskedLoad(CodeGenFunction &CGF,
- ArrayRef<Value *> Ops, unsigned Align) {
+static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
+ Align Alignment) {
// Cast the pointer to right type.
Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
- Value *MaskVec = getMaskVecValue(CGF, Ops[2],
- Ops[1]->getType()->getVectorNumElements());
+ Value *MaskVec = getMaskVecValue(
+ CGF, Ops[2], cast<llvm::VectorType>(Ops[1]->getType())->getNumElements());
- return CGF.Builder.CreateMaskedLoad(Ptr, Align, MaskVec, Ops[1]);
+ return CGF.Builder.CreateMaskedLoad(Ptr, Alignment, MaskVec, Ops[1]);
}
static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
ArrayRef<Value *> Ops) {
- llvm::Type *ResultTy = Ops[1]->getType();
- llvm::Type *PtrTy = ResultTy->getVectorElementType();
+ auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
+ llvm::Type *PtrTy = ResultTy->getElementType();
// Cast the pointer to element type.
Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(PtrTy));
- Value *MaskVec = getMaskVecValue(CGF, Ops[2],
- ResultTy->getVectorNumElements());
+ Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
ResultTy);
@@ -9754,10 +11145,9 @@ static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
ArrayRef<Value *> Ops,
bool IsCompress) {
- llvm::Type *ResultTy = Ops[1]->getType();
+ auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
- Value *MaskVec = getMaskVecValue(CGF, Ops[2],
- ResultTy->getVectorNumElements());
+ Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
: Intrinsic::x86_avx512_mask_expand;
@@ -9767,15 +11157,14 @@ static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
static Value *EmitX86CompressStore(CodeGenFunction &CGF,
ArrayRef<Value *> Ops) {
- llvm::Type *ResultTy = Ops[1]->getType();
- llvm::Type *PtrTy = ResultTy->getVectorElementType();
+ auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
+ llvm::Type *PtrTy = ResultTy->getElementType();
// Cast the pointer to element type.
Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(PtrTy));
- Value *MaskVec = getMaskVecValue(CGF, Ops[2],
- ResultTy->getVectorNumElements());
+ Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore,
ResultTy);
@@ -9804,7 +11193,7 @@ static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1,
// Funnel shifts amounts are treated as modulo and types are all power-of-2 so
// we only care about the lowest log2 bits anyway.
if (Amt->getType() != Ty) {
- unsigned NumElts = Ty->getVectorNumElements();
+ unsigned NumElts = cast<llvm::VectorType>(Ty)->getNumElements();
Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt);
}
@@ -9862,7 +11251,8 @@ static Value *EmitX86Select(CodeGenFunction &CGF,
if (C->isAllOnesValue())
return Op0;
- Mask = getMaskVecValue(CGF, Mask, Op0->getType()->getVectorNumElements());
+ Mask = getMaskVecValue(
+ CGF, Mask, cast<llvm::VectorType>(Op0->getType())->getNumElements());
return CGF.Builder.CreateSelect(Mask, Op0, Op1);
}
@@ -9874,9 +11264,8 @@ static Value *EmitX86ScalarSelect(CodeGenFunction &CGF,
if (C->isAllOnesValue())
return Op0;
- llvm::VectorType *MaskTy =
- llvm::VectorType::get(CGF.Builder.getInt1Ty(),
- Mask->getType()->getIntegerBitWidth());
+ auto *MaskTy = llvm::FixedVectorType::get(
+ CGF.Builder.getInt1Ty(), Mask->getType()->getIntegerBitWidth());
Mask = CGF.Builder.CreateBitCast(Mask, MaskTy);
Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0);
return CGF.Builder.CreateSelect(Mask, Op0, Op1);
@@ -9891,7 +11280,7 @@ static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp,
}
if (NumElts < 8) {
- uint32_t Indices[8];
+ int Indices[8];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i;
for (unsigned i = NumElts; i != 8; ++i)
@@ -9909,15 +11298,16 @@ static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
bool Signed, ArrayRef<Value *> Ops) {
assert((Ops.size() == 2 || Ops.size() == 4) &&
"Unexpected number of arguments");
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
Value *Cmp;
if (CC == 3) {
Cmp = Constant::getNullValue(
- llvm::VectorType::get(CGF.Builder.getInt1Ty(), NumElts));
+ llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
} else if (CC == 7) {
Cmp = Constant::getAllOnesValue(
- llvm::VectorType::get(CGF.Builder.getInt1Ty(), NumElts));
+ llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
} else {
ICmpInst::Predicate Pred;
switch (CC) {
@@ -10033,24 +11423,19 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
// Only handle in case of _MM_FROUND_CUR_DIRECTION/4 (no rounding).
if (IID != Intrinsic::not_intrinsic &&
- cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4) {
+ (cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4 ||
+ IsAddSub)) {
Function *Intr = CGF.CGM.getIntrinsic(IID);
Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() });
} else {
llvm::Type *Ty = A->getType();
- Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
- Res = CGF.Builder.CreateCall(FMA, {A, B, C} );
-
- if (IsAddSub) {
- // Negate even elts in C using a mask.
- unsigned NumElts = Ty->getVectorNumElements();
- SmallVector<uint32_t, 16> Indices(NumElts);
- for (unsigned i = 0; i != NumElts; ++i)
- Indices[i] = i + (i % 2) * NumElts;
-
- Value *NegC = CGF.Builder.CreateFNeg(C);
- Value *FMSub = CGF.Builder.CreateCall(FMA, {A, B, NegC} );
- Res = CGF.Builder.CreateShuffleVector(FMSub, Res, Indices);
+ Function *FMA;
+ if (CGF.Builder.getIsFPConstrained()) {
+ FMA = CGF.CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, Ty);
+ Res = CGF.Builder.CreateConstrainedFPCall(FMA, {A, B, C});
+ } else {
+ FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
+ Res = CGF.Builder.CreateCall(FMA, {A, B, C});
}
}
@@ -10108,6 +11493,10 @@ EmitScalarFMAExpr(CodeGenFunction &CGF, MutableArrayRef<Value *> Ops,
Intrinsic::x86_avx512_vfmadd_f64;
Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
{Ops[0], Ops[1], Ops[2], Ops[4]});
+ } else if (CGF.Builder.getIsFPConstrained()) {
+ Function *FMA = CGF.CGM.getIntrinsic(
+ Intrinsic::experimental_constrained_fma, Ops[0]->getType());
+ Res = CGF.Builder.CreateConstrainedFPCall(FMA, Ops.slice(0, 3));
} else {
Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType());
Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3));
@@ -10132,8 +11521,8 @@ static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned,
ArrayRef<Value *> Ops) {
llvm::Type *Ty = Ops[0]->getType();
// Arguments have a vXi32 type so cast to vXi64.
- Ty = llvm::VectorType::get(CGF.Int64Ty,
- Ty->getPrimitiveSizeInBits() / 64);
+ Ty = llvm::FixedVectorType::get(CGF.Int64Ty,
+ Ty->getPrimitiveSizeInBits() / 64);
Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty);
Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty);
@@ -10187,7 +11576,7 @@ static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask,
static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
llvm::Type *DstTy) {
- unsigned NumberOfElements = DstTy->getVectorNumElements();
+ unsigned NumberOfElements = cast<llvm::VectorType>(DstTy)->getNumElements();
Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
}
@@ -10209,6 +11598,43 @@ Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {
return EmitX86CpuIs(CPUStr);
}
+// Convert F16 halfs to floats.
+static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF,
+ ArrayRef<Value *> Ops,
+ llvm::Type *DstTy) {
+ assert((Ops.size() == 1 || Ops.size() == 3 || Ops.size() == 4) &&
+ "Unknown cvtph2ps intrinsic");
+
+ // If the SAE intrinsic doesn't use default rounding then we can't upgrade.
+ if (Ops.size() == 4 && cast<llvm::ConstantInt>(Ops[3])->getZExtValue() != 4) {
+ Function *F =
+ CGF.CGM.getIntrinsic(Intrinsic::x86_avx512_mask_vcvtph2ps_512);
+ return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]});
+ }
+
+ unsigned NumDstElts = cast<llvm::VectorType>(DstTy)->getNumElements();
+ Value *Src = Ops[0];
+
+ // Extract the subvector.
+ if (NumDstElts != cast<llvm::VectorType>(Src->getType())->getNumElements()) {
+ assert(NumDstElts == 4 && "Unexpected vector size");
+ Src = CGF.Builder.CreateShuffleVector(Src, UndefValue::get(Src->getType()),
+ ArrayRef<int>{0, 1, 2, 3});
+ }
+
+ // Bitcast from vXi16 to vXf16.
+ auto *HalfTy = llvm::FixedVectorType::get(
+ llvm::Type::getHalfTy(CGF.getLLVMContext()), NumDstElts);
+ Src = CGF.Builder.CreateBitCast(Src, HalfTy);
+
+ // Perform the fp-extension.
+ Value *Res = CGF.Builder.CreateFPExt(Src, DstTy, "cvtph2ps");
+
+ if (Ops.size() >= 3)
+ Res = EmitX86Select(CGF, Ops[2], Res, Ops[1]);
+ return Res;
+}
+
// Convert a BF16 to a float.
static Value *EmitX86CvtBF16ToFloatExpr(CodeGenFunction &CGF,
const CallExpr *E,
@@ -10245,11 +11671,11 @@ Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr)
#define X86_VENDOR(ENUM, STRING) \
.Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)})
-#define X86_CPU_TYPE_COMPAT_WITH_ALIAS(ARCHNAME, ENUM, STR, ALIAS) \
- .Cases(STR, ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
-#define X86_CPU_TYPE_COMPAT(ARCHNAME, ENUM, STR) \
+#define X86_CPU_TYPE_ALIAS(ENUM, ALIAS) \
+ .Case(ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
+#define X86_CPU_TYPE(ENUM, STR) \
.Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
-#define X86_CPU_SUBTYPE_COMPAT(ARCHNAME, ENUM, STR) \
+#define X86_CPU_SUBTYPE(ENUM, STR) \
.Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
#include "llvm/Support/X86TargetParser.def"
.Default({0, 0});
@@ -10279,7 +11705,7 @@ CodeGenFunction::GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs) {
for (const StringRef &FeatureStr : FeatureStrs) {
unsigned Feature =
StringSwitch<unsigned>(FeatureStr)
-#define X86_FEATURE_COMPAT(VAL, ENUM, STR) .Case(STR, VAL)
+#define X86_FEATURE_COMPAT(ENUM, STR) .Case(STR, llvm::X86::FEATURE_##ENUM)
#include "llvm/Support/X86TargetParser.def"
;
FeaturesMask |= (1ULL << Feature);
@@ -10404,8 +11830,13 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// TODO: The builtins could be removed if the SSE header files used vector
// extension comparisons directly (vector ordered/unordered may need
// additional support via __builtin_isnan()).
- auto getVectorFCmpIR = [this, &Ops](CmpInst::Predicate Pred) {
- Value *Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
+ auto getVectorFCmpIR = [this, &Ops](CmpInst::Predicate Pred,
+ bool IsSignaling) {
+ Value *Cmp;
+ if (IsSignaling)
+ Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
+ else
+ Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType());
llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(FPVecTy);
Value *Sext = Builder.CreateSExt(Cmp, IntVecTy);
@@ -10484,7 +11915,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vec_ext_v16hi:
case X86::BI__builtin_ia32_vec_ext_v8si:
case X86::BI__builtin_ia32_vec_ext_v4di: {
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
Index &= NumElts - 1;
// These builtins exist so we can ensure the index is an ICE and in range.
@@ -10499,7 +11931,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vec_set_v16hi:
case X86::BI__builtin_ia32_vec_set_v8si:
case X86::BI__builtin_ia32_vec_set_v4di: {
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
Index &= NumElts - 1;
// These builtins exist so we can ensure the index is an ICE and in range.
@@ -10587,12 +12020,12 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_storedquqi512_mask:
case X86::BI__builtin_ia32_storeupd512_mask:
case X86::BI__builtin_ia32_storeups512_mask:
- return EmitX86MaskedStore(*this, Ops, 1);
+ return EmitX86MaskedStore(*this, Ops, Align(1));
case X86::BI__builtin_ia32_storess128_mask:
- case X86::BI__builtin_ia32_storesd128_mask: {
- return EmitX86MaskedStore(*this, Ops, 1);
- }
+ case X86::BI__builtin_ia32_storesd128_mask:
+ return EmitX86MaskedStore(*this, Ops, Align(1));
+
case X86::BI__builtin_ia32_vpopcntb_128:
case X86::BI__builtin_ia32_vpopcntd_128:
case X86::BI__builtin_ia32_vpopcntq_128:
@@ -10678,10 +12111,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vfmaddpd512_mask3:
case X86::BI__builtin_ia32_vfmsubpd512_mask3:
return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/false);
- case X86::BI__builtin_ia32_vfmaddsubps:
- case X86::BI__builtin_ia32_vfmaddsubpd:
- case X86::BI__builtin_ia32_vfmaddsubps256:
- case X86::BI__builtin_ia32_vfmaddsubpd256:
case X86::BI__builtin_ia32_vfmaddsubps512_mask:
case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
@@ -10703,11 +12132,11 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_movdqa32store512_mask:
case X86::BI__builtin_ia32_movdqa64store512_mask:
case X86::BI__builtin_ia32_storeaps512_mask:
- case X86::BI__builtin_ia32_storeapd512_mask: {
- unsigned Align =
- getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
- return EmitX86MaskedStore(*this, Ops, Align);
- }
+ case X86::BI__builtin_ia32_storeapd512_mask:
+ return EmitX86MaskedStore(
+ *this, Ops,
+ getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
+
case X86::BI__builtin_ia32_loadups128_mask:
case X86::BI__builtin_ia32_loadups256_mask:
case X86::BI__builtin_ia32_loadups512_mask:
@@ -10726,11 +12155,11 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_loaddqudi128_mask:
case X86::BI__builtin_ia32_loaddqudi256_mask:
case X86::BI__builtin_ia32_loaddqudi512_mask:
- return EmitX86MaskedLoad(*this, Ops, 1);
+ return EmitX86MaskedLoad(*this, Ops, Align(1));
case X86::BI__builtin_ia32_loadss128_mask:
case X86::BI__builtin_ia32_loadsd128_mask:
- return EmitX86MaskedLoad(*this, Ops, 1);
+ return EmitX86MaskedLoad(*this, Ops, Align(1));
case X86::BI__builtin_ia32_loadaps128_mask:
case X86::BI__builtin_ia32_loadaps256_mask:
@@ -10743,11 +12172,10 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_movdqa32load512_mask:
case X86::BI__builtin_ia32_movdqa64load128_mask:
case X86::BI__builtin_ia32_movdqa64load256_mask:
- case X86::BI__builtin_ia32_movdqa64load512_mask: {
- unsigned Align =
- getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
- return EmitX86MaskedLoad(*this, Ops, Align);
- }
+ case X86::BI__builtin_ia32_movdqa64load512_mask:
+ return EmitX86MaskedLoad(
+ *this, Ops,
+ getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
case X86::BI__builtin_ia32_expandloaddf128_mask:
case X86::BI__builtin_ia32_expandloaddf256_mask:
@@ -10930,8 +12358,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
break;
}
- unsigned MinElts = std::min(Ops[0]->getType()->getVectorNumElements(),
- Ops[2]->getType()->getVectorNumElements());
+ unsigned MinElts =
+ std::min(cast<llvm::VectorType>(Ops[0]->getType())->getNumElements(),
+ cast<llvm::VectorType>(Ops[2]->getType())->getNumElements());
Ops[3] = getMaskVecValue(*this, Ops[3], MinElts);
Function *Intr = CGM.getIntrinsic(IID);
return Builder.CreateCall(Intr, Ops);
@@ -11038,8 +12467,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
break;
}
- unsigned MinElts = std::min(Ops[2]->getType()->getVectorNumElements(),
- Ops[3]->getType()->getVectorNumElements());
+ unsigned MinElts =
+ std::min(cast<llvm::VectorType>(Ops[2]->getType())->getNumElements(),
+ cast<llvm::VectorType>(Ops[3]->getType())->getNumElements());
Ops[1] = getMaskVecValue(*this, Ops[1], MinElts);
Function *Intr = CGM.getIntrinsic(IID);
return Builder.CreateCall(Intr, Ops);
@@ -11061,16 +12491,17 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_extracti64x2_256_mask:
case X86::BI__builtin_ia32_extractf64x2_512_mask:
case X86::BI__builtin_ia32_extracti64x2_512_mask: {
- llvm::Type *DstTy = ConvertType(E->getType());
- unsigned NumElts = DstTy->getVectorNumElements();
- unsigned SrcNumElts = Ops[0]->getType()->getVectorNumElements();
+ auto *DstTy = cast<llvm::VectorType>(ConvertType(E->getType()));
+ unsigned NumElts = DstTy->getNumElements();
+ unsigned SrcNumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
unsigned SubVectors = SrcNumElts / NumElts;
unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
Index &= SubVectors - 1; // Remove any extra bits.
Index *= NumElts;
- uint32_t Indices[16];
+ int Indices[16];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i + Index;
@@ -11100,15 +12531,17 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_inserti64x2_256:
case X86::BI__builtin_ia32_insertf64x2_512:
case X86::BI__builtin_ia32_inserti64x2_512: {
- unsigned DstNumElts = Ops[0]->getType()->getVectorNumElements();
- unsigned SrcNumElts = Ops[1]->getType()->getVectorNumElements();
+ unsigned DstNumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+ unsigned SrcNumElts =
+ cast<llvm::VectorType>(Ops[1]->getType())->getNumElements();
unsigned SubVectors = DstNumElts / SrcNumElts;
unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
Index &= SubVectors - 1; // Remove any extra bits.
Index *= SrcNumElts;
- uint32_t Indices[16];
+ int Indices[16];
for (unsigned i = 0; i != DstNumElts; ++i)
Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i;
@@ -11165,10 +12598,11 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_pblendw256:
case X86::BI__builtin_ia32_pblendd128:
case X86::BI__builtin_ia32_pblendd256: {
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
- uint32_t Indices[16];
+ int Indices[16];
// If there are more than 8 elements, the immediate is used twice so make
// sure we handle that.
for (unsigned i = 0; i != NumElts; ++i)
@@ -11182,13 +12616,13 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_pshuflw256:
case X86::BI__builtin_ia32_pshuflw512: {
uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
- llvm::Type *Ty = Ops[0]->getType();
- unsigned NumElts = Ty->getVectorNumElements();
+ auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+ unsigned NumElts = Ty->getNumElements();
// Splat the 8-bits of immediate 4 times to help the loop wrap around.
Imm = (Imm & 0xff) * 0x01010101;
- uint32_t Indices[32];
+ int Indices[32];
for (unsigned l = 0; l != NumElts; l += 8) {
for (unsigned i = 0; i != 4; ++i) {
Indices[l + i] = l + (Imm & 3);
@@ -11206,13 +12640,13 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_pshufhw256:
case X86::BI__builtin_ia32_pshufhw512: {
uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
- llvm::Type *Ty = Ops[0]->getType();
- unsigned NumElts = Ty->getVectorNumElements();
+ auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+ unsigned NumElts = Ty->getNumElements();
// Splat the 8-bits of immediate 4 times to help the loop wrap around.
Imm = (Imm & 0xff) * 0x01010101;
- uint32_t Indices[32];
+ int Indices[32];
for (unsigned l = 0; l != NumElts; l += 8) {
for (unsigned i = 0; i != 4; ++i)
Indices[l + i] = l + i;
@@ -11236,15 +12670,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vpermilpd512:
case X86::BI__builtin_ia32_vpermilps512: {
uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
- llvm::Type *Ty = Ops[0]->getType();
- unsigned NumElts = Ty->getVectorNumElements();
+ auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+ unsigned NumElts = Ty->getNumElements();
unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
unsigned NumLaneElts = NumElts / NumLanes;
// Splat the 8-bits of immediate 4 times to help the loop wrap around.
Imm = (Imm & 0xff) * 0x01010101;
- uint32_t Indices[16];
+ int Indices[16];
for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
for (unsigned i = 0; i != NumLaneElts; ++i) {
Indices[i + l] = (Imm % NumLaneElts) + l;
@@ -11263,15 +12697,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_shufps256:
case X86::BI__builtin_ia32_shufps512: {
uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
- llvm::Type *Ty = Ops[0]->getType();
- unsigned NumElts = Ty->getVectorNumElements();
+ auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+ unsigned NumElts = Ty->getNumElements();
unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
unsigned NumLaneElts = NumElts / NumLanes;
// Splat the 8-bits of immediate 4 times to help the loop wrap around.
Imm = (Imm & 0xff) * 0x01010101;
- uint32_t Indices[16];
+ int Indices[16];
for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
for (unsigned i = 0; i != NumLaneElts; ++i) {
unsigned Index = Imm % NumLaneElts;
@@ -11291,11 +12725,11 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_permdi512:
case X86::BI__builtin_ia32_permdf512: {
unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
- llvm::Type *Ty = Ops[0]->getType();
- unsigned NumElts = Ty->getVectorNumElements();
+ auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+ unsigned NumElts = Ty->getNumElements();
// These intrinsics operate on 256-bit lanes of four 64-bit elements.
- uint32_t Indices[8];
+ int Indices[8];
for (unsigned l = 0; l != NumElts; l += 4)
for (unsigned i = 0; i != 4; ++i)
Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3);
@@ -11309,7 +12743,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_palignr512: {
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
assert(NumElts % 16 == 0);
// If palignr is shifting the pair of vectors more than the size of two
@@ -11325,7 +12760,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
}
- uint32_t Indices[64];
+ int Indices[64];
// 256-bit palignr operates on 128-bit lanes so we need to handle that
for (unsigned l = 0; l != NumElts; l += 16) {
for (unsigned i = 0; i != 16; ++i) {
@@ -11346,13 +12781,14 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_alignq128:
case X86::BI__builtin_ia32_alignq256:
case X86::BI__builtin_ia32_alignq512: {
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
// Mask the shift amount to width of two vectors.
ShiftVal &= (2 * NumElts) - 1;
- uint32_t Indices[16];
+ int Indices[16];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i + ShiftVal;
@@ -11369,12 +12805,12 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_shuf_i32x4:
case X86::BI__builtin_ia32_shuf_i64x2: {
unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
- llvm::Type *Ty = Ops[0]->getType();
- unsigned NumElts = Ty->getVectorNumElements();
+ auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+ unsigned NumElts = Ty->getNumElements();
unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
unsigned NumLaneElts = NumElts / NumLanes;
- uint32_t Indices[16];
+ int Indices[16];
for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
unsigned Index = (Imm % NumLanes) * NumLaneElts;
Imm /= NumLanes; // Discard the bits we just used.
@@ -11395,7 +12831,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vperm2f128_si256:
case X86::BI__builtin_ia32_permti256: {
unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
// This takes a very simple approach since there are two lanes and a
// shuffle can have 2 inputs. So we reserve the first input for the first
@@ -11403,7 +12840,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// duplicate sources, but this can be dealt with in the backend.
Value *OutOps[2];
- uint32_t Indices[8];
+ int Indices[8];
for (unsigned l = 0; l != 2; ++l) {
// Determine the source for this lane.
if (Imm & (1 << ((l * 4) + 3)))
@@ -11433,15 +12870,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_pslldqi256_byteshift:
case X86::BI__builtin_ia32_pslldqi512_byteshift: {
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
- llvm::Type *ResultType = Ops[0]->getType();
+ auto *ResultType = cast<llvm::VectorType>(Ops[0]->getType());
// Builtin type is vXi64 so multiply by 8 to get bytes.
- unsigned NumElts = ResultType->getVectorNumElements() * 8;
+ unsigned NumElts = ResultType->getNumElements() * 8;
// If pslldq is shifting the vector more than 15 bytes, emit zero.
if (ShiftVal >= 16)
return llvm::Constant::getNullValue(ResultType);
- uint32_t Indices[64];
+ int Indices[64];
// 256/512-bit pslldq operates on 128-bit lanes so we need to handle that
for (unsigned l = 0; l != NumElts; l += 16) {
for (unsigned i = 0; i != 16; ++i) {
@@ -11451,7 +12888,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
}
- llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, NumElts);
+ auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Value *Zero = llvm::Constant::getNullValue(VecTy);
Value *SV = Builder.CreateShuffleVector(Zero, Cast,
@@ -11463,15 +12900,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_psrldqi256_byteshift:
case X86::BI__builtin_ia32_psrldqi512_byteshift: {
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
- llvm::Type *ResultType = Ops[0]->getType();
+ auto *ResultType = cast<llvm::VectorType>(Ops[0]->getType());
// Builtin type is vXi64 so multiply by 8 to get bytes.
- unsigned NumElts = ResultType->getVectorNumElements() * 8;
+ unsigned NumElts = ResultType->getNumElements() * 8;
// If psrldq is shifting the vector more than 15 bytes, emit zero.
if (ShiftVal >= 16)
return llvm::Constant::getNullValue(ResultType);
- uint32_t Indices[64];
+ int Indices[64];
// 256/512-bit psrldq operates on 128-bit lanes so we need to handle that
for (unsigned l = 0; l != NumElts; l += 16) {
for (unsigned i = 0; i != 16; ++i) {
@@ -11481,7 +12918,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
}
- llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, NumElts);
+ auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Value *Zero = llvm::Constant::getNullValue(VecTy);
Value *SV = Builder.CreateShuffleVector(Cast, Zero,
@@ -11501,7 +12938,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Value *In = getMaskVecValue(*this, Ops[0], NumElts);
- uint32_t Indices[64];
+ int Indices[64];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = NumElts + i - ShiftVal;
@@ -11523,7 +12960,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Value *In = getMaskVecValue(*this, Ops[0], NumElts);
- uint32_t Indices[64];
+ int Indices[64];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i + ShiftVal;
@@ -11555,7 +12992,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// Unaligned nontemporal store of the scalar value.
StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC);
SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
- SI->setAlignment(llvm::Align::None());
+ SI->setAlignment(llvm::Align(1));
return SI;
}
// Rotate is a special case of funnel shift - 1st 2 args are the same.
@@ -11803,7 +13240,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
- uint32_t Indices[64];
+ int Indices[64];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i;
@@ -11832,8 +13269,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_sqrtss:
case X86::BI__builtin_ia32_sqrtsd: {
Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
- Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
- A = Builder.CreateCall(F, {A});
+ Function *F;
+ if (Builder.getIsFPConstrained()) {
+ F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
+ A->getType());
+ A = Builder.CreateConstrainedFPCall(F, {A});
+ } else {
+ F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
+ A = Builder.CreateCall(F, {A});
+ }
return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
}
case X86::BI__builtin_ia32_sqrtsd_round_mask:
@@ -11848,8 +13292,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
}
Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
- Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
- A = Builder.CreateCall(F, A);
+ Function *F;
+ if (Builder.getIsFPConstrained()) {
+ F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
+ A->getType());
+ A = Builder.CreateConstrainedFPCall(F, A);
+ } else {
+ F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
+ A = Builder.CreateCall(F, A);
+ }
Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
A = EmitX86ScalarSelect(*this, Ops[3], A, Src);
return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
@@ -11871,8 +13322,14 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
}
}
- Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
- return Builder.CreateCall(F, Ops[0]);
+ if (Builder.getIsFPConstrained()) {
+ Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
+ Ops[0]->getType());
+ return Builder.CreateConstrainedFPCall(F, Ops[0]);
+ } else {
+ Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
+ return Builder.CreateCall(F, Ops[0]);
+ }
}
case X86::BI__builtin_ia32_pabsb128:
case X86::BI__builtin_ia32_pabsw128:
@@ -12089,7 +13546,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_fpclasspd128_mask:
case X86::BI__builtin_ia32_fpclasspd256_mask:
case X86::BI__builtin_ia32_fpclasspd512_mask: {
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
Value *MaskIn = Ops[2];
Ops.erase(&Ops[2]);
@@ -12126,7 +13584,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vp2intersect_d_512:
case X86::BI__builtin_ia32_vp2intersect_d_256:
case X86::BI__builtin_ia32_vp2intersect_d_128: {
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
Intrinsic::ID ID;
switch (BuiltinID) {
@@ -12184,7 +13643,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
Value *MaskIn = Ops[2];
Ops.erase(&Ops[2]);
@@ -12209,28 +13669,28 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// packed comparison intrinsics
case X86::BI__builtin_ia32_cmpeqps:
case X86::BI__builtin_ia32_cmpeqpd:
- return getVectorFCmpIR(CmpInst::FCMP_OEQ);
+ return getVectorFCmpIR(CmpInst::FCMP_OEQ, /*IsSignaling*/false);
case X86::BI__builtin_ia32_cmpltps:
case X86::BI__builtin_ia32_cmpltpd:
- return getVectorFCmpIR(CmpInst::FCMP_OLT);
+ return getVectorFCmpIR(CmpInst::FCMP_OLT, /*IsSignaling*/true);
case X86::BI__builtin_ia32_cmpleps:
case X86::BI__builtin_ia32_cmplepd:
- return getVectorFCmpIR(CmpInst::FCMP_OLE);
+ return getVectorFCmpIR(CmpInst::FCMP_OLE, /*IsSignaling*/true);
case X86::BI__builtin_ia32_cmpunordps:
case X86::BI__builtin_ia32_cmpunordpd:
- return getVectorFCmpIR(CmpInst::FCMP_UNO);
+ return getVectorFCmpIR(CmpInst::FCMP_UNO, /*IsSignaling*/false);
case X86::BI__builtin_ia32_cmpneqps:
case X86::BI__builtin_ia32_cmpneqpd:
- return getVectorFCmpIR(CmpInst::FCMP_UNE);
+ return getVectorFCmpIR(CmpInst::FCMP_UNE, /*IsSignaling*/false);
case X86::BI__builtin_ia32_cmpnltps:
case X86::BI__builtin_ia32_cmpnltpd:
- return getVectorFCmpIR(CmpInst::FCMP_UGE);
+ return getVectorFCmpIR(CmpInst::FCMP_UGE, /*IsSignaling*/true);
case X86::BI__builtin_ia32_cmpnleps:
case X86::BI__builtin_ia32_cmpnlepd:
- return getVectorFCmpIR(CmpInst::FCMP_UGT);
+ return getVectorFCmpIR(CmpInst::FCMP_UGT, /*IsSignaling*/true);
case X86::BI__builtin_ia32_cmpordps:
case X86::BI__builtin_ia32_cmpordpd:
- return getVectorFCmpIR(CmpInst::FCMP_ORD);
+ return getVectorFCmpIR(CmpInst::FCMP_ORD, /*IsSignaling*/false);
case X86::BI__builtin_ia32_cmpps:
case X86::BI__builtin_ia32_cmpps256:
case X86::BI__builtin_ia32_cmppd:
@@ -12255,42 +13715,90 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// Ignoring requested signaling behaviour,
// e.g. both _CMP_GT_OS & _CMP_GT_OQ are translated to FCMP_OGT.
FCmpInst::Predicate Pred;
- switch (CC) {
- case 0x00: Pred = FCmpInst::FCMP_OEQ; break;
- case 0x01: Pred = FCmpInst::FCMP_OLT; break;
- case 0x02: Pred = FCmpInst::FCMP_OLE; break;
- case 0x03: Pred = FCmpInst::FCMP_UNO; break;
- case 0x04: Pred = FCmpInst::FCMP_UNE; break;
- case 0x05: Pred = FCmpInst::FCMP_UGE; break;
- case 0x06: Pred = FCmpInst::FCMP_UGT; break;
- case 0x07: Pred = FCmpInst::FCMP_ORD; break;
- case 0x08: Pred = FCmpInst::FCMP_UEQ; break;
- case 0x09: Pred = FCmpInst::FCMP_ULT; break;
- case 0x0a: Pred = FCmpInst::FCMP_ULE; break;
- case 0x0b: Pred = FCmpInst::FCMP_FALSE; break;
- case 0x0c: Pred = FCmpInst::FCMP_ONE; break;
- case 0x0d: Pred = FCmpInst::FCMP_OGE; break;
- case 0x0e: Pred = FCmpInst::FCMP_OGT; break;
- case 0x0f: Pred = FCmpInst::FCMP_TRUE; break;
- case 0x10: Pred = FCmpInst::FCMP_OEQ; break;
- case 0x11: Pred = FCmpInst::FCMP_OLT; break;
- case 0x12: Pred = FCmpInst::FCMP_OLE; break;
- case 0x13: Pred = FCmpInst::FCMP_UNO; break;
- case 0x14: Pred = FCmpInst::FCMP_UNE; break;
- case 0x15: Pred = FCmpInst::FCMP_UGE; break;
- case 0x16: Pred = FCmpInst::FCMP_UGT; break;
- case 0x17: Pred = FCmpInst::FCMP_ORD; break;
- case 0x18: Pred = FCmpInst::FCMP_UEQ; break;
- case 0x19: Pred = FCmpInst::FCMP_ULT; break;
- case 0x1a: Pred = FCmpInst::FCMP_ULE; break;
- case 0x1b: Pred = FCmpInst::FCMP_FALSE; break;
- case 0x1c: Pred = FCmpInst::FCMP_ONE; break;
- case 0x1d: Pred = FCmpInst::FCMP_OGE; break;
- case 0x1e: Pred = FCmpInst::FCMP_OGT; break;
- case 0x1f: Pred = FCmpInst::FCMP_TRUE; break;
+ bool IsSignaling;
+ // Predicates for 16-31 repeat the 0-15 predicates. Only the signalling
+ // behavior is inverted. We'll handle that after the switch.
+ switch (CC & 0xf) {
+ case 0x00: Pred = FCmpInst::FCMP_OEQ; IsSignaling = false; break;
+ case 0x01: Pred = FCmpInst::FCMP_OLT; IsSignaling = true; break;
+ case 0x02: Pred = FCmpInst::FCMP_OLE; IsSignaling = true; break;
+ case 0x03: Pred = FCmpInst::FCMP_UNO; IsSignaling = false; break;
+ case 0x04: Pred = FCmpInst::FCMP_UNE; IsSignaling = false; break;
+ case 0x05: Pred = FCmpInst::FCMP_UGE; IsSignaling = true; break;
+ case 0x06: Pred = FCmpInst::FCMP_UGT; IsSignaling = true; break;
+ case 0x07: Pred = FCmpInst::FCMP_ORD; IsSignaling = false; break;
+ case 0x08: Pred = FCmpInst::FCMP_UEQ; IsSignaling = false; break;
+ case 0x09: Pred = FCmpInst::FCMP_ULT; IsSignaling = true; break;
+ case 0x0a: Pred = FCmpInst::FCMP_ULE; IsSignaling = true; break;
+ case 0x0b: Pred = FCmpInst::FCMP_FALSE; IsSignaling = false; break;
+ case 0x0c: Pred = FCmpInst::FCMP_ONE; IsSignaling = false; break;
+ case 0x0d: Pred = FCmpInst::FCMP_OGE; IsSignaling = true; break;
+ case 0x0e: Pred = FCmpInst::FCMP_OGT; IsSignaling = true; break;
+ case 0x0f: Pred = FCmpInst::FCMP_TRUE; IsSignaling = false; break;
default: llvm_unreachable("Unhandled CC");
}
+ // Invert the signalling behavior for 16-31.
+ if (CC & 0x10)
+ IsSignaling = !IsSignaling;
+
+ // If the predicate is true or false and we're using constrained intrinsics,
+ // we don't have a compare intrinsic we can use. Just use the legacy X86
+ // specific intrinsic.
+ if ((Pred == FCmpInst::FCMP_TRUE || Pred == FCmpInst::FCMP_FALSE) &&
+ Builder.getIsFPConstrained()) {
+
+ Intrinsic::ID IID;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unexpected builtin");
+ case X86::BI__builtin_ia32_cmpps:
+ IID = Intrinsic::x86_sse_cmp_ps;
+ break;
+ case X86::BI__builtin_ia32_cmpps256:
+ IID = Intrinsic::x86_avx_cmp_ps_256;
+ break;
+ case X86::BI__builtin_ia32_cmppd:
+ IID = Intrinsic::x86_sse2_cmp_pd;
+ break;
+ case X86::BI__builtin_ia32_cmppd256:
+ IID = Intrinsic::x86_avx_cmp_pd_256;
+ break;
+ case X86::BI__builtin_ia32_cmpps512_mask:
+ IID = Intrinsic::x86_avx512_cmp_ps_512;
+ break;
+ case X86::BI__builtin_ia32_cmppd512_mask:
+ IID = Intrinsic::x86_avx512_cmp_pd_512;
+ break;
+ case X86::BI__builtin_ia32_cmpps128_mask:
+ IID = Intrinsic::x86_avx512_cmp_ps_128;
+ break;
+ case X86::BI__builtin_ia32_cmpps256_mask:
+ IID = Intrinsic::x86_avx512_cmp_ps_256;
+ break;
+ case X86::BI__builtin_ia32_cmppd128_mask:
+ IID = Intrinsic::x86_avx512_cmp_pd_128;
+ break;
+ case X86::BI__builtin_ia32_cmppd256_mask:
+ IID = Intrinsic::x86_avx512_cmp_pd_256;
+ break;
+ }
+
+ Function *Intr = CGM.getIntrinsic(IID);
+ if (cast<llvm::VectorType>(Intr->getReturnType())
+ ->getElementType()
+ ->isIntegerTy(1)) {
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+ Value *MaskIn = Ops[3];
+ Ops.erase(&Ops[3]);
+
+ Value *Cmp = Builder.CreateCall(Intr, Ops);
+ return EmitX86MaskedCompareResult(*this, Cmp, NumElts, MaskIn);
+ }
+
+ return Builder.CreateCall(Intr, Ops);
+ }
+
// Builtins without the _mask suffix return a vector of integers
// of the same width as the input vectors
switch (BuiltinID) {
@@ -12300,12 +13808,18 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_cmpps256_mask:
case X86::BI__builtin_ia32_cmppd128_mask:
case X86::BI__builtin_ia32_cmppd256_mask: {
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
- Value *Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
+ // FIXME: Support SAE.
+ unsigned NumElts =
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+ Value *Cmp;
+ if (IsSignaling)
+ Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
+ else
+ Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]);
}
default:
- return getVectorFCmpIR(Pred);
+ return getVectorFCmpIR(Pred, IsSignaling);
}
}
@@ -12343,10 +13857,19 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_cmpordsd:
return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7);
+ // f16c half2float intrinsics
+ case X86::BI__builtin_ia32_vcvtph2ps:
+ case X86::BI__builtin_ia32_vcvtph2ps256:
+ case X86::BI__builtin_ia32_vcvtph2ps_mask:
+ case X86::BI__builtin_ia32_vcvtph2ps256_mask:
+ case X86::BI__builtin_ia32_vcvtph2ps512_mask:
+ return EmitX86CvtF16ToFloatExpr(*this, Ops, ConvertType(E->getType()));
+
// AVX512 bf16 intrinsics
case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
- Ops[2] = getMaskVecValue(*this, Ops[2],
- Ops[0]->getType()->getVectorNumElements());
+ Ops[2] = getMaskVecValue(
+ *this, Ops[2],
+ cast<llvm::VectorType>(Ops[0]->getType())->getNumElements());
Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128;
return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
}
@@ -12506,7 +14029,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__stosb: {
// We treat __stosb as a volatile memset - it may not generate "rep stosb"
// instruction, but it will create a memset that won't be optimized away.
- return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], Align::None(), true);
+ return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], Align(1), true);
}
case X86::BI__ud2:
// llvm.trap makes a ud2a instruction on x86.
@@ -12731,9 +14254,14 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_vsx_xvsqrtdp: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
- ID = Intrinsic::sqrt;
- llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
- return Builder.CreateCall(F, X);
+ if (Builder.getIsFPConstrained()) {
+ llvm::Function *F = CGM.getIntrinsic(
+ Intrinsic::experimental_constrained_sqrt, ResultType);
+ return Builder.CreateConstrainedFPCall(F, X);
+ } else {
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
+ return Builder.CreateCall(F, X);
+ }
}
// Count leading zeros
case PPC::BI__builtin_altivec_vclzb:
@@ -12790,21 +14318,32 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Value *X = EmitScalarExpr(E->getArg(0));
if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim ||
BuiltinID == PPC::BI__builtin_vsx_xvrspim)
- ID = Intrinsic::floor;
+ ID = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_floor
+ : Intrinsic::floor;
else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi ||
BuiltinID == PPC::BI__builtin_vsx_xvrspi)
- ID = Intrinsic::round;
+ ID = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_round
+ : Intrinsic::round;
else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
BuiltinID == PPC::BI__builtin_vsx_xvrspic)
- ID = Intrinsic::nearbyint;
+ ID = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_nearbyint
+ : Intrinsic::nearbyint;
else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
BuiltinID == PPC::BI__builtin_vsx_xvrspip)
- ID = Intrinsic::ceil;
+ ID = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_ceil
+ : Intrinsic::ceil;
else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
- ID = Intrinsic::trunc;
+ ID = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_trunc
+ : Intrinsic::trunc;
llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
- return Builder.CreateCall(F, X);
+ return Builder.getIsFPConstrained() ? Builder.CreateConstrainedFPCall(F, X)
+ : Builder.CreateCall(F, X);
}
// Absolute value
@@ -12829,25 +14368,43 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
- Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
- llvm::Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
+ llvm::Function *F;
+ if (Builder.getIsFPConstrained())
+ F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
+ else
+ F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
switch (BuiltinID) {
case PPC::BI__builtin_vsx_xvmaddadp:
case PPC::BI__builtin_vsx_xvmaddasp:
- return Builder.CreateCall(F, {X, Y, Z});
+ if (Builder.getIsFPConstrained())
+ return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
+ else
+ return Builder.CreateCall(F, {X, Y, Z});
case PPC::BI__builtin_vsx_xvnmaddadp:
case PPC::BI__builtin_vsx_xvnmaddasp:
- return Builder.CreateFSub(Zero,
- Builder.CreateCall(F, {X, Y, Z}), "sub");
+ if (Builder.getIsFPConstrained())
+ return Builder.CreateFNeg(
+ Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
+ else
+ return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
case PPC::BI__builtin_vsx_xvmsubadp:
case PPC::BI__builtin_vsx_xvmsubasp:
- return Builder.CreateCall(F,
- {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
+ if (Builder.getIsFPConstrained())
+ return Builder.CreateConstrainedFPCall(
+ F, {X, Y, Builder.CreateFNeg(Z, "neg")});
+ else
+ return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
case PPC::BI__builtin_vsx_xvnmsubadp:
case PPC::BI__builtin_vsx_xvnmsubasp:
- Value *FsubRes =
- Builder.CreateCall(F, {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
- return Builder.CreateFSub(Zero, FsubRes, "sub");
+ if (Builder.getIsFPConstrained())
+ return Builder.CreateFNeg(
+ Builder.CreateConstrainedFPCall(
+ F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
+ "neg");
+ else
+ return Builder.CreateFNeg(
+ Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
+ "neg");
}
llvm_unreachable("Unknown FMA operation");
return nullptr; // Suppress no-return warning
@@ -12873,25 +14430,22 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
// Need to cast the second argument from a vector of unsigned int to a
// vector of long long.
- Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int64Ty, 2));
+ Ops[1] =
+ Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
if (getTarget().isLittleEndian()) {
- // Create a shuffle mask of (1, 0)
- Constant *ShuffleElts[2] = { ConstantInt::get(Int32Ty, 1),
- ConstantInt::get(Int32Ty, 0)
- };
- Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
-
// Reverse the double words in the vector we will extract from.
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
- Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ShuffleMask);
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
+ Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ArrayRef<int>{1, 0});
// Reverse the index.
Index = MaxIndex - Index;
}
// Intrinsic expects the first arg to be a vector of int.
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
Ops[2] = ConstantInt::getSigned(Int32Ty, Index);
return Builder.CreateCall(F, Ops);
}
@@ -12900,7 +14454,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw);
// Intrinsic expects the first argument to be a vector of doublewords.
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
// The second argument is a compile time constant int that needs to
// be clamped to the range [0, 12].
@@ -12918,13 +14473,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
// Emit the call, then reverse the double words of the results vector.
Value *Call = Builder.CreateCall(F, Ops);
- // Create a shuffle mask of (1, 0)
- Constant *ShuffleElts[2] = { ConstantInt::get(Int32Ty, 1),
- ConstantInt::get(Int32Ty, 0)
- };
- Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
-
- Value *ShuffleCall = Builder.CreateShuffleVector(Call, Call, ShuffleMask);
+ Value *ShuffleCall =
+ Builder.CreateShuffleVector(Call, Call, ArrayRef<int>{1, 0});
return ShuffleCall;
} else {
Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
@@ -12937,21 +14487,20 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
assert(ArgCI && "Third arg must be constant integer!");
unsigned Index = ArgCI->getZExtValue();
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
- Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int64Ty, 2));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
+ Ops[1] =
+ Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
// Account for endianness by treating this as just a shuffle. So we use the
// same indices for both LE and BE in order to produce expected results in
// both cases.
- unsigned ElemIdx0 = (Index & 2) >> 1;
- unsigned ElemIdx1 = 2 + (Index & 1);
-
- Constant *ShuffleElts[2] = {ConstantInt::get(Int32Ty, ElemIdx0),
- ConstantInt::get(Int32Ty, ElemIdx1)};
- Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
+ int ElemIdx0 = (Index & 2) >> 1;
+ int ElemIdx1 = 2 + (Index & 1);
+ int ShuffleElts[2] = {ElemIdx0, ElemIdx1};
Value *ShuffleCall =
- Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleMask);
+ Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
QualType BIRetType = E->getType();
auto RetTy = ConvertType(BIRetType);
return Builder.CreateBitCast(ShuffleCall, RetTy);
@@ -12961,14 +14510,16 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
assert(ArgCI && "Third argument must be a compile time constant");
unsigned Index = ArgCI->getZExtValue() & 0x3;
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4));
- Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int32Ty, 4));
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
+ Ops[1] =
+ Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int32Ty, 4));
// Create a shuffle mask
- unsigned ElemIdx0;
- unsigned ElemIdx1;
- unsigned ElemIdx2;
- unsigned ElemIdx3;
+ int ElemIdx0;
+ int ElemIdx1;
+ int ElemIdx2;
+ int ElemIdx3;
if (getTarget().isLittleEndian()) {
// Little endian element N comes from element 8+N-Index of the
// concatenated wide vector (of course, using modulo arithmetic on
@@ -12985,14 +14536,9 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
ElemIdx3 = Index + 3;
}
- Constant *ShuffleElts[4] = {ConstantInt::get(Int32Ty, ElemIdx0),
- ConstantInt::get(Int32Ty, ElemIdx1),
- ConstantInt::get(Int32Ty, ElemIdx2),
- ConstantInt::get(Int32Ty, ElemIdx3)};
-
- Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
+ int ShuffleElts[4] = {ElemIdx0, ElemIdx1, ElemIdx2, ElemIdx3};
Value *ShuffleCall =
- Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleMask);
+ Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
QualType BIRetType = E->getType();
auto RetTy = ConvertType(BIRetType);
return Builder.CreateBitCast(ShuffleCall, RetTy);
@@ -13001,7 +14547,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_pack_vector_int128: {
bool isLittleEndian = getTarget().isLittleEndian();
Value *UndefValue =
- llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), 2));
+ llvm::UndefValue::get(llvm::FixedVectorType::get(Ops[0]->getType(), 2));
Value *Res = Builder.CreateInsertElement(
UndefValue, Ops[0], (uint64_t)(isLittleEndian ? 1 : 0));
Res = Builder.CreateInsertElement(Res, Ops[1],
@@ -13012,7 +14558,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_unpack_vector_int128: {
ConstantInt *Index = cast<ConstantInt>(Ops[1]);
Value *Unpacked = Builder.CreateBitCast(
- Ops[0], llvm::VectorType::get(ConvertType(E->getType()), 2));
+ Ops[0], llvm::FixedVectorType::get(ConvertType(E->getType()), 2));
if (getTarget().isLittleEndian())
Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue());
@@ -13022,8 +14568,91 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
}
}
+namespace {
+// If \p E is not null pointer, insert address space cast to match return
+// type of \p E if necessary.
+Value *EmitAMDGPUDispatchPtr(CodeGenFunction &CGF,
+ const CallExpr *E = nullptr) {
+ auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_dispatch_ptr);
+ auto *Call = CGF.Builder.CreateCall(F);
+ Call->addAttribute(
+ AttributeList::ReturnIndex,
+ Attribute::getWithDereferenceableBytes(Call->getContext(), 64));
+ Call->addAttribute(AttributeList::ReturnIndex,
+ Attribute::getWithAlignment(Call->getContext(), Align(4)));
+ if (!E)
+ return Call;
+ QualType BuiltinRetType = E->getType();
+ auto *RetTy = cast<llvm::PointerType>(CGF.ConvertType(BuiltinRetType));
+ if (RetTy == Call->getType())
+ return Call;
+ return CGF.Builder.CreateAddrSpaceCast(Call, RetTy);
+}
+
+// \p Index is 0, 1, and 2 for x, y, and z dimension, respectively.
+Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {
+ const unsigned XOffset = 4;
+ auto *DP = EmitAMDGPUDispatchPtr(CGF);
+ // Indexing the HSA kernel_dispatch_packet struct.
+ auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 2);
+ auto *GEP = CGF.Builder.CreateGEP(DP, Offset);
+ auto *DstTy =
+ CGF.Int16Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
+ auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
+ auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(2)));
+ llvm::MDBuilder MDHelper(CGF.getLLVMContext());
+ llvm::MDNode *RNode = MDHelper.createRange(APInt(16, 1),
+ APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1));
+ LD->setMetadata(llvm::LLVMContext::MD_range, RNode);
+ LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
+ llvm::MDNode::get(CGF.getLLVMContext(), None));
+ return LD;
+}
+} // namespace
+
+// For processing memory ordering and memory scope arguments of various
+// amdgcn builtins.
+// \p Order takes a C++11 compatible memory-ordering specifier and converts
+// it into LLVM's memory ordering specifier using atomic C ABI, and writes
+// to \p AO. \p Scope takes a const char * and converts it into AMDGCN
+// specific SyncScopeID and writes it to \p SSID.
+bool CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
+ llvm::AtomicOrdering &AO,
+ llvm::SyncScope::ID &SSID) {
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+
+ // Map C11/C++11 memory ordering to LLVM memory ordering
+ switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
+ case llvm::AtomicOrderingCABI::acquire:
+ AO = llvm::AtomicOrdering::Acquire;
+ break;
+ case llvm::AtomicOrderingCABI::release:
+ AO = llvm::AtomicOrdering::Release;
+ break;
+ case llvm::AtomicOrderingCABI::acq_rel:
+ AO = llvm::AtomicOrdering::AcquireRelease;
+ break;
+ case llvm::AtomicOrderingCABI::seq_cst:
+ AO = llvm::AtomicOrdering::SequentiallyConsistent;
+ break;
+ case llvm::AtomicOrderingCABI::consume:
+ case llvm::AtomicOrderingCABI::relaxed:
+ break;
+ }
+
+ StringRef scp;
+ llvm::getConstantStringInfo(Scope, scp);
+ SSID = getLLVMContext().getOrInsertSyncScopeID(scp);
+ return true;
+ }
+ return false;
+}
+
Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
+ llvm::AtomicOrdering AO = llvm::AtomicOrdering::SequentiallyConsistent;
+ llvm::SyncScope::ID SSID;
switch (BuiltinID) {
case AMDGPU::BI__builtin_amdgcn_div_scale:
case AMDGPU::BI__builtin_amdgcn_div_scalef: {
@@ -13091,6 +14720,10 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_rcpf:
case AMDGPU::BI__builtin_amdgcn_rcph:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp);
+ case AMDGPU::BI__builtin_amdgcn_sqrt:
+ case AMDGPU::BI__builtin_amdgcn_sqrtf:
+ case AMDGPU::BI__builtin_amdgcn_sqrth:
+ return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sqrt);
case AMDGPU::BI__builtin_amdgcn_rsq:
case AMDGPU::BI__builtin_amdgcn_rsqf:
case AMDGPU::BI__builtin_amdgcn_rsqh:
@@ -13104,6 +14737,8 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_cosf:
case AMDGPU::BI__builtin_amdgcn_cosh:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos);
+ case AMDGPU::BI__builtin_amdgcn_dispatch_ptr:
+ return EmitAMDGPUDispatchPtr(*this, E);
case AMDGPU::BI__builtin_amdgcn_log_clampf:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp);
case AMDGPU::BI__builtin_amdgcn_ldexp:
@@ -13146,7 +14781,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
// FIXME-GFX10: How should 32 bit mask be handled?
- Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp,
+ Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp,
{ Builder.getInt64Ty(), Src0->getType() });
return Builder.CreateCall(F, { Src0, Src1, Src2 });
}
@@ -13157,7 +14792,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
// FIXME-GFX10: How should 32 bit mask be handled?
- Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp,
+ Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp,
{ Builder.getInt64Ty(), Src0->getType() });
return Builder.CreateCall(F, { Src0, Src1, Src2 });
}
@@ -13178,7 +14813,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
}
case AMDGPU::BI__builtin_amdgcn_read_exec: {
CallInst *CI = cast<CallInst>(
- EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, true, "exec"));
+ EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, NormalRead, "exec"));
CI->setConvergent();
return CI;
}
@@ -13187,7 +14822,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
StringRef RegName = BuiltinID == AMDGPU::BI__builtin_amdgcn_read_exec_lo ?
"exec_lo" : "exec_hi";
CallInst *CI = cast<CallInst>(
- EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, true, RegName));
+ EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, NormalRead, RegName));
CI->setConvergent();
return CI;
}
@@ -13199,6 +14834,14 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_workitem_id_z:
return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024);
+ // amdgcn workgroup size
+ case AMDGPU::BI__builtin_amdgcn_workgroup_size_x:
+ return EmitAMDGPUWorkGroupSize(*this, 0);
+ case AMDGPU::BI__builtin_amdgcn_workgroup_size_y:
+ return EmitAMDGPUWorkGroupSize(*this, 1);
+ case AMDGPU::BI__builtin_amdgcn_workgroup_size_z:
+ return EmitAMDGPUWorkGroupSize(*this, 2);
+
// r600 intrinsics
case AMDGPU::BI__builtin_r600_recipsqrt_ieee:
case AMDGPU::BI__builtin_r600_recipsqrt_ieeef:
@@ -13209,6 +14852,61 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024);
case AMDGPU::BI__builtin_r600_read_tidig_z:
return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024);
+ case AMDGPU::BI__builtin_amdgcn_alignbit: {
+ llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
+ llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
+ Function *F = CGM.getIntrinsic(Intrinsic::fshr, Src0->getType());
+ return Builder.CreateCall(F, { Src0, Src1, Src2 });
+ }
+
+ case AMDGPU::BI__builtin_amdgcn_fence: {
+ if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)),
+ EmitScalarExpr(E->getArg(1)), AO, SSID))
+ return Builder.CreateFence(AO, SSID);
+ LLVM_FALLTHROUGH;
+ }
+ case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
+ case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
+ case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
+ case AMDGPU::BI__builtin_amdgcn_atomic_dec64: {
+ unsigned BuiltinAtomicOp;
+ llvm::Type *ResultType = ConvertType(E->getType());
+
+ switch (BuiltinID) {
+ case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
+ case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
+ BuiltinAtomicOp = Intrinsic::amdgcn_atomic_inc;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
+ case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
+ BuiltinAtomicOp = Intrinsic::amdgcn_atomic_dec;
+ break;
+ }
+
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ Value *Val = EmitScalarExpr(E->getArg(1));
+
+ llvm::Function *F =
+ CGM.getIntrinsic(BuiltinAtomicOp, {ResultType, Ptr->getType()});
+
+ if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)),
+ EmitScalarExpr(E->getArg(3)), AO, SSID)) {
+
+ // llvm.amdgcn.atomic.inc and llvm.amdgcn.atomic.dec expects ordering and
+ // scope as unsigned values
+ Value *MemOrder = Builder.getInt32(static_cast<int>(AO));
+ Value *MemScope = Builder.getInt32(static_cast<int>(SSID));
+
+ QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
+ bool Volatile =
+ PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
+ Value *IsVolatile = Builder.getInt1(static_cast<bool>(Volatile));
+
+ return Builder.CreateCall(F, {Ptr, Val, MemOrder, MemScope, IsVolatile});
+ }
+ LLVM_FALLTHROUGH;
+ }
default:
return nullptr;
}
@@ -13306,8 +15004,13 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
case SystemZ::BI__builtin_s390_vfsqdb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
- Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
- return Builder.CreateCall(F, X);
+ if (Builder.getIsFPConstrained()) {
+ Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, ResultType);
+ return Builder.CreateConstrainedFPCall(F, { X });
+ } else {
+ Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
+ return Builder.CreateCall(F, X);
+ }
}
case SystemZ::BI__builtin_s390_vfmasb:
case SystemZ::BI__builtin_s390_vfmadb: {
@@ -13315,8 +15018,13 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
- Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
- return Builder.CreateCall(F, {X, Y, Z});
+ if (Builder.getIsFPConstrained()) {
+ Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
+ return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
+ } else {
+ Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
+ return Builder.CreateCall(F, {X, Y, Z});
+ }
}
case SystemZ::BI__builtin_s390_vfmssb:
case SystemZ::BI__builtin_s390_vfmsdb: {
@@ -13324,8 +15032,13 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
- Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
- return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
+ if (Builder.getIsFPConstrained()) {
+ Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
+ return Builder.CreateConstrainedFPCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
+ } else {
+ Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
+ return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
+ }
}
case SystemZ::BI__builtin_s390_vfnmasb:
case SystemZ::BI__builtin_s390_vfnmadb: {
@@ -13333,8 +15046,13 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
- Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
- return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
+ if (Builder.getIsFPConstrained()) {
+ Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
+ return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
+ } else {
+ Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
+ return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
+ }
}
case SystemZ::BI__builtin_s390_vfnmssb:
case SystemZ::BI__builtin_s390_vfnmsdb: {
@@ -13342,9 +15060,15 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
- Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
- Value *NegZ = Builder.CreateFNeg(Z, "neg");
- return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, NegZ}));
+ if (Builder.getIsFPConstrained()) {
+ Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
+ Value *NegZ = Builder.CreateFNeg(Z, "sub");
+ return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, NegZ}));
+ } else {
+ Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
+ Value *NegZ = Builder.CreateFNeg(Z, "neg");
+ return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, NegZ}));
+ }
}
case SystemZ::BI__builtin_s390_vflpsb:
case SystemZ::BI__builtin_s390_vflpdb: {
@@ -13373,30 +15097,42 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
// Check whether this instance can be represented via a LLVM standard
// intrinsic. We only support some combinations of M4 and M5.
Intrinsic::ID ID = Intrinsic::not_intrinsic;
+ Intrinsic::ID CI;
switch (M4.getZExtValue()) {
default: break;
case 0: // IEEE-inexact exception allowed
switch (M5.getZExtValue()) {
default: break;
- case 0: ID = Intrinsic::rint; break;
+ case 0: ID = Intrinsic::rint;
+ CI = Intrinsic::experimental_constrained_rint; break;
}
break;
case 4: // IEEE-inexact exception suppressed
switch (M5.getZExtValue()) {
default: break;
- case 0: ID = Intrinsic::nearbyint; break;
- case 1: ID = Intrinsic::round; break;
- case 5: ID = Intrinsic::trunc; break;
- case 6: ID = Intrinsic::ceil; break;
- case 7: ID = Intrinsic::floor; break;
+ case 0: ID = Intrinsic::nearbyint;
+ CI = Intrinsic::experimental_constrained_nearbyint; break;
+ case 1: ID = Intrinsic::round;
+ CI = Intrinsic::experimental_constrained_round; break;
+ case 5: ID = Intrinsic::trunc;
+ CI = Intrinsic::experimental_constrained_trunc; break;
+ case 6: ID = Intrinsic::ceil;
+ CI = Intrinsic::experimental_constrained_ceil; break;
+ case 7: ID = Intrinsic::floor;
+ CI = Intrinsic::experimental_constrained_floor; break;
}
break;
}
if (ID != Intrinsic::not_intrinsic) {
- Function *F = CGM.getIntrinsic(ID, ResultType);
- return Builder.CreateCall(F, X);
+ if (Builder.getIsFPConstrained()) {
+ Function *F = CGM.getIntrinsic(CI, ResultType);
+ return Builder.CreateConstrainedFPCall(F, X);
+ } else {
+ Function *F = CGM.getIntrinsic(ID, ResultType);
+ return Builder.CreateCall(F, X);
+ }
}
- switch (BuiltinID) {
+ switch (BuiltinID) { // FIXME: constrained version?
case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break;
case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break;
default: llvm_unreachable("Unknown BuiltinID");
@@ -13419,13 +15155,20 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
// Check whether this instance can be represented via a LLVM standard
// intrinsic. We only support some values of M4.
Intrinsic::ID ID = Intrinsic::not_intrinsic;
+ Intrinsic::ID CI;
switch (M4.getZExtValue()) {
default: break;
- case 4: ID = Intrinsic::maxnum; break;
+ case 4: ID = Intrinsic::maxnum;
+ CI = Intrinsic::experimental_constrained_maxnum; break;
}
if (ID != Intrinsic::not_intrinsic) {
- Function *F = CGM.getIntrinsic(ID, ResultType);
- return Builder.CreateCall(F, {X, Y});
+ if (Builder.getIsFPConstrained()) {
+ Function *F = CGM.getIntrinsic(CI, ResultType);
+ return Builder.CreateConstrainedFPCall(F, {X, Y});
+ } else {
+ Function *F = CGM.getIntrinsic(ID, ResultType);
+ return Builder.CreateCall(F, {X, Y});
+ }
}
switch (BuiltinID) {
case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break;
@@ -13449,13 +15192,20 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
// Check whether this instance can be represented via a LLVM standard
// intrinsic. We only support some values of M4.
Intrinsic::ID ID = Intrinsic::not_intrinsic;
+ Intrinsic::ID CI;
switch (M4.getZExtValue()) {
default: break;
- case 4: ID = Intrinsic::minnum; break;
+ case 4: ID = Intrinsic::minnum;
+ CI = Intrinsic::experimental_constrained_minnum; break;
}
if (ID != Intrinsic::not_intrinsic) {
- Function *F = CGM.getIntrinsic(ID, ResultType);
- return Builder.CreateCall(F, {X, Y});
+ if (Builder.getIsFPConstrained()) {
+ Function *F = CGM.getIntrinsic(CI, ResultType);
+ return Builder.CreateConstrainedFPCall(F, {X, Y});
+ } else {
+ Function *F = CGM.getIntrinsic(ID, ResultType);
+ return Builder.CreateCall(F, {X, Y});
+ }
}
switch (BuiltinID) {
case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break;
@@ -13815,7 +15565,7 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
auto MakeLdg = [&](unsigned IntrinsicID) {
Value *Ptr = EmitScalarExpr(E->getArg(0));
clang::CharUnits Align =
- getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
+ CGM.getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
return Builder.CreateCall(
CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
Ptr->getType()}),
@@ -14344,7 +16094,7 @@ RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
Result = Builder.CreatePointerCast(Result, Args.SrcType);
// Emit an alignment assumption to ensure that the new alignment is
// propagated to loads/stores, etc.
- EmitAlignmentAssumption(Result, E, E->getExprLoc(), Args.Alignment);
+ emitAlignmentAssumption(Result, E, E->getExprLoc(), Args.Alignment);
}
assert(Result->getType() == Args.SrcType);
return RValue::get(Result);
@@ -14368,30 +16118,6 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
return Builder.CreateCall(Callee, Args);
}
- case WebAssembly::BI__builtin_wasm_memory_init: {
- llvm::APSInt SegConst;
- if (!E->getArg(0)->isIntegerConstantExpr(SegConst, getContext()))
- llvm_unreachable("Constant arg isn't actually constant?");
- llvm::APSInt MemConst;
- if (!E->getArg(1)->isIntegerConstantExpr(MemConst, getContext()))
- llvm_unreachable("Constant arg isn't actually constant?");
- if (!MemConst.isNullValue())
- ErrorUnsupported(E, "non-zero memory index");
- Value *Args[] = {llvm::ConstantInt::get(getLLVMContext(), SegConst),
- llvm::ConstantInt::get(getLLVMContext(), MemConst),
- EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3)),
- EmitScalarExpr(E->getArg(4))};
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_init);
- return Builder.CreateCall(Callee, Args);
- }
- case WebAssembly::BI__builtin_wasm_data_drop: {
- llvm::APSInt SegConst;
- if (!E->getArg(0)->isIntegerConstantExpr(SegConst, getContext()))
- llvm_unreachable("Constant arg isn't actually constant?");
- Value *Arg = llvm::ConstantInt::get(getLLVMContext(), SegConst);
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_data_drop);
- return Builder.CreateCall(Callee, {Arg});
- }
case WebAssembly::BI__builtin_wasm_tls_size: {
llvm::Type *ResultType = ConvertType(E->getType());
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_size, ResultType);
@@ -14460,8 +16186,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64:
case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32:
case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64:
- case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4:
- case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64x2_f64x2: {
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4: {
Value *Src = EmitScalarExpr(E->getArg(0));
llvm::Type *ResT = ConvertType(E->getType());
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_signed,
@@ -14472,8 +16197,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64:
case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32:
case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64:
- case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4:
- case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64x2_f64x2: {
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4: {
Value *Src = EmitScalarExpr(E->getArg(0));
llvm::Type *ResT = ConvertType(E->getType());
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_unsigned,
@@ -14500,6 +16224,55 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
ConvertType(E->getType()));
return Builder.CreateCall(Callee, {LHS, RHS});
}
+ case WebAssembly::BI__builtin_wasm_pmin_f32x4:
+ case WebAssembly::BI__builtin_wasm_pmin_f64x2: {
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ Function *Callee =
+ CGM.getIntrinsic(Intrinsic::wasm_pmin, ConvertType(E->getType()));
+ return Builder.CreateCall(Callee, {LHS, RHS});
+ }
+ case WebAssembly::BI__builtin_wasm_pmax_f32x4:
+ case WebAssembly::BI__builtin_wasm_pmax_f64x2: {
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ Function *Callee =
+ CGM.getIntrinsic(Intrinsic::wasm_pmax, ConvertType(E->getType()));
+ return Builder.CreateCall(Callee, {LHS, RHS});
+ }
+ case WebAssembly::BI__builtin_wasm_ceil_f32x4:
+ case WebAssembly::BI__builtin_wasm_floor_f32x4:
+ case WebAssembly::BI__builtin_wasm_trunc_f32x4:
+ case WebAssembly::BI__builtin_wasm_nearest_f32x4:
+ case WebAssembly::BI__builtin_wasm_ceil_f64x2:
+ case WebAssembly::BI__builtin_wasm_floor_f64x2:
+ case WebAssembly::BI__builtin_wasm_trunc_f64x2:
+ case WebAssembly::BI__builtin_wasm_nearest_f64x2: {
+ unsigned IntNo;
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_ceil_f32x4:
+ case WebAssembly::BI__builtin_wasm_ceil_f64x2:
+ IntNo = Intrinsic::wasm_ceil;
+ break;
+ case WebAssembly::BI__builtin_wasm_floor_f32x4:
+ case WebAssembly::BI__builtin_wasm_floor_f64x2:
+ IntNo = Intrinsic::wasm_floor;
+ break;
+ case WebAssembly::BI__builtin_wasm_trunc_f32x4:
+ case WebAssembly::BI__builtin_wasm_trunc_f64x2:
+ IntNo = Intrinsic::wasm_trunc;
+ break;
+ case WebAssembly::BI__builtin_wasm_nearest_f32x4:
+ case WebAssembly::BI__builtin_wasm_nearest_f64x2:
+ IntNo = Intrinsic::wasm_nearest;
+ break;
+ default:
+ llvm_unreachable("unexpected builtin ID");
+ }
+ Value *Value = EmitScalarExpr(E->getArg(0));
+ Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
+ return Builder.CreateCall(Callee, Value);
+ }
case WebAssembly::BI__builtin_wasm_swizzle_v8x16: {
Value *Src = EmitScalarExpr(E->getArg(0));
Value *Indices = EmitScalarExpr(E->getArg(1));
@@ -14551,7 +16324,8 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
switch (BuiltinID) {
case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
case WebAssembly::BI__builtin_wasm_replace_lane_i16x8: {
- llvm::Type *ElemType = ConvertType(E->getType())->getVectorElementType();
+ llvm::Type *ElemType =
+ cast<llvm::VectorType>(ConvertType(E->getType()))->getElementType();
Value *Trunc = Builder.CreateTrunc(Val, ElemType);
return Builder.CreateInsertElement(Vec, Trunc, Lane);
}
@@ -14598,6 +16372,56 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
return Builder.CreateCall(Callee, {LHS, RHS});
}
+ case WebAssembly::BI__builtin_wasm_abs_i8x16:
+ case WebAssembly::BI__builtin_wasm_abs_i16x8:
+ case WebAssembly::BI__builtin_wasm_abs_i32x4: {
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ Value *Neg = Builder.CreateNeg(Vec, "neg");
+ Constant *Zero = llvm::Constant::getNullValue(Vec->getType());
+ Value *ICmp = Builder.CreateICmpSLT(Vec, Zero, "abscond");
+ return Builder.CreateSelect(ICmp, Neg, Vec, "abs");
+ }
+ case WebAssembly::BI__builtin_wasm_min_s_i8x16:
+ case WebAssembly::BI__builtin_wasm_min_u_i8x16:
+ case WebAssembly::BI__builtin_wasm_max_s_i8x16:
+ case WebAssembly::BI__builtin_wasm_max_u_i8x16:
+ case WebAssembly::BI__builtin_wasm_min_s_i16x8:
+ case WebAssembly::BI__builtin_wasm_min_u_i16x8:
+ case WebAssembly::BI__builtin_wasm_max_s_i16x8:
+ case WebAssembly::BI__builtin_wasm_max_u_i16x8:
+ case WebAssembly::BI__builtin_wasm_min_s_i32x4:
+ case WebAssembly::BI__builtin_wasm_min_u_i32x4:
+ case WebAssembly::BI__builtin_wasm_max_s_i32x4:
+ case WebAssembly::BI__builtin_wasm_max_u_i32x4: {
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ Value *ICmp;
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_min_s_i8x16:
+ case WebAssembly::BI__builtin_wasm_min_s_i16x8:
+ case WebAssembly::BI__builtin_wasm_min_s_i32x4:
+ ICmp = Builder.CreateICmpSLT(LHS, RHS);
+ break;
+ case WebAssembly::BI__builtin_wasm_min_u_i8x16:
+ case WebAssembly::BI__builtin_wasm_min_u_i16x8:
+ case WebAssembly::BI__builtin_wasm_min_u_i32x4:
+ ICmp = Builder.CreateICmpULT(LHS, RHS);
+ break;
+ case WebAssembly::BI__builtin_wasm_max_s_i8x16:
+ case WebAssembly::BI__builtin_wasm_max_s_i16x8:
+ case WebAssembly::BI__builtin_wasm_max_s_i32x4:
+ ICmp = Builder.CreateICmpSGT(LHS, RHS);
+ break;
+ case WebAssembly::BI__builtin_wasm_max_u_i8x16:
+ case WebAssembly::BI__builtin_wasm_max_u_i16x8:
+ case WebAssembly::BI__builtin_wasm_max_u_i32x4:
+ ICmp = Builder.CreateICmpUGT(LHS, RHS);
+ break;
+ default:
+ llvm_unreachable("unexpected builtin ID");
+ }
+ return Builder.CreateSelect(ICmp, LHS, RHS);
+ }
case WebAssembly::BI__builtin_wasm_avgr_u_i8x16:
case WebAssembly::BI__builtin_wasm_avgr_u_i16x8: {
Value *LHS = EmitScalarExpr(E->getArg(0));
@@ -14649,6 +16473,14 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Function *Callee = CGM.getIntrinsic(IntNo, Vec->getType());
return Builder.CreateCall(Callee, {Vec});
}
+ case WebAssembly::BI__builtin_wasm_bitmask_i8x16:
+ case WebAssembly::BI__builtin_wasm_bitmask_i16x8:
+ case WebAssembly::BI__builtin_wasm_bitmask_i32x4: {
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ Function *Callee =
+ CGM.getIntrinsic(Intrinsic::wasm_bitmask, Vec->getType());
+ return Builder.CreateCall(Callee, {Vec});
+ }
case WebAssembly::BI__builtin_wasm_abs_f32x4:
case WebAssembly::BI__builtin_wasm_abs_f64x2: {
Value *Vec = EmitScalarExpr(E->getArg(0));
@@ -14741,68 +16573,124 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Vec->getType()});
return Builder.CreateCall(Callee, Vec);
}
+ case WebAssembly::BI__builtin_wasm_shuffle_v8x16: {
+ Value *Ops[18];
+ size_t OpIdx = 0;
+ Ops[OpIdx++] = EmitScalarExpr(E->getArg(0));
+ Ops[OpIdx++] = EmitScalarExpr(E->getArg(1));
+ while (OpIdx < 18) {
+ llvm::APSInt LaneConst;
+ if (!E->getArg(OpIdx)->isIntegerConstantExpr(LaneConst, getContext()))
+ llvm_unreachable("Constant arg isn't actually constant?");
+ Ops[OpIdx++] = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
+ }
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_shuffle);
+ return Builder.CreateCall(Callee, Ops);
+ }
default:
return nullptr;
}
}
+static std::pair<Intrinsic::ID, unsigned>
+getIntrinsicForHexagonNonGCCBuiltin(unsigned BuiltinID) {
+ struct Info {
+ unsigned BuiltinID;
+ Intrinsic::ID IntrinsicID;
+ unsigned VecLen;
+ };
+ Info Infos[] = {
+#define CUSTOM_BUILTIN_MAPPING(x,s) \
+ { Hexagon::BI__builtin_HEXAGON_##x, Intrinsic::hexagon_##x, s },
+ CUSTOM_BUILTIN_MAPPING(L2_loadrub_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(L2_loadrb_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(L2_loadruh_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(L2_loadrh_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(L2_loadri_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(L2_loadrd_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(L2_loadrub_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(L2_loadrb_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(L2_loadruh_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(L2_loadrh_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(L2_loadri_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(L2_loadrd_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storerb_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storerh_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storerf_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storeri_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storerd_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storerb_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storerh_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storerf_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storeri_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storerd_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64)
+ CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq, 64)
+ CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64)
+ CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq, 64)
+ CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq_128B, 128)
+ CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq_128B, 128)
+ CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq_128B, 128)
+ CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq_128B, 128)
+#include "clang/Basic/BuiltinsHexagonMapCustomDep.def"
+#undef CUSTOM_BUILTIN_MAPPING
+ };
+
+ auto CmpInfo = [] (Info A, Info B) { return A.BuiltinID < B.BuiltinID; };
+ static const bool SortOnce = (llvm::sort(Infos, CmpInfo), true);
+ (void)SortOnce;
+
+ const Info *F = std::lower_bound(std::begin(Infos), std::end(Infos),
+ Info{BuiltinID, 0, 0}, CmpInfo);
+ if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
+ return {Intrinsic::not_intrinsic, 0};
+
+ return {F->IntrinsicID, F->VecLen};
+}
+
Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
- SmallVector<llvm::Value *, 4> Ops;
- Intrinsic::ID ID = Intrinsic::not_intrinsic;
+ Intrinsic::ID ID;
+ unsigned VecLen;
+ std::tie(ID, VecLen) = getIntrinsicForHexagonNonGCCBuiltin(BuiltinID);
- auto MakeCircLd = [&](unsigned IntID, bool HasImm) {
+ auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) {
// The base pointer is passed by address, so it needs to be loaded.
- Address BP = EmitPointerWithAlignment(E->getArg(0));
- BP = Address(Builder.CreateBitCast(BP.getPointer(), Int8PtrPtrTy),
- BP.getAlignment());
+ Address A = EmitPointerWithAlignment(E->getArg(0));
+ Address BP = Address(
+ Builder.CreateBitCast(A.getPointer(), Int8PtrPtrTy), A.getAlignment());
llvm::Value *Base = Builder.CreateLoad(BP);
- // Operands are Base, Increment, Modifier, Start.
- if (HasImm)
- Ops = { Base, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)),
- EmitScalarExpr(E->getArg(3)) };
- else
- Ops = { Base, EmitScalarExpr(E->getArg(1)),
- EmitScalarExpr(E->getArg(2)) };
+ // The treatment of both loads and stores is the same: the arguments for
+ // the builtin are the same as the arguments for the intrinsic.
+ // Load:
+ // builtin(Base, Inc, Mod, Start) -> intr(Base, Inc, Mod, Start)
+ // builtin(Base, Mod, Start) -> intr(Base, Mod, Start)
+ // Store:
+ // builtin(Base, Inc, Mod, Val, Start) -> intr(Base, Inc, Mod, Val, Start)
+ // builtin(Base, Mod, Val, Start) -> intr(Base, Mod, Val, Start)
+ SmallVector<llvm::Value*,5> Ops = { Base };
+ for (unsigned i = 1, e = E->getNumArgs(); i != e; ++i)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
- llvm::Value *NewBase = Builder.CreateExtractValue(Result, 1);
- llvm::Value *LV = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)),
- NewBase->getType()->getPointerTo());
+ // The load intrinsics generate two results (Value, NewBase), stores
+ // generate one (NewBase). The new base address needs to be stored.
+ llvm::Value *NewBase = IsLoad ? Builder.CreateExtractValue(Result, 1)
+ : Result;
+ llvm::Value *LV = Builder.CreateBitCast(
+ EmitScalarExpr(E->getArg(0)), NewBase->getType()->getPointerTo());
Address Dest = EmitPointerWithAlignment(E->getArg(0));
- // The intrinsic generates two results. The new value for the base pointer
- // needs to be stored.
- Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
- return Builder.CreateExtractValue(Result, 0);
- };
-
- auto MakeCircSt = [&](unsigned IntID, bool HasImm) {
- // The base pointer is passed by address, so it needs to be loaded.
- Address BP = EmitPointerWithAlignment(E->getArg(0));
- BP = Address(Builder.CreateBitCast(BP.getPointer(), Int8PtrPtrTy),
- BP.getAlignment());
- llvm::Value *Base = Builder.CreateLoad(BP);
- // Operands are Base, Increment, Modifier, Value, Start.
- if (HasImm)
- Ops = { Base, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)),
- EmitScalarExpr(E->getArg(3)), EmitScalarExpr(E->getArg(4)) };
- else
- Ops = { Base, EmitScalarExpr(E->getArg(1)),
- EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3)) };
-
- llvm::Value *NewBase = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
- llvm::Value *LV = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)),
- NewBase->getType()->getPointerTo());
- Address Dest = EmitPointerWithAlignment(E->getArg(0));
- // The intrinsic generates one result, which is the new value for the base
- // pointer. It needs to be stored.
- return Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
+ llvm::Value *RetVal =
+ Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
+ if (IsLoad)
+ RetVal = Builder.CreateExtractValue(Result, 0);
+ return RetVal;
};
// Handle the conversion of bit-reverse load intrinsics to bit code.
// The intrinsic call after this function only reads from memory and the
// write to memory is dealt by the store instruction.
- auto MakeBrevLd = [&](unsigned IntID, llvm::Type *DestTy) {
+ auto MakeBrevLd = [this, E](unsigned IntID, llvm::Type *DestTy) {
// The intrinsic generates one result, which is the new value for the base
// pointer. It needs to be returned. The result of the load instruction is
// passed to intrinsic by address, so the value needs to be stored.
@@ -14820,9 +16708,9 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
// Operands are Base, Dest, Modifier.
// The intrinsic format in LLVM IR is defined as
// { ValueType, i8* } (i8*, i32).
- Ops = {BaseAddress, EmitScalarExpr(E->getArg(2))};
+ llvm::Value *Result = Builder.CreateCall(
+ CGM.getIntrinsic(IntID), {BaseAddress, EmitScalarExpr(E->getArg(2))});
- llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
// The value needs to be stored as the variable is passed by reference.
llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0);
@@ -14838,95 +16726,65 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
return Builder.CreateExtractValue(Result, 1);
};
+ auto V2Q = [this, VecLen] (llvm::Value *Vec) {
+ Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandvrt_128B
+ : Intrinsic::hexagon_V6_vandvrt;
+ return Builder.CreateCall(CGM.getIntrinsic(ID),
+ {Vec, Builder.getInt32(-1)});
+ };
+ auto Q2V = [this, VecLen] (llvm::Value *Pred) {
+ Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandqrt_128B
+ : Intrinsic::hexagon_V6_vandqrt;
+ return Builder.CreateCall(CGM.getIntrinsic(ID),
+ {Pred, Builder.getInt32(-1)});
+ };
+
switch (BuiltinID) {
+ // These intrinsics return a tuple {Vector, VectorPred} in LLVM IR,
+ // and the corresponding C/C++ builtins use loads/stores to update
+ // the predicate.
case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry:
- case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B: {
- Address Dest = EmitPointerWithAlignment(E->getArg(2));
- unsigned Size;
- if (BuiltinID == Hexagon::BI__builtin_HEXAGON_V6_vaddcarry) {
- Size = 512;
- ID = Intrinsic::hexagon_V6_vaddcarry;
- } else {
- Size = 1024;
- ID = Intrinsic::hexagon_V6_vaddcarry_128B;
- }
- Dest = Builder.CreateBitCast(Dest,
- llvm::VectorType::get(Builder.getInt1Ty(), Size)->getPointerTo(0));
- LoadInst *QLd = Builder.CreateLoad(Dest);
- Ops = { EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), QLd };
- llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
- llvm::Value *Vprd = Builder.CreateExtractValue(Result, 1);
- llvm::Value *Base = Builder.CreateBitCast(EmitScalarExpr(E->getArg(2)),
- Vprd->getType()->getPointerTo(0));
- Builder.CreateAlignedStore(Vprd, Base, Dest.getAlignment());
- return Builder.CreateExtractValue(Result, 0);
- }
+ case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B:
case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry:
case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: {
- Address Dest = EmitPointerWithAlignment(E->getArg(2));
- unsigned Size;
- if (BuiltinID == Hexagon::BI__builtin_HEXAGON_V6_vsubcarry) {
- Size = 512;
- ID = Intrinsic::hexagon_V6_vsubcarry;
- } else {
- Size = 1024;
- ID = Intrinsic::hexagon_V6_vsubcarry_128B;
- }
- Dest = Builder.CreateBitCast(Dest,
- llvm::VectorType::get(Builder.getInt1Ty(), Size)->getPointerTo(0));
- LoadInst *QLd = Builder.CreateLoad(Dest);
- Ops = { EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), QLd };
- llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
- llvm::Value *Vprd = Builder.CreateExtractValue(Result, 1);
- llvm::Value *Base = Builder.CreateBitCast(EmitScalarExpr(E->getArg(2)),
- Vprd->getType()->getPointerTo(0));
- Builder.CreateAlignedStore(Vprd, Base, Dest.getAlignment());
+ // Get the type from the 0-th argument.
+ llvm::Type *VecType = ConvertType(E->getArg(0)->getType());
+ Address PredAddr = Builder.CreateBitCast(
+ EmitPointerWithAlignment(E->getArg(2)), VecType->getPointerTo(0));
+ llvm::Value *PredIn = V2Q(Builder.CreateLoad(PredAddr));
+ llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
+ {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn});
+
+ llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
+ Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(),
+ PredAddr.getAlignment());
return Builder.CreateExtractValue(Result, 0);
}
+
case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci:
- return MakeCircLd(Intrinsic::hexagon_L2_loadrub_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci:
- return MakeCircLd(Intrinsic::hexagon_L2_loadrb_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci:
- return MakeCircLd(Intrinsic::hexagon_L2_loadruh_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci:
- return MakeCircLd(Intrinsic::hexagon_L2_loadrh_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci:
- return MakeCircLd(Intrinsic::hexagon_L2_loadri_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci:
- return MakeCircLd(Intrinsic::hexagon_L2_loadrd_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr:
- return MakeCircLd(Intrinsic::hexagon_L2_loadrub_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr:
- return MakeCircLd(Intrinsic::hexagon_L2_loadrb_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr:
- return MakeCircLd(Intrinsic::hexagon_L2_loadruh_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr:
- return MakeCircLd(Intrinsic::hexagon_L2_loadrh_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr:
- return MakeCircLd(Intrinsic::hexagon_L2_loadri_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr:
- return MakeCircLd(Intrinsic::hexagon_L2_loadrd_pcr, /*HasImm*/false);
+ return MakeCircOp(ID, /*IsLoad=*/true);
case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci:
- return MakeCircSt(Intrinsic::hexagon_S2_storerb_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci:
- return MakeCircSt(Intrinsic::hexagon_S2_storerh_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci:
- return MakeCircSt(Intrinsic::hexagon_S2_storerf_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci:
- return MakeCircSt(Intrinsic::hexagon_S2_storeri_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci:
- return MakeCircSt(Intrinsic::hexagon_S2_storerd_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr:
- return MakeCircSt(Intrinsic::hexagon_S2_storerb_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr:
- return MakeCircSt(Intrinsic::hexagon_S2_storerh_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr:
- return MakeCircSt(Intrinsic::hexagon_S2_storerf_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr:
- return MakeCircSt(Intrinsic::hexagon_S2_storeri_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr:
- return MakeCircSt(Intrinsic::hexagon_S2_storerd_pcr, /*HasImm*/false);
+ return MakeCircOp(ID, /*IsLoad=*/false);
case Hexagon::BI__builtin_brev_ldub:
return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty);
case Hexagon::BI__builtin_brev_ldb:
@@ -14939,8 +16797,40 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty);
case Hexagon::BI__builtin_brev_ldd:
return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty);
- default:
- break;
+
+ default: {
+ if (ID == Intrinsic::not_intrinsic)
+ return nullptr;
+
+ auto IsVectorPredTy = [](llvm::Type *T) {
+ return T->isVectorTy() &&
+ cast<llvm::VectorType>(T)->getElementType()->isIntegerTy(1);
+ };
+
+ llvm::Function *IntrFn = CGM.getIntrinsic(ID);
+ llvm::FunctionType *IntrTy = IntrFn->getFunctionType();
+ SmallVector<llvm::Value*,4> Ops;
+ for (unsigned i = 0, e = IntrTy->getNumParams(); i != e; ++i) {
+ llvm::Type *T = IntrTy->getParamType(i);
+ const Expr *A = E->getArg(i);
+ if (IsVectorPredTy(T)) {
+ // There will be an implicit cast to a boolean vector. Strip it.
+ if (auto *Cast = dyn_cast<ImplicitCastExpr>(A)) {
+ if (Cast->getCastKind() == CK_BitCast)
+ A = Cast->getSubExpr();
+ }
+ Ops.push_back(V2Q(EmitScalarExpr(A)));
+ } else {
+ Ops.push_back(EmitScalarExpr(A));
+ }
+ }
+
+ llvm::Value *Call = Builder.CreateCall(IntrFn, Ops);
+ if (IsVectorPredTy(IntrTy->getReturnType()))
+ Call = Q2V(Call);
+
+ return Call;
+ } // default
} // switch
return nullptr;
diff --git a/clang/lib/CodeGen/CGCUDANV.cpp b/clang/lib/CodeGen/CGCUDANV.cpp
index 5c5cbaff0252..baf2c79cc2b6 100644
--- a/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/clang/lib/CodeGen/CGCUDANV.cpp
@@ -50,7 +50,7 @@ private:
struct VarInfo {
llvm::GlobalVariable *Var;
const VarDecl *D;
- unsigned Flag;
+ DeviceVarFlags Flags;
};
llvm::SmallVector<VarInfo, 16> DeviceVars;
/// Keeps track of variable containing handle of GPU binary. Populated by
@@ -117,23 +117,38 @@ private:
void emitDeviceStubBodyLegacy(CodeGenFunction &CGF, FunctionArgList &Args);
void emitDeviceStubBodyNew(CodeGenFunction &CGF, FunctionArgList &Args);
- std::string getDeviceSideName(const Decl *ND);
+ std::string getDeviceSideName(const NamedDecl *ND) override;
public:
CGNVCUDARuntime(CodeGenModule &CGM);
void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args) override;
void registerDeviceVar(const VarDecl *VD, llvm::GlobalVariable &Var,
- unsigned Flags) override {
- DeviceVars.push_back({&Var, VD, Flags});
+ bool Extern, bool Constant) override {
+ DeviceVars.push_back({&Var,
+ VD,
+ {DeviceVarFlags::Variable, Extern, Constant,
+ /*Normalized*/ false, /*Type*/ 0}});
+ }
+ void registerDeviceSurf(const VarDecl *VD, llvm::GlobalVariable &Var,
+ bool Extern, int Type) override {
+ DeviceVars.push_back({&Var,
+ VD,
+ {DeviceVarFlags::Surface, Extern, /*Constant*/ false,
+ /*Normalized*/ false, Type}});
+ }
+ void registerDeviceTex(const VarDecl *VD, llvm::GlobalVariable &Var,
+ bool Extern, int Type, bool Normalized) override {
+ DeviceVars.push_back({&Var,
+ VD,
+ {DeviceVarFlags::Texture, Extern, /*Constant*/ false,
+ Normalized, Type}});
}
/// Creates module constructor function
llvm::Function *makeModuleCtorFunction() override;
/// Creates module destructor function
llvm::Function *makeModuleDtorFunction() override;
- /// Construct and return the stub name of a kernel.
- std::string getDeviceStubName(llvm::StringRef Name) const override;
};
}
@@ -204,40 +219,30 @@ llvm::FunctionType *CGNVCUDARuntime::getRegisterLinkedBinaryFnTy() const {
return llvm::FunctionType::get(VoidTy, Params, false);
}
-std::string CGNVCUDARuntime::getDeviceSideName(const Decl *D) {
- auto *ND = cast<const NamedDecl>(D);
+std::string CGNVCUDARuntime::getDeviceSideName(const NamedDecl *ND) {
+ GlobalDecl GD;
+ // D could be either a kernel or a variable.
+ if (auto *FD = dyn_cast<FunctionDecl>(ND))
+ GD = GlobalDecl(FD, KernelReferenceKind::Kernel);
+ else
+ GD = GlobalDecl(ND);
std::string DeviceSideName;
if (DeviceMC->shouldMangleDeclName(ND)) {
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
- DeviceMC->mangleName(ND, Out);
- DeviceSideName = Out.str();
+ DeviceMC->mangleName(GD, Out);
+ DeviceSideName = std::string(Out.str());
} else
- DeviceSideName = ND->getIdentifier()->getName();
+ DeviceSideName = std::string(ND->getIdentifier()->getName());
return DeviceSideName;
}
void CGNVCUDARuntime::emitDeviceStub(CodeGenFunction &CGF,
FunctionArgList &Args) {
- // Ensure either we have different ABIs between host and device compilations,
- // says host compilation following MSVC ABI but device compilation follows
- // Itanium C++ ABI or, if they follow the same ABI, kernel names after
- // mangling should be the same after name stubbing. The later checking is
- // very important as the device kernel name being mangled in host-compilation
- // is used to resolve the device binaries to be executed. Inconsistent naming
- // result in undefined behavior. Even though we cannot check that naming
- // directly between host- and device-compilations, the host- and
- // device-mangling in host compilation could help catching certain ones.
- assert((CGF.CGM.getContext().getAuxTargetInfo() &&
- (CGF.CGM.getContext().getAuxTargetInfo()->getCXXABI() !=
- CGF.CGM.getContext().getTargetInfo().getCXXABI())) ||
- getDeviceStubName(getDeviceSideName(CGF.CurFuncDecl)) ==
- CGF.CurFn->getName());
-
EmittedKernels.push_back({CGF.CurFn, CGF.CurFuncDecl});
if (CudaFeatureEnabled(CGM.getTarget().getSDKVersion(),
CudaFeature::CUDA_USES_NEW_LAUNCH) ||
- CGF.getLangOpts().HIPUseNewLaunchAPI)
+ (CGF.getLangOpts().HIP && CGF.getLangOpts().HIPUseNewLaunchAPI))
emitDeviceStubBodyNew(CGF, Args);
else
emitDeviceStubBodyLegacy(CGF, Args);
@@ -418,7 +423,8 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
// each emitted kernel.
llvm::Argument &GpuBinaryHandlePtr = *RegisterKernelsFunc->arg_begin();
for (auto &&I : EmittedKernels) {
- llvm::Constant *KernelName = makeConstantString(getDeviceSideName(I.D));
+ llvm::Constant *KernelName =
+ makeConstantString(getDeviceSideName(cast<NamedDecl>(I.D)));
llvm::Constant *NullPtr = llvm::ConstantPointerNull::get(VoidPtrTy);
llvm::Value *Args[] = {
&GpuBinaryHandlePtr,
@@ -434,30 +440,70 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
Builder.CreateCall(RegisterFunc, Args);
}
+ llvm::Type *VarSizeTy = IntTy;
+ // For HIP or CUDA 9.0+, device variable size is type of `size_t`.
+ if (CGM.getLangOpts().HIP ||
+ ToCudaVersion(CGM.getTarget().getSDKVersion()) >= CudaVersion::CUDA_90)
+ VarSizeTy = SizeTy;
+
// void __cudaRegisterVar(void **, char *, char *, const char *,
// int, int, int, int)
llvm::Type *RegisterVarParams[] = {VoidPtrPtrTy, CharPtrTy, CharPtrTy,
- CharPtrTy, IntTy, IntTy,
+ CharPtrTy, IntTy, VarSizeTy,
IntTy, IntTy};
llvm::FunctionCallee RegisterVar = CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(IntTy, RegisterVarParams, false),
+ llvm::FunctionType::get(VoidTy, RegisterVarParams, false),
addUnderscoredPrefixToName("RegisterVar"));
+ // void __cudaRegisterSurface(void **, const struct surfaceReference *,
+ // const void **, const char *, int, int);
+ llvm::FunctionCallee RegisterSurf = CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(
+ VoidTy, {VoidPtrPtrTy, VoidPtrTy, CharPtrTy, CharPtrTy, IntTy, IntTy},
+ false),
+ addUnderscoredPrefixToName("RegisterSurface"));
+ // void __cudaRegisterTexture(void **, const struct textureReference *,
+ // const void **, const char *, int, int, int)
+ llvm::FunctionCallee RegisterTex = CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(
+ VoidTy,
+ {VoidPtrPtrTy, VoidPtrTy, CharPtrTy, CharPtrTy, IntTy, IntTy, IntTy},
+ false),
+ addUnderscoredPrefixToName("RegisterTexture"));
for (auto &&Info : DeviceVars) {
llvm::GlobalVariable *Var = Info.Var;
- unsigned Flags = Info.Flag;
llvm::Constant *VarName = makeConstantString(getDeviceSideName(Info.D));
- uint64_t VarSize =
- CGM.getDataLayout().getTypeAllocSize(Var->getValueType());
- llvm::Value *Args[] = {
- &GpuBinaryHandlePtr,
- Builder.CreateBitCast(Var, VoidPtrTy),
- VarName,
- VarName,
- llvm::ConstantInt::get(IntTy, (Flags & ExternDeviceVar) ? 1 : 0),
- llvm::ConstantInt::get(IntTy, VarSize),
- llvm::ConstantInt::get(IntTy, (Flags & ConstantDeviceVar) ? 1 : 0),
- llvm::ConstantInt::get(IntTy, 0)};
- Builder.CreateCall(RegisterVar, Args);
+ switch (Info.Flags.getKind()) {
+ case DeviceVarFlags::Variable: {
+ uint64_t VarSize =
+ CGM.getDataLayout().getTypeAllocSize(Var->getValueType());
+ llvm::Value *Args[] = {
+ &GpuBinaryHandlePtr,
+ Builder.CreateBitCast(Var, VoidPtrTy),
+ VarName,
+ VarName,
+ llvm::ConstantInt::get(IntTy, Info.Flags.isExtern()),
+ llvm::ConstantInt::get(VarSizeTy, VarSize),
+ llvm::ConstantInt::get(IntTy, Info.Flags.isConstant()),
+ llvm::ConstantInt::get(IntTy, 0)};
+ Builder.CreateCall(RegisterVar, Args);
+ break;
+ }
+ case DeviceVarFlags::Surface:
+ Builder.CreateCall(
+ RegisterSurf,
+ {&GpuBinaryHandlePtr, Builder.CreateBitCast(Var, VoidPtrTy), VarName,
+ VarName, llvm::ConstantInt::get(IntTy, Info.Flags.getSurfTexType()),
+ llvm::ConstantInt::get(IntTy, Info.Flags.isExtern())});
+ break;
+ case DeviceVarFlags::Texture:
+ Builder.CreateCall(
+ RegisterTex,
+ {&GpuBinaryHandlePtr, Builder.CreateBitCast(Var, VoidPtrTy), VarName,
+ VarName, llvm::ConstantInt::get(IntTy, Info.Flags.getSurfTexType()),
+ llvm::ConstantInt::get(IntTy, Info.Flags.isNormalized()),
+ llvm::ConstantInt::get(IntTy, Info.Flags.isExtern())});
+ break;
+ }
}
Builder.CreateRetVoid();
@@ -551,8 +597,8 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
if (CudaGpuBinary) {
// If fatbin is available from early finalization, create a string
// literal containing the fat binary loaded from the given file.
- FatBinStr = makeConstantString(CudaGpuBinary->getBuffer(), "",
- FatbinConstantName, 8);
+ FatBinStr = makeConstantString(std::string(CudaGpuBinary->getBuffer()),
+ "", FatbinConstantName, 8);
} else {
// If fatbin is not available, create an external symbol
// __hip_fatbin in section .hip_fatbin. The external symbol is supposed
@@ -586,7 +632,7 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// For CUDA, create a string literal containing the fat binary loaded from
// the given file.
- FatBinStr = makeConstantString(CudaGpuBinary->getBuffer(), "",
+ FatBinStr = makeConstantString(std::string(CudaGpuBinary->getBuffer()), "",
FatbinConstantName, 8);
FatMagic = CudaFatMagic;
}
@@ -691,8 +737,8 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
SmallString<64> ModuleID;
llvm::raw_svector_ostream OS(ModuleID);
OS << ModuleIDPrefix << llvm::format("%" PRIx64, FatbinWrapper->getGUID());
- llvm::Constant *ModuleIDConstant =
- makeConstantString(ModuleID.str(), "", ModuleIDSectionName, 32);
+ llvm::Constant *ModuleIDConstant = makeConstantString(
+ std::string(ModuleID.str()), "", ModuleIDSectionName, 32);
// Create an alias for the FatbinWrapper that nvcc will look for.
llvm::GlobalAlias::create(llvm::GlobalValue::ExternalLinkage,
@@ -797,12 +843,6 @@ llvm::Function *CGNVCUDARuntime::makeModuleDtorFunction() {
return ModuleDtorFunc;
}
-std::string CGNVCUDARuntime::getDeviceStubName(llvm::StringRef Name) const {
- if (!CGM.getLangOpts().HIP)
- return Name;
- return (Name + ".stub").str();
-}
-
CGCUDARuntime *CodeGen::CreateNVCUDARuntime(CodeGenModule &CGM) {
return new CGNVCUDARuntime(CGM);
}
diff --git a/clang/lib/CodeGen/CGCUDARuntime.h b/clang/lib/CodeGen/CGCUDARuntime.h
index e548a3a546d4..19e70a2022a5 100644
--- a/clang/lib/CodeGen/CGCUDARuntime.h
+++ b/clang/lib/CodeGen/CGCUDARuntime.h
@@ -25,6 +25,7 @@ class GlobalVariable;
namespace clang {
class CUDAKernelCallExpr;
+class NamedDecl;
class VarDecl;
namespace CodeGen {
@@ -41,9 +42,30 @@ protected:
public:
// Global variable properties that must be passed to CUDA runtime.
- enum DeviceVarFlags {
- ExternDeviceVar = 0x01, // extern
- ConstantDeviceVar = 0x02, // __constant__
+ class DeviceVarFlags {
+ public:
+ enum DeviceVarKind {
+ Variable, // Variable
+ Surface, // Builtin surface
+ Texture, // Builtin texture
+ };
+
+ private:
+ unsigned Kind : 2;
+ unsigned Extern : 1;
+ unsigned Constant : 1; // Constant variable.
+ unsigned Normalized : 1; // Normalized texture.
+ int SurfTexType; // Type of surface/texutre.
+
+ public:
+ DeviceVarFlags(DeviceVarKind K, bool E, bool C, bool N, int T)
+ : Kind(K), Extern(E), Constant(C), Normalized(N), SurfTexType(T) {}
+
+ DeviceVarKind getKind() const { return static_cast<DeviceVarKind>(Kind); }
+ bool isExtern() const { return Extern; }
+ bool isConstant() const { return Constant; }
+ bool isNormalized() const { return Normalized; }
+ int getSurfTexType() const { return SurfTexType; }
};
CGCUDARuntime(CodeGenModule &CGM) : CGM(CGM) {}
@@ -56,7 +78,11 @@ public:
/// Emits a kernel launch stub.
virtual void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args) = 0;
virtual void registerDeviceVar(const VarDecl *VD, llvm::GlobalVariable &Var,
- unsigned Flags) = 0;
+ bool Extern, bool Constant) = 0;
+ virtual void registerDeviceSurf(const VarDecl *VD, llvm::GlobalVariable &Var,
+ bool Extern, int Type) = 0;
+ virtual void registerDeviceTex(const VarDecl *VD, llvm::GlobalVariable &Var,
+ bool Extern, int Type, bool Normalized) = 0;
/// Constructs and returns a module initialization function or nullptr if it's
/// not needed. Must be called after all kernels have been emitted.
@@ -66,8 +92,9 @@ public:
/// Must be called after ModuleCtorFunction
virtual llvm::Function *makeModuleDtorFunction() = 0;
- /// Construct and return the stub name of a kernel.
- virtual std::string getDeviceStubName(llvm::StringRef Name) const = 0;
+ /// Returns function or variable name on device side even if the current
+ /// compilation is for host.
+ virtual std::string getDeviceSideName(const NamedDecl *ND) = 0;
};
/// Creates an instance of a CUDA runtime class.
diff --git a/clang/lib/CodeGen/CGCXX.cpp b/clang/lib/CodeGen/CGCXX.cpp
index 1928e0df3809..a4bd2c6d5da0 100644
--- a/clang/lib/CodeGen/CGCXX.cpp
+++ b/clang/lib/CodeGen/CGCXX.cpp
@@ -263,8 +263,8 @@ static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF,
AddressPoint.AddressPointIndex;
llvm::Value *VFuncPtr =
CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
- llvm::Value *VFunc =
- CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.PointerAlignInBytes);
+ llvm::Value *VFunc = CGF.Builder.CreateAlignedLoad(
+ VFuncPtr, llvm::Align(CGF.PointerAlignInBytes));
CGCallee Callee(GD, VFunc);
return Callee;
}
diff --git a/clang/lib/CodeGen/CGCXXABI.cpp b/clang/lib/CodeGen/CGCXXABI.cpp
index 7ada4032b3ee..65327a2435b5 100644
--- a/clang/lib/CodeGen/CGCXXABI.cpp
+++ b/clang/lib/CodeGen/CGCXXABI.cpp
@@ -156,6 +156,8 @@ void CGCXXABI::setCXXABIThisValue(CodeGenFunction &CGF, llvm::Value *ThisPtr) {
void CGCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
RValue RV, QualType ResultType) {
+ assert(!CGF.hasAggregateEvaluationKind(ResultType) &&
+ "cannot handle aggregates");
CGF.EmitReturnOfRValue(RV, ResultType);
}
@@ -313,3 +315,20 @@ CatchTypeInfo CGCXXABI::getCatchAllTypeInfo() {
std::vector<CharUnits> CGCXXABI::getVBPtrOffsets(const CXXRecordDecl *RD) {
return std::vector<CharUnits>();
}
+
+CGCXXABI::AddedStructorArgCounts CGCXXABI::addImplicitConstructorArgs(
+ CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
+ bool ForVirtualBase, bool Delegating, CallArgList &Args) {
+ AddedStructorArgs AddedArgs =
+ getImplicitConstructorArgs(CGF, D, Type, ForVirtualBase, Delegating);
+ for (size_t i = 0; i < AddedArgs.Prefix.size(); ++i) {
+ Args.insert(Args.begin() + 1 + i,
+ CallArg(RValue::get(AddedArgs.Prefix[i].Value),
+ AddedArgs.Prefix[i].Type));
+ }
+ for (const auto &arg : AddedArgs.Suffix) {
+ Args.add(RValue::get(arg.Value), arg.Type);
+ }
+ return AddedStructorArgCounts(AddedArgs.Prefix.size(),
+ AddedArgs.Suffix.size());
+}
diff --git a/clang/lib/CodeGen/CGCXXABI.h b/clang/lib/CodeGen/CGCXXABI.h
index bff49be7a3c4..f5b3fc13bbbd 100644
--- a/clang/lib/CodeGen/CGCXXABI.h
+++ b/clang/lib/CodeGen/CGCXXABI.h
@@ -16,6 +16,7 @@
#include "CodeGenFunction.h"
#include "clang/Basic/LLVM.h"
+#include "clang/CodeGen/CodeGenABITypes.h"
namespace llvm {
class Constant;
@@ -107,6 +108,8 @@ public:
virtual bool hasMostDerivedReturn(GlobalDecl GD) const { return false; }
+ virtual bool useSinitAndSterm() const { return false; }
+
/// Returns true if the target allows calling a function through a pointer
/// with a different signature than the actual function (or equivalently,
/// bitcasting a function or function pointer to a different function type).
@@ -287,24 +290,44 @@ public:
/// Emit constructor variants required by this ABI.
virtual void EmitCXXConstructors(const CXXConstructorDecl *D) = 0;
- /// Notes how many arguments were added to the beginning (Prefix) and ending
- /// (Suffix) of an arg list.
+ /// Additional implicit arguments to add to the beginning (Prefix) and end
+ /// (Suffix) of a constructor / destructor arg list.
///
- /// Note that Prefix actually refers to the number of args *after* the first
- /// one: `this` arguments always come first.
+ /// Note that Prefix should actually be inserted *after* the first existing
+ /// arg; `this` arguments always come first.
struct AddedStructorArgs {
+ struct Arg {
+ llvm::Value *Value;
+ QualType Type;
+ };
+ SmallVector<Arg, 1> Prefix;
+ SmallVector<Arg, 1> Suffix;
+ AddedStructorArgs() = default;
+ AddedStructorArgs(SmallVector<Arg, 1> P, SmallVector<Arg, 1> S)
+ : Prefix(std::move(P)), Suffix(std::move(S)) {}
+ static AddedStructorArgs prefix(SmallVector<Arg, 1> Args) {
+ return {std::move(Args), {}};
+ }
+ static AddedStructorArgs suffix(SmallVector<Arg, 1> Args) {
+ return {{}, std::move(Args)};
+ }
+ };
+
+ /// Similar to AddedStructorArgs, but only notes the number of additional
+ /// arguments.
+ struct AddedStructorArgCounts {
unsigned Prefix = 0;
unsigned Suffix = 0;
- AddedStructorArgs() = default;
- AddedStructorArgs(unsigned P, unsigned S) : Prefix(P), Suffix(S) {}
- static AddedStructorArgs prefix(unsigned N) { return {N, 0}; }
- static AddedStructorArgs suffix(unsigned N) { return {0, N}; }
+ AddedStructorArgCounts() = default;
+ AddedStructorArgCounts(unsigned P, unsigned S) : Prefix(P), Suffix(S) {}
+ static AddedStructorArgCounts prefix(unsigned N) { return {N, 0}; }
+ static AddedStructorArgCounts suffix(unsigned N) { return {0, N}; }
};
/// Build the signature of the given constructor or destructor variant by
/// adding any required parameters. For convenience, ArgTys has been
/// initialized with the type of 'this'.
- virtual AddedStructorArgs
+ virtual AddedStructorArgCounts
buildStructorSignature(GlobalDecl GD,
SmallVectorImpl<CanQualType> &ArgTys) = 0;
@@ -365,14 +388,26 @@ public:
/// Emit the ABI-specific prolog for the function.
virtual void EmitInstanceFunctionProlog(CodeGenFunction &CGF) = 0;
+ virtual AddedStructorArgs
+ getImplicitConstructorArgs(CodeGenFunction &CGF, const CXXConstructorDecl *D,
+ CXXCtorType Type, bool ForVirtualBase,
+ bool Delegating) = 0;
+
/// Add any ABI-specific implicit arguments needed to call a constructor.
///
/// \return The number of arguments added at the beginning and end of the
/// call, which is typically zero or one.
- virtual AddedStructorArgs
+ AddedStructorArgCounts
addImplicitConstructorArgs(CodeGenFunction &CGF, const CXXConstructorDecl *D,
CXXCtorType Type, bool ForVirtualBase,
- bool Delegating, CallArgList &Args) = 0;
+ bool Delegating, CallArgList &Args);
+
+ /// Get the implicit (second) parameter that comes after the "this" pointer,
+ /// or nullptr if there is isn't one.
+ virtual llvm::Value *
+ getCXXDestructorImplicitParam(CodeGenFunction &CGF,
+ const CXXDestructorDecl *DD, CXXDtorType Type,
+ bool ForVirtualBase, bool Delegating) = 0;
/// Emit the destructor call.
virtual void EmitDestructorCall(CodeGenFunction &CGF,
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index e4803fde230f..e8235c775d8f 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -16,6 +16,7 @@
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
+#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
@@ -325,7 +326,7 @@ CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
if (PassParams)
appendParameterTypes(*this, argTypes, paramInfos, FTP);
- CGCXXABI::AddedStructorArgs AddedArgs =
+ CGCXXABI::AddedStructorArgCounts AddedArgs =
TheCXXABI.buildStructorSignature(GD, argTypes);
if (!paramInfos.empty()) {
// Note: prefix implies after the first param.
@@ -815,6 +816,7 @@ CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
FI->ASTCallingConvention = info.getCC();
FI->InstanceMethod = instanceMethod;
FI->ChainCall = chainCall;
+ FI->CmseNSCall = info.getCmseNSCall();
FI->NoReturn = info.getNoReturn();
FI->ReturnsRetained = info.getProducesResult();
FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
@@ -1014,8 +1016,8 @@ static void forConstantArrayExpansion(CodeGenFunction &CGF,
}
}
-void CodeGenFunction::ExpandTypeFromArgs(
- QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
+void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
+ llvm::Function::arg_iterator &AI) {
assert(LV.isSimple() &&
"Unexpected non-simple lvalue during struct expansion.");
@@ -1044,17 +1046,17 @@ void CodeGenFunction::ExpandTypeFromArgs(
ExpandTypeFromArgs(FD->getType(), SubLV, AI);
}
} else if (isa<ComplexExpansion>(Exp.get())) {
- auto realValue = *AI++;
- auto imagValue = *AI++;
+ auto realValue = &*AI++;
+ auto imagValue = &*AI++;
EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
} else {
// Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
// primitive store.
assert(isa<NoExpansion>(Exp.get()));
if (LV.isBitField())
- EmitStoreThroughLValue(RValue::get(*AI++), LV);
+ EmitStoreThroughLValue(RValue::get(&*AI++), LV);
else
- EmitStoreOfScalar(*AI++, LV);
+ EmitStoreOfScalar(&*AI++, LV);
}
}
@@ -1232,7 +1234,7 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
- SrcTy = Src.getType()->getElementType();
+ SrcTy = Src.getElementType();
}
uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
@@ -1260,11 +1262,9 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
// Otherwise do coercion through memory. This is stupid, but simple.
Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
- Address Casted = CGF.Builder.CreateElementBitCast(Tmp,CGF.Int8Ty);
- Address SrcCasted = CGF.Builder.CreateElementBitCast(Src,CGF.Int8Ty);
- CGF.Builder.CreateMemCpy(Casted, SrcCasted,
- llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
- false);
+ CGF.Builder.CreateMemCpy(Tmp.getPointer(), Tmp.getAlignment().getAsAlign(),
+ Src.getPointer(), Src.getAlignment().getAsAlign(),
+ llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize));
return CGF.Builder.CreateLoad(Tmp);
}
@@ -1272,18 +1272,17 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
-static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
- Address Dest, bool DestIsVolatile) {
+void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
+ bool DestIsVolatile) {
// Prefer scalar stores to first-class aggregate stores.
- if (llvm::StructType *STy =
- dyn_cast<llvm::StructType>(Val->getType())) {
+ if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i);
- llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
- CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
+ Address EltPtr = Builder.CreateStructGEP(Dest, i);
+ llvm::Value *Elt = Builder.CreateExtractValue(Val, i);
+ Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
}
} else {
- CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
+ Builder.CreateStore(Val, Dest, DestIsVolatile);
}
}
@@ -1298,7 +1297,7 @@ static void CreateCoercedStore(llvm::Value *Src,
bool DstIsVolatile,
CodeGenFunction &CGF) {
llvm::Type *SrcTy = Src->getType();
- llvm::Type *DstTy = Dst.getType()->getElementType();
+ llvm::Type *DstTy = Dst.getElementType();
if (SrcTy == DstTy) {
CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
return;
@@ -1308,7 +1307,7 @@ static void CreateCoercedStore(llvm::Value *Src,
if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
- DstTy = Dst.getType()->getElementType();
+ DstTy = Dst.getElementType();
}
llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
@@ -1334,7 +1333,7 @@ static void CreateCoercedStore(llvm::Value *Src,
// If store is legal, just bitcast the src pointer.
if (SrcSize <= DstSize) {
Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
- BuildAggStore(CGF, Src, Dst, DstIsVolatile);
+ CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
} else {
// Otherwise do coercion through memory. This is stupid, but
// simple.
@@ -1347,11 +1346,9 @@ static void CreateCoercedStore(llvm::Value *Src,
// to that information.
Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
CGF.Builder.CreateStore(Src, Tmp);
- Address Casted = CGF.Builder.CreateElementBitCast(Tmp,CGF.Int8Ty);
- Address DstCasted = CGF.Builder.CreateElementBitCast(Dst,CGF.Int8Ty);
- CGF.Builder.CreateMemCpy(DstCasted, Casted,
- llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
- false);
+ CGF.Builder.CreateMemCpy(Dst.getPointer(), Dst.getAlignment().getAsAlign(),
+ Tmp.getPointer(), Tmp.getAlignment().getAsAlign(),
+ llvm::ConstantInt::get(CGF.IntPtrTy, DstSize));
}
}
@@ -1702,8 +1699,9 @@ static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
}
-void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
- bool AttrOnCallSite,
+void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
+ bool HasOptnone,
+ bool AttrOnCallSite,
llvm::AttrBuilder &FuncAttrs) {
// OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
if (!HasOptnone) {
@@ -1746,13 +1744,20 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
if (CodeGenOpts.NullPointerIsValid)
- FuncAttrs.addAttribute("null-pointer-is-valid", "true");
- if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::Invalid)
+ FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
+
+ if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE())
FuncAttrs.addAttribute("denormal-fp-math",
- llvm::denormalModeName(CodeGenOpts.FPDenormalMode));
+ CodeGenOpts.FPDenormalMode.str());
+ if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) {
+ FuncAttrs.addAttribute(
+ "denormal-fp-math-f32",
+ CodeGenOpts.FP32DenormalMode.str());
+ }
FuncAttrs.addAttribute("no-trapping-math",
- llvm::toStringRef(CodeGenOpts.NoTrappingMath));
+ llvm::toStringRef(LangOpts.getFPExceptionMode() ==
+ LangOptions::FPE_Ignore));
// Strict (compliant) code is the default, so only add this attribute to
// indicate that we are trying to workaround a problem case.
@@ -1762,25 +1767,21 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
// TODO: Are these all needed?
// unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
FuncAttrs.addAttribute("no-infs-fp-math",
- llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
+ llvm::toStringRef(LangOpts.NoHonorInfs));
FuncAttrs.addAttribute("no-nans-fp-math",
- llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
+ llvm::toStringRef(LangOpts.NoHonorNaNs));
FuncAttrs.addAttribute("unsafe-fp-math",
- llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
+ llvm::toStringRef(LangOpts.UnsafeFPMath));
FuncAttrs.addAttribute("use-soft-float",
llvm::toStringRef(CodeGenOpts.SoftFloat));
FuncAttrs.addAttribute("stack-protector-buffer-size",
llvm::utostr(CodeGenOpts.SSPBufferSize));
FuncAttrs.addAttribute("no-signed-zeros-fp-math",
- llvm::toStringRef(CodeGenOpts.NoSignedZeros));
+ llvm::toStringRef(LangOpts.NoSignedZero));
FuncAttrs.addAttribute(
"correctly-rounded-divide-sqrt-fp-math",
llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
- if (getLangOpts().OpenCL)
- FuncAttrs.addAttribute("denorms-are-zero",
- llvm::toStringRef(CodeGenOpts.FlushDenorm));
-
// TODO: Reciprocal estimate codegen options should apply to instructions?
const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
if (!Recips.empty())
@@ -1796,6 +1797,8 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
FuncAttrs.addAttribute("stackrealign");
if (CodeGenOpts.Backchain)
FuncAttrs.addAttribute("backchain");
+ if (CodeGenOpts.EnableSegmentedStacks)
+ FuncAttrs.addAttribute("split-stack");
if (CodeGenOpts.SpeculativeLoadHardening)
FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
@@ -1813,10 +1816,6 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
// Exceptions aren't supported in CUDA device code.
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
-
- // Respect -fcuda-flush-denormals-to-zero.
- if (CodeGenOpts.FlushDenorm)
- FuncAttrs.addAttribute("nvptx-f32ftz", "true");
}
for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
@@ -1826,31 +1825,100 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
}
}
-void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
+void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) {
llvm::AttrBuilder FuncAttrs;
- ConstructDefaultFnAttrList(F.getName(), F.hasOptNone(),
- /* AttrOnCallSite = */ false, FuncAttrs);
+ getDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
+ /* AttrOnCallSite = */ false, FuncAttrs);
+ // TODO: call GetCPUAndFeaturesAttributes?
F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
}
+void CodeGenModule::addDefaultFunctionDefinitionAttributes(
+ llvm::AttrBuilder &attrs) {
+ getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false,
+ /*for call*/ false, attrs);
+ GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
+}
+
+static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs,
+ const LangOptions &LangOpts,
+ const NoBuiltinAttr *NBA = nullptr) {
+ auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
+ SmallString<32> AttributeName;
+ AttributeName += "no-builtin-";
+ AttributeName += BuiltinName;
+ FuncAttrs.addAttribute(AttributeName);
+ };
+
+ // First, handle the language options passed through -fno-builtin.
+ if (LangOpts.NoBuiltin) {
+ // -fno-builtin disables them all.
+ FuncAttrs.addAttribute("no-builtins");
+ return;
+ }
+
+ // Then, add attributes for builtins specified through -fno-builtin-<name>.
+ llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr);
+
+ // Now, let's check the __attribute__((no_builtin("...")) attribute added to
+ // the source.
+ if (!NBA)
+ return;
+
+ // If there is a wildcard in the builtin names specified through the
+ // attribute, disable them all.
+ if (llvm::is_contained(NBA->builtinNames(), "*")) {
+ FuncAttrs.addAttribute("no-builtins");
+ return;
+ }
+
+ // And last, add the rest of the builtin names.
+ llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
+}
+
+/// Construct the IR attribute list of a function or call.
+///
+/// When adding an attribute, please consider where it should be handled:
+///
+/// - getDefaultFunctionAttributes is for attributes that are essentially
+/// part of the global target configuration (but perhaps can be
+/// overridden on a per-function basis). Adding attributes there
+/// will cause them to also be set in frontends that build on Clang's
+/// target-configuration logic, as well as for code defined in library
+/// modules such as CUDA's libdevice.
+///
+/// - ConstructAttributeList builds on top of getDefaultFunctionAttributes
+/// and adds declaration-specific, convention-specific, and
+/// frontend-specific logic. The last is of particular importance:
+/// attributes that restrict how the frontend generates code must be
+/// added here rather than getDefaultFunctionAttributes.
+///
void CodeGenModule::ConstructAttributeList(
StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
llvm::AttrBuilder FuncAttrs;
llvm::AttrBuilder RetAttrs;
+ // Collect function IR attributes from the CC lowering.
+ // We'll collect the paramete and result attributes later.
CallingConv = FI.getEffectiveCallingConvention();
if (FI.isNoReturn())
FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
+ if (FI.isCmseNSCall())
+ FuncAttrs.addAttribute("cmse_nonsecure_call");
- // If we have information about the function prototype, we can learn
- // attributes from there.
+ // Collect function IR attributes from the callee prototype if we have one.
AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
CalleeInfo.getCalleeFunctionProtoType());
const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
bool HasOptnone = false;
+ // The NoBuiltinAttr attached to the target FunctionDecl.
+ const NoBuiltinAttr *NBA = nullptr;
+
+ // Collect function IR attributes based on declaration-specific
+ // information.
// FIXME: handle sseregparm someday...
if (TargetDecl) {
if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
@@ -1869,6 +1937,13 @@ void CodeGenModule::ConstructAttributeList(
if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
AddAttributesFromFunctionProtoType(
getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
+ if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
+ // A sane operator new returns a non-aliasing pointer.
+ auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
+ if (getCodeGenOpts().AssumeSaneOperatorNew &&
+ (Kind == OO_New || Kind == OO_Array_New))
+ RetAttrs.addAttribute(llvm::Attribute::NoAlias);
+ }
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
const bool IsVirtualCall = MD && MD->isVirtual();
// Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
@@ -1876,22 +1951,7 @@ void CodeGenModule::ConstructAttributeList(
if (!(AttrOnCallSite && IsVirtualCall)) {
if (Fn->isNoReturn())
FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
-
- const auto *NBA = Fn->getAttr<NoBuiltinAttr>();
- bool HasWildcard = NBA && llvm::is_contained(NBA->builtinNames(), "*");
- if (getLangOpts().NoBuiltin || HasWildcard)
- FuncAttrs.addAttribute("no-builtins");
- else {
- auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
- SmallString<32> AttributeName;
- AttributeName += "no-builtin-";
- AttributeName += BuiltinName;
- FuncAttrs.addAttribute(AttributeName);
- };
- llvm::for_each(getLangOpts().NoBuiltinFuncs, AddNoBuiltinAttr);
- if (NBA)
- llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
- }
+ NBA = Fn->getAttr<NoBuiltinAttr>();
}
}
@@ -1924,70 +1984,93 @@ void CodeGenModule::ConstructAttributeList(
FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
NumElemsParam);
}
+
+ if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {
+ if (getLangOpts().OpenCLVersion <= 120) {
+ // OpenCL v1.2 Work groups are always uniform
+ FuncAttrs.addAttribute("uniform-work-group-size", "true");
+ } else {
+ // OpenCL v2.0 Work groups may be whether uniform or not.
+ // '-cl-uniform-work-group-size' compile option gets a hint
+ // to the compiler that the global work-size be a multiple of
+ // the work-group size specified to clEnqueueNDRangeKernel
+ // (i.e. work groups are uniform).
+ FuncAttrs.addAttribute("uniform-work-group-size",
+ llvm::toStringRef(CodeGenOpts.UniformWGSize));
+ }
+ }
}
- ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
+ // Attach "no-builtins" attributes to:
+ // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>".
+ // * definitions: "no-builtins" or "no-builtin-<name>" only.
+ // The attributes can come from:
+ // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name>
+ // * FunctionDecl attributes: __attribute__((no_builtin(...)))
+ addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA);
+
+ // Collect function IR attributes based on global settiings.
+ getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
- // This must run after constructing the default function attribute list
- // to ensure that the speculative load hardening attribute is removed
- // in the case where the -mspeculative-load-hardening flag was passed.
+ // Override some default IR attributes based on declaration-specific
+ // information.
if (TargetDecl) {
if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
- }
-
- if (CodeGenOpts.EnableSegmentedStacks &&
- !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
- FuncAttrs.addAttribute("split-stack");
-
- // Add NonLazyBind attribute to function declarations when -fno-plt
- // is used.
- if (TargetDecl && CodeGenOpts.NoPLT) {
- if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
- if (!Fn->isDefined() && !AttrOnCallSite) {
- FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
+ if (TargetDecl->hasAttr<NoSplitStackAttr>())
+ FuncAttrs.removeAttribute("split-stack");
+
+ // Add NonLazyBind attribute to function declarations when -fno-plt
+ // is used.
+ // FIXME: what if we just haven't processed the function definition
+ // yet, or if it's an external definition like C99 inline?
+ if (CodeGenOpts.NoPLT) {
+ if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
+ if (!Fn->isDefined() && !AttrOnCallSite) {
+ FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
+ }
}
}
}
- if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>()) {
- if (getLangOpts().OpenCLVersion <= 120) {
- // OpenCL v1.2 Work groups are always uniform
- FuncAttrs.addAttribute("uniform-work-group-size", "true");
- } else {
- // OpenCL v2.0 Work groups may be whether uniform or not.
- // '-cl-uniform-work-group-size' compile option gets a hint
- // to the compiler that the global work-size be a multiple of
- // the work-group size specified to clEnqueueNDRangeKernel
- // (i.e. work groups are uniform).
- FuncAttrs.addAttribute("uniform-work-group-size",
- llvm::toStringRef(CodeGenOpts.UniformWGSize));
- }
- }
-
+ // Collect non-call-site function IR attributes from declaration-specific
+ // information.
if (!AttrOnCallSite) {
- bool DisableTailCalls = false;
+ if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
+ FuncAttrs.addAttribute("cmse_nonsecure_entry");
+
+ // Whether tail calls are enabled.
+ auto shouldDisableTailCalls = [&] {
+ // Should this be honored in getDefaultFunctionAttributes?
+ if (CodeGenOpts.DisableTailCalls)
+ return true;
+
+ if (!TargetDecl)
+ return false;
- if (CodeGenOpts.DisableTailCalls)
- DisableTailCalls = true;
- else if (TargetDecl) {
if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
TargetDecl->hasAttr<AnyX86InterruptAttr>())
- DisableTailCalls = true;
- else if (CodeGenOpts.NoEscapingBlockTailCalls) {
+ return true;
+
+ if (CodeGenOpts.NoEscapingBlockTailCalls) {
if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
if (!BD->doesNotEscape())
- DisableTailCalls = true;
+ return true;
}
- }
+ return false;
+ };
FuncAttrs.addAttribute("disable-tail-calls",
- llvm::toStringRef(DisableTailCalls));
+ llvm::toStringRef(shouldDisableTailCalls()));
+
+ // CPU/feature overrides. addDefaultFunctionDefinitionAttributes
+ // handles these separately to set them based on the global defaults.
GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
}
+ // Collect attributes from arguments and return values.
ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
QualType RetTy = FI.getReturnType();
@@ -2024,11 +2107,16 @@ void CodeGenModule::ConstructAttributeList(
if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
QualType PTy = RefTy->getPointeeType();
if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
- RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
- .getQuantity());
- else if (getContext().getTargetAddressSpace(PTy) == 0 &&
- !CodeGenOpts.NullPointerIsValid)
+ RetAttrs.addDereferenceableAttr(
+ getMinimumObjectSize(PTy).getQuantity());
+ if (getContext().getTargetAddressSpace(PTy) == 0 &&
+ !CodeGenOpts.NullPointerIsValid)
RetAttrs.addAttribute(llvm::Attribute::NonNull);
+ if (PTy->isObjectType()) {
+ llvm::Align Alignment =
+ getNaturalPointeeTypeAlignment(RetTy).getAsAlign();
+ RetAttrs.addAlignmentAttr(Alignment);
+ }
}
bool hasUsedSRet = false;
@@ -2041,6 +2129,7 @@ void CodeGenModule::ConstructAttributeList(
hasUsedSRet = true;
if (RetAI.getInReg())
SRETAttrs.addAttribute(llvm::Attribute::InReg);
+ SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity());
ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
}
@@ -2134,11 +2223,16 @@ void CodeGenModule::ConstructAttributeList(
if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
QualType PTy = RefTy->getPointeeType();
if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
- Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
- .getQuantity());
- else if (getContext().getTargetAddressSpace(PTy) == 0 &&
- !CodeGenOpts.NullPointerIsValid)
+ Attrs.addDereferenceableAttr(
+ getMinimumObjectSize(PTy).getQuantity());
+ if (getContext().getTargetAddressSpace(PTy) == 0 &&
+ !CodeGenOpts.NullPointerIsValid)
Attrs.addAttribute(llvm::Attribute::NonNull);
+ if (PTy->isObjectType()) {
+ llvm::Align Alignment =
+ getNaturalPointeeTypeAlignment(ParamType).getAsAlign();
+ Attrs.addAlignmentAttr(Alignment);
+ }
}
switch (FI.getExtParameterInfo(ArgNo).getABI()) {
@@ -2161,8 +2255,7 @@ void CodeGenModule::ConstructAttributeList(
if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
auto info = getContext().getTypeInfoInChars(PTy);
Attrs.addDereferenceableAttr(info.first.getQuantity());
- Attrs.addAttribute(llvm::Attribute::getWithAlignment(
- getLLVMContext(), info.second.getAsAlign()));
+ Attrs.addAlignmentAttr(info.second.getAsAlign());
}
break;
}
@@ -2278,19 +2371,13 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// simplify.
ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
- // Flattened function arguments.
- SmallVector<llvm::Value *, 16> FnArgs;
- FnArgs.reserve(IRFunctionArgs.totalIRArgs());
- for (auto &Arg : Fn->args()) {
- FnArgs.push_back(&Arg);
- }
- assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
+ assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());
// If we're using inalloca, all the memory arguments are GEPs off of the last
// parameter, which is a pointer to the complete memory area.
Address ArgStruct = Address::invalid();
if (IRFunctionArgs.hasInallocaArg()) {
- ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
+ ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
FI.getArgStructAlignment());
assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
@@ -2298,7 +2385,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// Name the struct return parameter.
if (IRFunctionArgs.hasSRetArg()) {
- auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
+ auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
AI->setName("agg.result");
AI->addAttr(llvm::Attribute::NoAlias);
}
@@ -2340,13 +2427,17 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
auto FieldIndex = ArgI.getInAllocaFieldIndex();
Address V =
Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
+ if (ArgI.getInAllocaIndirect())
+ V = Address(Builder.CreateLoad(V),
+ getContext().getTypeAlignInChars(Ty));
ArgVals.push_back(ParamValue::forIndirect(V));
break;
}
case ABIArgInfo::Indirect: {
assert(NumIRArgs == 1);
- Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
+ Address ParamAddr =
+ Address(Fn->getArg(FirstIRArg), ArgI.getIndirectAlign());
if (!hasScalarEvaluationKind(Ty)) {
// Aggregates and complex variables are accessed by reference. All we
@@ -2361,10 +2452,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// FIXME: We should have a common utility for generating an aggregate
// copy.
CharUnits Size = getContext().getTypeSizeInChars(Ty);
- auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
- Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
- Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
- Builder.CreateMemCpy(Dst, Src, SizeVal, false);
+ Builder.CreateMemCpy(
+ AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(),
+ ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(),
+ llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
V = AlignedTemp;
}
ArgVals.push_back(ParamValue::forIndirect(V));
@@ -2382,16 +2473,18 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
-
- // If we have the trivial case, handle it with no muss and fuss.
- if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
- ArgI.getCoerceToType() == ConvertType(Ty) &&
- ArgI.getDirectOffset() == 0) {
+ auto AI = Fn->getArg(FirstIRArg);
+ llvm::Type *LTy = ConvertType(Arg->getType());
+
+ // Prepare parameter attributes. So far, only attributes for pointer
+ // parameters are prepared. See
+ // http://llvm.org/docs/LangRef.html#paramattrs.
+ if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() &&
+ ArgI.getCoerceToType()->isPointerTy()) {
assert(NumIRArgs == 1);
- llvm::Value *V = FnArgs[FirstIRArg];
- auto AI = cast<llvm::Argument>(V);
if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
+ // Set `nonnull` attribute if any.
if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
PVD->getFunctionScopeIndex()) &&
!CGM.getCodeGenOpts().NullPointerIsValid)
@@ -2411,9 +2504,11 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
ArrSize) {
llvm::AttrBuilder Attrs;
Attrs.addDereferenceableAttr(
- getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
+ getContext().getTypeSizeInChars(ETy).getQuantity() *
+ ArrSize);
AI->addAttrs(Attrs);
- } else if (getContext().getTargetAddressSpace(ETy) == 0 &&
+ } else if (getContext().getTargetInfo().getNullPointerValue(
+ ETy.getAddressSpace()) == 0 &&
!CGM.getCodeGenOpts().NullPointerIsValid) {
AI->addAttr(llvm::Attribute::NonNull);
}
@@ -2429,6 +2524,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
AI->addAttr(llvm::Attribute::NonNull);
}
+ // Set `align` attribute if any.
const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
if (!AVAttr)
if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
@@ -2437,21 +2533,33 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// If alignment-assumption sanitizer is enabled, we do *not* add
// alignment attribute here, but emit normal alignment assumption,
// so the UBSAN check could function.
- llvm::Value *AlignmentValue =
- EmitScalarExpr(AVAttr->getAlignment());
llvm::ConstantInt *AlignmentCI =
- cast<llvm::ConstantInt>(AlignmentValue);
- unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(),
- +llvm::Value::MaximumAlignment);
- AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
+ cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment()));
+ unsigned AlignmentInt =
+ AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
+ if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
+ AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
+ AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(
+ llvm::Align(AlignmentInt)));
+ }
}
}
+ // Set 'noalias' if an argument type has the `restrict` qualifier.
if (Arg->getType().isRestrictQualified())
AI->addAttr(llvm::Attribute::NoAlias);
+ }
+
+ // Prepare the argument value. If we have the trivial case, handle it
+ // with no muss and fuss.
+ if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
+ ArgI.getCoerceToType() == ConvertType(Ty) &&
+ ArgI.getDirectOffset() == 0) {
+ assert(NumIRArgs == 1);
// LLVM expects swifterror parameters to be used in very restricted
// ways. Copy the value into a less-restricted temporary.
+ llvm::Value *V = AI;
if (FI.getExtParameterInfo(ArgNo).getABI()
== ParameterABI::SwiftErrorResult) {
QualType pointeeTy = Ty->getPointeeType();
@@ -2513,7 +2621,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
assert(STy->getNumElements() == NumIRArgs);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- auto AI = FnArgs[FirstIRArg + i];
+ auto AI = Fn->getArg(FirstIRArg + i);
AI->setName(Arg->getName() + ".coerce" + Twine(i));
Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
Builder.CreateStore(AI, EltPtr);
@@ -2526,7 +2634,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
} else {
// Simple case, just do a coerced store of the argument into the alloca.
assert(NumIRArgs == 1);
- auto AI = FnArgs[FirstIRArg];
+ auto AI = Fn->getArg(FirstIRArg);
AI->setName(Arg->getName() + ".coerce");
CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
}
@@ -2559,7 +2667,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
continue;
auto eltAddr = Builder.CreateStructGEP(alloca, i);
- auto elt = FnArgs[argIndex++];
+ auto elt = Fn->getArg(argIndex++);
Builder.CreateStore(elt, eltAddr);
}
assert(argIndex == FirstIRArg + NumIRArgs);
@@ -2574,11 +2682,11 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
LValue LV = MakeAddrLValue(Alloca, Ty);
ArgVals.push_back(ParamValue::forIndirect(Alloca));
- auto FnArgIter = FnArgs.begin() + FirstIRArg;
+ auto FnArgIter = Fn->arg_begin() + FirstIRArg;
ExpandTypeFromArgs(Ty, LV, FnArgIter);
- assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
+ assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
- auto AI = FnArgs[FirstIRArg + i];
+ auto AI = Fn->getArg(FirstIRArg + i);
AI->setName(Arg->getName() + "." + Twine(i));
}
break;
@@ -2655,10 +2763,10 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
bool doRetainAutorelease;
- if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
+ if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) {
doRetainAutorelease = true;
- } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
- .objc_retainAutoreleasedReturnValue) {
+ } else if (call->getCalledOperand() ==
+ CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) {
doRetainAutorelease = false;
// If we emitted an assembly marker for this call (and the
@@ -2674,8 +2782,8 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
assert(prev);
}
assert(isa<llvm::CallInst>(prev));
- assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
- CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
+ assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
+ CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
InstsToKill.push_back(prev);
}
} else {
@@ -2718,8 +2826,8 @@ static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
// Look for a retain call.
llvm::CallInst *retainCall =
dyn_cast<llvm::CallInst>(result->stripPointerCasts());
- if (!retainCall ||
- retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
+ if (!retainCall || retainCall->getCalledOperand() !=
+ CGF.CGM.getObjCEntrypoints().objc_retain)
return nullptr;
// Look for an ordinary load of 'self'.
@@ -2825,6 +2933,199 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
return store;
}
+// Helper functions for EmitCMSEClearRecord
+
+// Set the bits corresponding to a field having width `BitWidth` and located at
+// offset `BitOffset` (from the least significant bit) within a storage unit of
+// `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte.
+// Use little-endian layout, i.e.`Bits[0]` is the LSB.
+static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset,
+ int BitWidth, int CharWidth) {
+ assert(CharWidth <= 64);
+ assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);
+
+ int Pos = 0;
+ if (BitOffset >= CharWidth) {
+ Pos += BitOffset / CharWidth;
+ BitOffset = BitOffset % CharWidth;
+ }
+
+ const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
+ if (BitOffset + BitWidth >= CharWidth) {
+ Bits[Pos++] |= (Used << BitOffset) & Used;
+ BitWidth -= CharWidth - BitOffset;
+ BitOffset = 0;
+ }
+
+ while (BitWidth >= CharWidth) {
+ Bits[Pos++] = Used;
+ BitWidth -= CharWidth;
+ }
+
+ if (BitWidth > 0)
+ Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
+}
+
+// Set the bits corresponding to a field having width `BitWidth` and located at
+// offset `BitOffset` (from the least significant bit) within a storage unit of
+// `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of
+// `Bits` corresponds to one target byte. Use target endian layout.
+static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset,
+ int StorageSize, int BitOffset, int BitWidth,
+ int CharWidth, bool BigEndian) {
+
+ SmallVector<uint64_t, 8> TmpBits(StorageSize);
+ setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);
+
+ if (BigEndian)
+ std::reverse(TmpBits.begin(), TmpBits.end());
+
+ for (uint64_t V : TmpBits)
+ Bits[StorageOffset++] |= V;
+}
+
+static void setUsedBits(CodeGenModule &, QualType, int,
+ SmallVectorImpl<uint64_t> &);
+
+// Set the bits in `Bits`, which correspond to the value representations of
+// the actual members of the record type `RTy`. Note that this function does
+// not handle base classes, virtual tables, etc, since they cannot happen in
+// CMSE function arguments or return. The bit mask corresponds to the target
+// memory layout, i.e. it's endian dependent.
+static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset,
+ SmallVectorImpl<uint64_t> &Bits) {
+ ASTContext &Context = CGM.getContext();
+ int CharWidth = Context.getCharWidth();
+ const RecordDecl *RD = RTy->getDecl()->getDefinition();
+ const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD);
+ const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD);
+
+ int Idx = 0;
+ for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) {
+ const FieldDecl *F = *I;
+
+ if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) ||
+ F->getType()->isIncompleteArrayType())
+ continue;
+
+ if (F->isBitField()) {
+ const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F);
+ setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(),
+ BFI.StorageSize / CharWidth, BFI.Offset,
+ BFI.Size, CharWidth,
+ CGM.getDataLayout().isBigEndian());
+ continue;
+ }
+
+ setUsedBits(CGM, F->getType(),
+ Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits);
+ }
+}
+
+// Set the bits in `Bits`, which correspond to the value representations of
+// the elements of an array type `ATy`.
+static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy,
+ int Offset, SmallVectorImpl<uint64_t> &Bits) {
+ const ASTContext &Context = CGM.getContext();
+
+ QualType ETy = Context.getBaseElementType(ATy);
+ int Size = Context.getTypeSizeInChars(ETy).getQuantity();
+ SmallVector<uint64_t, 4> TmpBits(Size);
+ setUsedBits(CGM, ETy, 0, TmpBits);
+
+ for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) {
+ auto Src = TmpBits.begin();
+ auto Dst = Bits.begin() + Offset + I * Size;
+ for (int J = 0; J < Size; ++J)
+ *Dst++ |= *Src++;
+ }
+}
+
+// Set the bits in `Bits`, which correspond to the value representations of
+// the type `QTy`.
+static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset,
+ SmallVectorImpl<uint64_t> &Bits) {
+ if (const auto *RTy = QTy->getAs<RecordType>())
+ return setUsedBits(CGM, RTy, Offset, Bits);
+
+ ASTContext &Context = CGM.getContext();
+ if (const auto *ATy = Context.getAsConstantArrayType(QTy))
+ return setUsedBits(CGM, ATy, Offset, Bits);
+
+ int Size = Context.getTypeSizeInChars(QTy).getQuantity();
+ if (Size <= 0)
+ return;
+
+ std::fill_n(Bits.begin() + Offset, Size,
+ (uint64_t(1) << Context.getCharWidth()) - 1);
+}
+
+static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits,
+ int Pos, int Size, int CharWidth,
+ bool BigEndian) {
+ assert(Size > 0);
+ uint64_t Mask = 0;
+ if (BigEndian) {
+ for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
+ ++P)
+ Mask = (Mask << CharWidth) | *P;
+ } else {
+ auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
+ do
+ Mask = (Mask << CharWidth) | *--P;
+ while (P != End);
+ }
+ return Mask;
+}
+
+// Emit code to clear the bits in a record, which aren't a part of any user
+// declared member, when the record is a function return.
+llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
+ llvm::IntegerType *ITy,
+ QualType QTy) {
+ assert(Src->getType() == ITy);
+ assert(ITy->getScalarSizeInBits() <= 64);
+
+ const llvm::DataLayout &DataLayout = CGM.getDataLayout();
+ int Size = DataLayout.getTypeStoreSize(ITy);
+ SmallVector<uint64_t, 4> Bits(Size);
+ setUsedBits(CGM, QTy->getAs<RecordType>(), 0, Bits);
+
+ int CharWidth = CGM.getContext().getCharWidth();
+ uint64_t Mask =
+ buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian());
+
+ return Builder.CreateAnd(Src, Mask, "cmse.clear");
+}
+
+// Emit code to clear the bits in a record, which aren't a part of any user
+// declared member, when the record is a function argument.
+llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
+ llvm::ArrayType *ATy,
+ QualType QTy) {
+ const llvm::DataLayout &DataLayout = CGM.getDataLayout();
+ int Size = DataLayout.getTypeStoreSize(ATy);
+ SmallVector<uint64_t, 16> Bits(Size);
+ setUsedBits(CGM, QTy->getAs<RecordType>(), 0, Bits);
+
+ // Clear each element of the LLVM array.
+ int CharWidth = CGM.getContext().getCharWidth();
+ int CharsPerElt =
+ ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
+ int MaskIndex = 0;
+ llvm::Value *R = llvm::UndefValue::get(ATy);
+ for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
+ uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
+ DataLayout.isBigEndian());
+ MaskIndex += CharsPerElt;
+ llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
+ llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
+ R = Builder.CreateInsertValue(R, T1, I);
+ }
+
+ return R;
+}
+
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
bool EmitRetDbgLoc,
SourceLocation EndLoc) {
@@ -2991,6 +3292,14 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
llvm::Instruction *Ret;
if (RV) {
+ if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) {
+ // For certain return types, clear padding bits, as they may reveal
+ // sensitive information.
+ // Small struct/union types are passed as integers.
+ auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
+ if (ITy != nullptr && isa<RecordType>(RetTy.getCanonicalType()))
+ RV = EmitCMSEClearRecord(RV, ITy, RetTy);
+ }
EmitReturnValueCheck(RV);
Ret = Builder.CreateRet(RV);
} else {
@@ -3006,6 +3315,11 @@ void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
if (!CurCodeDecl)
return;
+ // If the return block isn't reachable, neither is this check, so don't emit
+ // it.
+ if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty())
+ return;
+
ReturnsNonNullAttr *RetNNAttr = nullptr;
if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
@@ -3026,7 +3340,7 @@ void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
} else {
if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
if (auto *TSI = DD->getTypeSourceInfo())
- if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
+ if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>())
AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
CheckKind = SanitizerKind::NullabilityReturn;
Handler = SanitizerHandler::NullabilityReturn;
@@ -3811,6 +4125,110 @@ void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
DeferredReplacements.push_back(std::make_pair(Old, New));
}
+namespace {
+
+/// Specify given \p NewAlign as the alignment of return value attribute. If
+/// such attribute already exists, re-set it to the maximal one of two options.
+LLVM_NODISCARD llvm::AttributeList
+maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
+ const llvm::AttributeList &Attrs,
+ llvm::Align NewAlign) {
+ llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
+ if (CurAlign >= NewAlign)
+ return Attrs;
+ llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
+ return Attrs
+ .removeAttribute(Ctx, llvm::AttributeList::ReturnIndex,
+ llvm::Attribute::AttrKind::Alignment)
+ .addAttribute(Ctx, llvm::AttributeList::ReturnIndex, AlignAttr);
+}
+
+template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
+protected:
+ CodeGenFunction &CGF;
+
+ /// We do nothing if this is, or becomes, nullptr.
+ const AlignedAttrTy *AA = nullptr;
+
+ llvm::Value *Alignment = nullptr; // May or may not be a constant.
+ llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero.
+
+ AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
+ : CGF(CGF_) {
+ if (!FuncDecl)
+ return;
+ AA = FuncDecl->getAttr<AlignedAttrTy>();
+ }
+
+public:
+ /// If we can, materialize the alignment as an attribute on return value.
+ LLVM_NODISCARD llvm::AttributeList
+ TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
+ if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
+ return Attrs;
+ const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
+ if (!AlignmentCI)
+ return Attrs;
+ // We may legitimately have non-power-of-2 alignment here.
+ // If so, this is UB land, emit it via `@llvm.assume` instead.
+ if (!AlignmentCI->getValue().isPowerOf2())
+ return Attrs;
+ llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
+ CGF.getLLVMContext(), Attrs,
+ llvm::Align(
+ AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
+ AA = nullptr; // We're done. Disallow doing anything else.
+ return NewAttrs;
+ }
+
+ /// Emit alignment assumption.
+ /// This is a general fallback that we take if either there is an offset,
+ /// or the alignment is variable or we are sanitizing for alignment.
+ void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
+ if (!AA)
+ return;
+ CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc,
+ AA->getLocation(), Alignment, OffsetCI);
+ AA = nullptr; // We're done. Disallow doing anything else.
+ }
+};
+
+/// Helper data structure to emit `AssumeAlignedAttr`.
+class AssumeAlignedAttrEmitter final
+ : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
+public:
+ AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
+ : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
+ if (!AA)
+ return;
+ // It is guaranteed that the alignment/offset are constants.
+ Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
+ if (Expr *Offset = AA->getOffset()) {
+ OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset));
+ if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset.
+ OffsetCI = nullptr;
+ }
+ }
+};
+
+/// Helper data structure to emit `AllocAlignAttr`.
+class AllocAlignAttrEmitter final
+ : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
+public:
+ AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl,
+ const CallArgList &CallArgs)
+ : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
+ if (!AA)
+ return;
+ // Alignment may or may not be a constant, and that is okay.
+ Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
+ .getRValue(CGF)
+ .getScalarVal();
+ }
+};
+
+} // namespace
+
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
const CGCallee &Callee,
ReturnValueSlot ReturnValue,
@@ -3829,7 +4247,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);
const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
// We can only guarantee that a function is called from the correct
// context/function based on the appropriate target attributes,
// so only check in the case where we have both always_inline and target
@@ -3840,6 +4258,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
TargetDecl->hasAttr<TargetAttr>())
checkTargetFeatures(Loc, FD);
+ // Some architectures (such as x86-64) have the ABI changed based on
+ // attribute-target/features. Give them a chance to diagnose.
+ CGM.getTargetCodeGenInfo().checkFunctionCallABI(
+ CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs);
+ }
+
#ifndef NDEBUG
if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
// For an inalloca varargs function, we don't expect CallInfo to match the
@@ -3940,18 +4364,39 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
assert(NumIRArgs == 0);
assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
if (I->isAggregate()) {
- // Replace the placeholder with the appropriate argument slot GEP.
Address Addr = I->hasLValue()
? I->getKnownLValue().getAddress(*this)
: I->getKnownRValue().getAggregateAddress();
llvm::Instruction *Placeholder =
cast<llvm::Instruction>(Addr.getPointer());
- CGBuilderTy::InsertPoint IP = Builder.saveIP();
- Builder.SetInsertPoint(Placeholder);
- Addr =
- Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
- Builder.restoreIP(IP);
+
+ if (!ArgInfo.getInAllocaIndirect()) {
+ // Replace the placeholder with the appropriate argument slot GEP.
+ CGBuilderTy::InsertPoint IP = Builder.saveIP();
+ Builder.SetInsertPoint(Placeholder);
+ Addr = Builder.CreateStructGEP(ArgMemory,
+ ArgInfo.getInAllocaFieldIndex());
+ Builder.restoreIP(IP);
+ } else {
+ // For indirect things such as overaligned structs, replace the
+ // placeholder with a regular aggregate temporary alloca. Store the
+ // address of this alloca into the struct.
+ Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp");
+ Address ArgSlot = Builder.CreateStructGEP(
+ ArgMemory, ArgInfo.getInAllocaFieldIndex());
+ Builder.CreateStore(Addr.getPointer(), ArgSlot);
+ }
deferPlaceholderReplacement(Placeholder, Addr.getPointer());
+ } else if (ArgInfo.getInAllocaIndirect()) {
+ // Make a temporary alloca and store the address of it into the argument
+ // struct.
+ Address Addr = CreateMemTempWithoutCast(
+ I->Ty, getContext().getTypeAlignInChars(I->Ty),
+ "indirect-arg-temp");
+ I->copyInto(*this, Addr);
+ Address ArgSlot =
+ Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
+ Builder.CreateStore(Addr.getPointer(), ArgSlot);
} else {
// Store the RValue into the argument struct.
Address Addr =
@@ -4001,8 +4446,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
bool NeedCopy = false;
if (Addr.getAlignment() < Align &&
- llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) <
- Align.getQuantity()) {
+ llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
+ Align.getAsAlign()) {
NeedCopy = true;
} else if (I->hasLValue()) {
auto LV = I->getKnownLValue();
@@ -4128,7 +4573,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::StructType *STy =
dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
- llvm::Type *SrcTy = Src.getType()->getElementType();
+ llvm::Type *SrcTy = Src.getElementType();
uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
@@ -4156,8 +4601,18 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
} else {
// In the simple case, just pass the coerced loaded value.
assert(NumIRArgs == 1);
- IRCallArgs[FirstIRArg] =
- CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
+ llvm::Value *Load =
+ CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
+
+ if (CallInfo.isCmseNSCall()) {
+ // For certain parameter types, clear padding bits, as they may reveal
+ // sensitive information.
+ // Small struct/union types are passed as integer arrays.
+ auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
+ if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
+ Load = EmitCMSEClearRecord(Load, ATy, I->Ty);
+ }
+ IRCallArgs[FirstIRArg] = Load;
}
break;
@@ -4328,8 +4783,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Update the largest vector width if any arguments have vector types.
for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
- LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
- VT->getPrimitiveSizeInBits().getFixedSize());
+ LargestVectorWidth =
+ std::max((uint64_t)LargestVectorWidth,
+ VT->getPrimitiveSizeInBits().getKnownMinSize());
}
// Compute the calling convention and attributes.
@@ -4346,6 +4802,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
llvm::Attribute::StrictFP);
+ // Add call-site nomerge attribute if exists.
+ if (InNoMergeAttributedStmt)
+ Attrs =
+ Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
+ llvm::Attribute::NoMerge);
+
// Apply some call-site-specific attributes.
// TODO: work this into building the attribute set.
@@ -4378,8 +4840,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
CannotThrow = true;
} else {
// Otherwise, nounwind call sites will never throw.
- CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
- llvm::Attribute::NoUnwind);
+ CannotThrow = Attrs.hasFnAttribute(llvm::Attribute::NoUnwind);
}
// If we made a temporary, be sure to clean up after ourselves. Note that we
@@ -4402,6 +4863,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
llvm::Attribute::StrictFP);
+ AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
+ Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
+
+ AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
+ Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
+
// Emit the actual call/invoke instruction.
llvm::CallBase *CI;
if (!InvokeDest) {
@@ -4437,8 +4904,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Update largest vector width from the return type.
if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
- LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
- VT->getPrimitiveSizeInBits().getFixedSize());
+ LargestVectorWidth =
+ std::max((uint64_t)LargestVectorWidth,
+ VT->getPrimitiveSizeInBits().getKnownMinSize());
// Insert instrumentation or attach profile metadata at indirect call sites.
// For more details, see the comment before the definition of
@@ -4461,7 +4929,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Add metadata for calls to MSAllocator functions
if (getDebugInfo() && TargetDecl &&
TargetDecl->hasAttr<MSAllocatorAttr>())
- getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy, Loc);
+ getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc);
// 4. Finish the call.
@@ -4581,7 +5049,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
DestPtr = CreateMemTemp(RetTy, "agg.tmp");
DestIsVolatile = false;
}
- BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
+ EmitAggregateStore(CI, DestPtr, DestIsVolatile);
return RValue::getAggregate(DestPtr);
}
case TEK_Scalar: {
@@ -4620,22 +5088,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Emit the assume_aligned check on the return value.
if (Ret.isScalar() && TargetDecl) {
- if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
- llvm::Value *OffsetValue = nullptr;
- if (const auto *Offset = AA->getOffset())
- OffsetValue = EmitScalarExpr(Offset);
-
- llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
- llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
- EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
- AlignmentCI, OffsetValue);
- } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
- llvm::Value *AlignmentVal = CallArgs[AA->getParamIndex().getLLVMIndex()]
- .getRValue(*this)
- .getScalarVal();
- EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
- AlignmentVal);
- }
+ AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
+ AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
}
// Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
@@ -4643,6 +5097,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
LifetimeEnd.Emit(*this, /*Flags=*/{});
+ if (!ReturnValue.isExternallyDestructed() &&
+ RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct)
+ pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(),
+ RetTy);
+
return Ret;
}
diff --git a/clang/lib/CodeGen/CGCall.h b/clang/lib/CodeGen/CGCall.h
index 34558be5adb1..509ca43a9784 100644
--- a/clang/lib/CodeGen/CGCall.h
+++ b/clang/lib/CodeGen/CGCall.h
@@ -16,6 +16,7 @@
#include "CGValue.h"
#include "EHScopeStack.h"
+#include "clang/AST/ASTFwd.h"
#include "clang/AST/CanonicalType.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Type.h"
@@ -357,27 +358,26 @@ class FunctionArgList : public SmallVector<const VarDecl *, 16> {};
/// ReturnValueSlot - Contains the address where the return value of a
/// function can be stored, and whether the address is volatile or not.
class ReturnValueSlot {
- llvm::PointerIntPair<llvm::Value *, 2, unsigned int> Value;
- CharUnits Alignment;
+ Address Addr = Address::invalid();
// Return value slot flags
- enum Flags {
- IS_VOLATILE = 0x1,
- IS_UNUSED = 0x2,
- };
+ unsigned IsVolatile : 1;
+ unsigned IsUnused : 1;
+ unsigned IsExternallyDestructed : 1;
public:
- ReturnValueSlot() {}
- ReturnValueSlot(Address Addr, bool IsVolatile, bool IsUnused = false)
- : Value(Addr.isValid() ? Addr.getPointer() : nullptr,
- (IsVolatile ? IS_VOLATILE : 0) | (IsUnused ? IS_UNUSED : 0)),
- Alignment(Addr.isValid() ? Addr.getAlignment() : CharUnits::Zero()) {}
-
- bool isNull() const { return !getValue().isValid(); }
-
- bool isVolatile() const { return Value.getInt() & IS_VOLATILE; }
- Address getValue() const { return Address(Value.getPointer(), Alignment); }
- bool isUnused() const { return Value.getInt() & IS_UNUSED; }
+ ReturnValueSlot()
+ : IsVolatile(false), IsUnused(false), IsExternallyDestructed(false) {}
+ ReturnValueSlot(Address Addr, bool IsVolatile, bool IsUnused = false,
+ bool IsExternallyDestructed = false)
+ : Addr(Addr), IsVolatile(IsVolatile), IsUnused(IsUnused),
+ IsExternallyDestructed(IsExternallyDestructed) {}
+
+ bool isNull() const { return !Addr.isValid(); }
+ bool isVolatile() const { return IsVolatile; }
+ Address getValue() const { return Addr; }
+ bool isUnused() const { return IsUnused; }
+ bool isExternallyDestructed() const { return IsExternallyDestructed; }
};
} // end namespace CodeGen
diff --git a/clang/lib/CodeGen/CGClass.cpp b/clang/lib/CodeGen/CGClass.cpp
index 3f3825b76275..4d143e3e1bdf 100644
--- a/clang/lib/CodeGen/CGClass.cpp
+++ b/clang/lib/CodeGen/CGClass.cpp
@@ -35,20 +35,37 @@ using namespace CodeGen;
/// Return the best known alignment for an unknown pointer to a
/// particular class.
CharUnits CodeGenModule::getClassPointerAlignment(const CXXRecordDecl *RD) {
- if (!RD->isCompleteDefinition())
+ if (!RD->hasDefinition())
return CharUnits::One(); // Hopefully won't be used anywhere.
auto &layout = getContext().getASTRecordLayout(RD);
// If the class is final, then we know that the pointer points to an
// object of that type and can use the full alignment.
- if (RD->hasAttr<FinalAttr>()) {
+ if (RD->isEffectivelyFinal())
return layout.getAlignment();
// Otherwise, we have to assume it could be a subclass.
- } else {
- return layout.getNonVirtualAlignment();
- }
+ return layout.getNonVirtualAlignment();
+}
+
+/// Return the smallest possible amount of storage that might be allocated
+/// starting from the beginning of an object of a particular class.
+///
+/// This may be smaller than sizeof(RD) if RD has virtual base classes.
+CharUnits CodeGenModule::getMinimumClassObjectSize(const CXXRecordDecl *RD) {
+ if (!RD->hasDefinition())
+ return CharUnits::One();
+
+ auto &layout = getContext().getASTRecordLayout(RD);
+
+ // If the class is final, then we know that the pointer points to an
+ // object of that type and can use the full alignment.
+ if (RD->isEffectivelyFinal())
+ return layout.getSize();
+
+ // Otherwise, we have to assume it could be a subclass.
+ return std::max(layout.getNonVirtualSize(), CharUnits::One());
}
/// Return the best known alignment for a pointer to a virtual base,
@@ -138,8 +155,8 @@ CodeGenFunction::EmitCXXMemberDataPointerAddress(const Expr *E, Address base,
memberPtr, memberPtrType);
QualType memberType = memberPtrType->getPointeeType();
- CharUnits memberAlign = getNaturalTypeAlignment(memberType, BaseInfo,
- TBAAInfo);
+ CharUnits memberAlign =
+ CGM.getNaturalTypeAlignment(memberType, BaseInfo, TBAAInfo);
memberAlign =
CGM.getDynamicOffsetAlignment(base.getAlignment(),
memberPtrType->getClass()->getAsCXXRecordDecl(),
@@ -236,8 +253,13 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, Address addr,
// Compute the offset from the static and dynamic components.
llvm::Value *baseOffset;
if (!nonVirtualOffset.isZero()) {
- baseOffset = llvm::ConstantInt::get(CGF.PtrDiffTy,
- nonVirtualOffset.getQuantity());
+ llvm::Type *OffsetType =
+ (CGF.CGM.getTarget().getCXXABI().isItaniumFamily() &&
+ CGF.CGM.getItaniumVTableContext().isRelativeLayout())
+ ? CGF.Int32Ty
+ : CGF.PtrDiffTy;
+ baseOffset =
+ llvm::ConstantInt::get(OffsetType, nonVirtualOffset.getQuantity());
if (virtualOffset) {
baseOffset = CGF.Builder.CreateAdd(virtualOffset, baseOffset);
}
@@ -730,7 +752,7 @@ bool CodeGenFunction::IsConstructorDelegationValid(
// parameters
// - etc.
// If we ever add any of the above cases, remember that:
- // - function-try-blocks will always blacklist this optimization
+ // - function-try-blocks will always exclude this optimization
// - we need to perform the constructor prologue and cleanup in
// EmitConstructorBody.
@@ -2128,7 +2150,7 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
QualType SrcTy = D->getParamDecl(0)->getType().getNonReferenceType();
Address Src(Args[1].getRValue(*this).getScalarVal(),
- getNaturalTypeAlignment(SrcTy));
+ CGM.getNaturalTypeAlignment(SrcTy));
LValue SrcLVal = MakeAddrLValue(Src, SrcTy);
QualType DestTy = getContext().getTypeDeclType(ClassDecl);
LValue DestLVal = MakeAddrLValue(This, DestTy);
@@ -2148,7 +2170,7 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
}
// Insert any ABI-specific implicit constructor arguments.
- CGCXXABI::AddedStructorArgs ExtraArgs =
+ CGCXXABI::AddedStructorArgCounts ExtraArgs =
CGM.getCXXABI().addImplicitConstructorArgs(*this, D, Type, ForVirtualBase,
Delegating, Args);
@@ -2157,7 +2179,7 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
const CGFunctionInfo &Info = CGM.getTypes().arrangeCXXConstructorCall(
Args, D, Type, ExtraArgs.Prefix, ExtraArgs.Suffix, PassPrototypeArgs);
CGCallee Callee = CGCallee::forDirect(CalleePtr, GlobalDecl(D, Type));
- EmitCall(Info, Callee, ReturnValueSlot(), Args);
+ EmitCall(Info, Callee, ReturnValueSlot(), Args, nullptr, Loc);
// Generate vtable assumptions if we're constructing a complete object
// with a vtable. We don't do this for base subobjects for two reasons:
@@ -2641,7 +2663,9 @@ void CodeGenFunction::EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD,
if (SanOpts.has(SanitizerKind::CFIVCall))
EmitVTablePtrCheckForCall(RD, VTable, CodeGenFunction::CFITCK_VCall, Loc);
else if (CGM.getCodeGenOpts().WholeProgramVTables &&
- CGM.HasHiddenLTOVisibility(RD)) {
+ // Don't insert type test assumes if we are forcing public std
+ // visibility.
+ !CGM.HasLTOVisibilityPublicStd(RD)) {
llvm::Metadata *MD =
CGM.CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
llvm::Value *TypeId =
@@ -2850,7 +2874,9 @@ void CodeGenFunction::EmitForwardingCallToLambda(
if (!resultType->isVoidType() &&
calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
!hasScalarEvaluationKind(calleeFnInfo.getReturnType()))
- returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified());
+ returnSlot =
+ ReturnValueSlot(ReturnValue, resultType.isVolatileQualified(),
+ /*IsUnused=*/false, /*IsExternallyDestructed=*/true);
// We don't need to separately arrange the call arguments because
// the call can't be variadic anyway --- it's impossible to forward
diff --git a/clang/lib/CodeGen/CGCleanup.cpp b/clang/lib/CodeGen/CGCleanup.cpp
index c117dd5c25c1..ad543ef86c1a 100644
--- a/clang/lib/CodeGen/CGCleanup.cpp
+++ b/clang/lib/CodeGen/CGCleanup.cpp
@@ -179,12 +179,10 @@ void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size));
bool IsNormalCleanup = Kind & NormalCleanup;
bool IsEHCleanup = Kind & EHCleanup;
- bool IsActive = !(Kind & InactiveCleanup);
bool IsLifetimeMarker = Kind & LifetimeMarker;
EHCleanupScope *Scope =
new (Buffer) EHCleanupScope(IsNormalCleanup,
IsEHCleanup,
- IsActive,
Size,
BranchFixups.size(),
InnermostNormalCleanup,
@@ -309,9 +307,9 @@ static void createStoreInstBefore(llvm::Value *value, Address addr,
static llvm::LoadInst *createLoadInstBefore(Address addr, const Twine &name,
llvm::Instruction *beforeInst) {
- auto load = new llvm::LoadInst(addr.getPointer(), name, beforeInst);
- load->setAlignment(addr.getAlignment().getAsAlign());
- return load;
+ return new llvm::LoadInst(addr.getElementType(), addr.getPointer(), name,
+ false, addr.getAlignment().getAsAlign(),
+ beforeInst);
}
/// All the branch fixups on the EH stack have propagated out past the
@@ -859,6 +857,9 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
// TODO: base this on the number of branch-afters and fixups
const unsigned SwitchCapacity = 10;
+ // pass the abnormal exit flag to Fn (SEH cleanup)
+ cleanupFlags.setHasExitSwitch();
+
llvm::LoadInst *Load =
createLoadInstBefore(getNormalCleanupDestSlot(), "cleanup.dest",
nullptr);
diff --git a/clang/lib/CodeGen/CGCleanup.h b/clang/lib/CodeGen/CGCleanup.h
index ffe0f9d9dd20..ef4f6b9ec133 100644
--- a/clang/lib/CodeGen/CGCleanup.h
+++ b/clang/lib/CodeGen/CGCleanup.h
@@ -102,7 +102,7 @@ protected:
};
public:
- enum Kind { Cleanup, Catch, Terminate, Filter, PadEnd };
+ enum Kind { Cleanup, Catch, Terminate, Filter };
EHScope(Kind kind, EHScopeStack::stable_iterator enclosingEHScope)
: CachedLandingPad(nullptr), CachedEHDispatchBlock(nullptr),
@@ -284,8 +284,8 @@ public:
return sizeof(EHCleanupScope) + CleanupBits.CleanupSize;
}
- EHCleanupScope(bool isNormal, bool isEH, bool isActive,
- unsigned cleanupSize, unsigned fixupDepth,
+ EHCleanupScope(bool isNormal, bool isEH, unsigned cleanupSize,
+ unsigned fixupDepth,
EHScopeStack::stable_iterator enclosingNormal,
EHScopeStack::stable_iterator enclosingEH)
: EHScope(EHScope::Cleanup, enclosingEH),
@@ -293,7 +293,7 @@ public:
ActiveFlag(nullptr), ExtInfo(nullptr), FixupDepth(fixupDepth) {
CleanupBits.IsNormalCleanup = isNormal;
CleanupBits.IsEHCleanup = isEH;
- CleanupBits.IsActive = isActive;
+ CleanupBits.IsActive = true;
CleanupBits.IsLifetimeMarker = false;
CleanupBits.TestFlagInNormalCleanup = false;
CleanupBits.TestFlagInEHCleanup = false;
@@ -487,17 +487,6 @@ public:
}
};
-class EHPadEndScope : public EHScope {
-public:
- EHPadEndScope(EHScopeStack::stable_iterator enclosingEHScope)
- : EHScope(PadEnd, enclosingEHScope) {}
- static size_t getSize() { return sizeof(EHPadEndScope); }
-
- static bool classof(const EHScope *scope) {
- return scope->getKind() == PadEnd;
- }
-};
-
/// A non-stable pointer into the scope stack.
class EHScopeStack::iterator {
char *Ptr;
@@ -535,10 +524,6 @@ public:
case EHScope::Terminate:
Size = EHTerminateScope::getSize();
break;
-
- case EHScope::PadEnd:
- Size = EHPadEndScope::getSize();
- break;
}
Ptr += llvm::alignTo(Size, ScopeStackAlignment);
return *this;
diff --git a/clang/lib/CodeGen/CGCoroutine.cpp b/clang/lib/CodeGen/CGCoroutine.cpp
index aee5a927a055..5c57ad0685d5 100644
--- a/clang/lib/CodeGen/CGCoroutine.cpp
+++ b/clang/lib/CodeGen/CGCoroutine.cpp
@@ -275,9 +275,9 @@ RValue CodeGenFunction::EmitCoyieldExpr(const CoyieldExpr &E,
void CodeGenFunction::EmitCoreturnStmt(CoreturnStmt const &S) {
++CurCoro.Data->CoreturnCount;
const Expr *RV = S.getOperand();
- if (RV && RV->getType()->isVoidType()) {
- // Make sure to evaluate the expression of a co_return with a void
- // expression for side effects.
+ if (RV && RV->getType()->isVoidType() && !isa<InitListExpr>(RV)) {
+ // Make sure to evaluate the non initlist expression of a co_return
+ // with a void expression for side effects.
RunCleanupsScope cleanupScope(*this);
EmitIgnoredExpr(RV);
}
diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp
index cbd524eda9d0..6965c4a1209c 100644
--- a/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -231,9 +231,16 @@ PrintingPolicy CGDebugInfo::getPrintingPolicy() const {
// If we're emitting codeview, it's important to try to match MSVC's naming so
// that visualizers written for MSVC will trigger for our class names. In
// particular, we can't have spaces between arguments of standard templates
- // like basic_string and vector.
- if (CGM.getCodeGenOpts().EmitCodeView)
+ // like basic_string and vector, but we must have spaces between consecutive
+ // angle brackets that close nested template argument lists.
+ if (CGM.getCodeGenOpts().EmitCodeView) {
PP.MSVCFormatting = true;
+ PP.SplitTemplateClosers = true;
+ } else {
+ // For DWARF, printing rules are underspecified.
+ // SplitTemplateClosers yields better interop with GCC and GDB (PR46052).
+ PP.SplitTemplateClosers = true;
+ }
// Apply -fdebug-prefix-map.
PP.Callbacks = &PrintCB;
@@ -470,10 +477,14 @@ CGDebugInfo::createFile(StringRef FileName,
}
std::string CGDebugInfo::remapDIPath(StringRef Path) const {
+ if (DebugPrefixMap.empty())
+ return Path.str();
+
+ SmallString<256> P = Path;
for (const auto &Entry : DebugPrefixMap)
- if (Path.startswith(Entry.first))
- return (Twine(Entry.second) + Path.substr(Entry.first.size())).str();
- return Path.str();
+ if (llvm::sys::path::replace_path_prefix(P, Entry.first, Entry.second))
+ break;
+ return P.str().str();
}
unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
@@ -532,11 +543,12 @@ void CGDebugInfo::CreateCompileUnit() {
// file to determine the real absolute path for the file.
std::string MainFileDir;
if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
- MainFileDir = MainFile->getDir()->getName();
+ MainFileDir = std::string(MainFile->getDir()->getName());
if (!llvm::sys::path::is_absolute(MainFileName)) {
llvm::SmallString<1024> MainFileDirSS(MainFileDir);
llvm::sys::path::append(MainFileDirSS, MainFileName);
- MainFileName = llvm::sys::path::remove_leading_dotslash(MainFileDirSS);
+ MainFileName =
+ std::string(llvm::sys::path::remove_leading_dotslash(MainFileDirSS));
}
// If the main file name provided is identical to the input file name, and
// if the input file is a preprocessed source, use the module name for
@@ -610,6 +622,16 @@ void CGDebugInfo::CreateCompileUnit() {
remapDIPath(MainFileName), remapDIPath(getCurrentDirname()), CSInfo,
getSource(SM, SM.getMainFileID()));
+ StringRef Sysroot, SDK;
+ if (CGM.getCodeGenOpts().getDebuggerTuning() == llvm::DebuggerKind::LLDB) {
+ Sysroot = CGM.getHeaderSearchOpts().Sysroot;
+ auto B = llvm::sys::path::rbegin(Sysroot);
+ auto E = llvm::sys::path::rend(Sysroot);
+ auto It = std::find_if(B, E, [](auto SDK) { return SDK.endswith(".sdk"); });
+ if (It != E)
+ SDK = *It;
+ }
+
// Create new compile unit.
TheCU = DBuilder.createCompileUnit(
LangTag, CUFile, CGOpts.EmitVersionIdentMetadata ? Producer : "",
@@ -620,7 +642,7 @@ void CGDebugInfo::CreateCompileUnit() {
? llvm::DICompileUnit::DebugNameTableKind::None
: static_cast<llvm::DICompileUnit::DebugNameTableKind>(
CGOpts.DebugNameTable),
- CGOpts.DebugRangesBaseAddress);
+ CGOpts.DebugRangesBaseAddress, remapDIPath(Sysroot), SDK);
}
llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
@@ -750,6 +772,7 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
case BuiltinType::Float:
case BuiltinType::LongDouble:
case BuiltinType::Float16:
+ case BuiltinType::BFloat16:
case BuiltinType::Float128:
case BuiltinType::Double:
// FIXME: For targets where long double and __float128 have the same size,
@@ -811,6 +834,21 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
return DBuilder.createBasicType(BTName, Size, Encoding);
}
+llvm::DIType *CGDebugInfo::CreateType(const AutoType *Ty) {
+ return DBuilder.createUnspecifiedType("auto");
+}
+
+llvm::DIType *CGDebugInfo::CreateType(const ExtIntType *Ty) {
+
+ StringRef Name = Ty->isUnsigned() ? "unsigned _ExtInt" : "_ExtInt";
+ llvm::dwarf::TypeKind Encoding = Ty->isUnsigned()
+ ? llvm::dwarf::DW_ATE_unsigned
+ : llvm::dwarf::DW_ATE_signed;
+
+ return DBuilder.createBasicType(Name, CGM.getContext().getTypeSize(Ty),
+ Encoding);
+}
+
llvm::DIType *CGDebugInfo::CreateType(const ComplexType *Ty) {
// Bit size and offset of the type.
llvm::dwarf::TypeKind Encoding = llvm::dwarf::DW_ATE_complex_float;
@@ -976,11 +1014,21 @@ CGDebugInfo::getOrCreateRecordFwdDecl(const RecordType *Ty,
uint64_t Size = 0;
uint32_t Align = 0;
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagFwdDecl;
+
+ // Add flag to nontrivial forward declarations. To be consistent with MSVC,
+ // add the flag if a record has no definition because we don't know whether
+ // it will be trivial or not.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ if (!CXXRD->hasDefinition() ||
+ (CXXRD->hasDefinition() && !CXXRD->isTrivial()))
+ Flags |= llvm::DINode::FlagNonTrivial;
+
// Create the type.
SmallString<256> Identifier = getTypeIdentifier(Ty, CGM, TheCU);
llvm::DICompositeType *RetTy = DBuilder.createReplaceableCompositeType(
- getTagForRecord(RD), RDName, Ctx, DefUnit, Line, 0, Size, Align,
- llvm::DINode::FlagFwdDecl, Identifier);
+ getTagForRecord(RD), RDName, Ctx, DefUnit, Line, 0, Size, Align, Flags,
+ Identifier);
if (CGM.getCodeGenOpts().DebugFwdTemplateParams)
if (auto *TSpecial = dyn_cast<ClassTemplateSpecializationDecl>(RD))
DBuilder.replaceArrays(RetTy, llvm::DINodeArray(),
@@ -1458,16 +1506,18 @@ void CGDebugInfo::CollectRecordFields(
llvm::DISubroutineType *
CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
- llvm::DIFile *Unit) {
+ llvm::DIFile *Unit, bool decl) {
const FunctionProtoType *Func = Method->getType()->getAs<FunctionProtoType>();
if (Method->isStatic())
return cast_or_null<llvm::DISubroutineType>(
getOrCreateType(QualType(Func, 0), Unit));
- return getOrCreateInstanceMethodType(Method->getThisType(), Func, Unit);
+ return getOrCreateInstanceMethodType(Method->getThisType(), Func, Unit, decl);
}
-llvm::DISubroutineType *CGDebugInfo::getOrCreateInstanceMethodType(
- QualType ThisPtr, const FunctionProtoType *Func, llvm::DIFile *Unit) {
+llvm::DISubroutineType *
+CGDebugInfo::getOrCreateInstanceMethodType(QualType ThisPtr,
+ const FunctionProtoType *Func,
+ llvm::DIFile *Unit, bool decl) {
// Add "this" pointer.
llvm::DITypeRefArray Args(
cast<llvm::DISubroutineType>(getOrCreateType(QualType(Func, 0), Unit))
@@ -1475,9 +1525,12 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateInstanceMethodType(
assert(Args.size() && "Invalid number of arguments!");
SmallVector<llvm::Metadata *, 16> Elts;
-
// First element is always return type. For 'void' functions it is NULL.
- Elts.push_back(Args[0]);
+ QualType temp = Func->getReturnType();
+ if (temp->getTypeClass() == Type::Auto && decl)
+ Elts.push_back(CreateType(cast<AutoType>(temp)));
+ else
+ Elts.push_back(Args[0]);
// "this" pointer is always first argument.
const CXXRecordDecl *RD = ThisPtr->getPointeeCXXRecordDecl();
@@ -1536,7 +1589,7 @@ llvm::DISubprogram *CGDebugInfo::CreateCXXMemberFunction(
isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method);
StringRef MethodName = getFunctionName(Method);
- llvm::DISubroutineType *MethodTy = getOrCreateMethodType(Method, Unit);
+ llvm::DISubroutineType *MethodTy = getOrCreateMethodType(Method, Unit, true);
// Since a single ctor/dtor corresponds to multiple functions, it doesn't
// make sense to give a single ctor/dtor a linkage name.
@@ -1773,18 +1826,38 @@ CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList,
for (unsigned i = 0, e = TAList.size(); i != e; ++i) {
const TemplateArgument &TA = TAList[i];
StringRef Name;
+ bool defaultParameter = false;
if (TPList)
Name = TPList->getParam(i)->getName();
switch (TA.getKind()) {
case TemplateArgument::Type: {
llvm::DIType *TTy = getOrCreateType(TA.getAsType(), Unit);
- TemplateParams.push_back(
- DBuilder.createTemplateTypeParameter(TheCU, Name, TTy));
+
+ if (TPList)
+ if (auto *templateType =
+ dyn_cast_or_null<TemplateTypeParmDecl>(TPList->getParam(i)))
+ if (templateType->hasDefaultArgument())
+ defaultParameter =
+ templateType->getDefaultArgument() == TA.getAsType();
+
+ TemplateParams.push_back(DBuilder.createTemplateTypeParameter(
+ TheCU, Name, TTy, defaultParameter));
+
} break;
case TemplateArgument::Integral: {
llvm::DIType *TTy = getOrCreateType(TA.getIntegralType(), Unit);
+ if (TPList && CGM.getCodeGenOpts().DwarfVersion >= 5)
+ if (auto *templateType =
+ dyn_cast_or_null<NonTypeTemplateParmDecl>(TPList->getParam(i)))
+ if (templateType->hasDefaultArgument() &&
+ !templateType->getDefaultArgument()->isValueDependent())
+ defaultParameter = llvm::APSInt::isSameValue(
+ templateType->getDefaultArgument()->EvaluateKnownConstInt(
+ CGM.getContext()),
+ TA.getAsIntegral());
+
TemplateParams.push_back(DBuilder.createTemplateValueParameter(
- TheCU, Name, TTy,
+ TheCU, Name, TTy, defaultParameter,
llvm::ConstantInt::get(CGM.getLLVMContext(), TA.getAsIntegral())));
} break;
case TemplateArgument::Declaration: {
@@ -1818,12 +1891,14 @@ CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList,
CharUnits chars =
CGM.getContext().toCharUnitsFromBits((int64_t)fieldOffset);
V = CGM.getCXXABI().EmitMemberDataPointer(MPT, chars);
+ } else if (const auto *GD = dyn_cast<MSGuidDecl>(D)) {
+ V = CGM.GetAddrOfMSGuidDecl(GD).getPointer();
}
assert(V && "Failed to find template parameter pointer");
V = V->stripPointerCasts();
}
TemplateParams.push_back(DBuilder.createTemplateValueParameter(
- TheCU, Name, TTy, cast_or_null<llvm::Constant>(V)));
+ TheCU, Name, TTy, defaultParameter, cast_or_null<llvm::Constant>(V)));
} break;
case TemplateArgument::NullPtr: {
QualType T = TA.getNullPtrType();
@@ -1841,8 +1916,8 @@ CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList,
V = CGM.getCXXABI().EmitNullMemberPointer(MPT);
if (!V)
V = llvm::ConstantInt::get(CGM.Int8Ty, 0);
- TemplateParams.push_back(
- DBuilder.createTemplateValueParameter(TheCU, Name, TTy, V));
+ TemplateParams.push_back(DBuilder.createTemplateValueParameter(
+ TheCU, Name, TTy, defaultParameter, V));
} break;
case TemplateArgument::Template:
TemplateParams.push_back(DBuilder.createTemplateTemplateParameter(
@@ -1863,7 +1938,7 @@ CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList,
assert(V && "Expression in template argument isn't constant");
llvm::DIType *TTy = getOrCreateType(T, Unit);
TemplateParams.push_back(DBuilder.createTemplateValueParameter(
- TheCU, Name, TTy, V->stripPointerCasts()));
+ TheCU, Name, TTy, defaultParameter, V->stripPointerCasts()));
} break;
// And the following should never occur:
case TemplateArgument::TemplateExpansion:
@@ -2071,16 +2146,17 @@ llvm::DIType *CGDebugInfo::getOrCreateStandaloneType(QualType D,
return T;
}
-void CGDebugInfo::addHeapAllocSiteMetadata(llvm::Instruction *CI,
- QualType D,
+void CGDebugInfo::addHeapAllocSiteMetadata(llvm::CallBase *CI,
+ QualType AllocatedTy,
SourceLocation Loc) {
+ if (CGM.getCodeGenOpts().getDebugInfo() <=
+ codegenoptions::DebugLineTablesOnly)
+ return;
llvm::MDNode *node;
- if (D.getTypePtr()->isVoidPointerType()) {
+ if (AllocatedTy->isVoidType())
node = llvm::MDNode::get(CGM.getLLVMContext(), None);
- } else {
- QualType PointeeTy = D.getTypePtr()->getPointeeType();
- node = getOrCreateType(PointeeTy, getOrCreateFile(Loc));
- }
+ else
+ node = getOrCreateType(AllocatedTy, getOrCreateFile(Loc));
CI->setMetadata("heapallocsite", node);
}
@@ -2221,12 +2297,11 @@ static bool shouldOmitDefinition(codegenoptions::DebugInfoKind DebugKind,
// constructor is emitted. Skip this optimization if the class or any of
// its methods are marked dllimport.
if (DebugKind == codegenoptions::DebugInfoConstructor &&
- !CXXDecl->isLambda() && !isClassOrMethodDLLImport(CXXDecl)) {
- for (const auto *Ctor : CXXDecl->ctors()) {
+ !CXXDecl->isLambda() && !CXXDecl->hasConstexprNonCopyMoveConstructor() &&
+ !isClassOrMethodDLLImport(CXXDecl))
+ for (const auto *Ctor : CXXDecl->ctors())
if (Ctor->isUserProvided())
return true;
- }
- }
TemplateSpecializationKind Spec = TSK_Undeclared;
if (const auto *SD = dyn_cast<ClassTemplateSpecializationDecl>(RD))
@@ -2399,9 +2474,8 @@ llvm::DIType *CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
return CreateTypeDefinition(Ty, Unit);
}
-llvm::DIModule *
-CGDebugInfo::getOrCreateModuleRef(ExternalASTSource::ASTSourceDescriptor Mod,
- bool CreateSkeletonCU) {
+llvm::DIModule *CGDebugInfo::getOrCreateModuleRef(ASTSourceDescriptor Mod,
+ bool CreateSkeletonCU) {
// Use the Module pointer as the key into the cache. This is a
// nullptr if the "Module" is a PCH, which is safe because we don't
// support chained PCH debug info, so there can only be a single PCH.
@@ -2446,32 +2520,51 @@ CGDebugInfo::getOrCreateModuleRef(ExternalASTSource::ASTSourceDescriptor Mod,
assert(StringRef(M->Name).startswith(CGM.getLangOpts().ModuleName) &&
"clang module without ASTFile must be specified by -fmodule-name");
+ // Return a StringRef to the remapped Path.
+ auto RemapPath = [this](StringRef Path) -> std::string {
+ std::string Remapped = remapDIPath(Path);
+ StringRef Relative(Remapped);
+ StringRef CompDir = TheCU->getDirectory();
+ if (Relative.consume_front(CompDir))
+ Relative.consume_front(llvm::sys::path::get_separator());
+
+ return Relative.str();
+ };
+
if (CreateSkeletonCU && IsRootModule && !Mod.getASTFile().empty()) {
// PCH files don't have a signature field in the control block,
// but LLVM detects skeleton CUs by looking for a non-zero DWO id.
// We use the lower 64 bits for debug info.
- uint64_t Signature =
- Mod.getSignature()
- ? (uint64_t)Mod.getSignature()[1] << 32 | Mod.getSignature()[0]
- : ~1ULL;
+
+ uint64_t Signature = 0;
+ if (const auto &ModSig = Mod.getSignature()) {
+ for (unsigned I = 0; I != sizeof(Signature); ++I)
+ Signature |= (uint64_t)ModSig[I] << (I * 8);
+ } else {
+ Signature = ~1ULL;
+ }
llvm::DIBuilder DIB(CGM.getModule());
- DIB.createCompileUnit(TheCU->getSourceLanguage(),
- // TODO: Support "Source" from external AST providers?
- DIB.createFile(Mod.getModuleName(), Mod.getPath()),
- TheCU->getProducer(), true, StringRef(), 0,
- Mod.getASTFile(), llvm::DICompileUnit::FullDebug,
- Signature);
+ SmallString<0> PCM;
+ if (!llvm::sys::path::is_absolute(Mod.getASTFile()))
+ PCM = Mod.getPath();
+ llvm::sys::path::append(PCM, Mod.getASTFile());
+ DIB.createCompileUnit(
+ TheCU->getSourceLanguage(),
+ // TODO: Support "Source" from external AST providers?
+ DIB.createFile(Mod.getModuleName(), TheCU->getDirectory()),
+ TheCU->getProducer(), false, StringRef(), 0, RemapPath(PCM),
+ llvm::DICompileUnit::FullDebug, Signature);
DIB.finalize();
}
llvm::DIModule *Parent =
IsRootModule ? nullptr
- : getOrCreateModuleRef(
- ExternalASTSource::ASTSourceDescriptor(*M->Parent),
- CreateSkeletonCU);
+ : getOrCreateModuleRef(ASTSourceDescriptor(*M->Parent),
+ CreateSkeletonCU);
+ std::string IncludePath = Mod.getPath().str();
llvm::DIModule *DIMod =
DBuilder.createModule(Parent, Mod.getModuleName(), ConfigMacros,
- Mod.getPath(), CGM.getHeaderSearchOpts().Sysroot);
+ RemapPath(IncludePath));
ModuleCache[M].reset(DIMod);
return DIMod;
}
@@ -2649,9 +2742,17 @@ llvm::DIType *CGDebugInfo::CreateType(const VectorType *Ty,
QualType QTy(Ty, 0);
auto SizeExpr = SizeExprCache.find(QTy);
if (SizeExpr != SizeExprCache.end())
- Subscript = DBuilder.getOrCreateSubrange(0, SizeExpr->getSecond());
- else
- Subscript = DBuilder.getOrCreateSubrange(0, Count ? Count : -1);
+ Subscript = DBuilder.getOrCreateSubrange(
+ SizeExpr->getSecond() /*count*/, nullptr /*lowerBound*/,
+ nullptr /*upperBound*/, nullptr /*stride*/);
+ else {
+ auto *CountNode =
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
+ llvm::Type::getInt64Ty(CGM.getLLVMContext()), Count ? Count : -1));
+ Subscript = DBuilder.getOrCreateSubrange(
+ CountNode /*count*/, nullptr /*lowerBound*/, nullptr /*upperBound*/,
+ nullptr /*stride*/);
+ }
llvm::DINodeArray SubscriptArray = DBuilder.getOrCreateArray(Subscript);
uint64_t Size = CGM.getContext().getTypeSize(Ty);
@@ -2660,6 +2761,33 @@ llvm::DIType *CGDebugInfo::CreateType(const VectorType *Ty,
return DBuilder.createVectorType(Size, Align, ElementTy, SubscriptArray);
}
+llvm::DIType *CGDebugInfo::CreateType(const ConstantMatrixType *Ty,
+ llvm::DIFile *Unit) {
+ // FIXME: Create another debug type for matrices
+ // For the time being, it treats it like a nested ArrayType.
+
+ llvm::DIType *ElementTy = getOrCreateType(Ty->getElementType(), Unit);
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint32_t Align = getTypeAlignIfRequired(Ty, CGM.getContext());
+
+ // Create ranges for both dimensions.
+ llvm::SmallVector<llvm::Metadata *, 2> Subscripts;
+ auto *ColumnCountNode =
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
+ llvm::Type::getInt64Ty(CGM.getLLVMContext()), Ty->getNumColumns()));
+ auto *RowCountNode =
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
+ llvm::Type::getInt64Ty(CGM.getLLVMContext()), Ty->getNumRows()));
+ Subscripts.push_back(DBuilder.getOrCreateSubrange(
+ ColumnCountNode /*count*/, nullptr /*lowerBound*/, nullptr /*upperBound*/,
+ nullptr /*stride*/));
+ Subscripts.push_back(DBuilder.getOrCreateSubrange(
+ RowCountNode /*count*/, nullptr /*lowerBound*/, nullptr /*upperBound*/,
+ nullptr /*stride*/));
+ llvm::DINodeArray SubscriptArray = DBuilder.getOrCreateArray(Subscripts);
+ return DBuilder.createArrayType(Size, Align, ElementTy, SubscriptArray);
+}
+
llvm::DIType *CGDebugInfo::CreateType(const ArrayType *Ty, llvm::DIFile *Unit) {
uint64_t Size;
uint32_t Align;
@@ -2710,10 +2838,17 @@ llvm::DIType *CGDebugInfo::CreateType(const ArrayType *Ty, llvm::DIFile *Unit) {
auto SizeNode = SizeExprCache.find(EltTy);
if (SizeNode != SizeExprCache.end())
- Subscripts.push_back(
- DBuilder.getOrCreateSubrange(0, SizeNode->getSecond()));
- else
- Subscripts.push_back(DBuilder.getOrCreateSubrange(0, Count));
+ Subscripts.push_back(DBuilder.getOrCreateSubrange(
+ SizeNode->getSecond() /*count*/, nullptr /*lowerBound*/,
+ nullptr /*upperBound*/, nullptr /*stride*/));
+ else {
+ auto *CountNode =
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
+ llvm::Type::getInt64Ty(CGM.getLLVMContext()), Count));
+ Subscripts.push_back(DBuilder.getOrCreateSubrange(
+ CountNode /*count*/, nullptr /*lowerBound*/, nullptr /*upperBound*/,
+ nullptr /*stride*/));
+ }
EltTy = Ty->getElementType();
}
@@ -2772,7 +2907,7 @@ llvm::DIType *CGDebugInfo::CreateType(const MemberPointerType *Ty,
return DBuilder.createMemberPointerType(
getOrCreateInstanceMethodType(
CXXMethodDecl::getThisType(FPT, Ty->getMostRecentCXXRecordDecl()),
- FPT, U),
+ FPT, U, false),
ClassType, Size, /*Align=*/0, Flags);
}
@@ -3025,7 +3160,7 @@ llvm::DIModule *CGDebugInfo::getParentModuleOrNull(const Decl *D) {
// option.
if (Module *M = D->getOwningModule()) {
// This is a (sub-)module.
- auto Info = ExternalASTSource::ASTSourceDescriptor(*M);
+ auto Info = ASTSourceDescriptor(*M);
return getOrCreateModuleRef(Info, /*SkeletonCU=*/false);
} else {
// This the precompiled header being built.
@@ -3053,6 +3188,8 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
case Type::ExtVector:
case Type::Vector:
return CreateType(cast<VectorType>(Ty), Unit);
+ case Type::ConstantMatrix:
+ return CreateType(cast<ConstantMatrixType>(Ty), Unit);
case Type::ObjCObjectPointer:
return CreateType(cast<ObjCObjectPointerType>(Ty), Unit);
case Type::ObjCObject:
@@ -3094,6 +3231,8 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
case Type::Atomic:
return CreateType(cast<AtomicType>(Ty), Unit);
+ case Type::ExtInt:
+ return CreateType(cast<ExtIntType>(Ty));
case Type::Pipe:
return CreateType(cast<PipeType>(Ty), Unit);
@@ -3547,7 +3686,7 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateFunctionType(const Decl *D,
return DBuilder.createSubroutineType(DBuilder.getOrCreateTypeArray(None));
if (const auto *Method = dyn_cast<CXXMethodDecl>(D))
- return getOrCreateMethodType(Method, F);
+ return getOrCreateMethodType(Method, F, false);
const auto *FTy = FnType->getAs<FunctionType>();
CallingConv CC = FTy ? FTy->getCallConv() : CallingConv::CC_C;
@@ -3651,8 +3790,11 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
Name = getDynamicInitializerName(cast<VarDecl>(D), GD.getDynamicInitKind(),
Fn);
} else {
- // Use llvm function name.
Name = Fn->getName();
+
+ if (isa<BlockDecl>(D))
+ LinkageName = Name;
+
Flags |= llvm::DINode::FlagPrototyped;
}
if (Name.startswith("\01"))
@@ -3764,7 +3906,7 @@ void CGDebugInfo::EmitFunctionDecl(GlobalDecl GD, SourceLocation Loc,
if (IsDeclForCallSite)
Fn->setSubprogram(SP);
- DBuilder.retainType(SP);
+ DBuilder.finalizeSubprogram(SP);
}
void CGDebugInfo::EmitFuncDeclForCallSite(llvm::CallBase *CallOrInvoke,
@@ -3778,12 +3920,12 @@ void CGDebugInfo::EmitFuncDeclForCallSite(llvm::CallBase *CallOrInvoke,
if (Func->getSubprogram())
return;
- // Do not emit a declaration subprogram for a builtin or if call site info
- // isn't required. Also, elide declarations for functions with reserved names,
- // as call site-related features aren't interesting in this case (& also, the
- // compiler may emit calls to these functions without debug locations, which
- // makes the verifier complain).
- if (CalleeDecl->getBuiltinID() != 0 ||
+ // Do not emit a declaration subprogram for a builtin, a function with nodebug
+ // attribute, or if call site info isn't required. Also, elide declarations
+ // for functions with reserved names, as call site-related features aren't
+ // interesting in this case (& also, the compiler may emit calls to these
+ // functions without debug locations, which makes the verifier complain).
+ if (CalleeDecl->getBuiltinID() != 0 || CalleeDecl->hasAttr<NoDebugAttr>() ||
getCallSiteRelatedAttrs() == llvm::DINode::FlagZero)
return;
if (const auto *Id = CalleeDecl->getIdentifier())
@@ -4680,7 +4822,7 @@ void CGDebugInfo::EmitImportDecl(const ImportDecl &ID) {
if (CGM.getCodeGenOpts().getDebuggerTuning() != llvm::DebuggerKind::LLDB)
return;
if (Module *M = ID.getImportedModule()) {
- auto Info = ExternalASTSource::ASTSourceDescriptor(*M);
+ auto Info = ASTSourceDescriptor(*M);
auto Loc = ID.getLocation();
DBuilder.createImportedDeclaration(
getCurrentContextDescriptor(cast<Decl>(ID.getDeclContext())),
@@ -4844,8 +4986,7 @@ llvm::DINode::DIFlags CGDebugInfo::getCallSiteRelatedAttrs() const {
(CGM.getCodeGenOpts().getDebuggerTuning() == llvm::DebuggerKind::LLDB ||
CGM.getCodeGenOpts().getDebuggerTuning() == llvm::DebuggerKind::GDB);
- if (!SupportsDWARFv4Ext && CGM.getCodeGenOpts().DwarfVersion < 5 &&
- !CGM.getCodeGenOpts().EnableDebugEntryValues)
+ if (!SupportsDWARFv4Ext && CGM.getCodeGenOpts().DwarfVersion < 5)
return llvm::DINode::FlagZero;
return llvm::DINode::FlagAllCallsDescribed;
diff --git a/clang/lib/CodeGen/CGDebugInfo.h b/clang/lib/CodeGen/CGDebugInfo.h
index 90e9a61ebe96..96ef6c7c1d27 100644
--- a/clang/lib/CodeGen/CGDebugInfo.h
+++ b/clang/lib/CodeGen/CGDebugInfo.h
@@ -17,9 +17,11 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/CodeGenOptions.h"
+#include "clang/Basic/Module.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
@@ -60,7 +62,7 @@ class CGDebugInfo {
llvm::DIBuilder DBuilder;
llvm::DICompileUnit *TheCU = nullptr;
ModuleMap *ClangModuleMap = nullptr;
- ExternalASTSource::ASTSourceDescriptor PCHDescriptor;
+ ASTSourceDescriptor PCHDescriptor;
SourceLocation CurLoc;
llvm::MDNode *CurInlinedAt = nullptr;
llvm::DIType *VTablePtrType = nullptr;
@@ -165,6 +167,8 @@ class CGDebugInfo {
/// ivars and property accessors.
llvm::DIType *CreateType(const BuiltinType *Ty);
llvm::DIType *CreateType(const ComplexType *Ty);
+ llvm::DIType *CreateType(const AutoType *Ty);
+ llvm::DIType *CreateType(const ExtIntType *Ty);
llvm::DIType *CreateQualifiedType(QualType Ty, llvm::DIFile *Fg);
llvm::DIType *CreateType(const TypedefType *Ty, llvm::DIFile *Fg);
llvm::DIType *CreateType(const TemplateSpecializationType *Ty,
@@ -188,6 +192,7 @@ class CGDebugInfo {
llvm::DIType *CreateType(const ObjCTypeParamType *Ty, llvm::DIFile *Unit);
llvm::DIType *CreateType(const VectorType *Ty, llvm::DIFile *F);
+ llvm::DIType *CreateType(const ConstantMatrixType *Ty, llvm::DIFile *F);
llvm::DIType *CreateType(const ArrayType *Ty, llvm::DIFile *F);
llvm::DIType *CreateType(const LValueReferenceType *Ty, llvm::DIFile *F);
llvm::DIType *CreateType(const RValueReferenceType *Ty, llvm::DIFile *Unit);
@@ -214,10 +219,10 @@ class CGDebugInfo {
/// not updated to include implicit \c this pointer. Use this routine
/// to get a method type which includes \c this pointer.
llvm::DISubroutineType *getOrCreateMethodType(const CXXMethodDecl *Method,
- llvm::DIFile *F);
+ llvm::DIFile *F, bool decl);
llvm::DISubroutineType *
getOrCreateInstanceMethodType(QualType ThisPtr, const FunctionProtoType *Func,
- llvm::DIFile *Unit);
+ llvm::DIFile *Unit, bool decl);
llvm::DISubroutineType *
getOrCreateFunctionType(const Decl *D, QualType FnType, llvm::DIFile *F);
/// \return debug info descriptor for vtable.
@@ -378,9 +383,7 @@ public:
/// When generating debug information for a clang module or
/// precompiled header, this module map will be used to determine
/// the module of origin of each Decl.
- void setPCHDescriptor(ExternalASTSource::ASTSourceDescriptor PCH) {
- PCHDescriptor = PCH;
- }
+ void setPCHDescriptor(ASTSourceDescriptor PCH) { PCHDescriptor = PCH; }
/// @}
/// Update the current source location. If \arg loc is invalid it is
@@ -506,7 +509,7 @@ public:
llvm::DIType *getOrCreateStandaloneType(QualType Ty, SourceLocation Loc);
/// Add heapallocsite metadata for MSAllocator calls.
- void addHeapAllocSiteMetadata(llvm::Instruction *CallSite, QualType Ty,
+ void addHeapAllocSiteMetadata(llvm::CallBase *CallSite, QualType AllocatedTy,
SourceLocation Loc);
void completeType(const EnumDecl *ED);
@@ -589,9 +592,8 @@ private:
/// Get a reference to a clang module. If \p CreateSkeletonCU is true,
/// this also creates a split dwarf skeleton compile unit.
- llvm::DIModule *
- getOrCreateModuleRef(ExternalASTSource::ASTSourceDescriptor Mod,
- bool CreateSkeletonCU);
+ llvm::DIModule *getOrCreateModuleRef(ASTSourceDescriptor Mod,
+ bool CreateSkeletonCU);
/// DebugTypeExtRefs: If \p D originated in a clang module, return it.
llvm::DIModule *getParentModuleOrNull(const Decl *D);
diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp
index 5aac7a8d54c7..1729c7ed3c31 100644
--- a/clang/lib/CodeGen/CGDecl.cpp
+++ b/clang/lib/CodeGen/CGDecl.cpp
@@ -31,6 +31,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
+#include "clang/Sema/Sema.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
@@ -40,6 +41,9 @@
using namespace clang;
using namespace CodeGen;
+static_assert(clang::Sema::MaximumAlignment <= llvm::Value::MaximumAlignment,
+ "Clang max alignment greater than what LLVM supports?");
+
void CodeGenFunction::EmitDecl(const Decl &D) {
switch (D.getKind()) {
case Decl::BuiltinTemplate:
@@ -104,6 +108,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
case Decl::Label: // __label__ x;
case Decl::Import:
+ case Decl::MSGuid: // __declspec(uuid("..."))
case Decl::OMPThreadPrivate:
case Decl::OMPAllocate:
case Decl::OMPCapturedExpr:
@@ -111,6 +116,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::Empty:
case Decl::Concept:
case Decl::LifetimeExtendedTemporary:
+ case Decl::RequiresExprBody:
// None of these decls require codegen support.
return;
@@ -205,9 +211,9 @@ static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
if (auto *CD = dyn_cast<CapturedDecl>(DC))
DC = cast<DeclContext>(CD->getNonClosureContext());
if (const auto *FD = dyn_cast<FunctionDecl>(DC))
- ContextName = CGM.getMangledName(FD);
+ ContextName = std::string(CGM.getMangledName(FD));
else if (const auto *BD = dyn_cast<BlockDecl>(DC))
- ContextName = CGM.getBlockMangledName(GlobalDecl(), BD);
+ ContextName = std::string(CGM.getBlockMangledName(GlobalDecl(), BD));
else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
ContextName = OMD->getSelector().getAsString();
else
@@ -232,7 +238,7 @@ llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
// Use the label if the variable is renamed with the asm-label extension.
std::string Name;
if (D.hasAttr<AsmLabelAttr>())
- Name = getMangledName(&D);
+ Name = std::string(getMangledName(&D));
else
Name = getStaticDeclName(*this, D);
@@ -244,7 +250,7 @@ llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
// variables cannot have an initializer.
llvm::Constant *Init = nullptr;
if (Ty.getAddressSpace() == LangAS::opencl_local ||
- D.hasAttr<CUDASharedAttr>())
+ D.hasAttr<CUDASharedAttr>() || D.hasAttr<LoaderUninitializedAttr>())
Init = llvm::UndefValue::get(LTy);
else
Init = EmitNullConstant(Ty);
@@ -336,7 +342,7 @@ CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
// the global to match the initializer. (We have to do this
// because some types, like unions, can't be completely represented
// in the LLVM type system.)
- if (GV->getType()->getElementType() != Init->getType()) {
+ if (GV->getValueType() != Init->getType()) {
llvm::GlobalVariable *OldGV = GV;
GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
@@ -756,10 +762,8 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
// If we're emitting a value with lifetime, we have to do the
// initialization *before* we leave the cleanup scopes.
- if (const FullExpr *fe = dyn_cast<FullExpr>(init)) {
- enterFullExpression(fe);
- init = fe->getSubExpr();
- }
+ if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(init))
+ init = EWC->getSubExpr();
CodeGenFunction::RunCleanupsScope Scope(*this);
// We have to maintain the illusion that the variable is
@@ -1045,13 +1049,13 @@ static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
llvm::Type *OrigTy = constant->getType();
if (const auto STy = dyn_cast<llvm::StructType>(OrigTy))
return constStructWithPadding(CGM, isPattern, STy, constant);
- if (auto *STy = dyn_cast<llvm::SequentialType>(OrigTy)) {
+ if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(OrigTy)) {
llvm::SmallVector<llvm::Constant *, 8> Values;
- unsigned Size = STy->getNumElements();
+ uint64_t Size = ArrayTy->getNumElements();
if (!Size)
return constant;
- llvm::Type *ElemTy = STy->getElementType();
- bool ZeroInitializer = constant->isZeroValue();
+ llvm::Type *ElemTy = ArrayTy->getElementType();
+ bool ZeroInitializer = constant->isNullValue();
llvm::Constant *OpValue, *PaddedOp;
if (ZeroInitializer) {
OpValue = llvm::Constant::getNullValue(ElemTy);
@@ -1067,13 +1071,12 @@ static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
auto *NewElemTy = Values[0]->getType();
if (NewElemTy == ElemTy)
return constant;
- if (OrigTy->isArrayTy()) {
- auto *ArrayTy = llvm::ArrayType::get(NewElemTy, Size);
- return llvm::ConstantArray::get(ArrayTy, Values);
- } else {
- return llvm::ConstantVector::get(Values);
- }
+ auto *NewArrayTy = llvm::ArrayType::get(NewElemTy, Size);
+ return llvm::ConstantArray::get(NewArrayTy, Values);
}
+ // FIXME: Add handling for tail padding in vectors. Vectors don't
+ // have padding between or inside elements, but the total amount of
+ // data can be less than the allocated size.
return constant;
}
@@ -1086,7 +1089,7 @@ Address CodeGenModule::createUnnamedGlobalFrom(const VarDecl &D,
return CC->getNameAsString();
if (const auto *CD = dyn_cast<CXXDestructorDecl>(FD))
return CD->getNameAsString();
- return getMangledName(FD);
+ return std::string(getMangledName(FD));
} else if (const auto *OM = dyn_cast<ObjCMethodDecl>(DC)) {
return OM->getNameAsString();
} else if (isa<BlockDecl>(DC)) {
@@ -1397,10 +1400,15 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
Address address = Address::invalid();
Address AllocaAddr = Address::invalid();
- Address OpenMPLocalAddr =
- getLangOpts().OpenMP
- ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
- : Address::invalid();
+ Address OpenMPLocalAddr = Address::invalid();
+ if (CGM.getLangOpts().OpenMPIRBuilder)
+ OpenMPLocalAddr = OMPBuilderCBHelpers::getAddressOfLocalVariable(*this, &D);
+ else
+ OpenMPLocalAddr =
+ getLangOpts().OpenMP
+ ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
+ : Address::invalid();
+
bool NRVO = getLangOpts().ElideConstructors && D.isNRVOVariable();
if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
@@ -1512,9 +1520,12 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// is rare.
if (!Bypasses.IsBypassed(&D) &&
!(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
- uint64_t size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
+ llvm::TypeSize size =
+ CGM.getDataLayout().getTypeAllocSize(allocaTy);
emission.SizeForLifetimeMarkers =
- EmitLifetimeStart(size, AllocaAddr.getPointer());
+ size.isScalable() ? EmitLifetimeStart(-1, AllocaAddr.getPointer())
+ : EmitLifetimeStart(size.getFixedSize(),
+ AllocaAddr.getPointer());
}
} else {
assert(!emission.useLifetimeMarkers());
@@ -1671,9 +1682,13 @@ void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
case LangOptions::TrivialAutoVarInitKind::Uninitialized:
llvm_unreachable("Uninitialized handled by caller");
case LangOptions::TrivialAutoVarInitKind::Zero:
+ if (CGM.stopAutoInit())
+ return;
emitStoresForZeroInit(CGM, D, Loc, isVolatile, Builder);
break;
case LangOptions::TrivialAutoVarInitKind::Pattern:
+ if (CGM.stopAutoInit())
+ return;
emitStoresForPatternInit(CGM, D, Loc, isVolatile, Builder);
break;
}
@@ -1696,6 +1711,8 @@ void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
llvm_unreachable("Uninitialized handled by caller");
case LangOptions::TrivialAutoVarInitKind::Zero:
+ if (CGM.stopAutoInit())
+ return;
if (!EltSize.isOne())
SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
@@ -1703,6 +1720,8 @@ void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
break;
case LangOptions::TrivialAutoVarInitKind::Pattern: {
+ if (CGM.stopAutoInit())
+ return;
llvm::Type *ElTy = Loc.getElementType();
llvm::Constant *Constant = constWithPadding(
CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
@@ -1861,9 +1880,7 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
///
/// \param init the initializing expression
/// \param D the object to act as if we're initializing
-/// \param loc the address to initialize; its type is a pointer
-/// to the LLVM mapping of the object's type
-/// \param alignment the alignment of the address
+/// \param lvalue the lvalue to initialize
/// \param capturedByInit true if \p D is a __block variable
/// whose address is potentially changed by the initializer
void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
@@ -2532,5 +2549,5 @@ void CodeGenModule::EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D,
}
void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
- getOpenMPRuntime().checkArchForUnifiedAddressing(D);
+ getOpenMPRuntime().processRequiresDirective(D);
}
diff --git a/clang/lib/CodeGen/CGDeclCXX.cpp b/clang/lib/CodeGen/CGDeclCXX.cpp
index 3baa0a080f5d..5a8500364295 100644
--- a/clang/lib/CodeGen/CGDeclCXX.cpp
+++ b/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -16,11 +16,12 @@
#include "CodeGenFunction.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
-#include "clang/Basic/CodeGenOptions.h"
+#include "clang/Basic/LangOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/Path.h"
+#include "llvm/Transforms/Utils/ModuleUtils.h"
using namespace clang;
using namespace CodeGen;
@@ -239,7 +240,7 @@ llvm::Function *CodeGenFunction::createAtExitStub(const VarDecl &VD,
}
const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
- llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(
+ llvm::Function *fn = CGM.CreateGlobalInitOrCleanUpFunction(
ty, FnName.str(), FI, VD.getLocation());
CodeGenFunction CGF(CGM);
@@ -249,7 +250,7 @@ llvm::Function *CodeGenFunction::createAtExitStub(const VarDecl &VD,
llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);
- // Make sure the call and the callee agree on calling convention.
+ // Make sure the call and the callee agree on calling convention.
if (auto *dtorFn = dyn_cast<llvm::Function>(
dtor.getCallee()->stripPointerCastsAndAliases()))
call->setCallingConv(dtorFn->getCallingConv());
@@ -270,8 +271,12 @@ void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtorStub) {
// extern "C" int atexit(void (*f)(void));
+ assert(cast<llvm::Function>(dtorStub)->getFunctionType() ==
+ llvm::FunctionType::get(CGM.VoidTy, false) &&
+ "Argument to atexit has a wrong type.");
+
llvm::FunctionType *atexitTy =
- llvm::FunctionType::get(IntTy, dtorStub->getType(), false);
+ llvm::FunctionType::get(IntTy, dtorStub->getType(), false);
llvm::FunctionCallee atexit =
CGM.CreateRuntimeFunction(atexitTy, "atexit", llvm::AttributeList(),
@@ -282,6 +287,30 @@ void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtorStub) {
EmitNounwindRuntimeCall(atexit, dtorStub);
}
+llvm::Value *
+CodeGenFunction::unregisterGlobalDtorWithUnAtExit(llvm::Function *dtorStub) {
+ // The unatexit subroutine unregisters __dtor functions that were previously
+ // registered by the atexit subroutine. If the referenced function is found,
+ // it is removed from the list of functions that are called at normal program
+ // termination and the unatexit returns a value of 0, otherwise a non-zero
+ // value is returned.
+ //
+ // extern "C" int unatexit(void (*f)(void));
+ assert(dtorStub->getFunctionType() ==
+ llvm::FunctionType::get(CGM.VoidTy, false) &&
+ "Argument to unatexit has a wrong type.");
+
+ llvm::FunctionType *unatexitTy =
+ llvm::FunctionType::get(IntTy, {dtorStub->getType()}, /*isVarArg=*/false);
+
+ llvm::FunctionCallee unatexit =
+ CGM.CreateRuntimeFunction(unatexitTy, "unatexit", llvm::AttributeList());
+
+ cast<llvm::Function>(unatexit.getCallee())->setDoesNotThrow();
+
+ return EmitNounwindRuntimeCall(unatexit, dtorStub);
+}
+
void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
llvm::GlobalVariable *DeclPtr,
bool PerformInit) {
@@ -333,19 +362,23 @@ void CodeGenFunction::EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
Builder.CreateCondBr(NeedsInit, InitBlock, NoInitBlock, Weights);
}
-llvm::Function *CodeGenModule::CreateGlobalInitOrDestructFunction(
+llvm::Function *CodeGenModule::CreateGlobalInitOrCleanUpFunction(
llvm::FunctionType *FTy, const Twine &Name, const CGFunctionInfo &FI,
- SourceLocation Loc, bool TLS) {
- llvm::Function *Fn =
- llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
- Name, &getModule());
+ SourceLocation Loc, bool TLS, bool IsExternalLinkage) {
+ llvm::Function *Fn = llvm::Function::Create(
+ FTy,
+ IsExternalLinkage ? llvm::GlobalValue::ExternalLinkage
+ : llvm::GlobalValue::InternalLinkage,
+ Name, &getModule());
+
if (!getLangOpts().AppleKext && !TLS) {
// Set the section if needed.
if (const char *Section = getTarget().getStaticInitSectionSpecifier())
Fn->setSection(Section);
}
- SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
+ if (Fn->hasInternalLinkage())
+ SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
Fn->setCallingConv(getRuntimeCC());
@@ -392,20 +425,20 @@ llvm::Function *CodeGenModule::CreateGlobalInitOrDestructFunction(
!isInSanitizerBlacklist(SanitizerKind::ShadowCallStack, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
- auto RASignKind = getCodeGenOpts().getSignReturnAddress();
- if (RASignKind != CodeGenOptions::SignReturnAddressScope::None) {
+ auto RASignKind = getLangOpts().getSignReturnAddressScope();
+ if (RASignKind != LangOptions::SignReturnAddressScopeKind::None) {
Fn->addFnAttr("sign-return-address",
- RASignKind == CodeGenOptions::SignReturnAddressScope::All
+ RASignKind == LangOptions::SignReturnAddressScopeKind::All
? "all"
: "non-leaf");
- auto RASignKey = getCodeGenOpts().getSignReturnAddressKey();
+ auto RASignKey = getLangOpts().getSignReturnAddressKey();
Fn->addFnAttr("sign-return-address-key",
- RASignKey == CodeGenOptions::SignReturnAddressKeyValue::AKey
+ RASignKey == LangOptions::SignReturnAddressKeyKind::AKey
? "a_key"
: "b_key");
}
- if (getCodeGenOpts().BranchTargetEnforcement)
+ if (getLangOpts().BranchTargetEnforcement)
Fn->addFnAttr("branch-target-enforcement");
return Fn;
@@ -461,10 +494,8 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
}
// Create a variable initialization function.
- llvm::Function *Fn =
- CreateGlobalInitOrDestructFunction(FTy, FnName.str(),
- getTypes().arrangeNullaryFunction(),
- D->getLocation());
+ llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
+ FTy, FnName.str(), getTypes().arrangeNullaryFunction(), D->getLocation());
auto *ISA = D->getAttr<InitSegAttr>();
CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
@@ -533,6 +564,22 @@ void CodeGenModule::EmitCXXThreadLocalInitFunc() {
CXXThreadLocals.clear();
}
+static SmallString<128> getTransformedFileName(llvm::Module &M) {
+ SmallString<128> FileName = llvm::sys::path::filename(M.getName());
+
+ if (FileName.empty())
+ FileName = "<null>";
+
+ for (size_t i = 0; i < FileName.size(); ++i) {
+ // Replace everything that's not [a-zA-Z0-9._] with a _. This set happens
+ // to be the set of C preprocessing numbers.
+ if (!isPreprocessingNumberBody(FileName[i]))
+ FileName[i] = '_';
+ }
+
+ return FileName;
+}
+
void
CodeGenModule::EmitCXXGlobalInitFunc() {
while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
@@ -541,11 +588,27 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
return;
+ const bool UseSinitAndSterm = getCXXABI().useSinitAndSterm();
+ if (UseSinitAndSterm) {
+ GlobalUniqueModuleId = getUniqueModuleId(&getModule());
+
+ // FIXME: We need to figure out what to hash on or encode into the unique ID
+ // we need.
+ if (GlobalUniqueModuleId.compare("") == 0)
+ llvm::report_fatal_error(
+ "cannot produce a unique identifier for this module"
+ " based on strong external symbols");
+ GlobalUniqueModuleId = GlobalUniqueModuleId.substr(1);
+ }
+
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();
- // Create our global initialization function.
+ // Create our global prioritized initialization function.
if (!PrioritizedCXXGlobalInits.empty()) {
+ assert(!UseSinitAndSterm && "Prioritized sinit and sterm functions are not"
+ " supported yet.");
+
SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
PrioritizedCXXGlobalInits.end());
@@ -565,7 +628,7 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
std::string PrioritySuffix = llvm::utostr(Priority);
// Priority is always <= 65535 (enforced by sema).
PrioritySuffix = std::string(6-PrioritySuffix.size(), '0')+PrioritySuffix;
- llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
+ llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
FTy, "_GLOBAL__I_" + PrioritySuffix, FI);
for (; I < PrioE; ++I)
@@ -577,22 +640,27 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
PrioritizedCXXGlobalInits.clear();
}
- // Include the filename in the symbol name. Including "sub_" matches gcc and
- // makes sure these symbols appear lexicographically behind the symbols with
- // priority emitted above.
- SmallString<128> FileName = llvm::sys::path::filename(getModule().getName());
- if (FileName.empty())
- FileName = "<null>";
+ if (UseSinitAndSterm && CXXGlobalInits.empty())
+ return;
- for (size_t i = 0; i < FileName.size(); ++i) {
- // Replace everything that's not [a-zA-Z0-9._] with a _. This set happens
- // to be the set of C preprocessing numbers.
- if (!isPreprocessingNumberBody(FileName[i]))
- FileName[i] = '_';
+ // Create our global initialization function.
+ SmallString<128> FuncName;
+ bool IsExternalLinkage = false;
+ if (UseSinitAndSterm) {
+ llvm::Twine("__sinit80000000_clang_", GlobalUniqueModuleId)
+ .toVector(FuncName);
+ IsExternalLinkage = true;
+ } else {
+ // Include the filename in the symbol name. Including "sub_" matches gcc
+ // and makes sure these symbols appear lexicographically behind the symbols
+ // with priority emitted above.
+ llvm::Twine("_GLOBAL__sub_I_", getTransformedFileName(getModule()))
+ .toVector(FuncName);
}
- llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
- FTy, llvm::Twine("_GLOBAL__sub_I_", FileName), FI);
+ llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
+ FTy, FuncName, FI, SourceLocation(), false /* TLS */,
+ IsExternalLinkage);
CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, CXXGlobalInits);
AddGlobalCtor(Fn);
@@ -618,19 +686,38 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
CXXGlobalInits.clear();
}
-void CodeGenModule::EmitCXXGlobalDtorFunc() {
- if (CXXGlobalDtors.empty())
+void CodeGenModule::EmitCXXGlobalCleanUpFunc() {
+ if (CXXGlobalDtorsOrStermFinalizers.empty())
return;
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
-
- // Create our global destructor function.
const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();
- llvm::Function *Fn =
- CreateGlobalInitOrDestructFunction(FTy, "_GLOBAL__D_a", FI);
- CodeGenFunction(*this).GenerateCXXGlobalDtorsFunc(Fn, CXXGlobalDtors);
+ // Create our global cleanup function.
+ llvm::Function *Fn = nullptr;
+ if (getCXXABI().useSinitAndSterm()) {
+ if (GlobalUniqueModuleId.empty()) {
+ GlobalUniqueModuleId = getUniqueModuleId(&getModule());
+ // FIXME: We need to figure out what to hash on or encode into the unique
+ // ID we need.
+ if (GlobalUniqueModuleId.compare("") == 0)
+ llvm::report_fatal_error(
+ "cannot produce a unique identifier for this module"
+ " based on strong external symbols");
+ GlobalUniqueModuleId = GlobalUniqueModuleId.substr(1);
+ }
+
+ Fn = CreateGlobalInitOrCleanUpFunction(
+ FTy, llvm::Twine("__sterm80000000_clang_", GlobalUniqueModuleId), FI,
+ SourceLocation(), false /* TLS */, true /* IsExternalLinkage */);
+ } else {
+ Fn = CreateGlobalInitOrCleanUpFunction(FTy, "_GLOBAL__D_a", FI);
+ }
+
+ CodeGenFunction(*this).GenerateCXXGlobalCleanUpFunc(
+ Fn, CXXGlobalDtorsOrStermFinalizers);
AddGlobalDtor(Fn);
+ CXXGlobalDtorsOrStermFinalizers.clear();
}
/// Emit the code necessary to initialize the given global variable.
@@ -726,10 +813,10 @@ CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
FinishFunction();
}
-void CodeGenFunction::GenerateCXXGlobalDtorsFunc(
+void CodeGenFunction::GenerateCXXGlobalCleanUpFunc(
llvm::Function *Fn,
const std::vector<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
- llvm::Constant *>> &DtorsAndObjects) {
+ llvm::Constant *>> &DtorsOrStermFinalizers) {
{
auto NL = ApplyDebugLocation::CreateEmpty(*this);
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
@@ -737,13 +824,22 @@ void CodeGenFunction::GenerateCXXGlobalDtorsFunc(
// Emit an artificial location for this function.
auto AL = ApplyDebugLocation::CreateArtificial(*this);
- // Emit the dtors, in reverse order from construction.
- for (unsigned i = 0, e = DtorsAndObjects.size(); i != e; ++i) {
+ // Emit the cleanups, in reverse order from construction.
+ for (unsigned i = 0, e = DtorsOrStermFinalizers.size(); i != e; ++i) {
llvm::FunctionType *CalleeTy;
llvm::Value *Callee;
llvm::Constant *Arg;
- std::tie(CalleeTy, Callee, Arg) = DtorsAndObjects[e - i - 1];
- llvm::CallInst *CI = Builder.CreateCall(CalleeTy, Callee, Arg);
+ std::tie(CalleeTy, Callee, Arg) = DtorsOrStermFinalizers[e - i - 1];
+
+ llvm::CallInst *CI = nullptr;
+ if (Arg == nullptr) {
+ assert(
+ CGM.getCXXABI().useSinitAndSterm() &&
+ "Arg could not be nullptr unless using sinit and sterm functions.");
+ CI = Builder.CreateCall(CalleeTy, Callee);
+ } else
+ CI = Builder.CreateCall(CalleeTy, Callee, Arg);
+
// Make sure the call and the callee agree on calling convention.
if (llvm::Function *F = dyn_cast<llvm::Function>(Callee))
CI->setCallingConv(F->getCallingConv());
@@ -767,7 +863,7 @@ llvm::Function *CodeGenFunction::generateDestroyHelper(
const CGFunctionInfo &FI =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, args);
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
- llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(
+ llvm::Function *fn = CGM.CreateGlobalInitOrCleanUpFunction(
FTy, "__cxx_global_array_dtor", FI, VD->getLocation());
CurEHLocation = VD->getBeginLoc();
diff --git a/clang/lib/CodeGen/CGException.cpp b/clang/lib/CodeGen/CGException.cpp
index 53fafab3e0e6..bdf70252b5ad 100644
--- a/clang/lib/CodeGen/CGException.cpp
+++ b/clang/lib/CodeGen/CGException.cpp
@@ -20,6 +20,7 @@
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
@@ -468,6 +469,18 @@ void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
// encode these in an object file but MSVC doesn't do anything with it.
if (getTarget().getCXXABI().isMicrosoft())
return;
+ // In wasm we currently treat 'throw()' in the same way as 'noexcept'. In
+ // case of throw with types, we ignore it and print a warning for now.
+ // TODO Correctly handle exception specification in wasm
+ if (CGM.getLangOpts().WasmExceptions) {
+ if (EST == EST_DynamicNone)
+ EHStack.pushTerminate();
+ else
+ CGM.getDiags().Report(D->getLocation(),
+ diag::warn_wasm_dynamic_exception_spec_ignored)
+ << FD->getExceptionSpecSourceRange();
+ return;
+ }
unsigned NumExceptions = Proto->getNumExceptions();
EHFilterScope *Filter = EHStack.pushFilter(NumExceptions);
@@ -544,6 +557,14 @@ void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
// encode these in an object file but MSVC doesn't do anything with it.
if (getTarget().getCXXABI().isMicrosoft())
return;
+ // In wasm we currently treat 'throw()' in the same way as 'noexcept'. In
+ // case of throw with types, we ignore it and print a warning for now.
+ // TODO Correctly handle exception specification in wasm
+ if (CGM.getLangOpts().WasmExceptions) {
+ if (EST == EST_DynamicNone)
+ EHStack.popTerminate();
+ return;
+ }
EHFilterScope &filterScope = cast<EHFilterScope>(*EHStack.begin());
emitFilterDispatchBlock(*this, filterScope);
EHStack.popFilter();
@@ -630,9 +651,6 @@ CodeGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si) {
case EHScope::Terminate:
dispatchBlock = getTerminateHandler();
break;
-
- case EHScope::PadEnd:
- llvm_unreachable("PadEnd unnecessary for Itanium!");
}
scope.setCachedEHDispatchBlock(dispatchBlock);
}
@@ -674,9 +692,6 @@ CodeGenFunction::getFuncletEHDispatchBlock(EHScopeStack::stable_iterator SI) {
case EHScope::Terminate:
DispatchBlock->setName("terminate");
break;
-
- case EHScope::PadEnd:
- llvm_unreachable("PadEnd dispatch block missing!");
}
EHS.setCachedEHDispatchBlock(DispatchBlock);
return DispatchBlock;
@@ -692,7 +707,6 @@ static bool isNonEHScope(const EHScope &S) {
case EHScope::Filter:
case EHScope::Catch:
case EHScope::Terminate:
- case EHScope::PadEnd:
return false;
}
@@ -703,12 +717,12 @@ llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() {
assert(EHStack.requiresLandingPad());
assert(!EHStack.empty());
- // If exceptions are disabled and SEH is not in use, then there is no invoke
- // destination. SEH "works" even if exceptions are off. In practice, this
- // means that C++ destructors and other EH cleanups don't run, which is
+ // If exceptions are disabled/ignored and SEH is not in use, then there is no
+ // invoke destination. SEH "works" even if exceptions are off. In practice,
+ // this means that C++ destructors and other EH cleanups don't run, which is
// consistent with MSVC's behavior.
const LangOptions &LO = CGM.getLangOpts();
- if (!LO.Exceptions) {
+ if (!LO.Exceptions || LO.IgnoreExceptions) {
if (!LO.Borland && !LO.MicrosoftExt)
return nullptr;
if (!currentFunctionUsesSEHTry())
@@ -751,15 +765,14 @@ llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() {
llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
assert(EHStack.requiresLandingPad());
-
+ assert(!CGM.getLangOpts().IgnoreExceptions &&
+ "LandingPad should not be emitted when -fignore-exceptions are in "
+ "effect.");
EHScope &innermostEHScope = *EHStack.find(EHStack.getInnermostEHScope());
switch (innermostEHScope.getKind()) {
case EHScope::Terminate:
return getTerminateLandingPad();
- case EHScope::PadEnd:
- llvm_unreachable("PadEnd unnecessary for Itanium!");
-
case EHScope::Catch:
case EHScope::Cleanup:
case EHScope::Filter:
@@ -825,9 +838,6 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
case EHScope::Catch:
break;
-
- case EHScope::PadEnd:
- llvm_unreachable("PadEnd unnecessary for Itanium!");
}
EHCatchScope &catchScope = cast<EHCatchScope>(*I);
@@ -1637,6 +1647,19 @@ struct PerformSEHFinally final : EHScopeStack::Cleanup {
llvm::Value *IsForEH =
llvm::ConstantInt::get(CGF.ConvertType(ArgTys[0]), F.isForEHCleanup());
+
+      // Except for _leave and fall-through at the end, all other exits in a
+      // _try (return/goto/continue/break) are considered abnormal
+      // terminations. Since _leave/fall-through is always indexed 0, we can
+      // use NormalCleanupDestSlot (>= 1 for goto/return/..) as the 1st Arg
+      // to indicate abnormal termination.
+ if (!F.isForEHCleanup() && F.hasExitSwitch()) {
+ Address Addr = CGF.getNormalCleanupDestSlot();
+ llvm::Value *Load = CGF.Builder.CreateLoad(Addr, "cleanup.dest");
+ llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int32Ty);
+ IsForEH = CGF.Builder.CreateICmpNE(Load, Zero);
+ }
+
Args.add(RValue::get(IsForEH), ArgTys[0]);
Args.add(RValue::get(FP), ArgTys[1]);
@@ -1792,6 +1815,48 @@ void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
llvm::Constant *ParentI8Fn =
llvm::ConstantExpr::getBitCast(ParentCGF.CurFn, Int8PtrTy);
ParentFP = Builder.CreateCall(RecoverFPIntrin, {ParentI8Fn, EntryFP});
+
+ // if the parent is a _finally, the passed-in ParentFP is the FP
+ // of parent _finally, not Establisher's FP (FP of outermost function).
+  // Establisher FP is the 2nd parameter passed into parent _finally.
+ // Fortunately, it's always saved in parent's frame. The following
+ // code retrieves it, and escapes it so that spill instruction won't be
+ // optimized away.
+ if (ParentCGF.ParentCGF != nullptr) {
+ // Locate and escape Parent's frame_pointer.addr alloca
+ // Depending on target, should be 1st/2nd one in LocalDeclMap.
+ // Let's just scan for ImplicitParamDecl with VoidPtrTy.
+ llvm::AllocaInst *FramePtrAddrAlloca = nullptr;
+ for (auto &I : ParentCGF.LocalDeclMap) {
+ const VarDecl *D = cast<VarDecl>(I.first);
+ if (isa<ImplicitParamDecl>(D) &&
+ D->getType() == getContext().VoidPtrTy) {
+ assert(D->getName().startswith("frame_pointer"));
+ FramePtrAddrAlloca = cast<llvm::AllocaInst>(I.second.getPointer());
+ break;
+ }
+ }
+ assert(FramePtrAddrAlloca);
+ auto InsertPair = ParentCGF.EscapedLocals.insert(
+ std::make_pair(FramePtrAddrAlloca, ParentCGF.EscapedLocals.size()));
+ int FrameEscapeIdx = InsertPair.first->second;
+
+      // An example of a filter's prolog:
+ // %0 = call i8* @llvm.eh.recoverfp(bitcast(@"?fin$0@0@main@@"),..)
+ // %1 = call i8* @llvm.localrecover(bitcast(@"?fin$0@0@main@@"),..)
+ // %2 = bitcast i8* %1 to i8**
+ // %3 = load i8*, i8* *%2, align 8
+ // ==> %3 is the frame-pointer of outermost host function
+ llvm::Function *FrameRecoverFn = llvm::Intrinsic::getDeclaration(
+ &CGM.getModule(), llvm::Intrinsic::localrecover);
+ llvm::Constant *ParentI8Fn =
+ llvm::ConstantExpr::getBitCast(ParentCGF.CurFn, Int8PtrTy);
+ ParentFP = Builder.CreateCall(
+ FrameRecoverFn, {ParentI8Fn, ParentFP,
+ llvm::ConstantInt::get(Int32Ty, FrameEscapeIdx)});
+ ParentFP = Builder.CreateBitCast(ParentFP, CGM.VoidPtrPtrTy);
+ ParentFP = Builder.CreateLoad(Address(ParentFP, getPointerAlign()));
+ }
}
// Create llvm.localrecover calls for all captures.
@@ -1885,7 +1950,7 @@ void CodeGenFunction::startOutlinedSEHHelper(CodeGenFunction &ParentCGF,
OutlinedStmt->getBeginLoc(), OutlinedStmt->getBeginLoc());
CurSEHParent = ParentCGF.CurSEHParent;
- CGM.SetLLVMFunctionAttributes(GlobalDecl(), FnInfo, CurFn);
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), CurFn, FnInfo);
EmitCapturedLocals(ParentCGF, OutlinedStmt, IsFilter);
}
@@ -1990,6 +2055,7 @@ void CodeGenFunction::pushSEHCleanup(CleanupKind Kind,
void CodeGenFunction::EnterSEHTryStmt(const SEHTryStmt &S) {
CodeGenFunction HelperCGF(CGM, /*suppressNewContext=*/true);
+ HelperCGF.ParentCGF = this;
if (const SEHFinallyStmt *Finally = S.getFinallyHandler()) {
// Outline the finally block.
llvm::Function *FinallyFunc =
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index 8e0604181fb1..9e8770573d70 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -27,6 +27,7 @@
#include "clang/AST/NSAPI.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
+#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
@@ -125,8 +126,8 @@ Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
void CodeGenFunction::InitTempAlloca(Address Var, llvm::Value *Init) {
assert(isa<llvm::AllocaInst>(Var.getPointer()));
- auto *Store = new llvm::StoreInst(Init, Var.getPointer());
- Store->setAlignment(Var.getAlignment().getAsAlign());
+ auto *Store = new llvm::StoreInst(Init, Var.getPointer(), /*volatile*/ false,
+ Var.getAlignment().getAsAlign());
llvm::BasicBlock *Block = AllocaInsertPt->getParent();
Block->getInstList().insertAfter(AllocaInsertPt->getIterator(), Store);
}
@@ -144,8 +145,19 @@ Address CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
const Twine &Name, Address *Alloca) {
- return CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
- /*ArraySize=*/nullptr, Alloca);
+ Address Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
+ /*ArraySize=*/nullptr, Alloca);
+
+ if (Ty->isConstantMatrixType()) {
+ auto *ArrayTy = cast<llvm::ArrayType>(Result.getType()->getElementType());
+ auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
+ ArrayTy->getNumElements());
+
+ Result = Address(
+ Builder.CreateBitCast(Result.getPointer(), VectorTy->getPointerTo()),
+ Result.getAlignment());
+ }
+ return Result;
}
Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, CharUnits Align,
@@ -415,6 +427,11 @@ static Address createReferenceTemporary(CodeGenFunction &CGF,
llvm_unreachable("unknown storage duration");
}
+/// Helper method to check if the underlying ABI is AAPCS
+static bool isAAPCS(const TargetInfo &TargetInfo) {
+ return TargetInfo.getABI().startswith("aapcs");
+}
+
LValue CodeGenFunction::
EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
const Expr *E = M->getSubExpr();
@@ -711,7 +728,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
if (SanOpts.has(SanitizerKind::ObjectSize) &&
!SkippedChecks.has(SanitizerKind::ObjectSize) &&
!Ty->isIncompleteType()) {
- uint64_t TySize = getContext().getTypeSizeInChars(Ty).getQuantity();
+ uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();
llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
if (ArraySize)
Size = Builder.CreateMul(Size, ArraySize);
@@ -742,7 +759,9 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
!SkippedChecks.has(SanitizerKind::Alignment)) {
AlignVal = Alignment.getQuantity();
if (!Ty->isIncompleteType() && !AlignVal)
- AlignVal = getContext().getTypeAlignInChars(Ty).getQuantity();
+ AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
+ /*ForPointeeType=*/true)
+ .getQuantity();
// The glvalue must be suitably aligned.
if (AlignVal > 1 &&
@@ -858,8 +877,12 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
static bool isFlexibleArrayMemberExpr(const Expr *E) {
// For compatibility with existing code, we treat arrays of length 0 or
// 1 as flexible array members.
+ // FIXME: This is inconsistent with the warning code in SemaChecking. Unify
+ // the two mechanisms.
const ArrayType *AT = E->getType()->castAsArrayTypeUnsafe();
if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
+ // FIXME: Sema doesn't treat [1] as a flexible array member if the bound
+ // was produced by macro expansion.
if (CAT->getSize().ugt(1))
return false;
} else if (!isa<IncompleteArrayType>(AT))
@@ -872,6 +895,10 @@ static bool isFlexibleArrayMemberExpr(const Expr *E) {
// FIXME: If the base type of the member expr is not FD->getParent(),
// this should not be treated as a flexible array member access.
if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
+ // FIXME: Sema doesn't treat a T[1] union member as a flexible array
+ // member, only a T[0] or T[] member gets that treatment.
+ if (FD->getParent()->isUnion())
+ return true;
RecordDecl::field_iterator FI(
DeclContext::decl_iterator(const_cast<FieldDecl *>(FD)));
return ++FI == FD->getParent()->field_end();
@@ -1069,9 +1096,8 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
if (isa<ExplicitCastExpr>(CE)) {
LValueBaseInfo TargetTypeBaseInfo;
TBAAAccessInfo TargetTypeTBAAInfo;
- CharUnits Align = getNaturalPointeeTypeAlignment(E->getType(),
- &TargetTypeBaseInfo,
- &TargetTypeTBAAInfo);
+ CharUnits Align = CGM.getNaturalPointeeTypeAlignment(
+ E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
if (TBAAInfo)
*TBAAInfo = CGM.mergeTBAAInfoForCast(*TBAAInfo,
TargetTypeTBAAInfo);
@@ -1139,8 +1165,8 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
// TODO: conditional operators, comma.
// Otherwise, use the alignment of the type.
- CharUnits Align = getNaturalPointeeTypeAlignment(E->getType(), BaseInfo,
- TBAAInfo);
+ CharUnits Align =
+ CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo, TBAAInfo);
return Address(EmitScalarExpr(E), Align);
}
@@ -1276,8 +1302,15 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitVAArgExprLValue(cast<VAArgExpr>(E));
case Expr::DeclRefExprClass:
return EmitDeclRefLValue(cast<DeclRefExpr>(E));
- case Expr::ConstantExprClass:
+ case Expr::ConstantExprClass: {
+ const ConstantExpr *CE = cast<ConstantExpr>(E);
+ if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
+ QualType RetType = cast<CallExpr>(CE->getSubExpr()->IgnoreImplicit())
+ ->getCallReturnType(getContext());
+ return MakeNaturalAlignAddrLValue(Result, RetType);
+ }
return EmitLValue(cast<ConstantExpr>(E)->getSubExpr());
+ }
case Expr::ParenExprClass:
return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
case Expr::GenericSelectionExprClass:
@@ -1304,7 +1337,6 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::ExprWithCleanupsClass: {
const auto *cleanups = cast<ExprWithCleanups>(E);
- enterFullExpression(cleanups);
RunCleanupsScope Scope(*this);
LValue LV = EmitLValue(cleanups->getSubExpr());
if (LV.isSimple()) {
@@ -1343,6 +1375,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitUnaryOpLValue(cast<UnaryOperator>(E));
case Expr::ArraySubscriptExprClass:
return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
+ case Expr::MatrixSubscriptExprClass:
+ return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
case Expr::OMPArraySectionExprClass:
return EmitOMPArraySectionExpr(cast<OMPArraySectionExpr>(E));
case Expr::ExtVectorElementExprClass:
@@ -1368,6 +1402,7 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::CXXDynamicCastExprClass:
case Expr::CXXReinterpretCastExprClass:
case Expr::CXXConstCastExprClass:
+ case Expr::CXXAddrspaceCastExprClass:
case Expr::ObjCBridgedCastExprClass:
return EmitCastLValue(cast<CastExpr>(E));
@@ -1651,15 +1686,14 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
if (VTy->getNumElements() == 3) {
// Bitcast to vec4 type.
- llvm::VectorType *vec4Ty =
- llvm::VectorType::get(VTy->getElementType(), 4);
+ auto *vec4Ty = llvm::FixedVectorType::get(VTy->getElementType(), 4);
Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4");
// Now load value.
llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
// Shuffle vector to get vec3.
V = Builder.CreateShuffleVector(V, llvm::UndefValue::get(vec4Ty),
- {0, 1, 2}, "extractVec");
+ ArrayRef<int>{0, 1, 2}, "extractVec");
return EmitFromMemory(V, Ty);
}
}
@@ -1716,6 +1750,42 @@ llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
return Value;
}
+// Convert the pointer of \p Addr to a pointer to a vector (the value type of
+// MatrixType), if it points to an array (the memory type of MatrixType).
+static Address MaybeConvertMatrixAddress(Address Addr, CodeGenFunction &CGF,
+ bool IsVector = true) {
+ auto *ArrayTy = dyn_cast<llvm::ArrayType>(
+ cast<llvm::PointerType>(Addr.getPointer()->getType())->getElementType());
+ if (ArrayTy && IsVector) {
+ auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
+ ArrayTy->getNumElements());
+
+ return Address(CGF.Builder.CreateElementBitCast(Addr, VectorTy));
+ }
+ auto *VectorTy = dyn_cast<llvm::VectorType>(
+ cast<llvm::PointerType>(Addr.getPointer()->getType())->getElementType());
+ if (VectorTy && !IsVector) {
+ auto *ArrayTy = llvm::ArrayType::get(VectorTy->getElementType(),
+ VectorTy->getNumElements());
+
+ return Address(CGF.Builder.CreateElementBitCast(Addr, ArrayTy));
+ }
+
+ return Addr;
+}
+
+// Emit a store of a matrix LValue. This may require casting the original
+// pointer to memory address (ArrayType) to a pointer to the value type
+// (VectorType).
+static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
+ bool isInit, CodeGenFunction &CGF) {
+ Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(CGF), CGF,
+ value->getType()->isVectorTy());
+ CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
+ lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
+ lvalue.isNontemporal());
+}
+
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
bool Volatile, QualType Ty,
LValueBaseInfo BaseInfo,
@@ -1729,13 +1799,10 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
// Handle vec3 special.
if (VecTy && VecTy->getNumElements() == 3) {
// Our source is a vec3, do a shuffle vector to make it a vec4.
- llvm::Constant *Mask[] = {Builder.getInt32(0), Builder.getInt32(1),
- Builder.getInt32(2),
- llvm::UndefValue::get(Builder.getInt32Ty())};
- llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Value = Builder.CreateShuffleVector(Value, llvm::UndefValue::get(VecTy),
- MaskV, "extractVec");
- SrcTy = llvm::VectorType::get(VecTy->getElementType(), 4);
+ ArrayRef<int>{0, 1, 2, -1},
+ "extractVec");
+ SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4);
}
if (Addr.getElementType() != SrcTy) {
Addr = Builder.CreateElementBitCast(Addr, SrcTy, "storetmp");
@@ -1766,11 +1833,26 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
bool isInit) {
+ if (lvalue.getType()->isConstantMatrixType()) {
+ EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
+ return;
+ }
+
EmitStoreOfScalar(value, lvalue.getAddress(*this), lvalue.isVolatile(),
lvalue.getType(), lvalue.getBaseInfo(),
lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
}
+// Emit a load of a LValue of matrix type. This may require casting the pointer
+// to memory address (ArrayType) to a pointer to the value type (VectorType).
+static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
+ CodeGenFunction &CGF) {
+ assert(LV.getType()->isConstantMatrixType());
+ Address Addr = MaybeConvertMatrixAddress(LV.getAddress(CGF), CGF);
+ LV.setAddress(Addr);
+ return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
+}
+
/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
@@ -1796,6 +1878,9 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
if (LV.isSimple()) {
assert(!LV.getType()->isFunctionType());
+ if (LV.getType()->isConstantMatrixType())
+ return EmitLoadOfMatrixLValue(LV, Loc, *this);
+
// Everything needs a load.
return RValue::get(EmitLoadOfScalar(LV, Loc));
}
@@ -1809,13 +1894,21 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
// If this is a reference to a subset of the elements of a vector, either
// shuffle the input or extract/insert them as appropriate.
- if (LV.isExtVectorElt())
+ if (LV.isExtVectorElt()) {
return EmitLoadOfExtVectorElementLValue(LV);
+ }
// Global Register variables always invoke intrinsics
if (LV.isGlobalReg())
return EmitLoadOfGlobalRegLValue(LV);
+ if (LV.isMatrixElt()) {
+ llvm::LoadInst *Load =
+ Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
+ return RValue::get(
+ Builder.CreateExtractElement(Load, LV.getMatrixIdx(), "matrixext"));
+ }
+
assert(LV.isBitField() && "Unknown LValue type!");
return EmitLoadOfBitfieldLValue(LV, Loc);
}
@@ -1870,13 +1963,12 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
// Always use shuffle vector to try to retain the original program structure
unsigned NumResultElts = ExprVT->getNumElements();
- SmallVector<llvm::Constant*, 4> Mask;
+ SmallVector<int, 4> Mask;
for (unsigned i = 0; i != NumResultElts; ++i)
- Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));
+ Mask.push_back(getAccessedFieldNo(i, Elts));
- llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
- MaskV);
+ Mask);
return RValue::get(Vec);
}
@@ -1922,7 +2014,6 @@ RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
return RValue::get(Call);
}
-
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to the have the same type, and that type
/// is 'Ty'.
@@ -1948,6 +2039,15 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
if (Dst.isGlobalReg())
return EmitStoreThroughGlobalRegLValue(Src, Dst);
+ if (Dst.isMatrixElt()) {
+ llvm::Value *Vec = Builder.CreateLoad(Dst.getMatrixAddress());
+ Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
+ Dst.getMatrixIdx(), "matins");
+ Builder.CreateStore(Vec, Dst.getMatrixAddress(),
+ Dst.isVolatileQualified());
+ return;
+ }
+
assert(Dst.isBitField() && "Unknown LValue type");
return EmitStoreThroughBitfieldLValue(Src, Dst);
}
@@ -2066,6 +2166,14 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
} else {
assert(Info.Offset == 0);
+      // According to the AAPCS:
+ // When a volatile bit-field is written, and its container does not overlap
+ // with any non-bit-field member, its container must be read exactly once and
+ // written exactly once using the access width appropriate to the type of the
+ // container. The two accesses are not atomic.
+ if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
+ CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
+ Builder.CreateLoad(Ptr, true, "bf.load");
}
// Write the new value back out.
@@ -2103,37 +2211,33 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
unsigned NumSrcElts = VTy->getNumElements();
- unsigned NumDstElts = Vec->getType()->getVectorNumElements();
+ unsigned NumDstElts =
+ cast<llvm::VectorType>(Vec->getType())->getNumElements();
if (NumDstElts == NumSrcElts) {
// Use shuffle vector is the src and destination are the same number of
// elements and restore the vector mask since it is on the side it will be
// stored.
- SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
+ SmallVector<int, 4> Mask(NumDstElts);
for (unsigned i = 0; i != NumSrcElts; ++i)
- Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);
+ Mask[getAccessedFieldNo(i, Elts)] = i;
- llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
- Vec = Builder.CreateShuffleVector(SrcVal,
- llvm::UndefValue::get(Vec->getType()),
- MaskV);
+ Vec = Builder.CreateShuffleVector(
+ SrcVal, llvm::UndefValue::get(Vec->getType()), Mask);
} else if (NumDstElts > NumSrcElts) {
// Extended the source vector to the same length and then shuffle it
// into the destination.
// FIXME: since we're shuffling with undef, can we just use the indices
// into that? This could be simpler.
- SmallVector<llvm::Constant*, 4> ExtMask;
+ SmallVector<int, 4> ExtMask;
for (unsigned i = 0; i != NumSrcElts; ++i)
- ExtMask.push_back(Builder.getInt32(i));
- ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
- llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
- llvm::Value *ExtSrcVal =
- Builder.CreateShuffleVector(SrcVal,
- llvm::UndefValue::get(SrcVal->getType()),
- ExtMaskV);
+ ExtMask.push_back(i);
+ ExtMask.resize(NumDstElts, -1);
+ llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(
+ SrcVal, llvm::UndefValue::get(SrcVal->getType()), ExtMask);
// build identity
- SmallVector<llvm::Constant*, 4> Mask;
+ SmallVector<int, 4> Mask;
for (unsigned i = 0; i != NumDstElts; ++i)
- Mask.push_back(Builder.getInt32(i));
+ Mask.push_back(i);
// When the vector size is odd and .odd or .hi is used, the last element
// of the Elts constant array will be one past the size of the vector.
@@ -2143,9 +2247,8 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
// modify when what gets shuffled in
for (unsigned i = 0; i != NumSrcElts; ++i)
- Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
- llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
- Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
+ Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
+ Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
} else {
// We should never shorten the vector
llvm_unreachable("unexpected shorten vector length");
@@ -2295,7 +2398,13 @@ EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
static LValue EmitThreadPrivateVarDeclLValue(
CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
llvm::Type *RealVarTy, SourceLocation Loc) {
- Addr = CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
+ if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
+ Addr = CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
+ CGF, VD, Addr, Loc);
+ else
+ Addr =
+ CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
+
Addr = CGF.Builder.CreateElementBitCast(Addr, RealVarTy);
return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}
@@ -2327,9 +2436,9 @@ CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile());
CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
- CharUnits Align = getNaturalTypeAlignment(RefLVal.getType()->getPointeeType(),
- PointeeBaseInfo, PointeeTBAAInfo,
- /* forPointeeType= */ true);
+ CharUnits Align = CGM.getNaturalTypeAlignment(
+ RefLVal.getType()->getPointeeType(), PointeeBaseInfo, PointeeTBAAInfo,
+ /* forPointeeType= */ true);
return Address(Load, Align);
}
@@ -2347,9 +2456,9 @@ Address CodeGenFunction::EmitLoadOfPointer(Address Ptr,
LValueBaseInfo *BaseInfo,
TBAAAccessInfo *TBAAInfo) {
llvm::Value *Addr = Builder.CreateLoad(Ptr);
- return Address(Addr, getNaturalTypeAlignment(PtrTy->getPointeeType(),
- BaseInfo, TBAAInfo,
- /*forPointeeType=*/true));
+ return Address(Addr, CGM.getNaturalTypeAlignment(PtrTy->getPointeeType(),
+ BaseInfo, TBAAInfo,
+ /*forPointeeType=*/true));
}
LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr,
@@ -2397,13 +2506,14 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
}
static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM,
- const FunctionDecl *FD) {
+ GlobalDecl GD) {
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
if (FD->hasAttr<WeakRefAttr>()) {
ConstantAddress aliasee = CGM.GetWeakRefReference(FD);
return aliasee.getPointer();
}
- llvm::Constant *V = CGM.GetAddrOfFunction(FD);
+ llvm::Constant *V = CGM.GetAddrOfFunction(GD);
if (!FD->hasPrototype()) {
if (const FunctionProtoType *Proto =
FD->getType()->getAs<FunctionProtoType>()) {
@@ -2420,9 +2530,10 @@ static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM,
return V;
}
-static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
- const Expr *E, const FunctionDecl *FD) {
- llvm::Value *V = EmitFunctionDeclPointer(CGF.CGM, FD);
+static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E,
+ GlobalDecl GD) {
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+ llvm::Value *V = EmitFunctionDeclPointer(CGF.CGM, GD);
CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
return CGF.MakeAddrLValue(V, E->getType(), Alignment,
AlignmentSource::Decl);
@@ -2552,10 +2663,10 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
} else {
// Should we be using the alignment of the constant pointer we emitted?
CharUnits Alignment =
- getNaturalTypeAlignment(E->getType(),
- /* BaseInfo= */ nullptr,
- /* TBAAInfo= */ nullptr,
- /* forPointeeType= */ true);
+ CGM.getNaturalTypeAlignment(E->getType(),
+ /* BaseInfo= */ nullptr,
+ /* TBAAInfo= */ nullptr,
+ /* forPointeeType= */ true);
Addr = Address(Val, Alignment);
}
return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
@@ -2689,6 +2800,12 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
if (const auto *BD = dyn_cast<BindingDecl>(ND))
return EmitLValue(BD->getBinding());
+ // We can form DeclRefExprs naming GUID declarations when reconstituting
+ // non-type template parameters into expressions.
+ if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
+ return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T,
+ AlignmentSource::Decl);
+
llvm_unreachable("Unhandled DeclRefExpr");
}
@@ -2779,7 +2896,7 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
- std::string Name = SL->getString();
+ std::string Name = std::string(SL->getString());
if (!Name.empty()) {
unsigned Discriminator =
CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
@@ -2788,7 +2905,8 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
} else {
- auto C = CGM.GetAddrOfConstantCString(FnName, GVName.c_str());
+ auto C =
+ CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str());
return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
}
}
@@ -2918,7 +3036,8 @@ llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
FilenameString = llvm::sys::path::filename(FilenameString);
}
- auto FilenameGV = CGM.GetAddrOfConstantCString(FilenameString, ".src");
+ auto FilenameGV =
+ CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
cast<llvm::GlobalVariable>(FilenameGV.getPointer()));
Filename = FilenameGV.getPointer();
@@ -3665,6 +3784,23 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
return LV;
}
+LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
+ assert(
+ !E->isIncomplete() &&
+ "incomplete matrix subscript expressions should be rejected during Sema");
+ LValue Base = EmitLValue(E->getBase());
+ llvm::Value *RowIdx = EmitScalarExpr(E->getRowIdx());
+ llvm::Value *ColIdx = EmitScalarExpr(E->getColumnIdx());
+ llvm::Value *NumRows = Builder.getIntN(
+ RowIdx->getType()->getScalarSizeInBits(),
+ E->getBase()->getType()->getAs<ConstantMatrixType>()->getNumRows());
+ llvm::Value *FinalIdx =
+ Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
+ return LValue::MakeMatrixElt(
+ MaybeConvertMatrixAddress(Base.getAddress(*this), *this), FinalIdx,
+ E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
+}
+
static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
LValueBaseInfo &BaseInfo,
TBAAAccessInfo &TBAAInfo,
@@ -3695,8 +3831,8 @@ static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
}
LValueBaseInfo TypeBaseInfo;
TBAAAccessInfo TypeTBAAInfo;
- CharUnits Align = CGF.getNaturalTypeAlignment(ElTy, &TypeBaseInfo,
- &TypeTBAAInfo);
+ CharUnits Align =
+ CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
BaseInfo.mergeForCast(TypeBaseInfo);
TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)), Align);
@@ -3713,7 +3849,7 @@ LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
else
ResultExprTy = BaseTy->getPointeeType();
llvm::Value *Idx = nullptr;
- if (IsLowerBound || E->getColonLoc().isInvalid()) {
+ if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
// Requesting lower bound or upper bound, but without provided length and
// without ':' symbol for the default length -> length = 1.
// Idx = LowerBound ?: 0;
@@ -4020,17 +4156,17 @@ static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
return CGF.Builder.CreateStructGEP(base, idx, field->getName());
}
-static Address emitPreserveStructAccess(CodeGenFunction &CGF, Address base,
- const FieldDecl *field) {
+static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base,
+ Address addr, const FieldDecl *field) {
const RecordDecl *rec = field->getParent();
- llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateRecordType(
- CGF.getContext().getRecordType(rec), rec->getLocation());
+ llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
+ base.getType(), rec->getLocation());
unsigned idx =
CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
return CGF.Builder.CreatePreserveStructAccessIndex(
- base, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
+ addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
}
static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
@@ -4154,8 +4290,8 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
if (IsInPreservedAIRegion ||
(getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
// Remember the original union field index
- llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
- getContext().getRecordType(rec), rec->getLocation());
+ llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(),
+ rec->getLocation());
addr = Address(
Builder.CreatePreserveUnionAccessIndex(
addr.getPointer(), getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
@@ -4172,7 +4308,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
addr = emitAddrOfFieldStorage(*this, addr, field);
else
// Remember the original struct field index
- addr = emitPreserveStructAccess(*this, addr, field);
+ addr = emitPreserveStructAccess(*this, base, addr, field);
}
// If this is a reference field, load the reference right now.
@@ -4248,6 +4384,14 @@ LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
/*Init*/ true);
+ // Block-scope compound literals are destroyed at the end of the enclosing
+ // scope in C.
+ if (!getLangOpts().CPlusPlus)
+ if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
+ pushLifetimeExtendedDestroy(getCleanupKind(DtorKind), DeclPtr,
+ E->getType(), getDestroyer(DtorKind),
+ DtorKind & EHCleanup);
+
return Result;
}
@@ -4295,6 +4439,16 @@ EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
// If the true case is live, we need to track its region.
if (CondExprBool)
incrementProfileCounter(expr);
+ // If a throw expression we emit it and return an undefined lvalue
+ // because it can't be used.
+ if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(live->IgnoreParens())) {
+ EmitCXXThrowExpr(ThrowExpr);
+ llvm::Type *Ty =
+ llvm::PointerType::getUnqual(ConvertType(dead->getType()));
+ return MakeAddrLValue(
+ Address(llvm::UndefValue::get(Ty), CharUnits::One()),
+ dead->getType());
+ }
return EmitLValue(live);
}
}
@@ -4620,7 +4774,8 @@ RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E,
return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue);
}
-static CGCallee EmitDirectCallee(CodeGenFunction &CGF, const FunctionDecl *FD) {
+static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
if (auto builtinID = FD->getBuiltinID()) {
// Replaceable builtin provide their own implementation of a builtin. Unless
@@ -4632,8 +4787,8 @@ static CGCallee EmitDirectCallee(CodeGenFunction &CGF, const FunctionDecl *FD) {
return CGCallee::forBuiltin(builtinID, FD);
}
- llvm::Constant *calleePtr = EmitFunctionDeclPointer(CGF.CGM, FD);
- return CGCallee::forDirect(calleePtr, GlobalDecl(FD));
+ llvm::Constant *calleePtr = EmitFunctionDeclPointer(CGF.CGM, GD);
+ return CGCallee::forDirect(calleePtr, GD);
}
CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
@@ -4774,7 +4929,7 @@ CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
}
Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
- return Builder.CreateElementBitCast(CGM.GetAddrOfUuidDescriptor(E),
+ return Builder.CreateElementBitCast(CGM.GetAddrOfMSGuidDecl(E->getGuidDecl()),
ConvertType(E->getType()));
}
@@ -5019,7 +5174,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
// to the function type.
if (isa<FunctionNoProtoType>(FnType) || Chain) {
llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
- CalleeTy = CalleeTy->getPointerTo();
+ int AS = Callee.getFunctionPointer()->getType()->getPointerAddressSpace();
+ CalleeTy = CalleeTy->getPointerTo(AS);
llvm::Value *CalleePtr = Callee.getFunctionPointer();
CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast");
diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp
index 8de609a2ccd9..fb96d70732e8 100644
--- a/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -15,6 +15,7 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
+#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
@@ -126,6 +127,11 @@ public:
}
void VisitConstantExpr(ConstantExpr *E) {
+ if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
+ CGF.EmitAggregateStore(Result, Dest.getAddress(),
+ E->getType().isVolatileQualified());
+ return;
+ }
return Visit(E->getSubExpr());
}
@@ -249,7 +255,7 @@ void AggExprEmitter::withReturnValueSlot(
const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
QualType RetTy = E->getType();
bool RequiresDestruction =
- Dest.isIgnored() &&
+ !Dest.isExternallyDestructed() &&
RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct;
// If it makes no observable difference, save a memcpy + temporary.
@@ -287,10 +293,8 @@ void AggExprEmitter::withReturnValueSlot(
}
RValue Src =
- EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused));
-
- if (RequiresDestruction)
- CGF.pushDestroy(RetTy.isDestructedType(), Src.getAggregateAddress(), RetTy);
+ EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused,
+ Dest.isExternallyDestructed()));
if (!UseTemp)
return;
@@ -659,22 +663,32 @@ AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
}
AggValueSlot Slot = EnsureSlot(E->getType());
+
+ // Block-scope compound literals are destroyed at the end of the enclosing
+ // scope in C.
+ bool Destruct =
+ !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed();
+ if (Destruct)
+ Slot.setExternallyDestructed();
+
CGF.EmitAggExpr(E->getInitializer(), Slot);
+
+ if (Destruct)
+ if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
+ CGF.pushLifetimeExtendedDestroy(
+ CGF.getCleanupKind(DtorKind), Slot.getAddress(), E->getType(),
+ CGF.getDestroyer(DtorKind), DtorKind & EHCleanup);
}
/// Attempt to look through various unimportant expressions to find a
/// cast of the given kind.
-static Expr *findPeephole(Expr *op, CastKind kind) {
- while (true) {
- op = op->IgnoreParens();
- if (CastExpr *castE = dyn_cast<CastExpr>(op)) {
- if (castE->getCastKind() == kind)
- return castE->getSubExpr();
- if (castE->getCastKind() == CK_NoOp)
- continue;
- }
- return nullptr;
+static Expr *findPeephole(Expr *op, CastKind kind, const ASTContext &ctx) {
+ op = op->IgnoreParenNoopCasts(ctx);
+ if (auto castE = dyn_cast<CastExpr>(op)) {
+ if (castE->getCastKind() == kind)
+ return castE->getSubExpr();
}
+ return nullptr;
}
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
@@ -763,7 +777,8 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
(isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);
// These two cases are reverses of each other; try to peephole them.
- if (Expr *op = findPeephole(E->getSubExpr(), peepholeTarget)) {
+ if (Expr *op =
+ findPeephole(E->getSubExpr(), peepholeTarget, CGF.getContext())) {
assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
E->getType()) &&
"peephole significantly changed types?");
@@ -813,8 +828,19 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
// If we're loading from a volatile type, force the destination
// into existence.
if (E->getSubExpr()->getType().isVolatileQualified()) {
+ bool Destruct =
+ !Dest.isExternallyDestructed() &&
+ E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
+ if (Destruct)
+ Dest.setExternallyDestructed();
EnsureDest(E->getType());
- return Visit(E->getSubExpr());
+ Visit(E->getSubExpr());
+
+ if (Destruct)
+ CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
+ E->getType());
+
+ return;
}
LLVM_FALLTHROUGH;
@@ -1328,7 +1354,6 @@ AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
}
void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
- CGF.enterFullExpression(E);
CodeGenFunction::RunCleanupsScope cleanups(CGF);
Visit(E->getSubExpr());
}
@@ -1923,6 +1948,18 @@ void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
}
}
+ if (getLangOpts().CUDAIsDevice) {
+ if (Ty->isCUDADeviceBuiltinSurfaceType()) {
+ if (getTargetHooks().emitCUDADeviceBuiltinSurfaceDeviceCopy(*this, Dest,
+ Src))
+ return;
+ } else if (Ty->isCUDADeviceBuiltinTextureType()) {
+ if (getTargetHooks().emitCUDADeviceBuiltinTextureDeviceCopy(*this, Dest,
+ Src))
+ return;
+ }
+ }
+
// Aggregate assignment turns into llvm.memcpy. This is almost valid per
// C99 6.5.16.1p3, which states "If the value being stored in an object is
// read from another object that overlaps in anyway the storage of the first
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
index 42c1c34c57ad..d59aa6ce0fb9 100644
--- a/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -112,7 +112,8 @@ RValue CodeGenFunction::EmitCXXDestructorCall(
commonEmitCXXMemberOrOperatorCall(*this, DtorDecl, This, ImplicitParam,
ImplicitParamTy, CE, Args, nullptr);
return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee,
- ReturnValueSlot(), Args);
+ ReturnValueSlot(), Args, nullptr,
+ CE ? CE->getExprLoc() : SourceLocation{});
}
RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
@@ -380,7 +381,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
IsArrow ? Base->getType()->getPointeeType() : Base->getType();
EmitCXXDestructorCall(GD, Callee, This.getPointer(*this), ThisTy,
/*ImplicitParam=*/nullptr,
- /*ImplicitParamTy=*/QualType(), nullptr);
+ /*ImplicitParamTy=*/QualType(), CE);
}
return RValue::get(nullptr);
}
@@ -1637,6 +1638,12 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
RValue RV =
EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
+ // Set !heapallocsite metadata on the call to operator new.
+ if (getDebugInfo())
+ if (auto *newCall = dyn_cast<llvm::CallBase>(RV.getScalarVal()))
+ getDebugInfo()->addHeapAllocSiteMetadata(newCall, allocType,
+ E->getExprLoc());
+
// If this was a call to a global replaceable allocation function that does
// not take an alignment argument, the allocator is known to produce
// storage that's suitably aligned for any object that fits, up to a known
@@ -1866,10 +1873,13 @@ static void EmitDestroyingObjectDelete(CodeGenFunction &CGF,
}
/// Emit the code for deleting a single object.
-static void EmitObjectDelete(CodeGenFunction &CGF,
+/// \return \c true if we started emitting UnconditionalDeleteBlock, \c false
+/// if not.
+static bool EmitObjectDelete(CodeGenFunction &CGF,
const CXXDeleteExpr *DE,
Address Ptr,
- QualType ElementType) {
+ QualType ElementType,
+ llvm::BasicBlock *UnconditionalDeleteBlock) {
// C++11 [expr.delete]p3:
// If the static type of the object to be deleted is different from its
// dynamic type, the static type shall be a base class of the dynamic type
@@ -1916,7 +1926,7 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
if (UseVirtualCall) {
CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
Dtor);
- return;
+ return false;
}
}
}
@@ -1951,7 +1961,15 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
}
}
+ // When optimizing for size, call 'operator delete' unconditionally.
+ if (CGF.CGM.getCodeGenOpts().OptimizeSize > 1) {
+ CGF.EmitBlock(UnconditionalDeleteBlock);
+ CGF.PopCleanupBlock();
+ return true;
+ }
+
CGF.PopCleanupBlock();
+ return false;
}
namespace {
@@ -2028,6 +2046,12 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
Address Ptr = EmitPointerWithAlignment(Arg);
// Null check the pointer.
+ //
+ // We could avoid this null check if we can determine that the object
+ // destruction is trivial and doesn't require an array cookie; we can
+ // unconditionally perform the operator delete call in that case. For now, we
+ // assume that deleted pointers are null rarely enough that it's better to
+ // keep the branch. This might be worth revisiting for a -O0 code size win.
llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
@@ -2073,11 +2097,11 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
if (E->isArrayForm()) {
EmitArrayDelete(*this, E, Ptr, DeleteTy);
+ EmitBlock(DeleteEnd);
} else {
- EmitObjectDelete(*this, E, Ptr, DeleteTy);
+ if (!EmitObjectDelete(*this, E, Ptr, DeleteTy, DeleteEnd))
+ EmitBlock(DeleteEnd);
}
-
- EmitBlock(DeleteEnd);
}
static bool isGLValueFromPointerDeref(const Expr *E) {
diff --git a/clang/lib/CodeGen/CGExprComplex.cpp b/clang/lib/CodeGen/CGExprComplex.cpp
index f7a4e9e94712..a49817898ae3 100644
--- a/clang/lib/CodeGen/CGExprComplex.cpp
+++ b/clang/lib/CodeGen/CGExprComplex.cpp
@@ -13,6 +13,7 @@
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "ConstantEmitter.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Constants.h"
@@ -97,11 +98,14 @@ public:
}
ComplexPairTy VisitStmt(Stmt *S) {
- S->dump(CGF.getContext().getSourceManager());
+ S->dump(llvm::errs(), CGF.getContext());
llvm_unreachable("Stmt can't have complex result type!");
}
ComplexPairTy VisitExpr(Expr *S);
ComplexPairTy VisitConstantExpr(ConstantExpr *E) {
+ if (llvm::Constant *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E))
+ return ComplexPairTy(Result->getAggregateElement(0U),
+ Result->getAggregateElement(1U));
return Visit(E->getSubExpr());
}
ComplexPairTy VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr());}
@@ -222,7 +226,6 @@ public:
return Visit(DIE->getExpr());
}
ComplexPairTy VisitExprWithCleanups(ExprWithCleanups *E) {
- CGF.enterFullExpression(E);
CodeGenFunction::RunCleanupsScope Scope(CGF);
ComplexPairTy Vals = Visit(E->getSubExpr());
// Defend against dominance problems caused by jumps out of expression
@@ -431,8 +434,10 @@ ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val,
// C99 6.3.1.6: When a value of complex type is converted to another
// complex type, both the real and imaginary parts follow the conversion
// rules for the corresponding real types.
- Val.first = CGF.EmitScalarConversion(Val.first, SrcType, DestType, Loc);
- Val.second = CGF.EmitScalarConversion(Val.second, SrcType, DestType, Loc);
+ if (Val.first)
+ Val.first = CGF.EmitScalarConversion(Val.first, SrcType, DestType, Loc);
+ if (Val.second)
+ Val.second = CGF.EmitScalarConversion(Val.second, SrcType, DestType, Loc);
return Val;
}
diff --git a/clang/lib/CodeGen/CGExprConstant.cpp b/clang/lib/CodeGen/CGExprConstant.cpp
index 46ed90a20264..c6b2930faece 100644
--- a/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/clang/lib/CodeGen/CGExprConstant.cpp
@@ -318,12 +318,17 @@ bool ConstantAggregateBuilder::split(size_t Index, CharUnits Hint) {
CharUnits Offset = Offsets[Index];
if (auto *CA = dyn_cast<llvm::ConstantAggregate>(C)) {
+ // Expand the sequence into its contained elements.
+ // FIXME: This assumes vector elements are byte-sized.
replace(Elems, Index, Index + 1,
llvm::map_range(llvm::seq(0u, CA->getNumOperands()),
[&](unsigned Op) { return CA->getOperand(Op); }));
- if (auto *Seq = dyn_cast<llvm::SequentialType>(CA->getType())) {
+ if (isa<llvm::ArrayType>(CA->getType()) ||
+ isa<llvm::VectorType>(CA->getType())) {
// Array or vector.
- CharUnits ElemSize = getSize(Seq->getElementType());
+ llvm::Type *ElemTy =
+ llvm::GetElementPtrInst::getTypeAtIndex(CA->getType(), (uint64_t)0);
+ CharUnits ElemSize = getSize(ElemTy);
replace(
Offsets, Index, Index + 1,
llvm::map_range(llvm::seq(0u, CA->getNumOperands()),
@@ -344,6 +349,8 @@ bool ConstantAggregateBuilder::split(size_t Index, CharUnits Hint) {
}
if (auto *CDS = dyn_cast<llvm::ConstantDataSequential>(C)) {
+ // Expand the sequence into its contained elements.
+ // FIXME: This assumes vector elements are byte-sized.
// FIXME: If possible, split into two ConstantDataSequentials at Hint.
CharUnits ElemSize = getSize(CDS->getElementType());
replace(Elems, Index, Index + 1,
@@ -359,6 +366,7 @@ bool ConstantAggregateBuilder::split(size_t Index, CharUnits Hint) {
}
if (isa<llvm::ConstantAggregateZero>(C)) {
+ // Split into two zeros at the hinted offset.
CharUnits ElemSize = getSize(C);
assert(Hint > Offset && Hint < Offset + ElemSize && "nothing to split");
replace(Elems, Index, Index + 1,
@@ -368,6 +376,7 @@ bool ConstantAggregateBuilder::split(size_t Index, CharUnits Hint) {
}
if (isa<llvm::UndefValue>(C)) {
+ // Drop undef; it doesn't contribute to the final layout.
replace(Elems, Index, Index + 1, {});
replace(Offsets, Index, Index + 1, {});
return true;
@@ -589,19 +598,21 @@ bool ConstStructBuilder::AppendBytes(CharUnits FieldOffsetInChars,
bool ConstStructBuilder::AppendBitField(
const FieldDecl *Field, uint64_t FieldOffset, llvm::ConstantInt *CI,
bool AllowOverwrite) {
- uint64_t FieldSize = Field->getBitWidthValue(CGM.getContext());
+ const CGRecordLayout &RL =
+ CGM.getTypes().getCGRecordLayout(Field->getParent());
+ const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field);
llvm::APInt FieldValue = CI->getValue();
// Promote the size of FieldValue if necessary
// FIXME: This should never occur, but currently it can because initializer
// constants are cast to bool, and because clang is not enforcing bitfield
// width limits.
- if (FieldSize > FieldValue.getBitWidth())
- FieldValue = FieldValue.zext(FieldSize);
+ if (Info.Size > FieldValue.getBitWidth())
+ FieldValue = FieldValue.zext(Info.Size);
// Truncate the size of FieldValue to the bit field size.
- if (FieldSize < FieldValue.getBitWidth())
- FieldValue = FieldValue.trunc(FieldSize);
+ if (Info.Size < FieldValue.getBitWidth())
+ FieldValue = FieldValue.trunc(Info.Size);
return Builder.addBits(FieldValue,
CGM.getContext().toBits(StartOffset) + FieldOffset,
@@ -766,7 +777,7 @@ bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
// Add a vtable pointer, if we need one and it hasn't already been added.
- if (CD->isDynamicClass() && !IsPrimaryBase) {
+ if (Layout.hasOwnVFPtr()) {
llvm::Constant *VTableAddressPoint =
CGM.getCXXABI().getVTableAddressPointForConstExpr(
BaseSubobject(CD, Offset), VTableClass);
@@ -1000,6 +1011,8 @@ public:
}
llvm::Constant *VisitConstantExpr(ConstantExpr *CE, QualType T) {
+ if (llvm::Constant *Result = Emitter.tryEmitConstantExpr(CE))
+ return Result;
return Visit(CE->getSubExpr(), T);
}
@@ -1167,9 +1180,7 @@ public:
}
llvm::Constant *VisitExprWithCleanups(ExprWithCleanups *E, QualType T) {
- if (!E->cleanupsHaveSideEffects())
- return Visit(E->getSubExpr(), T);
- return nullptr;
+ return Visit(E->getSubExpr(), T);
}
llvm::Constant *VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E,
@@ -1269,19 +1280,7 @@ public:
if (!E->getConstructor()->isTrivial())
return nullptr;
- // FIXME: We should not have to call getBaseElementType here.
- const auto *RT =
- CGM.getContext().getBaseElementType(Ty)->castAs<RecordType>();
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
-
- // If the class doesn't have a trivial destructor, we can't emit it as a
- // constant expr.
- if (!RD->hasTrivialDestructor())
- return nullptr;
-
- // Only copy and default constructors can be trivial.
-
-
+ // Only default and copy/move constructors can be trivial.
if (E->getNumArgs()) {
assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument");
assert(E->getConstructor()->isCopyOrMoveConstructor() &&
@@ -1361,6 +1360,20 @@ ConstantEmitter::tryEmitAbstract(const APValue &value, QualType destType) {
return validateAndPopAbstract(C, state);
}
+llvm::Constant *ConstantEmitter::tryEmitConstantExpr(const ConstantExpr *CE) {
+ if (!CE->hasAPValueResult())
+ return nullptr;
+ const Expr *Inner = CE->getSubExpr()->IgnoreImplicit();
+ QualType RetType;
+ if (auto *Call = dyn_cast<CallExpr>(Inner))
+ RetType = Call->getCallReturnType(CGF->getContext());
+ else if (auto *Ctor = dyn_cast<CXXConstructExpr>(Inner))
+ RetType = Ctor->getType();
+ llvm::Constant *Res =
+ emitAbstract(CE->getBeginLoc(), CE->getAPValueResult(), RetType);
+ return Res;
+}
+
llvm::Constant *
ConstantEmitter::emitAbstract(const Expr *E, QualType destType) {
auto state = pushAbstract();
@@ -1769,7 +1782,6 @@ private:
ConstantLValue VisitCallExpr(const CallExpr *E);
ConstantLValue VisitBlockExpr(const BlockExpr *E);
ConstantLValue VisitCXXTypeidExpr(const CXXTypeidExpr *E);
- ConstantLValue VisitCXXUuidofExpr(const CXXUuidofExpr *E);
ConstantLValue VisitMaterializeTemporaryExpr(
const MaterializeTemporaryExpr *E);
@@ -1884,6 +1896,9 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) {
}
}
+ if (auto *GD = dyn_cast<MSGuidDecl>(D))
+ return CGM.GetAddrOfMSGuidDecl(GD);
+
return nullptr;
}
@@ -1904,6 +1919,8 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) {
ConstantLValue
ConstantLValueEmitter::VisitConstantExpr(const ConstantExpr *E) {
+ if (llvm::Constant *Result = Emitter.tryEmitConstantExpr(E))
+ return Result;
return Visit(E->getSubExpr());
}
@@ -1994,11 +2011,6 @@ ConstantLValueEmitter::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
}
ConstantLValue
-ConstantLValueEmitter::VisitCXXUuidofExpr(const CXXUuidofExpr *E) {
- return CGM.GetAddrOfUuidDescriptor(E);
-}
-
-ConstantLValue
ConstantLValueEmitter::VisitMaterializeTemporaryExpr(
const MaterializeTemporaryExpr *E) {
assert(E->getStorageDuration() == SD_Static);
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
index 3f23fe11e4f5..6131f97995dc 100644
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -37,6 +37,7 @@
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
+#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/Module.h"
#include <cstdarg>
@@ -129,11 +130,10 @@ struct BinOpInfo {
return true;
}
- /// Check if either operand is a fixed point type or integer type, with at
- /// least one being a fixed point type. In any case, this
- /// operation did not follow usual arithmetic conversion and both operands may
- /// not be the same.
- bool isFixedPointBinOp() const {
+ /// Check if at least one operand is a fixed point type. In such cases, this
+ /// operation did not follow usual arithmetic conversion and both operands
+ /// might not be of the same type.
+ bool isFixedPointOp() const {
// We cannot simply check the result type since comparison operations return
// an int.
if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
@@ -141,6 +141,8 @@ struct BinOpInfo {
QualType RHSType = BinOp->getRHS()->getType();
return LHSType->isFixedPointType() || RHSType->isFixedPointType();
}
+ if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
+ return UnOp->getSubExpr()->getType()->isFixedPointType();
return false;
}
};
@@ -213,22 +215,6 @@ static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
(2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
}
-/// Update the FastMathFlags of LLVM IR from the FPOptions in LangOptions.
-static void updateFastMathFlags(llvm::FastMathFlags &FMF,
- FPOptions FPFeatures) {
- FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
-}
-
-/// Propagate fast-math flags from \p Op to the instruction in \p V.
-static Value *propagateFMFlags(Value *V, const BinOpInfo &Op) {
- if (auto *I = dyn_cast<llvm::Instruction>(V)) {
- llvm::FastMathFlags FMF = I->getFastMathFlags();
- updateFastMathFlags(FMF, Op.FPFeatures);
- I->setFastMathFlags(FMF);
- }
- return V;
-}
-
class ScalarExprEmitter
: public StmtVisitor<ScalarExprEmitter, Value*> {
CodeGenFunction &CGF;
@@ -297,7 +283,7 @@ public:
Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
- CGF.EmitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
+ CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
}
/// EmitLoadOfLValue - Given an expression with complex type that represents a
@@ -427,12 +413,18 @@ public:
}
Value *VisitStmt(Stmt *S) {
- S->dump(CGF.getContext().getSourceManager());
+ S->dump(llvm::errs(), CGF.getContext());
llvm_unreachable("Stmt can't have complex result type!");
}
Value *VisitExpr(Expr *S);
Value *VisitConstantExpr(ConstantExpr *E) {
+ if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
+ if (E->isGLValue())
+ return CGF.Builder.CreateLoad(Address(
+ Result, CGF.getContext().getTypeAlignInChars(E->getType())));
+ return Result;
+ }
return Visit(E->getSubExpr());
}
Value *VisitParenExpr(ParenExpr *PE) {
@@ -551,11 +543,17 @@ public:
}
Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
+ Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
Value *VisitMemberExpr(MemberExpr *E);
Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
+ // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
+ // literals aren't l-values in C++. We do so simply because that's the
+ // cleanest way to handle compound literals in C++.
+ // See the discussion here: https://reviews.llvm.org/D64464
return EmitLoadOfLValue(E);
}
@@ -680,6 +678,10 @@ public:
return Builder.getInt1(E->isSatisfied());
}
+ Value *VisitRequiresExpr(const RequiresExpr *E) {
+ return Builder.getInt1(E->isSatisfied());
+ }
+
Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
}
@@ -728,15 +730,34 @@ public:
}
}
+ if (Ops.Ty->isConstantMatrixType()) {
+ llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
+ // We need to check the types of the operands of the operator to get the
+ // correct matrix dimensions.
+ auto *BO = cast<BinaryOperator>(Ops.E);
+ auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
+ BO->getLHS()->getType().getCanonicalType());
+ auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
+ BO->getRHS()->getType().getCanonicalType());
+ if (LHSMatTy && RHSMatTy)
+ return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
+ LHSMatTy->getNumColumns(),
+ RHSMatTy->getNumColumns());
+ return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
+ }
+
if (Ops.Ty->isUnsignedIntegerType() &&
CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
!CanElideOverflowCheck(CGF.getContext(), Ops))
return EmitOverflowCheckedBinOp(Ops);
if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
- Value *V = Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
- return propagateFMFlags(V, Ops);
+ // Preserve the old values
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
+ return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
}
+ if (Ops.isFixedPointOp())
+ return EmitFixedPointBinOp(Ops);
return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
}
/// Create a binary op that checks for overflow.
@@ -748,6 +769,11 @@ public:
llvm::Value *Zero,bool isDiv);
// Common helper for getting how wide LHS of shift is.
static Value *GetWidthMinusOneValue(Value* LHS,Value* RHS);
+
+ // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
+ // non powers of two.
+ Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);
+
Value *EmitDiv(const BinOpInfo &Ops);
Value *EmitRem(const BinOpInfo &Ops);
Value *EmitAdd(const BinOpInfo &Ops);
@@ -1297,7 +1323,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
"Splatted expr doesn't match with vector element type?");
// Splat the element across to all elements
- unsigned NumElements = DstTy->getVectorNumElements();
+ unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
return Builder.CreateVectorSplat(NumElements, Src, "splat");
}
@@ -1315,8 +1341,8 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
// short or half vector.
// Source and destination are both expected to be vectors.
- llvm::Type *SrcElementTy = SrcTy->getVectorElementType();
- llvm::Type *DstElementTy = DstTy->getVectorElementType();
+ llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
+ llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
(void)DstElementTy;
assert(((SrcElementTy->isIntegerTy() &&
@@ -1622,8 +1648,8 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
// n = extract mask i
// x = extract val n
// newv = insert newv, x, i
- llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
- MTy->getNumElements());
+ auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
+ MTy->getNumElements());
Value* NewV = llvm::UndefValue::get(RTy);
for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
@@ -1638,18 +1664,17 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
- SmallVector<llvm::Constant*, 32> indices;
+ SmallVector<int, 32> Indices;
for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
// Check for -1 and output it as undef in the IR.
if (Idx.isSigned() && Idx.isAllOnesValue())
- indices.push_back(llvm::UndefValue::get(CGF.Int32Ty));
+ Indices.push_back(-1);
else
- indices.push_back(Builder.getInt32(Idx.getZExtValue()));
+ Indices.push_back(Idx.getZExtValue());
}
- Value *SV = llvm::ConstantVector::get(indices);
- return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
+ return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
}
Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
@@ -1682,8 +1707,8 @@ Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
assert(DstTy->isVectorTy() &&
"ConvertVector destination IR type must be a vector");
- llvm::Type *SrcEltTy = SrcTy->getVectorElementType(),
- *DstEltTy = DstTy->getVectorElementType();
+ llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
+ *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();
if (DstEltType->isBooleanType()) {
assert((SrcEltTy->isFloatingPointTy() ||
@@ -1764,22 +1789,34 @@ Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
return Builder.CreateExtractElement(Base, Idx, "vecext");
}
-static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
- unsigned Off, llvm::Type *I32Ty) {
+Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
+ TestAndClearIgnoreResultAssign();
+
+ // Handle the vector case. The base must be a vector, the index must be an
+ // integer value.
+ Value *RowIdx = Visit(E->getRowIdx());
+ Value *ColumnIdx = Visit(E->getColumnIdx());
+ Value *Matrix = Visit(E->getBase());
+
+ // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
+ llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
+ return MB.CreateExtractElement(
+ Matrix, RowIdx, ColumnIdx,
+ E->getBase()->getType()->getAs<ConstantMatrixType>()->getNumRows());
+}
+
+static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
+ unsigned Off) {
int MV = SVI->getMaskValue(Idx);
if (MV == -1)
- return llvm::UndefValue::get(I32Ty);
- return llvm::ConstantInt::get(I32Ty, Off+MV);
+ return -1;
+ return Off + MV;
}
-static llvm::Constant *getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
- if (C->getBitWidth() != 32) {
- assert(llvm::ConstantInt::isValueValidForType(I32Ty,
- C->getZExtValue()) &&
- "Index operand too large for shufflevector mask!");
- return llvm::ConstantInt::get(I32Ty, C->getZExtValue());
- }
- return C;
+static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
+ assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
+ "Index operand too large for shufflevector mask!");
+ return C->getZExtValue();
}
Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
@@ -1816,7 +1853,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
for (unsigned i = 0; i != NumInitElements; ++i) {
Expr *IE = E->getInit(i);
Value *Init = Visit(IE);
- SmallVector<llvm::Constant*, 16> Args;
+ SmallVector<int, 16> Args;
llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
@@ -1834,7 +1871,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// insert into undef -> shuffle (src, undef)
// shufflemask must use an i32
Args.push_back(getAsInt32(C, CGF.Int32Ty));
- Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+ Args.resize(ResElts, -1);
LHS = EI->getVectorOperand();
RHS = V;
@@ -1843,17 +1880,16 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// insert into undefshuffle && size match -> shuffle (v, src)
llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
for (unsigned j = 0; j != CurIdx; ++j)
- Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
- Args.push_back(Builder.getInt32(ResElts + C->getZExtValue()));
- Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+ Args.push_back(getMaskElt(SVV, j, 0));
+ Args.push_back(ResElts + C->getZExtValue());
+ Args.resize(ResElts, -1);
LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
RHS = EI->getVectorOperand();
VIsUndefShuffle = false;
}
if (!Args.empty()) {
- llvm::Constant *Mask = llvm::ConstantVector::get(Args);
- V = Builder.CreateShuffleVector(LHS, RHS, Mask);
+ V = Builder.CreateShuffleVector(LHS, RHS, Args);
++CurIdx;
continue;
}
@@ -1882,15 +1918,14 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// If the current vector initializer is a shuffle with undef, merge
// this shuffle directly into it.
if (VIsUndefShuffle) {
- Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
- CGF.Int32Ty));
+ Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
} else {
- Args.push_back(Builder.getInt32(j));
+ Args.push_back(j);
}
}
for (unsigned j = 0, je = InitElts; j != je; ++j)
- Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty));
- Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+ Args.push_back(getMaskElt(SVI, j, Offset));
+ Args.resize(ResElts, -1);
if (VIsUndefShuffle)
V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
@@ -1903,26 +1938,24 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// to the vector initializer into V.
if (Args.empty()) {
for (unsigned j = 0; j != InitElts; ++j)
- Args.push_back(Builder.getInt32(j));
- Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
- llvm::Constant *Mask = llvm::ConstantVector::get(Args);
- Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
- Mask, "vext");
+ Args.push_back(j);
+ Args.resize(ResElts, -1);
+ Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT), Args,
+ "vext");
Args.clear();
for (unsigned j = 0; j != CurIdx; ++j)
- Args.push_back(Builder.getInt32(j));
+ Args.push_back(j);
for (unsigned j = 0; j != InitElts; ++j)
- Args.push_back(Builder.getInt32(j+Offset));
- Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+ Args.push_back(j + Offset);
+ Args.resize(ResElts, -1);
}
// If V is undef, make sure it ends up on the RHS of the shuffle to aid
// merging subsequent shuffles into this one.
if (CurIdx == 0)
std::swap(V, Init);
- llvm::Constant *Mask = llvm::ConstantVector::get(Args);
- V = Builder.CreateShuffleVector(V, Init, Mask, "vecinit");
+ V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
VIsUndefShuffle = isa<llvm::UndefValue>(Init);
CurIdx += InitElts;
}
@@ -2036,11 +2069,15 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
}
}
- // Update heapallocsite metadata when there is an explicit cast.
- if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(Src))
- if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE))
- CGF.getDebugInfo()->
- addHeapAllocSiteMetadata(CI, CE->getType(), CE->getExprLoc());
+ // Update heapallocsite metadata when there is an explicit pointer cast.
+ if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
+ if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE)) {
+ QualType PointeeType = DestTy->getPointeeType();
+ if (!PointeeType.isNull())
+ CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
+ CE->getExprLoc());
+ }
+ }
return Builder.CreateBitCast(Src, DstTy);
}
@@ -2210,7 +2247,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
llvm::Type *DstTy = ConvertType(DestTy);
Value *Elt = Visit(const_cast<Expr*>(E));
// Splat the element across to all elements
- unsigned NumElements = DstTy->getVectorNumElements();
+ unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
return Builder.CreateVectorSplat(NumElements, Elt, "splat");
}
@@ -2311,7 +2348,6 @@ Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
}
Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
- CGF.enterFullExpression(E);
CodeGenFunction::RunCleanupsScope Scope(CGF);
Value *V = Visit(E->getSubExpr());
// Defend against dominance problems caused by jumps out of expression
@@ -2325,13 +2361,14 @@ Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
//===----------------------------------------------------------------------===//
static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
- llvm::Value *InVal, bool IsInc) {
+ llvm::Value *InVal, bool IsInc,
+ FPOptions FPFeatures) {
BinOpInfo BinOp;
BinOp.LHS = InVal;
BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
BinOp.Ty = E->getType();
BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
- // FIXME: once UnaryOperator carries FPFeatures, copy it here.
+ BinOp.FPFeatures = FPFeatures;
BinOp.E = E;
return BinOp;
}
@@ -2351,7 +2388,8 @@ llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
case LangOptions::SOB_Trapping:
if (!E->canOverflow())
return Builder.CreateNSWAdd(InVal, Amount, Name);
- return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, InVal, IsInc));
+ return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
+ E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
}
llvm_unreachable("Unknown SignedOverflowBehaviorTy");
}
@@ -2497,8 +2535,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
} else if (E->canOverflow() && type->isUnsignedIntegerType() &&
CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
- value =
- EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, value, isInc));
+ value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
+ E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
} else {
llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
@@ -2609,6 +2647,36 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
}
}
+ // Fixed-point types.
+ } else if (type->isFixedPointType()) {
+ // Fixed-point types are tricky. In some cases, it isn't possible to
+ // represent a 1 or a -1 in the type at all. Piggyback off of
+ // EmitFixedPointBinOp to avoid having to reimplement saturation.
+ BinOpInfo Info;
+ Info.E = E;
+ Info.Ty = E->getType();
+ Info.Opcode = isInc ? BO_Add : BO_Sub;
+ Info.LHS = value;
+ Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
+ // If the type is signed, it's better to represent this as +(-1) or -(-1),
+ // since -1 is guaranteed to be representable.
+ if (type->isSignedFixedPointType()) {
+ Info.Opcode = isInc ? BO_Sub : BO_Add;
+ Info.RHS = Builder.CreateNeg(Info.RHS);
+ }
+ // Now, convert from our invented integer literal to the type of the unary
+ // op. This will upscale and saturate if necessary. This value can become
+ // undef in some cases.
+ FixedPointSemantics SrcSema =
+ FixedPointSemantics::GetIntegerSemantics(value->getType()
+ ->getScalarSizeInBits(),
+ /*IsSigned=*/true);
+ FixedPointSemantics DstSema =
+ CGF.getContext().getFixedPointSemantics(Info.Ty);
+ Info.RHS = EmitFixedPointConversion(Info.RHS, SrcSema, DstSema,
+ E->getExprLoc());
+ value = EmitFixedPointBinOp(Info);
+
// Objective-C pointer types.
} else {
const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
@@ -2668,7 +2736,7 @@ Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
BinOp.Ty = E->getType();
BinOp.Opcode = BO_Sub;
- // FIXME: once UnaryOperator carries FPFeatures, copy it here.
+ BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
BinOp.E = E;
return EmitSub(BinOp);
}
@@ -2681,13 +2749,17 @@ Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
// Perform vector logical not on comparison with zero vector.
- if (E->getType()->isExtVectorType()) {
+ if (E->getType()->isVectorType() &&
+ E->getType()->castAs<VectorType>()->getVectorKind() ==
+ VectorType::GenericVector) {
Value *Oper = Visit(E->getSubExpr());
Value *Zero = llvm::Constant::getNullValue(Oper->getType());
Value *Result;
- if (Oper->getType()->isFPOrFPVectorTy())
+ if (Oper->getType()->isFPOrFPVectorTy()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
+ CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
- else
+ } else
Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
}
@@ -2888,7 +2960,7 @@ BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
Result.RHS = Visit(E->getRHS());
Result.Ty = E->getType();
Result.Opcode = E->getOpcode();
- Result.FPFeatures = E->getFPFeatures();
+ Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
Result.E = E;
return Result;
}
@@ -2908,7 +2980,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
OpInfo.RHS = Visit(E->getRHS());
OpInfo.Ty = E->getComputationResultType();
OpInfo.Opcode = E->getOpcode();
- OpInfo.FPFeatures = E->getFPFeatures();
+ OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
OpInfo.E = E;
// Load/convert the LHS.
LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
@@ -3096,7 +3168,9 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
}
if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
- llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
+ llvm::Value *Val;
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
+ Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
if (CGF.getLangOpts().OpenCL &&
!CGF.CGM.getCodeGenOpts().CorrectlyRoundedDivSqrt) {
// OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
@@ -3112,6 +3186,8 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
}
return Val;
}
+ else if (Ops.isFixedPointOp())
+ return EmitFixedPointBinOp(Ops);
else if (Ops.Ty->hasUnsignedIntegerRepresentation())
return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
else
@@ -3361,7 +3437,7 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
// the add operand respectively. This allows fmuladd to represent a*b-c, or
// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
// efficient operations.
-static Value* buildFMulAdd(llvm::BinaryOperator *MulOp, Value *Addend,
+static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
const CodeGenFunction &CGF, CGBuilderTy &Builder,
bool negMul, bool negAdd) {
assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.");
@@ -3373,12 +3449,23 @@ static Value* buildFMulAdd(llvm::BinaryOperator *MulOp, Value *Addend,
if (negAdd)
Addend = Builder.CreateFNeg(Addend, "neg");
- Value *FMulAdd = Builder.CreateCall(
- CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
- {MulOp0, MulOp1, Addend});
- MulOp->eraseFromParent();
+ Value *FMulAdd = nullptr;
+ if (Builder.getIsFPConstrained()) {
+ assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
+ "Only constrained operation should be created when Builder is in FP "
+ "constrained mode");
+ FMulAdd = Builder.CreateConstrainedFPCall(
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
+ Addend->getType()),
+ {MulOp0, MulOp1, Addend});
+ } else {
+ FMulAdd = Builder.CreateCall(
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
+ {MulOp0, MulOp1, Addend});
+ }
+ MulOp->eraseFromParent();
- return FMulAdd;
+ return FMulAdd;
}
// Check whether it would be legal to emit an fmuladd intrinsic call to
@@ -3413,6 +3500,19 @@ static Value* tryEmitFMulAdd(const BinOpInfo &op,
return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
}
+ if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(op.LHS)) {
+ if (LHSBinOp->getIntrinsicID() ==
+ llvm::Intrinsic::experimental_constrained_fmul &&
+ LHSBinOp->use_empty())
+ return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
+ }
+ if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(op.RHS)) {
+ if (RHSBinOp->getIntrinsicID() ==
+ llvm::Intrinsic::experimental_constrained_fmul &&
+ RHSBinOp->use_empty())
+ return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
+ }
+
return nullptr;
}
@@ -3436,21 +3536,26 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
}
}
+ if (op.Ty->isConstantMatrixType()) {
+ llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
+ return MB.CreateAdd(op.LHS, op.RHS);
+ }
+
if (op.Ty->isUnsignedIntegerType() &&
CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
!CanElideOverflowCheck(CGF.getContext(), op))
return EmitOverflowCheckedBinOp(op);
if (op.LHS->getType()->isFPOrFPVectorTy()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
// Try to form an fmuladd.
if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
return FMulAdd;
- Value *V = Builder.CreateFAdd(op.LHS, op.RHS, "add");
- return propagateFMFlags(V, op);
+ return Builder.CreateFAdd(op.LHS, op.RHS, "add");
}
- if (op.isFixedPointBinOp())
+ if (op.isFixedPointOp())
return EmitFixedPointBinOp(op);
return Builder.CreateAdd(op.LHS, op.RHS, "add");
@@ -3462,14 +3567,27 @@ Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
using llvm::APSInt;
using llvm::ConstantInt;
- const auto *BinOp = cast<BinaryOperator>(op.E);
-
- // The result is a fixed point type and at least one of the operands is fixed
- // point while the other is either fixed point or an int. This resulting type
- // should be determined by Sema::handleFixedPointConversions().
+ // This is either a binary operation where at least one of the operands is
+ // a fixed-point type, or a unary operation where the operand is a fixed-point
+ // type. The result type of a binary operation is determined by
+ // Sema::handleFixedPointConversions().
QualType ResultTy = op.Ty;
- QualType LHSTy = BinOp->getLHS()->getType();
- QualType RHSTy = BinOp->getRHS()->getType();
+ QualType LHSTy, RHSTy;
+ if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
+ RHSTy = BinOp->getRHS()->getType();
+ if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
+ // For compound assignment, the effective type of the LHS at this point
+ // is the computation LHS type, not the actual LHS type, and the final
+ // result type is not the type of the expression but rather the
+ // computation result type.
+ LHSTy = CAO->getComputationLHSType();
+ ResultTy = CAO->getComputationResultType();
+ } else
+ LHSTy = BinOp->getLHS()->getType();
+ } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
+ LHSTy = UnOp->getSubExpr()->getType();
+ RHSTy = UnOp->getSubExpr()->getType();
+ }
ASTContext &Ctx = CGF.getContext();
Value *LHS = op.LHS;
Value *RHS = op.RHS;
@@ -3481,16 +3599,17 @@ Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
// Convert the operands to the full precision type.
Value *FullLHS = EmitFixedPointConversion(LHS, LHSFixedSema, CommonFixedSema,
- BinOp->getExprLoc());
+ op.E->getExprLoc());
Value *FullRHS = EmitFixedPointConversion(RHS, RHSFixedSema, CommonFixedSema,
- BinOp->getExprLoc());
+ op.E->getExprLoc());
- // Perform the actual addition.
+ // Perform the actual operation.
Value *Result;
- switch (BinOp->getOpcode()) {
+ switch (op.Opcode) {
+ case BO_AddAssign:
case BO_Add: {
- if (ResultFixedSema.isSaturated()) {
- llvm::Intrinsic::ID IID = ResultFixedSema.isSigned()
+ if (CommonFixedSema.isSaturated()) {
+ llvm::Intrinsic::ID IID = CommonFixedSema.isSigned()
? llvm::Intrinsic::sadd_sat
: llvm::Intrinsic::uadd_sat;
Result = Builder.CreateBinaryIntrinsic(IID, FullLHS, FullRHS);
@@ -3499,9 +3618,10 @@ Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
}
break;
}
+ case BO_SubAssign:
case BO_Sub: {
- if (ResultFixedSema.isSaturated()) {
- llvm::Intrinsic::ID IID = ResultFixedSema.isSigned()
+ if (CommonFixedSema.isSaturated()) {
+ llvm::Intrinsic::ID IID = CommonFixedSema.isSigned()
? llvm::Intrinsic::ssub_sat
: llvm::Intrinsic::usub_sat;
Result = Builder.CreateBinaryIntrinsic(IID, FullLHS, FullRHS);
@@ -3510,6 +3630,32 @@ Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
}
break;
}
+ case BO_MulAssign:
+ case BO_Mul: {
+ llvm::Intrinsic::ID IID;
+ if (CommonFixedSema.isSaturated())
+ IID = CommonFixedSema.isSigned() ? llvm::Intrinsic::smul_fix_sat
+ : llvm::Intrinsic::umul_fix_sat;
+ else
+ IID = CommonFixedSema.isSigned() ? llvm::Intrinsic::smul_fix
+ : llvm::Intrinsic::umul_fix;
+ Result = Builder.CreateIntrinsic(IID, {FullLHS->getType()},
+ {FullLHS, FullRHS, Builder.getInt32(CommonFixedSema.getScale())});
+ break;
+ }
+ case BO_DivAssign:
+ case BO_Div: {
+ llvm::Intrinsic::ID IID;
+ if (CommonFixedSema.isSaturated())
+ IID = CommonFixedSema.isSigned() ? llvm::Intrinsic::sdiv_fix_sat
+ : llvm::Intrinsic::udiv_fix_sat;
+ else
+ IID = CommonFixedSema.isSigned() ? llvm::Intrinsic::sdiv_fix
+ : llvm::Intrinsic::udiv_fix;
+ Result = Builder.CreateIntrinsic(IID, {FullLHS->getType()},
+ {FullLHS, FullRHS, Builder.getInt32(CommonFixedSema.getScale())});
+ break;
+ }
case BO_LT:
return CommonFixedSema.isSigned() ? Builder.CreateICmpSLT(FullLHS, FullRHS)
: Builder.CreateICmpULT(FullLHS, FullRHS);
@@ -3529,17 +3675,11 @@ Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
return Builder.CreateICmpEQ(FullLHS, FullRHS);
case BO_NE:
return Builder.CreateICmpNE(FullLHS, FullRHS);
- case BO_Mul:
- case BO_Div:
case BO_Shl:
case BO_Shr:
case BO_Cmp:
case BO_LAnd:
case BO_LOr:
- case BO_MulAssign:
- case BO_DivAssign:
- case BO_AddAssign:
- case BO_SubAssign:
case BO_ShlAssign:
case BO_ShrAssign:
llvm_unreachable("Found unimplemented fixed point binary operation");
@@ -3560,7 +3700,7 @@ Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
// Convert to the result type.
return EmitFixedPointConversion(Result, CommonFixedSema, ResultFixedSema,
- BinOp->getExprLoc());
+ op.E->getExprLoc());
}
Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
@@ -3581,20 +3721,25 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
}
}
+ if (op.Ty->isConstantMatrixType()) {
+ llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
+ return MB.CreateSub(op.LHS, op.RHS);
+ }
+
if (op.Ty->isUnsignedIntegerType() &&
CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
!CanElideOverflowCheck(CGF.getContext(), op))
return EmitOverflowCheckedBinOp(op);
if (op.LHS->getType()->isFPOrFPVectorTy()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
// Try to form an fmuladd.
if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
return FMulAdd;
- Value *V = Builder.CreateFSub(op.LHS, op.RHS, "sub");
- return propagateFMFlags(V, op);
+ return Builder.CreateFSub(op.LHS, op.RHS, "sub");
}
- if (op.isFixedPointBinOp())
+ if (op.isFixedPointOp())
return EmitFixedPointBinOp(op);
return Builder.CreateSub(op.LHS, op.RHS, "sub");
@@ -3666,6 +3811,21 @@ Value *ScalarExprEmitter::GetWidthMinusOneValue(Value* LHS,Value* RHS) {
return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1);
}
+Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
+ const Twine &Name) {
+ llvm::IntegerType *Ty;
+ if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
+ Ty = cast<llvm::IntegerType>(VT->getElementType());
+ else
+ Ty = cast<llvm::IntegerType>(LHS->getType());
+
+ if (llvm::isPowerOf2_64(Ty->getBitWidth()))
+ return Builder.CreateAnd(RHS, GetWidthMinusOneValue(LHS, RHS), Name);
+
+ return Builder.CreateURem(
+ RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
+}
+
Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
// LLVM requires the LHS and RHS to be the same type: promote or truncate the
// RHS to the same size as the LHS.
@@ -3676,12 +3836,11 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
bool SanitizeBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
Ops.Ty->hasSignedIntegerRepresentation() &&
!CGF.getLangOpts().isSignedOverflowDefined() &&
- !CGF.getLangOpts().CPlusPlus2a;
+ !CGF.getLangOpts().CPlusPlus20;
bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
// OpenCL 6.3j: shift values are effectively % word size of LHS.
if (CGF.getLangOpts().OpenCL)
- RHS =
- Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shl.mask");
+ RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
else if ((SanitizeBase || SanitizeExponent) &&
isa<llvm::IntegerType>(Ops.LHS->getType())) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
@@ -3743,8 +3902,7 @@ Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
// OpenCL 6.3j: shift values are effectively % word size of LHS.
if (CGF.getLangOpts().OpenCL)
- RHS =
- Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shr.mask");
+ RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
isa<llvm::IntegerType>(Ops.LHS->getType())) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
@@ -3897,9 +4055,10 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
E->getExprLoc());
}
- if (BOInfo.isFixedPointBinOp()) {
+ if (BOInfo.isFixedPointOp()) {
Result = EmitFixedPointBinOp(BOInfo);
} else if (LHS->getType()->isFPOrFPVectorTy()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
if (!IsSignaling)
Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
else
@@ -4052,6 +4211,8 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
Value *RHS = Visit(E->getRHS());
Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
if (LHS->getType()->isFPOrFPVectorTy()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
+ CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
} else {
@@ -4136,6 +4297,8 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
Value *RHS = Visit(E->getRHS());
Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
if (LHS->getType()->isFPOrFPVectorTy()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
+ CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
} else {
@@ -4269,8 +4432,8 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
// OpenCL: If the condition is a vector, we can treat this condition like
// the select function.
- if (CGF.getLangOpts().OpenCL
- && condExpr->getType()->isVectorType()) {
+ if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
+ condExpr->getType()->isExtVectorType()) {
CGF.incrementProfileCounter(E);
llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
@@ -4285,10 +4448,8 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
- llvm::Value *tmp = Builder.CreateSExt(TestMSB,
- llvm::VectorType::get(elemType,
- numElem),
- "sext");
+ llvm::Value *tmp = Builder.CreateSExt(
+ TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
llvm::Value *tmp2 = Builder.CreateNot(tmp);
// Cast float to int to perform ANDs if necessary.
@@ -4427,14 +4588,9 @@ Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
Value *Src, unsigned NumElementsDst) {
llvm::Value *UnV = llvm::UndefValue::get(Src->getType());
- SmallVector<llvm::Constant*, 4> Args;
- Args.push_back(Builder.getInt32(0));
- Args.push_back(Builder.getInt32(1));
- Args.push_back(Builder.getInt32(2));
- if (NumElementsDst == 4)
- Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
- llvm::Constant *Mask = llvm::ConstantVector::get(Args);
- return Builder.CreateShuffleVector(Src, UnV, Mask);
+ static constexpr int Mask[] = {0, 1, 2, -1};
+ return Builder.CreateShuffleVector(Src, UnV,
+ llvm::makeArrayRef(Mask, NumElementsDst));
}
// Create cast instructions for converting LLVM value \p Src to LLVM type \p
@@ -4512,7 +4668,8 @@ Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
// get a vec3.
if (NumElementsSrc != 3 && NumElementsDst == 3) {
if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
- auto Vec4Ty = llvm::VectorType::get(DstTy->getVectorElementType(), 4);
+ auto *Vec4Ty = llvm::FixedVectorType::get(
+ cast<llvm::VectorType>(DstTy)->getElementType(), 4);
Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
Vec4Ty);
}
@@ -4655,7 +4812,7 @@ struct GEPOffsetAndOverflow {
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
llvm::LLVMContext &VMContext,
CodeGenModule &CGM,
- CGBuilderTy Builder) {
+ CGBuilderTy &Builder) {
const auto &DL = CGM.getDataLayout();
// The total (signed) byte offset for the GEP.
diff --git a/clang/lib/CodeGen/CGGPUBuiltin.cpp b/clang/lib/CodeGen/CGGPUBuiltin.cpp
index d7e267630762..f860623e2bc3 100644
--- a/clang/lib/CodeGen/CGGPUBuiltin.cpp
+++ b/clang/lib/CodeGen/CGGPUBuiltin.cpp
@@ -16,6 +16,7 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Transforms/Utils/AMDGPUEmitPrintf.h"
using namespace clang;
using namespace CodeGen;
@@ -110,7 +111,7 @@ CodeGenFunction::EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
for (unsigned I = 1, NumArgs = Args.size(); I < NumArgs; ++I) {
llvm::Value *P = Builder.CreateStructGEP(AllocaTy, Alloca, I - 1);
llvm::Value *Arg = Args[I].getRValue(*this).getScalarVal();
- Builder.CreateAlignedStore(Arg, P, DL.getPrefTypeAlignment(Arg->getType()));
+ Builder.CreateAlignedStore(Arg, P, DL.getPrefTypeAlign(Arg->getType()));
}
BufferPtr = Builder.CreatePointerCast(Alloca, llvm::Type::getInt8PtrTy(Ctx));
}
@@ -120,3 +121,36 @@ CodeGenFunction::EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
return RValue::get(Builder.CreateCall(
VprintfFunc, {Args[0].getRValue(*this).getScalarVal(), BufferPtr}));
}
+
+RValue
+CodeGenFunction::EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ assert(getTarget().getTriple().getArch() == llvm::Triple::amdgcn);
+ assert(E->getBuiltinCallee() == Builtin::BIprintf ||
+ E->getBuiltinCallee() == Builtin::BI__builtin_printf);
+ assert(E->getNumArgs() >= 1); // printf always has at least one arg.
+
+ CallArgList CallArgs;
+ EmitCallArgs(CallArgs,
+ E->getDirectCallee()->getType()->getAs<FunctionProtoType>(),
+ E->arguments(), E->getDirectCallee(),
+ /* ParamsToSkip = */ 0);
+
+ SmallVector<llvm::Value *, 8> Args;
+ for (auto A : CallArgs) {
+ // We don't know how to emit non-scalar varargs.
+ if (!A.getRValue(*this).isScalar()) {
+ CGM.ErrorUnsupported(E, "non-scalar arg to printf");
+ return RValue::get(llvm::ConstantInt::get(IntTy, -1));
+ }
+
+ llvm::Value *Arg = A.getRValue(*this).getScalarVal();
+ Args.push_back(Arg);
+ }
+
+ llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
+ IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
+ auto Printf = llvm::emitAMDGPUPrintfCall(IRB, Args);
+ Builder.SetInsertPoint(IRB.GetInsertBlock(), IRB.GetInsertPoint());
+ return RValue::get(Printf);
+}
diff --git a/clang/lib/CodeGen/CGLoopInfo.cpp b/clang/lib/CodeGen/CGLoopInfo.cpp
index e4b184eb8798..78da72eda0cf 100644
--- a/clang/lib/CodeGen/CGLoopInfo.cpp
+++ b/clang/lib/CodeGen/CGLoopInfo.cpp
@@ -9,6 +9,8 @@
#include "CGLoopInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
@@ -572,6 +574,7 @@ void LoopInfoStack::push(BasicBlock *Header, const llvm::DebugLoc &StartLoc,
}
void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
+ const clang::CodeGenOptions &CGOpts,
ArrayRef<const clang::Attr *> Attrs,
const llvm::DebugLoc &StartLoc,
const llvm::DebugLoc &EndLoc) {
@@ -752,6 +755,14 @@ void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
}
}
+ if (CGOpts.OptimizationLevel > 0)
+ // Disable unrolling for the loop, if unrolling is disabled (via
+ // -fno-unroll-loops) and no pragmas override the decision.
+ if (!CGOpts.UnrollLoops &&
+ (StagedAttrs.UnrollEnable == LoopAttributes::Unspecified &&
+ StagedAttrs.UnrollCount == 0))
+ setUnrollState(LoopAttributes::Disable);
+
/// Stage the attributes.
push(Header, StartLoc, EndLoc);
}
diff --git a/clang/lib/CodeGen/CGLoopInfo.h b/clang/lib/CodeGen/CGLoopInfo.h
index 5abcf37c5433..e379c64c99a8 100644
--- a/clang/lib/CodeGen/CGLoopInfo.h
+++ b/clang/lib/CodeGen/CGLoopInfo.h
@@ -29,6 +29,7 @@ class MDNode;
namespace clang {
class Attr;
class ASTContext;
+class CodeGenOptions;
namespace CodeGen {
/// Attributes that may be specified on loops.
@@ -202,6 +203,7 @@ public:
/// Begin a new structured loop. Stage attributes from the Attrs list.
/// The staged attributes are applied to the loop and then cleared.
void push(llvm::BasicBlock *Header, clang::ASTContext &Ctx,
+ const clang::CodeGenOptions &CGOpts,
llvm::ArrayRef<const Attr *> Attrs, const llvm::DebugLoc &StartLoc,
const llvm::DebugLoc &EndLoc);
diff --git a/clang/lib/CodeGen/CGNonTrivialStruct.cpp b/clang/lib/CodeGen/CGNonTrivialStruct.cpp
index d5f378c52232..d134be83a9dc 100644
--- a/clang/lib/CodeGen/CGNonTrivialStruct.cpp
+++ b/clang/lib/CodeGen/CGNonTrivialStruct.cpp
@@ -254,6 +254,10 @@ struct GenBinaryFuncName : CopyStructVisitor<GenBinaryFuncName<IsMove>, IsMove>,
void visitVolatileTrivial(QualType FT, const FieldDecl *FD,
CharUnits CurStructOffset) {
+ // Zero-length bit-fields don't need to be copied/assigned.
+ if (FD && FD->isZeroLengthBitField(this->Ctx))
+ return;
+
// Because volatile fields can be bit-fields and are individually copied,
// their offset and width are in bits.
uint64_t OffsetInBits =
@@ -317,6 +321,16 @@ static const CGFunctionInfo &getFunctionInfo(CodeGenModule &CGM,
return CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
}
+template <size_t N, size_t... Ints>
+static std::array<Address, N> getParamAddrs(std::index_sequence<Ints...> IntSeq,
+ std::array<CharUnits, N> Alignments,
+ FunctionArgList Args,
+ CodeGenFunction *CGF) {
+ return std::array<Address, N>{{
+ Address(CGF->Builder.CreateLoad(CGF->GetAddrOfLocalVar(Args[Ints])),
+ Alignments[Ints])...}};
+}
+
// Template classes that are used as bases for classes that emit special
// functions.
template <class Derived> struct GenFuncBase {
@@ -424,9 +438,9 @@ template <class Derived> struct GenFuncBase {
}
template <size_t N>
- llvm::Function *
- getFunction(StringRef FuncName, QualType QT, std::array<Address, N> Addrs,
- std::array<CharUnits, N> Alignments, CodeGenModule &CGM) {
+ llvm::Function *getFunction(StringRef FuncName, QualType QT,
+ std::array<CharUnits, N> Alignments,
+ CodeGenModule &CGM) {
// If the special function already exists in the module, return it.
if (llvm::Function *F = CGM.getModule().getFunction(FuncName)) {
bool WrongType = false;
@@ -439,7 +453,7 @@ template <class Derived> struct GenFuncBase {
}
if (WrongType) {
- std::string FuncName = F->getName();
+ std::string FuncName = std::string(F->getName());
SourceLocation Loc = QT->castAs<RecordType>()->getDecl()->getLocation();
CGM.Error(Loc, "special function " + FuncName +
" for non-trivial C struct has incorrect type");
@@ -466,12 +480,8 @@ template <class Derived> struct GenFuncBase {
CodeGenFunction NewCGF(CGM);
setCGF(&NewCGF);
CGF->StartFunction(FD, Ctx.VoidTy, F, FI, Args);
-
- for (unsigned I = 0; I < N; ++I) {
- llvm::Value *V = CGF->Builder.CreateLoad(CGF->GetAddrOfLocalVar(Args[I]));
- Addrs[I] = Address(V, Alignments[I]);
- }
-
+ std::array<Address, N> Addrs =
+ getParamAddrs<N>(std::make_index_sequence<N>{}, Alignments, Args, CGF);
asDerived().visitStructFields(QT, CharUnits::Zero(), Addrs);
CGF->FinishFunction();
return F;
@@ -491,7 +501,7 @@ template <class Derived> struct GenFuncBase {
}
if (llvm::Function *F =
- getFunction(FuncName, QT, Addrs, Alignments, CallerCGF.CGM))
+ getFunction(FuncName, QT, Alignments, CallerCGF.CGM))
CallerCGF.EmitNounwindRuntimeCall(F, Ptrs);
}
@@ -543,6 +553,10 @@ struct GenBinaryFunc : CopyStructVisitor<Derived, IsMove>,
std::array<Address, 2> Addrs) {
LValue DstLV, SrcLV;
if (FD) {
+ // No need to copy zero-length bit-fields.
+ if (FD->isZeroLengthBitField(this->CGF->getContext()))
+ return;
+
QualType RT = QualType(FD->getParent()->getTypeForDecl(), 0);
llvm::PointerType *PtrTy = this->CGF->ConvertType(RT)->getPointerTo();
Address DstAddr = this->getAddrWithOffset(Addrs[DstIdx], Offset);
@@ -825,17 +839,6 @@ static void callSpecialFunction(G &&Gen, StringRef FuncName, QualType QT,
Gen.callFunc(FuncName, QT, Addrs, CGF);
}
-template <size_t N> static std::array<Address, N> createNullAddressArray();
-
-template <> std::array<Address, 1> createNullAddressArray() {
- return std::array<Address, 1>({{Address(nullptr, CharUnits::Zero())}});
-}
-
-template <> std::array<Address, 2> createNullAddressArray() {
- return std::array<Address, 2>({{Address(nullptr, CharUnits::Zero()),
- Address(nullptr, CharUnits::Zero())}});
-}
-
template <class G, size_t N>
static llvm::Function *
getSpecialFunction(G &&Gen, StringRef FuncName, QualType QT, bool IsVolatile,
@@ -844,8 +847,7 @@ getSpecialFunction(G &&Gen, StringRef FuncName, QualType QT, bool IsVolatile,
// The following call requires an array of addresses as arguments, but doesn't
// actually use them (it overwrites them with the addresses of the arguments
// of the created function).
- return Gen.getFunction(FuncName, QT, createNullAddressArray<N>(), Alignments,
- CGM);
+ return Gen.getFunction(FuncName, QT, Alignments, CGM);
}
// Functions to emit calls to the special functions of a non-trivial C struct.
diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp
index 90fca2836d99..cd2b84f5dd20 100644
--- a/clang/lib/CodeGen/CGObjC.cpp
+++ b/clang/lib/CodeGen/CGObjC.cpp
@@ -1491,11 +1491,10 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
argLoad.getType()))
finalArg = &argCast;
-
- BinaryOperator assign(&ivarRef, finalArg, BO_Assign,
- ivarRef.getType(), VK_RValue, OK_Ordinary,
- SourceLocation(), FPOptions());
- EmitStmt(&assign);
+ BinaryOperator *assign = BinaryOperator::Create(
+ getContext(), &ivarRef, finalArg, BO_Assign, ivarRef.getType(), VK_RValue,
+ OK_Ordinary, SourceLocation(), FPOptionsOverride());
+ EmitStmt(assign);
}
/// Generate an Objective-C property setter function.
@@ -1837,6 +1836,40 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
llvm::Value *CurrentItem =
Builder.CreateAlignedLoad(CurrentItemPtr, getPointerAlign());
+ if (SanOpts.has(SanitizerKind::ObjCCast)) {
+ // Before using an item from the collection, check that the implicit cast
+ // from id to the element type is valid. This is done with instrumentation
+ // roughly corresponding to:
+ //
+ // if (![item isKindOfClass:expectedCls]) { /* emit diagnostic */ }
+ const ObjCObjectPointerType *ObjPtrTy =
+ elementType->getAsObjCInterfacePointerType();
+ const ObjCInterfaceType *InterfaceTy =
+ ObjPtrTy ? ObjPtrTy->getInterfaceType() : nullptr;
+ if (InterfaceTy) {
+ SanitizerScope SanScope(this);
+ auto &C = CGM.getContext();
+ assert(InterfaceTy->getDecl() && "No decl for ObjC interface type");
+ Selector IsKindOfClassSel = GetUnarySelector("isKindOfClass", C);
+ CallArgList IsKindOfClassArgs;
+ llvm::Value *Cls =
+ CGM.getObjCRuntime().GetClass(*this, InterfaceTy->getDecl());
+ IsKindOfClassArgs.add(RValue::get(Cls), C.getObjCClassType());
+ llvm::Value *IsClass =
+ CGM.getObjCRuntime()
+ .GenerateMessageSend(*this, ReturnValueSlot(), C.BoolTy,
+ IsKindOfClassSel, CurrentItem,
+ IsKindOfClassArgs)
+ .getScalarVal();
+ llvm::Constant *StaticData[] = {
+ EmitCheckSourceLocation(S.getBeginLoc()),
+ EmitCheckTypeDescriptor(QualType(InterfaceTy, 0))};
+ EmitCheck({{IsClass, SanitizerKind::ObjCCast}},
+ SanitizerHandler::InvalidObjCCast,
+ ArrayRef<llvm::Constant *>(StaticData), CurrentItem);
+ }
+ }
+
// Cast that value to the right type.
CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType,
"currentitem");
@@ -2160,7 +2193,8 @@ llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
if (!mandatory && isa<llvm::Instruction>(result)) {
llvm::CallInst *call
= cast<llvm::CallInst>(result->stripPointerCasts());
- assert(call->getCalledValue() == CGM.getObjCEntrypoints().objc_retainBlock);
+ assert(call->getCalledOperand() ==
+ CGM.getObjCEntrypoints().objc_retainBlock);
call->setMetadata("clang.arc.copy_on_escape",
llvm::MDNode::get(Builder.getContext(), None));
@@ -3255,7 +3289,6 @@ static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) {
// The retain needs to happen within the full-expression.
if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
- enterFullExpression(cleanups);
RunCleanupsScope scope(*this);
return EmitARCRetainScalarExpr(cleanups->getSubExpr());
}
@@ -3271,7 +3304,6 @@ llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
// The retain needs to happen within the full-expression.
if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
- enterFullExpression(cleanups);
RunCleanupsScope scope(*this);
return EmitARCRetainAutoreleaseScalarExpr(cleanups->getSubExpr());
}
@@ -3382,7 +3414,6 @@ static llvm::Value *emitARCUnsafeUnretainedScalarExpr(CodeGenFunction &CGF,
llvm::Value *CodeGenFunction::EmitARCUnsafeUnretainedScalarExpr(const Expr *e) {
// Look through full-expressions.
if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
- enterFullExpression(cleanups);
RunCleanupsScope scope(*this);
return emitARCUnsafeUnretainedScalarExpr(*this, cleanups->getSubExpr());
}
@@ -3505,7 +3536,7 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
if (!Ty->isRecordType())
return nullptr;
const ObjCPropertyDecl *PD = PID->getPropertyDecl();
- if ((!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic)))
+ if ((!(PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_atomic)))
return nullptr;
llvm::Constant *HelperFn = nullptr;
if (hasTrivialSetExpr(PID))
@@ -3555,21 +3586,21 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
StartFunction(FD, ReturnTy, Fn, FI, args);
- DeclRefExpr DstExpr(getContext(), &DstDecl, false, DestTy, VK_RValue,
- SourceLocation());
- UnaryOperator DST(&DstExpr, UO_Deref, DestTy->getPointeeType(),
- VK_LValue, OK_Ordinary, SourceLocation(), false);
+ DeclRefExpr DstExpr(C, &DstDecl, false, DestTy, VK_RValue, SourceLocation());
+ UnaryOperator *DST = UnaryOperator::Create(
+ C, &DstExpr, UO_Deref, DestTy->getPointeeType(), VK_LValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
- DeclRefExpr SrcExpr(getContext(), &SrcDecl, false, SrcTy, VK_RValue,
- SourceLocation());
- UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
- VK_LValue, OK_Ordinary, SourceLocation(), false);
+ DeclRefExpr SrcExpr(C, &SrcDecl, false, SrcTy, VK_RValue, SourceLocation());
+ UnaryOperator *SRC = UnaryOperator::Create(
+ C, &SrcExpr, UO_Deref, SrcTy->getPointeeType(), VK_LValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
- Expr *Args[2] = { &DST, &SRC };
+ Expr *Args[2] = {DST, SRC};
CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create(
C, OO_Equal, CalleeExp->getCallee(), Args, DestTy->getPointeeType(),
- VK_LValue, SourceLocation(), FPOptions());
+ VK_LValue, SourceLocation(), FPOptionsOverride());
EmitStmt(TheCall);
@@ -3589,7 +3620,7 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
QualType Ty = PD->getType();
if (!Ty->isRecordType())
return nullptr;
- if ((!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic)))
+ if ((!(PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_atomic)))
return nullptr;
llvm::Constant *HelperFn = nullptr;
if (hasTrivialGetExpr(PID))
@@ -3641,14 +3672,15 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
DeclRefExpr SrcExpr(getContext(), &SrcDecl, false, SrcTy, VK_RValue,
SourceLocation());
- UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
- VK_LValue, OK_Ordinary, SourceLocation(), false);
+ UnaryOperator *SRC = UnaryOperator::Create(
+ C, &SrcExpr, UO_Deref, SrcTy->getPointeeType(), VK_LValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
CXXConstructExpr *CXXConstExpr =
cast<CXXConstructExpr>(PID->getGetterCXXConstructor());
SmallVector<Expr*, 4> ConstructorArgs;
- ConstructorArgs.push_back(&SRC);
+ ConstructorArgs.push_back(SRC);
ConstructorArgs.append(std::next(CXXConstExpr->arg_begin()),
CXXConstExpr->arg_end());
diff --git a/clang/lib/CodeGen/CGObjCGNU.cpp b/clang/lib/CodeGen/CGObjCGNU.cpp
index a27b6d4ed637..bb9c494ae68e 100644
--- a/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -203,7 +203,8 @@ protected:
/// the start of the string. The result of this function can be used anywhere
/// where the C code specifies const char*.
llvm::Constant *MakeConstantString(StringRef Str, const char *Name = "") {
- ConstantAddress Array = CGM.GetAddrOfConstantCString(Str, Name);
+ ConstantAddress Array =
+ CGM.GetAddrOfConstantCString(std::string(Str), Name);
return llvm::ConstantExpr::getGetElementPtr(Array.getElementType(),
Array.getPointer(), Zeros);
}
@@ -254,11 +255,11 @@ protected:
isDynamic=true) {
int attrs = property->getPropertyAttributes();
// For read-only properties, clear the copy and retain flags
- if (attrs & ObjCPropertyDecl::OBJC_PR_readonly) {
- attrs &= ~ObjCPropertyDecl::OBJC_PR_copy;
- attrs &= ~ObjCPropertyDecl::OBJC_PR_retain;
- attrs &= ~ObjCPropertyDecl::OBJC_PR_weak;
- attrs &= ~ObjCPropertyDecl::OBJC_PR_strong;
+ if (attrs & ObjCPropertyAttribute::kind_readonly) {
+ attrs &= ~ObjCPropertyAttribute::kind_copy;
+ attrs &= ~ObjCPropertyAttribute::kind_retain;
+ attrs &= ~ObjCPropertyAttribute::kind_weak;
+ attrs &= ~ObjCPropertyAttribute::kind_strong;
}
// The first flags field has the same attribute values as clang uses internally
Fields.addInt(Int8Ty, attrs & 0xff);
@@ -616,6 +617,13 @@ public:
llvm::Value *GenerateProtocolRef(CodeGenFunction &CGF,
const ObjCProtocolDecl *PD) override;
void GenerateProtocol(const ObjCProtocolDecl *PD) override;
+
+ virtual llvm::Constant *GenerateProtocolRef(const ObjCProtocolDecl *PD);
+
+ llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD) override {
+ return GenerateProtocolRef(PD);
+ }
+
llvm::Function *ModuleInitFunction() override;
llvm::FunctionCallee GetPropertyGetFunction() override;
llvm::FunctionCallee GetPropertySetFunction() override;
@@ -820,7 +828,7 @@ class CGObjCGNUstep : public CGObjCGNU {
// Slot_t objc_slot_lookup_super(struct objc_super*, SEL);
SlotLookupSuperFn.init(&CGM, "objc_slot_lookup_super", SlotTy,
PtrToObjCSuperTy, SelectorTy);
- // If we're in ObjC++ mode, then we want to make
+ // If we're in ObjC++ mode, then we want to make
if (usesSEHExceptions) {
llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
// void objc_exception_rethrow(void)
@@ -1347,7 +1355,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
void GenerateProtocol(const ObjCProtocolDecl *PD) override {
// Do nothing - we only emit referenced protocols.
}
- llvm::Constant *GenerateProtocolRef(const ObjCProtocolDecl *PD) {
+ llvm::Constant *GenerateProtocolRef(const ObjCProtocolDecl *PD) override {
std::string ProtocolName = PD->getNameAsString();
auto *&Protocol = ExistingProtocols[ProtocolName];
if (Protocol)
@@ -1433,7 +1441,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
llvm::Constant *GetTypeString(llvm::StringRef TypeEncoding) {
if (TypeEncoding.empty())
return NULLPtr;
- std::string MangledTypes = TypeEncoding;
+ std::string MangledTypes = std::string(TypeEncoding);
std::replace(MangledTypes.begin(), MangledTypes.end(),
'@', '\1');
std::string TypesVarName = ".objc_sel_types_" + MangledTypes;
@@ -1556,7 +1564,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
// We have to do this by hand, rather than with @llvm.ctors, so that the
// linker can remove the duplicate invocations.
auto *InitVar = new llvm::GlobalVariable(TheModule, LoadFunction->getType(),
- /*isConstant*/true, llvm::GlobalValue::LinkOnceAnyLinkage,
+ /*isConstant*/false, llvm::GlobalValue::LinkOnceAnyLinkage,
LoadFunction, ".objc_ctor");
// Check that this hasn't been renamed. This shouldn't happen, because
// this function should be called precisely once.
@@ -1647,14 +1655,16 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
for (const auto &lateInit : EarlyInitList) {
auto *global = TheModule.getGlobalVariable(lateInit.first);
if (global) {
- b.CreateAlignedStore(global,
- b.CreateStructGEP(lateInit.second.first, lateInit.second.second), CGM.getPointerAlign().getQuantity());
+ b.CreateAlignedStore(
+ global,
+ b.CreateStructGEP(lateInit.second.first, lateInit.second.second),
+ CGM.getPointerAlign().getAsAlign());
}
}
b.CreateRetVoid();
// We can't use the normal LLVM global initialisation array, because we
// need to specify that this runs early in library initialisation.
- auto *InitVar = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
+ auto *InitVar = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
/*isConstant*/true, llvm::GlobalValue::InternalLinkage,
Init, ".objc_early_init_ptr");
InitVar->setSection(".CRT$XCLb");
@@ -1943,7 +1953,8 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
if (SuperClass) {
std::pair<llvm::Constant*, int> v{classStruct, 1};
- EarlyInitList.emplace_back(SuperClass->getName(), std::move(v));
+ EarlyInitList.emplace_back(std::string(SuperClass->getName()),
+ std::move(v));
}
}
@@ -2410,7 +2421,8 @@ llvm::Constant *CGObjCGNUstep::GetEHType(QualType T) {
assert(PT && "Invalid @catch type.");
const ObjCInterfaceType *IT = PT->getInterfaceType();
assert(IT && "Invalid @catch type.");
- std::string className = IT->getDecl()->getIdentifier()->getName();
+ std::string className =
+ std::string(IT->getDecl()->getIdentifier()->getName());
std::string typeinfoName = "__objc_eh_typeinfo_" + className;
@@ -3034,13 +3046,18 @@ CGObjCGNU::GenerateProtocolList(ArrayRef<std::string> Protocols) {
llvm::Value *CGObjCGNU::GenerateProtocolRef(CodeGenFunction &CGF,
const ObjCProtocolDecl *PD) {
+ auto protocol = GenerateProtocolRef(PD);
+ llvm::Type *T =
+ CGM.getTypes().ConvertType(CGM.getContext().getObjCProtoType());
+ return CGF.Builder.CreateBitCast(protocol, llvm::PointerType::getUnqual(T));
+}
+
+llvm::Constant *CGObjCGNU::GenerateProtocolRef(const ObjCProtocolDecl *PD) {
llvm::Constant *&protocol = ExistingProtocols[PD->getNameAsString()];
if (!protocol)
GenerateProtocol(PD);
assert(protocol && "Unknown protocol");
- llvm::Type *T =
- CGM.getTypes().ConvertType(CGM.getContext().getObjCProtoType());
- return CGF.Builder.CreateBitCast(protocol, llvm::PointerType::getUnqual(T));
+ return protocol;
}
llvm::Constant *
diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp
index f36c28a85a68..1d0379afb4b5 100644
--- a/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/clang/lib/CodeGen/CGObjCMac.cpp
@@ -1107,11 +1107,6 @@ public:
void GenerateProtocol(const ObjCProtocolDecl *PD) override;
- /// GetOrEmitProtocol - Get the protocol object for the given
- /// declaration, emitting it if necessary. The return value has type
- /// ProtocolPtrTy.
- virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD)=0;
-
/// GetOrEmitProtocolRef - Get a forward reference to the protocol
/// object for the given declaration, emitting it if needed. These
/// forward references will be filled in with empty bodies if no
@@ -2035,7 +2030,7 @@ CGObjCCommonMac::GenerateConstantNSString(const StringLiteral *Literal) {
GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
// Don't enforce the target's minimum global alignment, since the only use
// of the string is via this class initializer.
- GV->setAlignment(llvm::Align::None());
+ GV->setAlignment(llvm::Align(1));
Fields.addBitCast(GV, CGM.Int8PtrTy);
// String length.
@@ -2558,9 +2553,8 @@ void CGObjCCommonMac::BuildRCRecordLayout(const llvm::StructLayout *RecLayout,
}
if (FQT->isRecordType() && ElCount) {
int OldIndex = RunSkipBlockVars.size() - 1;
- const RecordType *RT = FQT->getAs<RecordType>();
- BuildRCBlockVarRecordLayout(RT, BytePos + FieldOffset,
- HasUnion);
+ auto *RT = FQT->castAs<RecordType>();
+ BuildRCBlockVarRecordLayout(RT, BytePos + FieldOffset, HasUnion);
// Replicate layout information for each array element. Note that
// one element is already done.
@@ -3047,9 +3041,10 @@ llvm::Value *CGObjCCommonMac::EmitClassRefViaRuntime(
ObjCCommonTypesHelper &ObjCTypes) {
llvm::FunctionCallee lookUpClassFn = ObjCTypes.getLookUpClassFn();
- llvm::Value *className =
- CGF.CGM.GetAddrOfConstantCString(ID->getObjCRuntimeNameAsString())
- .getPointer();
+ llvm::Value *className = CGF.CGM
+ .GetAddrOfConstantCString(std::string(
+ ID->getObjCRuntimeNameAsString()))
+ .getPointer();
ASTContext &ctx = CGF.CGM.getContext();
className =
CGF.Builder.CreateBitCast(className,
@@ -3291,6 +3286,8 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
for (auto *PD : ClassExt->properties()) {
if (IsClassProperty != PD->isClassProperty())
continue;
+ if (PD->isDirectProperty())
+ continue;
PropertySet.insert(PD->getIdentifier());
Properties.push_back(PD);
}
@@ -3302,6 +3299,8 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
// class extension.
if (!PropertySet.insert(PD->getIdentifier()).second)
continue;
+ if (PD->isDirectProperty())
+ continue;
Properties.push_back(PD);
}
@@ -3327,8 +3326,6 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
values.addInt(ObjCTypes.IntTy, Properties.size());
auto propertiesArray = values.beginArray(ObjCTypes.PropertyTy);
for (auto PD : Properties) {
- if (PD->isDirectProperty())
- continue;
auto property = propertiesArray.beginStruct(ObjCTypes.PropertyTy);
property.add(GetPropertyName(PD->getIdentifier()));
property.add(GetPropertyTypeString(PD, Container));
@@ -3637,7 +3634,7 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
// Check for a forward reference.
llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true);
if (GV) {
- assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ assert(GV->getValueType() == ObjCTypes.ClassTy &&
"Forward metaclass reference has incorrect type.");
values.finishAndSetAsInitializer(GV);
GV->setSection(Section);
@@ -3700,7 +3697,7 @@ llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
// Check for a forward reference.
llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true);
if (GV) {
- assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ assert(GV->getValueType() == ObjCTypes.ClassTy &&
"Forward metaclass reference has incorrect type.");
values.finishAndSetAsInitializer(GV);
} else {
@@ -3731,7 +3728,7 @@ llvm::Constant *CGObjCMac::EmitMetaClassRef(const ObjCInterfaceDecl *ID) {
llvm::GlobalValue::PrivateLinkage, nullptr,
Name);
- assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ assert(GV->getValueType() == ObjCTypes.ClassTy &&
"Forward metaclass reference has incorrect type.");
return GV;
}
@@ -3745,7 +3742,7 @@ llvm::Value *CGObjCMac::EmitSuperClassRef(const ObjCInterfaceDecl *ID) {
llvm::GlobalValue::PrivateLinkage, nullptr,
Name);
- assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ assert(GV->getValueType() == ObjCTypes.ClassTy &&
"Forward class metadata reference has incorrect type.");
return GV;
}
@@ -4029,22 +4026,49 @@ llvm::Function *CGObjCCommonMac::GenerateMethod(const ObjCMethodDecl *OMD,
llvm::Function *
CGObjCCommonMac::GenerateDirectMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD) {
- auto I = DirectMethodDefinitions.find(OMD->getCanonicalDecl());
- if (I != DirectMethodDefinitions.end())
- return I->second;
+ auto *COMD = OMD->getCanonicalDecl();
+ auto I = DirectMethodDefinitions.find(COMD);
+ llvm::Function *OldFn = nullptr, *Fn = nullptr;
- SmallString<256> Name;
- GetNameForMethod(OMD, CD, Name, /*ignoreCategoryNamespace*/true);
+ if (I != DirectMethodDefinitions.end()) {
+ // Objective-C allows for the declaration and implementation types
+ // to differ slightly.
+ //
+ // If we're being asked for the Function associated for a method
+ // implementation, a previous value might have been cached
+ // based on the type of the canonical declaration.
+ //
+ // If these do not match, then we'll replace this function with
+ // a new one that has the proper type below.
+ if (!OMD->getBody() || COMD->getReturnType() == OMD->getReturnType())
+ return I->second;
+ OldFn = I->second;
+ }
CodeGenTypes &Types = CGM.getTypes();
llvm::FunctionType *MethodTy =
Types.GetFunctionType(Types.arrangeObjCMethodDeclaration(OMD));
- llvm::Function *Method =
- llvm::Function::Create(MethodTy, llvm::GlobalValue::ExternalLinkage,
- Name.str(), &CGM.getModule());
- DirectMethodDefinitions.insert(std::make_pair(OMD->getCanonicalDecl(), Method));
- return Method;
+ if (OldFn) {
+ Fn = llvm::Function::Create(MethodTy, llvm::GlobalValue::ExternalLinkage,
+ "", &CGM.getModule());
+ Fn->takeName(OldFn);
+ OldFn->replaceAllUsesWith(
+ llvm::ConstantExpr::getBitCast(Fn, OldFn->getType()));
+ OldFn->eraseFromParent();
+
+ // Replace the cached function in the map.
+ I->second = Fn;
+ } else {
+ SmallString<256> Name;
+ GetNameForMethod(OMD, CD, Name, /*ignoreCategoryNamespace*/ true);
+
+ Fn = llvm::Function::Create(MethodTy, llvm::GlobalValue::ExternalLinkage,
+ Name.str(), &CGM.getModule());
+ DirectMethodDefinitions.insert(std::make_pair(COMD, Fn));
+ }
+
+ return Fn;
}
void CGObjCCommonMac::GenerateDirectMethodPrologue(
@@ -4195,7 +4219,8 @@ CGObjCCommonMac::CreateCStringLiteral(StringRef Name, ObjCLabelType Type,
: "__TEXT,__cstring,cstring_literals";
break;
case ObjCLabelType::PropertyName:
- Section = "__TEXT,__cstring,cstring_literals";
+ Section = NonFragile ? "__TEXT,__objc_methname,cstring_literals"
+ : "__TEXT,__cstring,cstring_literals";
break;
}
@@ -5128,15 +5153,18 @@ void CGObjCCommonMac::EmitImageInfo() {
Mod.addModuleFlag(llvm::Module::Error, "Objective-C Image Info Section",
llvm::MDString::get(VMContext, Section));
+ auto Int8Ty = llvm::Type::getInt8Ty(VMContext);
if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
// Non-GC overrides those files which specify GC.
- Mod.addModuleFlag(llvm::Module::Override,
- "Objective-C Garbage Collection", (uint32_t)0);
+ Mod.addModuleFlag(llvm::Module::Error,
+ "Objective-C Garbage Collection",
+ llvm::ConstantInt::get(Int8Ty,0));
} else {
// Add the ObjC garbage collection value.
Mod.addModuleFlag(llvm::Module::Error,
"Objective-C Garbage Collection",
- eImageInfo_GarbageCollected);
+ llvm::ConstantInt::get(Int8Ty,
+ (uint8_t)eImageInfo_GarbageCollected));
if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
// Add the ObjC GC Only value.
@@ -5147,7 +5175,7 @@ void CGObjCCommonMac::EmitImageInfo() {
llvm::Metadata *Ops[2] = {
llvm::MDString::get(VMContext, "Objective-C Garbage Collection"),
llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), eImageInfo_GarbageCollected))};
+ Int8Ty, eImageInfo_GarbageCollected))};
Mod.addModuleFlag(llvm::Module::Require, "Objective-C GC Only",
llvm::MDNode::get(VMContext, Ops));
}
@@ -5423,7 +5451,7 @@ llvm::Constant *IvarLayoutBuilder::buildBitmap(CGObjCCommonMac &CGObjC,
// This isn't a stable sort, but our algorithm should handle it fine.
llvm::array_pod_sort(IvarsInfo.begin(), IvarsInfo.end());
} else {
- assert(std::is_sorted(IvarsInfo.begin(), IvarsInfo.end()));
+ assert(llvm::is_sorted(IvarsInfo));
}
assert(IvarsInfo.back().Offset < InstanceEnd);
@@ -6217,11 +6245,9 @@ void CGObjCNonFragileABIMac::AddModuleClassList(
assert((!CGM.getTriple().isOSBinFormatMachO() ||
SectionName.startswith("__DATA")) &&
"SectionName expected to start with __DATA on MachO");
- llvm::GlobalValue::LinkageTypes LT =
- getLinkageTypeForObjCMetadata(CGM, SectionName);
- llvm::GlobalVariable *GV =
- new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false, LT, Init,
- SymbolName);
+ llvm::GlobalVariable *GV = new llvm::GlobalVariable(
+ CGM.getModule(), Init->getType(), false,
+ llvm::GlobalValue::PrivateLinkage, Init, SymbolName);
GV->setAlignment(
llvm::Align(CGM.getDataLayout().getABITypeAlignment(Init->getType())));
GV->setSection(SectionName);
@@ -6350,7 +6376,7 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
unsigned InstanceStart,
unsigned InstanceSize,
const ObjCImplementationDecl *ID) {
- std::string ClassName = ID->getObjCRuntimeNameAsString();
+ std::string ClassName = std::string(ID->getObjCRuntimeNameAsString());
CharUnits beginInstance = CharUnits::fromQuantity(InstanceStart);
CharUnits endInstance = CharUnits::fromQuantity(InstanceSize);
@@ -7509,10 +7535,9 @@ CGObjCNonFragileABIMac::EmitSuperClassRef(CodeGenFunction &CGF,
llvm::Constant *ClassGV = GetClassGlobalForClassRef(ID);
std::string SectionName =
GetSectionName("__objc_superrefs", "regular,no_dead_strip");
- Entry = new llvm::GlobalVariable(
- CGM.getModule(), ClassGV->getType(), false,
- getLinkageTypeForObjCMetadata(CGM, SectionName), ClassGV,
- "OBJC_CLASSLIST_SUP_REFS_$_");
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ClassGV->getType(), false,
+ llvm::GlobalValue::PrivateLinkage, ClassGV,
+ "OBJC_CLASSLIST_SUP_REFS_$_");
Entry->setAlignment(CGF.getPointerAlign().getAsAlign());
Entry->setSection(SectionName);
CGM.addCompilerUsedGlobal(Entry);
@@ -7533,10 +7558,9 @@ llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CodeGenFunction &CGF,
auto MetaClassGV = GetClassGlobal(ID, /*metaclass*/ true, NotForDefinition);
std::string SectionName =
GetSectionName("__objc_superrefs", "regular,no_dead_strip");
- Entry = new llvm::GlobalVariable(
- CGM.getModule(), ObjCTypes.ClassnfABIPtrTy, false,
- getLinkageTypeForObjCMetadata(CGM, SectionName), MetaClassGV,
- "OBJC_CLASSLIST_SUP_REFS_$_");
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
+ false, llvm::GlobalValue::PrivateLinkage,
+ MetaClassGV, "OBJC_CLASSLIST_SUP_REFS_$_");
Entry->setAlignment(Align.getAsAlign());
Entry->setSection(SectionName);
CGM.addCompilerUsedGlobal(Entry);
diff --git a/clang/lib/CodeGen/CGObjCRuntime.cpp b/clang/lib/CodeGen/CGObjCRuntime.cpp
index f8b831d0e9be..39efe040302d 100644
--- a/clang/lib/CodeGen/CGObjCRuntime.cpp
+++ b/clang/lib/CodeGen/CGObjCRuntime.cpp
@@ -13,14 +13,15 @@
//===----------------------------------------------------------------------===//
#include "CGObjCRuntime.h"
-#include "CGCleanup.h"
#include "CGCXXABI.h"
+#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtObjC.h"
#include "clang/CodeGen/CGFunctionInfo.h"
+#include "clang/CodeGen/CodeGenABITypes.h"
#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
@@ -211,7 +212,7 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
CGF.pushSEHCleanup(NormalAndEHCleanup, FinallyFunc);
}
-
+
// Emit the try body.
CGF.EmitStmt(S.getTryBody());
@@ -271,7 +272,7 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
cleanups.ForceCleanup();
CGF.EmitBranchThroughCleanup(Cont);
- }
+ }
// Go back to the try-statement fallthrough.
CGF.Builder.restoreIP(SavedIP);
@@ -383,3 +384,9 @@ CGObjCRuntime::getMessageSendInfo(const ObjCMethodDecl *method,
CGM.getTypes().GetFunctionType(argsInfo)->getPointerTo();
return MessageSendInfo(argsInfo, signatureType);
}
+
+llvm::Constant *
+clang::CodeGen::emitObjCProtocolObject(CodeGenModule &CGM,
+ const ObjCProtocolDecl *protocol) {
+ return CGM.getObjCRuntime().GetOrEmitProtocol(protocol);
+}
diff --git a/clang/lib/CodeGen/CGObjCRuntime.h b/clang/lib/CodeGen/CGObjCRuntime.h
index f0b3525cfde2..a2c189585f7b 100644
--- a/clang/lib/CodeGen/CGObjCRuntime.h
+++ b/clang/lib/CodeGen/CGObjCRuntime.h
@@ -211,6 +211,11 @@ public:
/// implementations.
virtual void GenerateProtocol(const ObjCProtocolDecl *OPD) = 0;
+ /// GetOrEmitProtocol - Get the protocol object for the given
+ /// declaration, emitting it if necessary. The return value has type
+ /// ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD) = 0;
+
/// Generate a function preamble for a method with the specified
/// types.
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 97b17799a03e..43cbe9c720ea 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -21,17 +21,24 @@
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/BitmaskEnum.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/OpenMPKinds.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
+#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Value.h"
+#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
+#include <numeric>
using namespace clang;
using namespace CodeGen;
@@ -562,205 +569,6 @@ enum OpenMPSchedType {
OMP_sch_modifier_nonmonotonic = (1 << 30),
};
-enum OpenMPRTLFunction {
- /// Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
- /// kmpc_micro microtask, ...);
- OMPRTL__kmpc_fork_call,
- /// Call to void *__kmpc_threadprivate_cached(ident_t *loc,
- /// kmp_int32 global_tid, void *data, size_t size, void ***cache);
- OMPRTL__kmpc_threadprivate_cached,
- /// Call to void __kmpc_threadprivate_register( ident_t *,
- /// void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
- OMPRTL__kmpc_threadprivate_register,
- // Call to __kmpc_int32 kmpc_global_thread_num(ident_t *loc);
- OMPRTL__kmpc_global_thread_num,
- // Call to void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
- // kmp_critical_name *crit);
- OMPRTL__kmpc_critical,
- // Call to void __kmpc_critical_with_hint(ident_t *loc, kmp_int32
- // global_tid, kmp_critical_name *crit, uintptr_t hint);
- OMPRTL__kmpc_critical_with_hint,
- // Call to void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
- // kmp_critical_name *crit);
- OMPRTL__kmpc_end_critical,
- // Call to kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
- // global_tid);
- OMPRTL__kmpc_cancel_barrier,
- // Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
- OMPRTL__kmpc_barrier,
- // Call to void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
- OMPRTL__kmpc_for_static_fini,
- // Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
- // global_tid);
- OMPRTL__kmpc_serialized_parallel,
- // Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
- // global_tid);
- OMPRTL__kmpc_end_serialized_parallel,
- // Call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
- // kmp_int32 num_threads);
- OMPRTL__kmpc_push_num_threads,
- // Call to void __kmpc_flush(ident_t *loc);
- OMPRTL__kmpc_flush,
- // Call to kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
- OMPRTL__kmpc_master,
- // Call to void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
- OMPRTL__kmpc_end_master,
- // Call to kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
- // int end_part);
- OMPRTL__kmpc_omp_taskyield,
- // Call to kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid);
- OMPRTL__kmpc_single,
- // Call to void __kmpc_end_single(ident_t *, kmp_int32 global_tid);
- OMPRTL__kmpc_end_single,
- // Call to kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
- // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
- // kmp_routine_entry_t *task_entry);
- OMPRTL__kmpc_omp_task_alloc,
- // Call to kmp_task_t * __kmpc_omp_target_task_alloc(ident_t *,
- // kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t,
- // size_t sizeof_shareds, kmp_routine_entry_t *task_entry,
- // kmp_int64 device_id);
- OMPRTL__kmpc_omp_target_task_alloc,
- // Call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *
- // new_task);
- OMPRTL__kmpc_omp_task,
- // Call to void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
- // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
- // kmp_int32 didit);
- OMPRTL__kmpc_copyprivate,
- // Call to kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
- // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
- // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
- OMPRTL__kmpc_reduce,
- // Call to kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
- // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
- // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
- // *lck);
- OMPRTL__kmpc_reduce_nowait,
- // Call to void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
- // kmp_critical_name *lck);
- OMPRTL__kmpc_end_reduce,
- // Call to void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
- // kmp_critical_name *lck);
- OMPRTL__kmpc_end_reduce_nowait,
- // Call to void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
- // kmp_task_t * new_task);
- OMPRTL__kmpc_omp_task_begin_if0,
- // Call to void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
- // kmp_task_t * new_task);
- OMPRTL__kmpc_omp_task_complete_if0,
- // Call to void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
- OMPRTL__kmpc_ordered,
- // Call to void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
- OMPRTL__kmpc_end_ordered,
- // Call to kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
- // global_tid);
- OMPRTL__kmpc_omp_taskwait,
- // Call to void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
- OMPRTL__kmpc_taskgroup,
- // Call to void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
- OMPRTL__kmpc_end_taskgroup,
- // Call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
- // int proc_bind);
- OMPRTL__kmpc_push_proc_bind,
- // Call to kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32
- // gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t
- // *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
- OMPRTL__kmpc_omp_task_with_deps,
- // Call to void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32
- // gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
- // ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
- OMPRTL__kmpc_omp_wait_deps,
- // Call to kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
- // global_tid, kmp_int32 cncl_kind);
- OMPRTL__kmpc_cancellationpoint,
- // Call to kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
- // kmp_int32 cncl_kind);
- OMPRTL__kmpc_cancel,
- // Call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
- // kmp_int32 num_teams, kmp_int32 thread_limit);
- OMPRTL__kmpc_push_num_teams,
- // Call to void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
- // microtask, ...);
- OMPRTL__kmpc_fork_teams,
- // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
- // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
- // sched, kmp_uint64 grainsize, void *task_dup);
- OMPRTL__kmpc_taskloop,
- // Call to void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
- // num_dims, struct kmp_dim *dims);
- OMPRTL__kmpc_doacross_init,
- // Call to void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
- OMPRTL__kmpc_doacross_fini,
- // Call to void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
- // *vec);
- OMPRTL__kmpc_doacross_post,
- // Call to void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
- // *vec);
- OMPRTL__kmpc_doacross_wait,
- // Call to void *__kmpc_task_reduction_init(int gtid, int num_data, void
- // *data);
- OMPRTL__kmpc_task_reduction_init,
- // Call to void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
- // *d);
- OMPRTL__kmpc_task_reduction_get_th_data,
- // Call to void *__kmpc_alloc(int gtid, size_t sz, omp_allocator_handle_t al);
- OMPRTL__kmpc_alloc,
- // Call to void __kmpc_free(int gtid, void *ptr, omp_allocator_handle_t al);
- OMPRTL__kmpc_free,
-
- //
- // Offloading related calls
- //
- // Call to void __kmpc_push_target_tripcount(int64_t device_id, kmp_uint64
- // size);
- OMPRTL__kmpc_push_target_tripcount,
- // Call to int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
- // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
- // *arg_types);
- OMPRTL__tgt_target,
- // Call to int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
- // int32_t arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
- // *arg_types);
- OMPRTL__tgt_target_nowait,
- // Call to int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
- // int32_t arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
- // *arg_types, int32_t num_teams, int32_t thread_limit);
- OMPRTL__tgt_target_teams,
- // Call to int32_t __tgt_target_teams_nowait(int64_t device_id, void
- // *host_ptr, int32_t arg_num, void** args_base, void **args, int64_t
- // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
- OMPRTL__tgt_target_teams_nowait,
- // Call to void __tgt_register_requires(int64_t flags);
- OMPRTL__tgt_register_requires,
- // Call to void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
- // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
- OMPRTL__tgt_target_data_begin,
- // Call to void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
- // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
- // *arg_types);
- OMPRTL__tgt_target_data_begin_nowait,
- // Call to void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
- // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
- OMPRTL__tgt_target_data_end,
- // Call to void __tgt_target_data_end_nowait(int64_t device_id, int32_t
- // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
- // *arg_types);
- OMPRTL__tgt_target_data_end_nowait,
- // Call to void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
- // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
- OMPRTL__tgt_target_data_update,
- // Call to void __tgt_target_data_update_nowait(int64_t device_id, int32_t
- // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
- // *arg_types);
- OMPRTL__tgt_target_data_update_nowait,
- // Call to int64_t __tgt_mapper_num_components(void *rt_mapper_handle);
- OMPRTL__tgt_mapper_num_components,
- // Call to void __tgt_push_mapper_component(void *rt_mapper_handle, void
- // *base, void *begin, int64_t size, int64_t type);
- OMPRTL__tgt_push_mapper_component,
-};
-
/// A basic class for pre|post-action for advanced codegen sequence for OpenMP
/// region.
class CleanupTy final : public EHScopeStack::Cleanup {
@@ -971,27 +779,37 @@ void ReductionCodeGen::emitAggregateInitialization(
}
ReductionCodeGen::ReductionCodeGen(ArrayRef<const Expr *> Shareds,
+ ArrayRef<const Expr *> Origs,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> ReductionOps) {
ClausesData.reserve(Shareds.size());
SharedAddresses.reserve(Shareds.size());
Sizes.reserve(Shareds.size());
BaseDecls.reserve(Shareds.size());
- auto IPriv = Privates.begin();
- auto IRed = ReductionOps.begin();
+ const auto *IOrig = Origs.begin();
+ const auto *IPriv = Privates.begin();
+ const auto *IRed = ReductionOps.begin();
for (const Expr *Ref : Shareds) {
- ClausesData.emplace_back(Ref, *IPriv, *IRed);
+ ClausesData.emplace_back(Ref, *IOrig, *IPriv, *IRed);
+ std::advance(IOrig, 1);
std::advance(IPriv, 1);
std::advance(IRed, 1);
}
}
-void ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, unsigned N) {
- assert(SharedAddresses.size() == N &&
+void ReductionCodeGen::emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N) {
+ assert(SharedAddresses.size() == N && OrigAddresses.size() == N &&
"Number of generated lvalues must be exactly N.");
- LValue First = emitSharedLValue(CGF, ClausesData[N].Ref);
- LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref);
+ LValue First = emitSharedLValue(CGF, ClausesData[N].Shared);
+ LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Shared);
SharedAddresses.emplace_back(First, Second);
+ if (ClausesData[N].Shared == ClausesData[N].Ref) {
+ OrigAddresses.emplace_back(First, Second);
+ } else {
+ LValue First = emitSharedLValue(CGF, ClausesData[N].Ref);
+ LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref);
+ OrigAddresses.emplace_back(First, Second);
+ }
}
void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
@@ -1001,26 +819,25 @@ void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
bool AsArraySection = isa<OMPArraySectionExpr>(ClausesData[N].Ref);
if (!PrivateType->isVariablyModifiedType()) {
Sizes.emplace_back(
- CGF.getTypeSize(
- SharedAddresses[N].first.getType().getNonReferenceType()),
+ CGF.getTypeSize(OrigAddresses[N].first.getType().getNonReferenceType()),
nullptr);
return;
}
llvm::Value *Size;
llvm::Value *SizeInChars;
- auto *ElemType = cast<llvm::PointerType>(
- SharedAddresses[N].first.getPointer(CGF)->getType())
- ->getElementType();
+ auto *ElemType =
+ cast<llvm::PointerType>(OrigAddresses[N].first.getPointer(CGF)->getType())
+ ->getElementType();
auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
if (AsArraySection) {
- Size = CGF.Builder.CreatePtrDiff(SharedAddresses[N].second.getPointer(CGF),
- SharedAddresses[N].first.getPointer(CGF));
+ Size = CGF.Builder.CreatePtrDiff(OrigAddresses[N].second.getPointer(CGF),
+ OrigAddresses[N].first.getPointer(CGF));
Size = CGF.Builder.CreateNUWAdd(
Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
SizeInChars = CGF.Builder.CreateNUWMul(Size, ElemSizeOf);
} else {
- SizeInChars = CGF.getTypeSize(
- SharedAddresses[N].first.getType().getNonReferenceType());
+ SizeInChars =
+ CGF.getTypeSize(OrigAddresses[N].first.getType().getNonReferenceType());
Size = CGF.Builder.CreateExactUDiv(SizeInChars, ElemSizeOf);
}
Sizes.emplace_back(SizeInChars, Size);
@@ -1243,7 +1060,7 @@ static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
StringRef Separator)
: CGM(CGM), FirstSeparator(FirstSeparator), Separator(Separator),
- OffloadEntriesInfoManager(CGM) {
+ OMPBuilder(CGM.getModule()), OffloadEntriesInfoManager(CGM) {
ASTContext &C = CGM.getContext();
RecordDecl *RD = C.buildImplicitRecord("ident_t");
QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
@@ -1263,55 +1080,11 @@ CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
IdentTy = CGM.getTypes().ConvertRecordDeclType(RD);
KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
+ // Initialize Types used in OpenMPIRBuilder from OMPKinds.def
+ OMPBuilder.initialize();
loadOffloadInfoMetadata();
}
-bool CGOpenMPRuntime::tryEmitDeclareVariant(const GlobalDecl &NewGD,
- const GlobalDecl &OldGD,
- llvm::GlobalValue *OrigAddr,
- bool IsForDefinition) {
- // Emit at least a definition for the aliasee if the the address of the
- // original function is requested.
- if (IsForDefinition || OrigAddr)
- (void)CGM.GetAddrOfGlobal(NewGD);
- StringRef NewMangledName = CGM.getMangledName(NewGD);
- llvm::GlobalValue *Addr = CGM.GetGlobalValue(NewMangledName);
- if (Addr && !Addr->isDeclaration()) {
- const auto *D = cast<FunctionDecl>(OldGD.getDecl());
- const CGFunctionInfo &FI = CGM.getTypes().arrangeGlobalDeclaration(NewGD);
- llvm::Type *DeclTy = CGM.getTypes().GetFunctionType(FI);
-
- // Create a reference to the named value. This ensures that it is emitted
- // if a deferred decl.
- llvm::GlobalValue::LinkageTypes LT = CGM.getFunctionLinkage(OldGD);
-
- // Create the new alias itself, but don't set a name yet.
- auto *GA =
- llvm::GlobalAlias::create(DeclTy, 0, LT, "", Addr, &CGM.getModule());
-
- if (OrigAddr) {
- assert(OrigAddr->isDeclaration() && "Expected declaration");
-
- GA->takeName(OrigAddr);
- OrigAddr->replaceAllUsesWith(
- llvm::ConstantExpr::getBitCast(GA, OrigAddr->getType()));
- OrigAddr->eraseFromParent();
- } else {
- GA->setName(CGM.getMangledName(OldGD));
- }
-
- // Set attributes which are particular to an alias; this is a
- // specialization of the attributes which may be set on a global function.
- if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakRefAttr>() ||
- D->isWeakImported())
- GA->setLinkage(llvm::Function::WeakAnyLinkage);
-
- CGM.SetCommonAttributes(OldGD, GA);
- return true;
- }
- return false;
-}
-
void CGOpenMPRuntime::clear() {
InternalVars.clear();
// Clean non-target variable declarations possibly used only in debug info.
@@ -1325,14 +1098,6 @@ void CGOpenMPRuntime::clear() {
continue;
GV->eraseFromParent();
}
- // Emit aliases for the deferred aliasees.
- for (const auto &Pair : DeferredVariantFunction) {
- StringRef MangledName = CGM.getMangledName(Pair.second.second);
- llvm::GlobalValue *Addr = CGM.GetGlobalValue(MangledName);
- // If not able to emit alias, just emit original declaration.
- (void)tryEmitDeclareVariant(Pair.second.first, Pair.second.second, Addr,
- /*IsForDefinition=*/false);
- }
}
std::string CGOpenMPRuntime::getName(ArrayRef<StringRef> Parts) const {
@@ -1343,7 +1108,7 @@ std::string CGOpenMPRuntime::getName(ArrayRef<StringRef> Parts) const {
OS << Sep << Part;
Sep = Separator;
}
- return OS.str();
+ return std::string(OS.str());
}
static llvm::Function *
@@ -1494,6 +1259,8 @@ static llvm::Function *emitParallelOrTeamsOutlinedFunction(
bool HasCancel = false;
if (const auto *OPD = dyn_cast<OMPParallelDirective>(&D))
HasCancel = OPD->hasCancel();
+ else if (const auto *OPD = dyn_cast<OMPTargetParallelDirective>(&D))
+ HasCancel = OPD->hasCancel();
else if (const auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
HasCancel = OPSD->hasCancel();
else if (const auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
@@ -1511,12 +1278,12 @@ static llvm::Function *emitParallelOrTeamsOutlinedFunction(
// TODO: Temporarily inform the OpenMPIRBuilder, if any, about the new
// parallel region to make cancellation barriers work properly.
- llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder();
- PushAndPopStackRAII PSR(OMPBuilder, CGF, HasCancel);
+ llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
+ PushAndPopStackRAII PSR(&OMPBuilder, CGF, HasCancel);
CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
HasCancel, OutlinedHelperName);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
- return CGF.GenerateOpenMPCapturedStmtFunction(*CS);
+ return CGF.GenerateOpenMPCapturedStmtFunction(*CS, D.getBeginLoc());
}
llvm::Function *CGOpenMPRuntime::emitParallelOutlinedFunction(
@@ -1549,7 +1316,9 @@ llvm::Function *CGOpenMPRuntime::emitTaskOutlinedFunction(
CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
TaskTVar->getType()->castAs<PointerType>())
.getPointer(CGF)};
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task), TaskArgs);
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_omp_task),
+ TaskArgs);
};
CGOpenMPTaskOutlinedRegionInfo::UntiedTaskActionTy Action(Tied, PartIDVar,
UntiedCodeGen);
@@ -1560,11 +1329,19 @@ llvm::Function *CGOpenMPRuntime::emitTaskOutlinedFunction(
isOpenMPTaskLoopDirective(D.getDirectiveKind()) ? OMPD_taskloop
: OMPD_task;
const CapturedStmt *CS = D.getCapturedStmt(Region);
- const auto *TD = dyn_cast<OMPTaskDirective>(&D);
+ bool HasCancel = false;
+ if (const auto *TD = dyn_cast<OMPTaskDirective>(&D))
+ HasCancel = TD->hasCancel();
+ else if (const auto *TD = dyn_cast<OMPTaskLoopDirective>(&D))
+ HasCancel = TD->hasCancel();
+ else if (const auto *TD = dyn_cast<OMPMasterTaskLoopDirective>(&D))
+ HasCancel = TD->hasCancel();
+ else if (const auto *TD = dyn_cast<OMPParallelMasterTaskLoopDirective>(&D))
+ HasCancel = TD->hasCancel();
+
CodeGenFunction CGF(CGM, true);
CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
- InnermostKind,
- TD ? TD->hasCancel() : false, Action);
+ InnermostKind, HasCancel, Action);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
llvm::Function *Res = CGF.GenerateCapturedStmtFunction(*CS);
if (!Tied)
@@ -1786,7 +1563,8 @@ llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
llvm::CallInst *Call = CGF.Builder.CreateCall(
- createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
+ OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
+ OMPRTL___kmpc_global_thread_num),
emitUpdateLocation(CGF, Loc));
Call->setCallingConv(CGF.getRuntimeCC());
Elem.second.ThreadID = Call;
@@ -1800,16 +1578,17 @@ void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
OpenMPLocThreadIDMap.erase(CGF.CurFn);
}
if (FunctionUDRMap.count(CGF.CurFn) > 0) {
- for(auto *D : FunctionUDRMap[CGF.CurFn])
+ for(const auto *D : FunctionUDRMap[CGF.CurFn])
UDRMap.erase(D);
FunctionUDRMap.erase(CGF.CurFn);
}
auto I = FunctionUDMMap.find(CGF.CurFn);
if (I != FunctionUDMMap.end()) {
- for(auto *D : I->second)
+ for(const auto *D : I->second)
UDMMap.erase(D);
FunctionUDMMap.erase(I);
}
+ LastprivateConditionalToTypes.erase(CGF.CurFn);
}
llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
@@ -1826,766 +1605,6 @@ llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
return llvm::PointerType::getUnqual(Kmpc_MicroTy);
}
-llvm::FunctionCallee CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
- llvm::FunctionCallee RTLFn = nullptr;
- switch (static_cast<OpenMPRTLFunction>(Function)) {
- case OMPRTL__kmpc_fork_call: {
- // Build void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro
- // microtask, ...);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
- getKmpc_MicroPointerTy()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_call");
- if (auto *F = dyn_cast<llvm::Function>(RTLFn.getCallee())) {
- if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) {
- llvm::LLVMContext &Ctx = F->getContext();
- llvm::MDBuilder MDB(Ctx);
- // Annotate the callback behavior of the __kmpc_fork_call:
- // - The callback callee is argument number 2 (microtask).
- // - The first two arguments of the callback callee are unknown (-1).
- // - All variadic arguments to the __kmpc_fork_call are passed to the
- // callback callee.
- F->addMetadata(
- llvm::LLVMContext::MD_callback,
- *llvm::MDNode::get(Ctx, {MDB.createCallbackEncoding(
- 2, {-1, -1},
- /* VarArgsArePassed */ true)}));
- }
- }
- break;
- }
- case OMPRTL__kmpc_global_thread_num: {
- // Build kmp_int32 __kmpc_global_thread_num(ident_t *loc);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_global_thread_num");
- break;
- }
- case OMPRTL__kmpc_threadprivate_cached: {
- // Build void *__kmpc_threadprivate_cached(ident_t *loc,
- // kmp_int32 global_tid, void *data, size_t size, void ***cache);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
- CGM.VoidPtrTy, CGM.SizeTy,
- CGM.VoidPtrTy->getPointerTo()->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_cached");
- break;
- }
- case OMPRTL__kmpc_critical: {
- // Build void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
- // kmp_critical_name *crit);
- llvm::Type *TypeParams[] = {
- getIdentTyPointerTy(), CGM.Int32Ty,
- llvm::PointerType::getUnqual(KmpCriticalNameTy)};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical");
- break;
- }
- case OMPRTL__kmpc_critical_with_hint: {
- // Build void __kmpc_critical_with_hint(ident_t *loc, kmp_int32 global_tid,
- // kmp_critical_name *crit, uintptr_t hint);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
- llvm::PointerType::getUnqual(KmpCriticalNameTy),
- CGM.IntPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical_with_hint");
- break;
- }
- case OMPRTL__kmpc_threadprivate_register: {
- // Build void __kmpc_threadprivate_register(ident_t *, void *data,
- // kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
- // typedef void *(*kmpc_ctor)(void *);
- auto *KmpcCtorTy =
- llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
- /*isVarArg*/ false)->getPointerTo();
- // typedef void *(*kmpc_cctor)(void *, void *);
- llvm::Type *KmpcCopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
- auto *KmpcCopyCtorTy =
- llvm::FunctionType::get(CGM.VoidPtrTy, KmpcCopyCtorTyArgs,
- /*isVarArg*/ false)
- ->getPointerTo();
- // typedef void (*kmpc_dtor)(void *);
- auto *KmpcDtorTy =
- llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy, /*isVarArg*/ false)
- ->getPointerTo();
- llvm::Type *FnTyArgs[] = {getIdentTyPointerTy(), CGM.VoidPtrTy, KmpcCtorTy,
- KmpcCopyCtorTy, KmpcDtorTy};
- auto *FnTy = llvm::FunctionType::get(CGM.VoidTy, FnTyArgs,
- /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_register");
- break;
- }
- case OMPRTL__kmpc_end_critical: {
- // Build void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
- // kmp_critical_name *crit);
- llvm::Type *TypeParams[] = {
- getIdentTyPointerTy(), CGM.Int32Ty,
- llvm::PointerType::getUnqual(KmpCriticalNameTy)};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_critical");
- break;
- }
- case OMPRTL__kmpc_cancel_barrier: {
- // Build kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
- // global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_cancel_barrier");
- break;
- }
- case OMPRTL__kmpc_barrier: {
- // Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
- break;
- }
- case OMPRTL__kmpc_for_static_fini: {
- // Build void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_fini");
- break;
- }
- case OMPRTL__kmpc_push_num_threads: {
- // Build void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
- // kmp_int32 num_threads)
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
- CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_threads");
- break;
- }
- case OMPRTL__kmpc_serialized_parallel: {
- // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
- // global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
- break;
- }
- case OMPRTL__kmpc_end_serialized_parallel: {
- // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
- // global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
- break;
- }
- case OMPRTL__kmpc_flush: {
- // Build void __kmpc_flush(ident_t *loc);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_flush");
- break;
- }
- case OMPRTL__kmpc_master: {
- // Build kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_master");
- break;
- }
- case OMPRTL__kmpc_end_master: {
- // Build void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_master");
- break;
- }
- case OMPRTL__kmpc_omp_taskyield: {
- // Build kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
- // int end_part);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_taskyield");
- break;
- }
- case OMPRTL__kmpc_single: {
- // Build kmp_int32 __kmpc_single(ident_t *loc, kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_single");
- break;
- }
- case OMPRTL__kmpc_end_single: {
- // Build void __kmpc_end_single(ident_t *loc, kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_single");
- break;
- }
- case OMPRTL__kmpc_omp_task_alloc: {
- // Build kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
- // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
- // kmp_routine_entry_t *task_entry);
- assert(KmpRoutineEntryPtrTy != nullptr &&
- "Type kmp_routine_entry_t must be created.");
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
- CGM.SizeTy, CGM.SizeTy, KmpRoutineEntryPtrTy};
- // Return void * and then cast to particular kmp_task_t type.
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_alloc");
- break;
- }
- case OMPRTL__kmpc_omp_target_task_alloc: {
- // Build kmp_task_t *__kmpc_omp_target_task_alloc(ident_t *, kmp_int32 gtid,
- // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
- // kmp_routine_entry_t *task_entry, kmp_int64 device_id);
- assert(KmpRoutineEntryPtrTy != nullptr &&
- "Type kmp_routine_entry_t must be created.");
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
- CGM.SizeTy, CGM.SizeTy, KmpRoutineEntryPtrTy,
- CGM.Int64Ty};
- // Return void * and then cast to particular kmp_task_t type.
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_target_task_alloc");
- break;
- }
- case OMPRTL__kmpc_omp_task: {
- // Build kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
- // *new_task);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
- CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task");
- break;
- }
- case OMPRTL__kmpc_copyprivate: {
- // Build void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
- // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
- // kmp_int32 didit);
- llvm::Type *CpyTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
- auto *CpyFnTy =
- llvm::FunctionType::get(CGM.VoidTy, CpyTypeParams, /*isVarArg=*/false);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.SizeTy,
- CGM.VoidPtrTy, CpyFnTy->getPointerTo(),
- CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_copyprivate");
- break;
- }
- case OMPRTL__kmpc_reduce: {
- // Build kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
- // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
- // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
- llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
- auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
- /*isVarArg=*/false);
- llvm::Type *TypeParams[] = {
- getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
- CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
- llvm::PointerType::getUnqual(KmpCriticalNameTy)};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce");
- break;
- }
- case OMPRTL__kmpc_reduce_nowait: {
- // Build kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
- // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
- // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
- // *lck);
- llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
- auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
- /*isVarArg=*/false);
- llvm::Type *TypeParams[] = {
- getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
- CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
- llvm::PointerType::getUnqual(KmpCriticalNameTy)};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce_nowait");
- break;
- }
- case OMPRTL__kmpc_end_reduce: {
- // Build void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
- // kmp_critical_name *lck);
- llvm::Type *TypeParams[] = {
- getIdentTyPointerTy(), CGM.Int32Ty,
- llvm::PointerType::getUnqual(KmpCriticalNameTy)};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce");
- break;
- }
- case OMPRTL__kmpc_end_reduce_nowait: {
- // Build __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
- // kmp_critical_name *lck);
- llvm::Type *TypeParams[] = {
- getIdentTyPointerTy(), CGM.Int32Ty,
- llvm::PointerType::getUnqual(KmpCriticalNameTy)};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn =
- CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce_nowait");
- break;
- }
- case OMPRTL__kmpc_omp_task_begin_if0: {
- // Build void __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
- // *new_task);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
- CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn =
- CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_begin_if0");
- break;
- }
- case OMPRTL__kmpc_omp_task_complete_if0: {
- // Build void __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
- // *new_task);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
- CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy,
- /*Name=*/"__kmpc_omp_task_complete_if0");
- break;
- }
- case OMPRTL__kmpc_ordered: {
- // Build void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_ordered");
- break;
- }
- case OMPRTL__kmpc_end_ordered: {
- // Build void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_ordered");
- break;
- }
- case OMPRTL__kmpc_omp_taskwait: {
- // Build kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_omp_taskwait");
- break;
- }
- case OMPRTL__kmpc_taskgroup: {
- // Build void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_taskgroup");
- break;
- }
- case OMPRTL__kmpc_end_taskgroup: {
- // Build void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_taskgroup");
- break;
- }
- case OMPRTL__kmpc_push_proc_bind: {
- // Build void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
- // int proc_bind)
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_proc_bind");
- break;
- }
- case OMPRTL__kmpc_omp_task_with_deps: {
- // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
- // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
- // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
- llvm::Type *TypeParams[] = {
- getIdentTyPointerTy(), CGM.Int32Ty, CGM.VoidPtrTy, CGM.Int32Ty,
- CGM.VoidPtrTy, CGM.Int32Ty, CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
- RTLFn =
- CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_with_deps");
- break;
- }
- case OMPRTL__kmpc_omp_wait_deps: {
- // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
- // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
- // kmp_depend_info_t *noalias_dep_list);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
- CGM.Int32Ty, CGM.VoidPtrTy,
- CGM.Int32Ty, CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_wait_deps");
- break;
- }
- case OMPRTL__kmpc_cancellationpoint: {
- // Build kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
- // global_tid, kmp_int32 cncl_kind)
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancellationpoint");
- break;
- }
- case OMPRTL__kmpc_cancel: {
- // Build kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
- // kmp_int32 cncl_kind)
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancel");
- break;
- }
- case OMPRTL__kmpc_push_num_teams: {
- // Build void kmpc_push_num_teams (ident_t loc, kmp_int32 global_tid,
- // kmp_int32 num_teams, kmp_int32 num_threads)
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
- CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_teams");
- break;
- }
- case OMPRTL__kmpc_fork_teams: {
- // Build void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
- // microtask, ...);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
- getKmpc_MicroPointerTy()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_teams");
- if (auto *F = dyn_cast<llvm::Function>(RTLFn.getCallee())) {
- if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) {
- llvm::LLVMContext &Ctx = F->getContext();
- llvm::MDBuilder MDB(Ctx);
- // Annotate the callback behavior of the __kmpc_fork_teams:
- // - The callback callee is argument number 2 (microtask).
- // - The first two arguments of the callback callee are unknown (-1).
- // - All variadic arguments to the __kmpc_fork_teams are passed to the
- // callback callee.
- F->addMetadata(
- llvm::LLVMContext::MD_callback,
- *llvm::MDNode::get(Ctx, {MDB.createCallbackEncoding(
- 2, {-1, -1},
- /* VarArgsArePassed */ true)}));
- }
- }
- break;
- }
- case OMPRTL__kmpc_taskloop: {
- // Build void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
- // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
- // sched, kmp_uint64 grainsize, void *task_dup);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
- CGM.IntTy,
- CGM.VoidPtrTy,
- CGM.IntTy,
- CGM.Int64Ty->getPointerTo(),
- CGM.Int64Ty->getPointerTo(),
- CGM.Int64Ty,
- CGM.IntTy,
- CGM.IntTy,
- CGM.Int64Ty,
- CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_taskloop");
- break;
- }
- case OMPRTL__kmpc_doacross_init: {
- // Build void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
- // num_dims, struct kmp_dim *dims);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
- CGM.Int32Ty,
- CGM.Int32Ty,
- CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_init");
- break;
- }
- case OMPRTL__kmpc_doacross_fini: {
- // Build void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_fini");
- break;
- }
- case OMPRTL__kmpc_doacross_post: {
- // Build void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
- // *vec);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
- CGM.Int64Ty->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_post");
- break;
- }
- case OMPRTL__kmpc_doacross_wait: {
- // Build void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
- // *vec);
- llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
- CGM.Int64Ty->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_wait");
- break;
- }
- case OMPRTL__kmpc_task_reduction_init: {
- // Build void *__kmpc_task_reduction_init(int gtid, int num_data, void
- // *data);
- llvm::Type *TypeParams[] = {CGM.IntTy, CGM.IntTy, CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
- RTLFn =
- CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_task_reduction_init");
- break;
- }
- case OMPRTL__kmpc_task_reduction_get_th_data: {
- // Build void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
- // *d);
- llvm::Type *TypeParams[] = {CGM.IntTy, CGM.VoidPtrTy, CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(
- FnTy, /*Name=*/"__kmpc_task_reduction_get_th_data");
- break;
- }
- case OMPRTL__kmpc_alloc: {
- // Build to void *__kmpc_alloc(int gtid, size_t sz, omp_allocator_handle_t
- // al); omp_allocator_handle_t type is void *.
- llvm::Type *TypeParams[] = {CGM.IntTy, CGM.SizeTy, CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_alloc");
- break;
- }
- case OMPRTL__kmpc_free: {
- // Build to void __kmpc_free(int gtid, void *ptr, omp_allocator_handle_t
- // al); omp_allocator_handle_t type is void *.
- llvm::Type *TypeParams[] = {CGM.IntTy, CGM.VoidPtrTy, CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_free");
- break;
- }
- case OMPRTL__kmpc_push_target_tripcount: {
- // Build void __kmpc_push_target_tripcount(int64_t device_id, kmp_uint64
- // size);
- llvm::Type *TypeParams[] = {CGM.Int64Ty, CGM.Int64Ty};
- llvm::FunctionType *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_target_tripcount");
- break;
- }
- case OMPRTL__tgt_target: {
- // Build int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
- // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
- // *arg_types);
- llvm::Type *TypeParams[] = {CGM.Int64Ty,
- CGM.VoidPtrTy,
- CGM.Int32Ty,
- CGM.VoidPtrPtrTy,
- CGM.VoidPtrPtrTy,
- CGM.Int64Ty->getPointerTo(),
- CGM.Int64Ty->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target");
- break;
- }
- case OMPRTL__tgt_target_nowait: {
- // Build int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
- // int32_t arg_num, void** args_base, void **args, int64_t *arg_sizes,
- // int64_t *arg_types);
- llvm::Type *TypeParams[] = {CGM.Int64Ty,
- CGM.VoidPtrTy,
- CGM.Int32Ty,
- CGM.VoidPtrPtrTy,
- CGM.VoidPtrPtrTy,
- CGM.Int64Ty->getPointerTo(),
- CGM.Int64Ty->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_nowait");
- break;
- }
- case OMPRTL__tgt_target_teams: {
- // Build int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
- // int32_t arg_num, void** args_base, void **args, int64_t *arg_sizes,
- // int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
- llvm::Type *TypeParams[] = {CGM.Int64Ty,
- CGM.VoidPtrTy,
- CGM.Int32Ty,
- CGM.VoidPtrPtrTy,
- CGM.VoidPtrPtrTy,
- CGM.Int64Ty->getPointerTo(),
- CGM.Int64Ty->getPointerTo(),
- CGM.Int32Ty,
- CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams");
- break;
- }
- case OMPRTL__tgt_target_teams_nowait: {
- // Build int32_t __tgt_target_teams_nowait(int64_t device_id, void
- // *host_ptr, int32_t arg_num, void** args_base, void **args, int64_t
- // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
- llvm::Type *TypeParams[] = {CGM.Int64Ty,
- CGM.VoidPtrTy,
- CGM.Int32Ty,
- CGM.VoidPtrPtrTy,
- CGM.VoidPtrPtrTy,
- CGM.Int64Ty->getPointerTo(),
- CGM.Int64Ty->getPointerTo(),
- CGM.Int32Ty,
- CGM.Int32Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams_nowait");
- break;
- }
- case OMPRTL__tgt_register_requires: {
- // Build void __tgt_register_requires(int64_t flags);
- llvm::Type *TypeParams[] = {CGM.Int64Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_register_requires");
- break;
- }
- case OMPRTL__tgt_target_data_begin: {
- // Build void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
- // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
- llvm::Type *TypeParams[] = {CGM.Int64Ty,
- CGM.Int32Ty,
- CGM.VoidPtrPtrTy,
- CGM.VoidPtrPtrTy,
- CGM.Int64Ty->getPointerTo(),
- CGM.Int64Ty->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin");
- break;
- }
- case OMPRTL__tgt_target_data_begin_nowait: {
- // Build void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
- // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
- // *arg_types);
- llvm::Type *TypeParams[] = {CGM.Int64Ty,
- CGM.Int32Ty,
- CGM.VoidPtrPtrTy,
- CGM.VoidPtrPtrTy,
- CGM.Int64Ty->getPointerTo(),
- CGM.Int64Ty->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin_nowait");
- break;
- }
- case OMPRTL__tgt_target_data_end: {
- // Build void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
- // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
- llvm::Type *TypeParams[] = {CGM.Int64Ty,
- CGM.Int32Ty,
- CGM.VoidPtrPtrTy,
- CGM.VoidPtrPtrTy,
- CGM.Int64Ty->getPointerTo(),
- CGM.Int64Ty->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end");
- break;
- }
- case OMPRTL__tgt_target_data_end_nowait: {
- // Build void __tgt_target_data_end_nowait(int64_t device_id, int32_t
- // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
- // *arg_types);
- llvm::Type *TypeParams[] = {CGM.Int64Ty,
- CGM.Int32Ty,
- CGM.VoidPtrPtrTy,
- CGM.VoidPtrPtrTy,
- CGM.Int64Ty->getPointerTo(),
- CGM.Int64Ty->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end_nowait");
- break;
- }
- case OMPRTL__tgt_target_data_update: {
- // Build void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
- // void** args_base, void **args, int64_t *arg_sizes, int64_t *arg_types);
- llvm::Type *TypeParams[] = {CGM.Int64Ty,
- CGM.Int32Ty,
- CGM.VoidPtrPtrTy,
- CGM.VoidPtrPtrTy,
- CGM.Int64Ty->getPointerTo(),
- CGM.Int64Ty->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update");
- break;
- }
- case OMPRTL__tgt_target_data_update_nowait: {
- // Build void __tgt_target_data_update_nowait(int64_t device_id, int32_t
- // arg_num, void** args_base, void **args, int64_t *arg_sizes, int64_t
- // *arg_types);
- llvm::Type *TypeParams[] = {CGM.Int64Ty,
- CGM.Int32Ty,
- CGM.VoidPtrPtrTy,
- CGM.VoidPtrPtrTy,
- CGM.Int64Ty->getPointerTo(),
- CGM.Int64Ty->getPointerTo()};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update_nowait");
- break;
- }
- case OMPRTL__tgt_mapper_num_components: {
- // Build int64_t __tgt_mapper_num_components(void *rt_mapper_handle);
- llvm::Type *TypeParams[] = {CGM.VoidPtrTy};
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int64Ty, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_mapper_num_components");
- break;
- }
- case OMPRTL__tgt_push_mapper_component: {
- // Build void __tgt_push_mapper_component(void *rt_mapper_handle, void
- // *base, void *begin, int64_t size, int64_t type);
- llvm::Type *TypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy, CGM.VoidPtrTy,
- CGM.Int64Ty, CGM.Int64Ty};
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_push_mapper_component");
- break;
- }
- }
- assert(RTLFn && "Unable to find OpenMP runtime function");
- return RTLFn;
-}
-
llvm::FunctionCallee
CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize, bool IVSigned) {
assert((IVSize == 32 || IVSize == 64) &&
@@ -2764,7 +1783,9 @@ Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
getOrCreateThreadPrivateCache(VD)};
return Address(CGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
+ Args),
VDAddr.getAlignment());
}
@@ -2774,7 +1795,8 @@ void CGOpenMPRuntime::emitThreadPrivateVarInit(
// Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
// library.
llvm::Value *OMPLoc = emitUpdateLocation(CGF, Loc);
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_global_thread_num),
OMPLoc);
// Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
// to register constructor/destructor for variable.
@@ -2782,7 +1804,9 @@ void CGOpenMPRuntime::emitThreadPrivateVarInit(
OMPLoc, CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.VoidPtrTy),
Ctor, CopyCtor, Dtor};
CGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__kmpc_threadprivate_register), Args);
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_threadprivate_register),
+ Args);
}
llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
@@ -2813,7 +1837,7 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
std::string Name = getName({"__kmpc_global_ctor_", ""});
llvm::Function *Fn =
- CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, Loc);
+ CGM.CreateGlobalInitOrCleanUpFunction(FTy, Name, FI, Loc);
CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
Args, Loc, Loc);
llvm::Value *ArgVal = CtorCGF.EmitLoadOfScalar(
@@ -2846,7 +1870,7 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
std::string Name = getName({"__kmpc_global_dtor_", ""});
llvm::Function *Fn =
- CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, Loc);
+ CGM.CreateGlobalInitOrCleanUpFunction(FTy, Name, FI, Loc);
auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
Loc, Loc);
@@ -2889,7 +1913,7 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
auto *InitFunctionTy =
llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
std::string Name = getName({"__omp_threadprivate_init_", ""});
- llvm::Function *InitFunction = CGM.CreateGlobalInitOrDestructFunction(
+ llvm::Function *InitFunction = CGM.CreateGlobalInitOrCleanUpFunction(
InitFunctionTy, Name, CGM.getTypes().arrangeNullaryFunction());
CodeGenFunction InitCGF(CGM);
FunctionArgList ArgList;
@@ -2918,12 +1942,14 @@ bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
HasRequiresUnifiedSharedMemory))
return CGM.getLangOpts().OpenMPIsDevice;
VD = VD->getDefinition(CGM.getContext());
- if (VD && !DeclareTargetWithDefinition.insert(CGM.getMangledName(VD)).second)
+ assert(VD && "Unknown VarDecl");
+
+ if (!DeclareTargetWithDefinition.insert(CGM.getMangledName(VD)).second)
return CGM.getLangOpts().OpenMPIsDevice;
QualType ASTTy = VD->getType();
-
SourceLocation Loc = VD->getCanonicalDecl()->getBeginLoc();
+
// Produce the unique prefix to identify the new target regions. We use
// the source location of the variable declaration which we know to not
// conflict with any target region.
@@ -2949,7 +1975,7 @@ bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
- llvm::Function *Fn = CGM.CreateGlobalInitOrDestructFunction(
+ llvm::Function *Fn = CGM.CreateGlobalInitOrCleanUpFunction(
FTy, Twine(Buffer, "_ctor"), FI, Loc);
auto NL = ApplyDebugLocation::CreateEmpty(CtorCGF);
CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
@@ -2987,7 +2013,7 @@ bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
- llvm::Function *Fn = CGM.CreateGlobalInitOrDestructFunction(
+ llvm::Function *Fn = CGM.CreateGlobalInitOrCleanUpFunction(
FTy, Twine(Buffer, "_dtor"), FI, Loc);
auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
@@ -3042,7 +2068,9 @@ Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
return Address(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
+ Args),
VarLVType->getPointerTo(/*AddrSpace=*/0)),
CGM.getContext().getTypeAlignInChars(VarType));
}
@@ -3093,8 +2121,9 @@ void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
if (!CGF.HaveInsertPoint())
return;
llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
- auto &&ThenGen = [OutlinedFn, CapturedVars, RTLoc](CodeGenFunction &CGF,
- PrePostActionTy &) {
+ auto &M = CGM.getModule();
+ auto &&ThenGen = [&M, OutlinedFn, CapturedVars, RTLoc,
+ this](CodeGenFunction &CGF, PrePostActionTy &) {
// Build call __kmpc_fork_call(loc, n, microtask, var1, .., varn);
CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
llvm::Value *Args[] = {
@@ -3106,18 +2135,19 @@ void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
RealArgs.append(CapturedVars.begin(), CapturedVars.end());
llvm::FunctionCallee RTLFn =
- RT.createRuntimeFunction(OMPRTL__kmpc_fork_call);
+ OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_fork_call);
CGF.EmitRuntimeCall(RTLFn, RealArgs);
};
- auto &&ElseGen = [OutlinedFn, CapturedVars, RTLoc, Loc](CodeGenFunction &CGF,
- PrePostActionTy &) {
+ auto &&ElseGen = [&M, OutlinedFn, CapturedVars, RTLoc, Loc,
+ this](CodeGenFunction &CGF, PrePostActionTy &) {
CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
llvm::Value *ThreadID = RT.getThreadID(CGF, Loc);
// Build calls:
// __kmpc_serialized_parallel(&Loc, GTid);
llvm::Value *Args[] = {RTLoc, ThreadID};
- CGF.EmitRuntimeCall(
- RT.createRuntimeFunction(OMPRTL__kmpc_serialized_parallel), Args);
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ M, OMPRTL___kmpc_serialized_parallel),
+ Args);
// OutlinedFn(&GTid, &zero_bound, CapturedStruct);
Address ThreadIDAddr = RT.emitThreadIDAddress(CGF, Loc);
@@ -3134,9 +2164,9 @@ void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
// __kmpc_end_serialized_parallel(&Loc, GTid);
llvm::Value *EndArgs[] = {RT.emitUpdateLocation(CGF, Loc), ThreadID};
- CGF.EmitRuntimeCall(
- RT.createRuntimeFunction(OMPRTL__kmpc_end_serialized_parallel),
- EndArgs);
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ M, OMPRTL___kmpc_end_serialized_parallel),
+ EndArgs);
};
if (IfCond) {
emitIfClause(CGF, IfCond, ThenGen, ElseGen);
@@ -3250,12 +2280,16 @@ void CGOpenMPRuntime::emitCriticalRegion(CodeGenFunction &CGF,
std::end(Args));
if (Hint) {
EnterArgs.push_back(CGF.Builder.CreateIntCast(
- CGF.EmitScalarExpr(Hint), CGM.IntPtrTy, /*isSigned=*/false));
+ CGF.EmitScalarExpr(Hint), CGM.Int32Ty, /*isSigned=*/false));
}
CommonActionTy Action(
- createRuntimeFunction(Hint ? OMPRTL__kmpc_critical_with_hint
- : OMPRTL__kmpc_critical),
- EnterArgs, createRuntimeFunction(OMPRTL__kmpc_end_critical), Args);
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(),
+ Hint ? OMPRTL___kmpc_critical_with_hint : OMPRTL___kmpc_critical),
+ EnterArgs,
+ OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
+ OMPRTL___kmpc_end_critical),
+ Args);
CriticalOpGen.setAction(Action);
emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen);
}
@@ -3271,8 +2305,12 @@ void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF,
// }
// Prepare arguments and build a call to __kmpc_master
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
- CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_master), Args,
- createRuntimeFunction(OMPRTL__kmpc_end_master), Args,
+ CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_master),
+ Args,
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_end_master),
+ Args,
/*Conditional=*/true);
MasterOpGen.setAction(Action);
emitInlinedDirective(CGF, OMPD_master, MasterOpGen);
@@ -3283,11 +2321,18 @@ void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
SourceLocation Loc) {
if (!CGF.HaveInsertPoint())
return;
- // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
- llvm::Value *Args[] = {
- emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
- llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)};
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskyield), Args);
+ if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
+ OMPBuilder.CreateTaskyield(CGF.Builder);
+ } else {
+ // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
+ llvm::Value *Args[] = {
+ emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
+ llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)};
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_omp_taskyield),
+ Args);
+ }
+
if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
Region->emitUntiedSwitch(CGF);
}
@@ -3302,8 +2347,11 @@ void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF,
// __kmpc_end_taskgroup(ident_t *, gtid);
// Prepare arguments and build a call to __kmpc_taskgroup
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
- CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_taskgroup), Args,
- createRuntimeFunction(OMPRTL__kmpc_end_taskgroup),
+ CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_taskgroup),
+ Args,
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_end_taskgroup),
Args);
TaskgroupOpGen.setAction(Action);
emitInlinedDirective(CGF, OMPD_taskgroup, TaskgroupOpGen);
@@ -3409,8 +2457,12 @@ void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
}
// Prepare arguments and build a call to __kmpc_single
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
- CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_single), Args,
- createRuntimeFunction(OMPRTL__kmpc_end_single), Args,
+ CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_single),
+ Args,
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_end_single),
+ Args,
/*Conditional=*/true);
SingleOpGen.setAction(Action);
emitInlinedDirective(CGF, OMPD_single, SingleOpGen);
@@ -3455,7 +2507,9 @@ void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
CpyFn, // void (*) (void *, void *) <copy_func>
DidItVal // i32 did_it
};
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_copyprivate), Args);
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_copyprivate),
+ Args);
}
}
@@ -3470,8 +2524,11 @@ void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF,
// Prepare arguments and build a call to __kmpc_ordered
if (IsThreads) {
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
- CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_ordered), Args,
- createRuntimeFunction(OMPRTL__kmpc_end_ordered),
+ CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_ordered),
+ Args,
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_end_ordered),
Args);
OrderedOpGen.setAction(Action);
emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
@@ -3519,9 +2576,8 @@ void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
// Check if we should use the OMPBuilder
auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo);
- llvm::OpenMPIRBuilder *OMPBuilder = CGF.CGM.getOpenMPIRBuilder();
- if (OMPBuilder) {
- CGF.Builder.restoreIP(OMPBuilder->CreateBarrier(
+ if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
+ CGF.Builder.restoreIP(OMPBuilder.CreateBarrier(
CGF.Builder, Kind, ForceSimpleCall, EmitChecks));
return;
}
@@ -3538,7 +2594,9 @@ void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
if (OMPRegionInfo) {
if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
llvm::Value *Result = CGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__kmpc_cancel_barrier), Args);
+ OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
+ OMPRTL___kmpc_cancel_barrier),
+ Args);
if (EmitChecks) {
// if (__kmpc_cancel_barrier()) {
// exit from construct;
@@ -3557,7 +2615,9 @@ void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
return;
}
}
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_barrier), Args);
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_barrier),
+ Args);
}
/// Map the OpenMP loop schedule to the runtime enumeration.
@@ -3771,6 +2831,7 @@ void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
llvm::Value *ThreadId = getThreadID(CGF, Loc);
llvm::FunctionCallee StaticInitFunction =
createForStaticInitFunction(Values.IVSize, Values.IVSigned);
+ auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values);
}
@@ -3805,7 +2866,9 @@ void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
? OMP_IDENT_WORK_LOOP
: OMP_IDENT_WORK_SECTIONS),
getThreadID(CGF, Loc)};
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_for_static_fini),
+ auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_for_static_fini),
Args);
}
@@ -3853,7 +2916,8 @@ void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_num_threads),
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_push_num_threads),
Args);
}
@@ -3867,16 +2931,23 @@ void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF,
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
llvm::ConstantInt::get(CGM.IntTy, unsigned(ProcBind), /*isSigned=*/true)};
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_proc_bind), Args);
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_push_proc_bind),
+ Args);
}
void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
- SourceLocation Loc) {
- if (!CGF.HaveInsertPoint())
- return;
- // Build call void __kmpc_flush(ident_t *loc)
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_flush),
- emitUpdateLocation(CGF, Loc));
+ SourceLocation Loc, llvm::AtomicOrdering AO) {
+ if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
+ OMPBuilder.CreateFlush(CGF.Builder);
+ } else {
+ if (!CGF.HaveInsertPoint())
+ return;
+ // Build call void __kmpc_flush(ident_t *loc)
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_flush),
+ emitUpdateLocation(CGF, Loc));
+ }
}
namespace {
@@ -4358,13 +3429,14 @@ QualType CGOpenMPRuntime::getTgtOffloadEntryQTy() {
namespace {
struct PrivateHelpersTy {
- PrivateHelpersTy(const VarDecl *Original, const VarDecl *PrivateCopy,
- const VarDecl *PrivateElemInit)
- : Original(Original), PrivateCopy(PrivateCopy),
+ PrivateHelpersTy(const Expr *OriginalRef, const VarDecl *Original,
+ const VarDecl *PrivateCopy, const VarDecl *PrivateElemInit)
+ : OriginalRef(OriginalRef), Original(Original), PrivateCopy(PrivateCopy),
PrivateElemInit(PrivateElemInit) {}
- const VarDecl *Original;
- const VarDecl *PrivateCopy;
- const VarDecl *PrivateElemInit;
+ const Expr *OriginalRef = nullptr;
+ const VarDecl *Original = nullptr;
+ const VarDecl *PrivateCopy = nullptr;
+ const VarDecl *PrivateElemInit = nullptr;
};
typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
} // anonymous namespace
@@ -4744,7 +3816,7 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
// For target-based directives skip 3 firstprivate arrays BasePointersArray,
// PointersArray and SizesArray. The original variables for these arrays are
// not captured and we get their addresses explicitly.
- if ((!IsTargetTask && !Data.FirstprivateVars.empty()) ||
+ if ((!IsTargetTask && !Data.FirstprivateVars.empty() && ForDup) ||
(IsTargetTask && KmpTaskSharedsPtr.isValid())) {
SrcBase = CGF.MakeAddrLValue(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
@@ -4776,13 +3848,23 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
"Expected artificial target data variable.");
SharedRefLValue =
CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(OriginalVD), Type);
- } else {
+ } else if (ForDup) {
SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
SharedRefLValue = CGF.MakeAddrLValue(
Address(SharedRefLValue.getPointer(CGF),
C.getDeclAlign(OriginalVD)),
SharedRefLValue.getType(), LValueBaseInfo(AlignmentSource::Decl),
SharedRefLValue.getTBAAInfo());
+ } else if (CGF.LambdaCaptureFields.count(
+ Pair.second.Original->getCanonicalDecl()) > 0 ||
+ dyn_cast_or_null<BlockDecl>(CGF.CurCodeDecl)) {
+ SharedRefLValue = CGF.EmitLValue(Pair.second.OriginalRef);
+ } else {
+ // Processing for implicitly captured variables.
+ InlinedOpenMPRegionRAII Region(
+ CGF, [](CodeGenFunction &, PrePostActionTy &) {}, OMPD_unknown,
+ /*HasCancel=*/false);
+ SharedRefLValue = CGF.EmitLValue(Pair.second.OriginalRef);
}
if (Type->isArrayType()) {
// Initialize firstprivate array.
@@ -4915,7 +3997,7 @@ emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
Base, *std::next(KmpTaskTQTyRD->field_begin(),
KmpTaskTShareds)),
Loc),
- CGF.getNaturalTypeAlignment(SharedsTy));
+ CGM.getNaturalTypeAlignment(SharedsTy));
}
emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, TDBase, KmpTaskTWithPrivatesQTyRD,
SharedsTy, SharedsPtrTy, Data, Privates, /*ForDup=*/true);
@@ -4938,6 +4020,135 @@ checkDestructorsRequired(const RecordDecl *KmpTaskTWithPrivatesQTyRD) {
return NeedsCleanup;
}
+namespace {
+/// Loop generator for OpenMP iterator expression.
+class OMPIteratorGeneratorScope final
+ : public CodeGenFunction::OMPPrivateScope {
+ CodeGenFunction &CGF;
+ const OMPIteratorExpr *E = nullptr;
+ SmallVector<CodeGenFunction::JumpDest, 4> ContDests;
+ SmallVector<CodeGenFunction::JumpDest, 4> ExitDests;
+ OMPIteratorGeneratorScope() = delete;
+ OMPIteratorGeneratorScope(OMPIteratorGeneratorScope &) = delete;
+
+public:
+ OMPIteratorGeneratorScope(CodeGenFunction &CGF, const OMPIteratorExpr *E)
+ : CodeGenFunction::OMPPrivateScope(CGF), CGF(CGF), E(E) {
+ if (!E)
+ return;
+ SmallVector<llvm::Value *, 4> Uppers;
+ for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
+ Uppers.push_back(CGF.EmitScalarExpr(E->getHelper(I).Upper));
+ const auto *VD = cast<VarDecl>(E->getIteratorDecl(I));
+ addPrivate(VD, [&CGF, VD]() {
+ return CGF.CreateMemTemp(VD->getType(), VD->getName());
+ });
+ const OMPIteratorHelperData &HelperData = E->getHelper(I);
+ addPrivate(HelperData.CounterVD, [&CGF, &HelperData]() {
+ return CGF.CreateMemTemp(HelperData.CounterVD->getType(),
+ "counter.addr");
+ });
+ }
+ Privatize();
+
+ for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
+ const OMPIteratorHelperData &HelperData = E->getHelper(I);
+ LValue CLVal =
+ CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(HelperData.CounterVD),
+ HelperData.CounterVD->getType());
+ // Counter = 0;
+ CGF.EmitStoreOfScalar(
+ llvm::ConstantInt::get(CLVal.getAddress(CGF).getElementType(), 0),
+ CLVal);
+ CodeGenFunction::JumpDest &ContDest =
+ ContDests.emplace_back(CGF.getJumpDestInCurrentScope("iter.cont"));
+ CodeGenFunction::JumpDest &ExitDest =
+ ExitDests.emplace_back(CGF.getJumpDestInCurrentScope("iter.exit"));
+ // N = <number-of_iterations>;
+ llvm::Value *N = Uppers[I];
+ // cont:
+ // if (Counter < N) goto body; else goto exit;
+ CGF.EmitBlock(ContDest.getBlock());
+ auto *CVal =
+ CGF.EmitLoadOfScalar(CLVal, HelperData.CounterVD->getLocation());
+ llvm::Value *Cmp =
+ HelperData.CounterVD->getType()->isSignedIntegerOrEnumerationType()
+ ? CGF.Builder.CreateICmpSLT(CVal, N)
+ : CGF.Builder.CreateICmpULT(CVal, N);
+ llvm::BasicBlock *BodyBB = CGF.createBasicBlock("iter.body");
+ CGF.Builder.CreateCondBr(Cmp, BodyBB, ExitDest.getBlock());
+ // body:
+ CGF.EmitBlock(BodyBB);
+ // Iteri = Begini + Counter * Stepi;
+ CGF.EmitIgnoredExpr(HelperData.Update);
+ }
+ }
+ ~OMPIteratorGeneratorScope() {
+ if (!E)
+ return;
+ for (unsigned I = E->numOfIterators(); I > 0; --I) {
+ // Counter = Counter + 1;
+ const OMPIteratorHelperData &HelperData = E->getHelper(I - 1);
+ CGF.EmitIgnoredExpr(HelperData.CounterUpdate);
+ // goto cont;
+ CGF.EmitBranchThroughCleanup(ContDests[I - 1]);
+ // exit:
+ CGF.EmitBlock(ExitDests[I - 1].getBlock(), /*IsFinished=*/I == 1);
+ }
+ }
+};
+} // namespace
+
+static std::pair<llvm::Value *, llvm::Value *>
+getPointerAndSize(CodeGenFunction &CGF, const Expr *E) {
+ const auto *OASE = dyn_cast<OMPArrayShapingExpr>(E);
+ llvm::Value *Addr;
+ if (OASE) {
+ const Expr *Base = OASE->getBase();
+ Addr = CGF.EmitScalarExpr(Base);
+ } else {
+ Addr = CGF.EmitLValue(E).getPointer(CGF);
+ }
+ llvm::Value *SizeVal;
+ QualType Ty = E->getType();
+ if (OASE) {
+ SizeVal = CGF.getTypeSize(OASE->getBase()->getType()->getPointeeType());
+ for (const Expr *SE : OASE->getDimensions()) {
+ llvm::Value *Sz = CGF.EmitScalarExpr(SE);
+ Sz = CGF.EmitScalarConversion(
+ Sz, SE->getType(), CGF.getContext().getSizeType(), SE->getExprLoc());
+ SizeVal = CGF.Builder.CreateNUWMul(SizeVal, Sz);
+ }
+ } else if (const auto *ASE =
+ dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
+ LValue UpAddrLVal =
+ CGF.EmitOMPArraySectionExpr(ASE, /*IsLowerBound=*/false);
+ llvm::Value *UpAddr =
+ CGF.Builder.CreateConstGEP1_32(UpAddrLVal.getPointer(CGF), /*Idx0=*/1);
+ llvm::Value *LowIntPtr = CGF.Builder.CreatePtrToInt(Addr, CGF.SizeTy);
+ llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGF.SizeTy);
+ SizeVal = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
+ } else {
+ SizeVal = CGF.getTypeSize(Ty);
+ }
+ return std::make_pair(Addr, SizeVal);
+}
+
+/// Builds kmp_depend_info, if it is not built yet, and builds flags type.
+static void getKmpAffinityType(ASTContext &C, QualType &KmpTaskAffinityInfoTy) {
+ QualType FlagsTy = C.getIntTypeForBitwidth(32, /*Signed=*/false);
+ if (KmpTaskAffinityInfoTy.isNull()) {
+ RecordDecl *KmpAffinityInfoRD =
+ C.buildImplicitRecord("kmp_task_affinity_info_t");
+ KmpAffinityInfoRD->startDefinition();
+ addFieldToRecordDecl(C, KmpAffinityInfoRD, C.getIntPtrType());
+ addFieldToRecordDecl(C, KmpAffinityInfoRD, C.getSizeType());
+ addFieldToRecordDecl(C, KmpAffinityInfoRD, FlagsTy);
+ KmpAffinityInfoRD->completeDefinition();
+ KmpTaskAffinityInfoTy = C.getRecordType(KmpAffinityInfoRD);
+ }
+}
+
CGOpenMPRuntime::TaskResultTy
CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
@@ -4946,23 +4157,23 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
ASTContext &C = CGM.getContext();
llvm::SmallVector<PrivateDataTy, 4> Privates;
// Aggregate privates and sort them by the alignment.
- auto I = Data.PrivateCopies.begin();
+ const auto *I = Data.PrivateCopies.begin();
for (const Expr *E : Data.PrivateVars) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
Privates.emplace_back(
C.getDeclAlign(VD),
- PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
+ PrivateHelpersTy(E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
/*PrivateElemInit=*/nullptr));
++I;
}
I = Data.FirstprivateCopies.begin();
- auto IElemInitRef = Data.FirstprivateInits.begin();
+ const auto *IElemInitRef = Data.FirstprivateInits.begin();
for (const Expr *E : Data.FirstprivateVars) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
Privates.emplace_back(
C.getDeclAlign(VD),
PrivateHelpersTy(
- VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
+ E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
cast<VarDecl>(cast<DeclRefExpr>(*IElemInitRef)->getDecl())));
++I;
++IElemInitRef;
@@ -4972,7 +4183,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
Privates.emplace_back(
C.getDeclAlign(VD),
- PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
+ PrivateHelpersTy(E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
/*PrivateElemInit=*/nullptr));
++I;
}
@@ -5046,7 +4257,8 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
TiedFlag = 0x1,
FinalFlag = 0x2,
DestructorsFlag = 0x8,
- PriorityFlag = 0x20
+ PriorityFlag = 0x20,
+ DetachableFlag = 0x40,
};
unsigned Flags = Data.Tied ? TiedFlag : 0;
bool NeedsCleanup = false;
@@ -5057,6 +4269,8 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
}
if (Data.Priority.getInt())
Flags = Flags | PriorityFlag;
+ if (D.hasClausesOfKind<OMPDetachClause>())
+ Flags = Flags | DetachableFlag;
llvm::Value *TaskFlags =
Data.Final.getPointer()
? CGF.Builder.CreateSelect(Data.Final.getPointer(),
@@ -5084,10 +4298,170 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
AllocArgs.push_back(DeviceID);
NewTask = CGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__kmpc_omp_target_task_alloc), AllocArgs);
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_omp_target_task_alloc),
+ AllocArgs);
} else {
- NewTask = CGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__kmpc_omp_task_alloc), AllocArgs);
+ NewTask =
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_omp_task_alloc),
+ AllocArgs);
+ }
+ // Emit detach clause initialization.
+ // evt = (typeof(evt))__kmpc_task_allow_completion_event(loc, tid,
+ // task_descriptor);
+ if (const auto *DC = D.getSingleClause<OMPDetachClause>()) {
+ const Expr *Evt = DC->getEventHandler()->IgnoreParenImpCasts();
+ LValue EvtLVal = CGF.EmitLValue(Evt);
+
+ // Build kmp_event_t *__kmpc_task_allow_completion_event(ident_t *loc_ref,
+ // int gtid, kmp_task_t *task);
+ llvm::Value *Loc = emitUpdateLocation(CGF, DC->getBeginLoc());
+ llvm::Value *Tid = getThreadID(CGF, DC->getBeginLoc());
+ Tid = CGF.Builder.CreateIntCast(Tid, CGF.IntTy, /*isSigned=*/false);
+ llvm::Value *EvtVal = CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_task_allow_completion_event),
+ {Loc, Tid, NewTask});
+ EvtVal = CGF.EmitScalarConversion(EvtVal, C.VoidPtrTy, Evt->getType(),
+ Evt->getExprLoc());
+ CGF.EmitStoreOfScalar(EvtVal, EvtLVal);
+ }
+ // Process affinity clauses.
+ if (D.hasClausesOfKind<OMPAffinityClause>()) {
+ // Process list of affinity data.
+ ASTContext &C = CGM.getContext();
+ Address AffinitiesArray = Address::invalid();
+ // Calculate number of elements to form the array of affinity data.
+ llvm::Value *NumOfElements = nullptr;
+ unsigned NumAffinities = 0;
+ for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
+ if (const Expr *Modifier = C->getModifier()) {
+ const auto *IE = cast<OMPIteratorExpr>(Modifier->IgnoreParenImpCasts());
+ for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
+ llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
+ Sz = CGF.Builder.CreateIntCast(Sz, CGF.SizeTy, /*isSigned=*/false);
+ NumOfElements =
+ NumOfElements ? CGF.Builder.CreateNUWMul(NumOfElements, Sz) : Sz;
+ }
+ } else {
+ NumAffinities += C->varlist_size();
+ }
+ }
+ getKmpAffinityType(CGM.getContext(), KmpTaskAffinityInfoTy);
+ // Fields ids in kmp_task_affinity_info record.
+ enum RTLAffinityInfoFieldsTy { BaseAddr, Len, Flags };
+
+ QualType KmpTaskAffinityInfoArrayTy;
+ if (NumOfElements) {
+ NumOfElements = CGF.Builder.CreateNUWAdd(
+ llvm::ConstantInt::get(CGF.SizeTy, NumAffinities), NumOfElements);
+ OpaqueValueExpr OVE(
+ Loc,
+ C.getIntTypeForBitwidth(C.getTypeSize(C.getSizeType()), /*Signed=*/0),
+ VK_RValue);
+ CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE,
+ RValue::get(NumOfElements));
+ KmpTaskAffinityInfoArrayTy =
+ C.getVariableArrayType(KmpTaskAffinityInfoTy, &OVE, ArrayType::Normal,
+ /*IndexTypeQuals=*/0, SourceRange(Loc, Loc));
+ // Properly emit variable-sized array.
+ auto *PD = ImplicitParamDecl::Create(C, KmpTaskAffinityInfoArrayTy,
+ ImplicitParamDecl::Other);
+ CGF.EmitVarDecl(*PD);
+ AffinitiesArray = CGF.GetAddrOfLocalVar(PD);
+ NumOfElements = CGF.Builder.CreateIntCast(NumOfElements, CGF.Int32Ty,
+ /*isSigned=*/false);
+ } else {
+ KmpTaskAffinityInfoArrayTy = C.getConstantArrayType(
+ KmpTaskAffinityInfoTy,
+ llvm::APInt(C.getTypeSize(C.getSizeType()), NumAffinities), nullptr,
+ ArrayType::Normal, /*IndexTypeQuals=*/0);
+ AffinitiesArray =
+ CGF.CreateMemTemp(KmpTaskAffinityInfoArrayTy, ".affs.arr.addr");
+ AffinitiesArray = CGF.Builder.CreateConstArrayGEP(AffinitiesArray, 0);
+ NumOfElements = llvm::ConstantInt::get(CGM.Int32Ty, NumAffinities,
+ /*isSigned=*/false);
+ }
+
+ const auto *KmpAffinityInfoRD = KmpTaskAffinityInfoTy->getAsRecordDecl();
+ // Fill array by elements without iterators.
+ unsigned Pos = 0;
+ bool HasIterator = false;
+ for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
+ if (C->getModifier()) {
+ HasIterator = true;
+ continue;
+ }
+ for (const Expr *E : C->varlists()) {
+ llvm::Value *Addr;
+ llvm::Value *Size;
+ std::tie(Addr, Size) = getPointerAndSize(CGF, E);
+ LValue Base =
+ CGF.MakeAddrLValue(CGF.Builder.CreateConstGEP(AffinitiesArray, Pos),
+ KmpTaskAffinityInfoTy);
+ // affs[i].base_addr = &<Affinities[i].second>;
+ LValue BaseAddrLVal = CGF.EmitLValueForField(
+ Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
+ CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
+ BaseAddrLVal);
+ // affs[i].len = sizeof(<Affinities[i].second>);
+ LValue LenLVal = CGF.EmitLValueForField(
+ Base, *std::next(KmpAffinityInfoRD->field_begin(), Len));
+ CGF.EmitStoreOfScalar(Size, LenLVal);
+ ++Pos;
+ }
+ }
+ LValue PosLVal;
+ if (HasIterator) {
+ PosLVal = CGF.MakeAddrLValue(
+ CGF.CreateMemTemp(C.getSizeType(), "affs.counter.addr"),
+ C.getSizeType());
+ CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Pos), PosLVal);
+ }
+ // Process elements with iterators.
+ for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
+ const Expr *Modifier = C->getModifier();
+ if (!Modifier)
+ continue;
+ OMPIteratorGeneratorScope IteratorScope(
+ CGF, cast_or_null<OMPIteratorExpr>(Modifier->IgnoreParenImpCasts()));
+ for (const Expr *E : C->varlists()) {
+ llvm::Value *Addr;
+ llvm::Value *Size;
+ std::tie(Addr, Size) = getPointerAndSize(CGF, E);
+ llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
+ LValue Base = CGF.MakeAddrLValue(
+ Address(CGF.Builder.CreateGEP(AffinitiesArray.getPointer(), Idx),
+ AffinitiesArray.getAlignment()),
+ KmpTaskAffinityInfoTy);
+ // affs[i].base_addr = &<Affinities[i].second>;
+ LValue BaseAddrLVal = CGF.EmitLValueForField(
+ Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
+ CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
+ BaseAddrLVal);
+ // affs[i].len = sizeof(<Affinities[i].second>);
+ LValue LenLVal = CGF.EmitLValueForField(
+ Base, *std::next(KmpAffinityInfoRD->field_begin(), Len));
+ CGF.EmitStoreOfScalar(Size, LenLVal);
+ Idx = CGF.Builder.CreateNUWAdd(
+ Idx, llvm::ConstantInt::get(Idx->getType(), 1));
+ CGF.EmitStoreOfScalar(Idx, PosLVal);
+ }
+ }
+ // Call to kmp_int32 __kmpc_omp_reg_task_with_affinity(ident_t *loc_ref,
+ // kmp_int32 gtid, kmp_task_t *new_task, kmp_int32
+ // naffins, kmp_task_affinity_info_t *affin_list);
+ llvm::Value *LocRef = emitUpdateLocation(CGF, Loc);
+ llvm::Value *GTid = getThreadID(CGF, Loc);
+ llvm::Value *AffinListPtr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ AffinitiesArray.getPointer(), CGM.VoidPtrTy);
+ // FIXME: Emit the function and ignore its result for now unless the
+ // runtime function is properly implemented.
+ (void)CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_omp_reg_task_with_affinity),
+ {LocRef, GTid, NewTask, NumOfElements, AffinListPtr});
}
llvm::Value *NewTaskNewTaskTTy =
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
@@ -5106,7 +4480,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
TDBase, *std::next(KmpTaskTQTyRD->field_begin(),
KmpTaskTShareds)),
Loc),
- CGF.getNaturalTypeAlignment(SharedsTy));
+ CGM.getNaturalTypeAlignment(SharedsTy));
LValue Dest = CGF.MakeAddrLValue(KmpTaskSharedsPtr, SharedsTy);
LValue Src = CGF.MakeAddrLValue(Shareds, SharedsTy);
CGF.EmitAggregateCopy(Dest, Src, SharedsTy, AggValueSlot::DoesNotOverlap);
@@ -5158,6 +4532,540 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
return Result;
}
+namespace {
+/// Dependence kind for RTL.
+enum RTLDependenceKindTy {
+ DepIn = 0x01,
+ DepInOut = 0x3,
+ DepMutexInOutSet = 0x4
+};
+/// Fields ids in kmp_depend_info record.
+enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
+} // namespace
+
+/// Translates internal dependency kind into the runtime kind.
+static RTLDependenceKindTy translateDependencyKind(OpenMPDependClauseKind K) {
+ RTLDependenceKindTy DepKind;
+ switch (K) {
+ case OMPC_DEPEND_in:
+ DepKind = DepIn;
+ break;
+ // Out and InOut dependencies must use the same code.
+ case OMPC_DEPEND_out:
+ case OMPC_DEPEND_inout:
+ DepKind = DepInOut;
+ break;
+ case OMPC_DEPEND_mutexinoutset:
+ DepKind = DepMutexInOutSet;
+ break;
+ case OMPC_DEPEND_source:
+ case OMPC_DEPEND_sink:
+ case OMPC_DEPEND_depobj:
+ case OMPC_DEPEND_unknown:
+ llvm_unreachable("Unknown task dependence type");
+ }
+ return DepKind;
+}
+
+/// Builds kmp_depend_info, if it is not built yet, and builds flags type.
+static void getDependTypes(ASTContext &C, QualType &KmpDependInfoTy,
+ QualType &FlagsTy) {
+ FlagsTy = C.getIntTypeForBitwidth(C.getTypeSize(C.BoolTy), /*Signed=*/false);
+ if (KmpDependInfoTy.isNull()) {
+ RecordDecl *KmpDependInfoRD = C.buildImplicitRecord("kmp_depend_info");
+ KmpDependInfoRD->startDefinition();
+ addFieldToRecordDecl(C, KmpDependInfoRD, C.getIntPtrType());
+ addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
+ addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
+ KmpDependInfoRD->completeDefinition();
+ KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
+ }
+}
+
+std::pair<llvm::Value *, LValue>
+CGOpenMPRuntime::getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal,
+ SourceLocation Loc) {
+ ASTContext &C = CGM.getContext();
+ QualType FlagsTy;
+ getDependTypes(C, KmpDependInfoTy, FlagsTy);
+ RecordDecl *KmpDependInfoRD =
+ cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
+ LValue Base = CGF.EmitLoadOfPointerLValue(
+ DepobjLVal.getAddress(CGF),
+ C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
+ QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
+ Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy));
+ Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
+ Base.getTBAAInfo());
+ llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
+ Addr.getPointer(),
+ llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
+ LValue NumDepsBase = CGF.MakeAddrLValue(
+ Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
+ Base.getBaseInfo(), Base.getTBAAInfo());
+ // NumDeps = deps[i].base_addr;
+ LValue BaseAddrLVal = CGF.EmitLValueForField(
+ NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
+ llvm::Value *NumDeps = CGF.EmitLoadOfScalar(BaseAddrLVal, Loc);
+ return std::make_pair(NumDeps, Base);
+}
+
+static void emitDependData(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
+ llvm::PointerUnion<unsigned *, LValue *> Pos,
+ const OMPTaskDataTy::DependData &Data,
+ Address DependenciesArray) {
+ CodeGenModule &CGM = CGF.CGM;
+ ASTContext &C = CGM.getContext();
+ QualType FlagsTy;
+ getDependTypes(C, KmpDependInfoTy, FlagsTy);
+ RecordDecl *KmpDependInfoRD =
+ cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
+ llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
+
+ OMPIteratorGeneratorScope IteratorScope(
+ CGF, cast_or_null<OMPIteratorExpr>(
+ Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
+ : nullptr));
+ for (const Expr *E : Data.DepExprs) {
+ llvm::Value *Addr;
+ llvm::Value *Size;
+ std::tie(Addr, Size) = getPointerAndSize(CGF, E);
+ LValue Base;
+ if (unsigned *P = Pos.dyn_cast<unsigned *>()) {
+ Base = CGF.MakeAddrLValue(
+ CGF.Builder.CreateConstGEP(DependenciesArray, *P), KmpDependInfoTy);
+ } else {
+ LValue &PosLVal = *Pos.get<LValue *>();
+ llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
+ Base = CGF.MakeAddrLValue(
+ Address(CGF.Builder.CreateGEP(DependenciesArray.getPointer(), Idx),
+ DependenciesArray.getAlignment()),
+ KmpDependInfoTy);
+ }
+ // deps[i].base_addr = &<Dependencies[i].second>;
+ LValue BaseAddrLVal = CGF.EmitLValueForField(
+ Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
+ CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
+ BaseAddrLVal);
+ // deps[i].len = sizeof(<Dependencies[i].second>);
+ LValue LenLVal = CGF.EmitLValueForField(
+ Base, *std::next(KmpDependInfoRD->field_begin(), Len));
+ CGF.EmitStoreOfScalar(Size, LenLVal);
+ // deps[i].flags = <Dependencies[i].first>;
+ RTLDependenceKindTy DepKind = translateDependencyKind(Data.DepKind);
+ LValue FlagsLVal = CGF.EmitLValueForField(
+ Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
+ CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
+ FlagsLVal);
+ if (unsigned *P = Pos.dyn_cast<unsigned *>()) {
+ ++(*P);
+ } else {
+ LValue &PosLVal = *Pos.get<LValue *>();
+ llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
+ Idx = CGF.Builder.CreateNUWAdd(Idx,
+ llvm::ConstantInt::get(Idx->getType(), 1));
+ CGF.EmitStoreOfScalar(Idx, PosLVal);
+ }
+ }
+}
+
+static SmallVector<llvm::Value *, 4>
+emitDepobjElementsSizes(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
+ const OMPTaskDataTy::DependData &Data) {
+ assert(Data.DepKind == OMPC_DEPEND_depobj &&
+ "Expected depobj dependecy kind.");
+ SmallVector<llvm::Value *, 4> Sizes;
+ SmallVector<LValue, 4> SizeLVals;
+ ASTContext &C = CGF.getContext();
+ QualType FlagsTy;
+ getDependTypes(C, KmpDependInfoTy, FlagsTy);
+ RecordDecl *KmpDependInfoRD =
+ cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
+ QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
+ llvm::Type *KmpDependInfoPtrT = CGF.ConvertTypeForMem(KmpDependInfoPtrTy);
+ {
+ OMPIteratorGeneratorScope IteratorScope(
+ CGF, cast_or_null<OMPIteratorExpr>(
+ Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
+ : nullptr));
+ for (const Expr *E : Data.DepExprs) {
+ LValue DepobjLVal = CGF.EmitLValue(E->IgnoreParenImpCasts());
+ LValue Base = CGF.EmitLoadOfPointerLValue(
+ DepobjLVal.getAddress(CGF),
+ C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
+ Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Base.getAddress(CGF), KmpDependInfoPtrT);
+ Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
+ Base.getTBAAInfo());
+ llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
+ Addr.getPointer(),
+ llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
+ LValue NumDepsBase = CGF.MakeAddrLValue(
+ Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
+ Base.getBaseInfo(), Base.getTBAAInfo());
+ // NumDeps = deps[i].base_addr;
+ LValue BaseAddrLVal = CGF.EmitLValueForField(
+ NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
+ llvm::Value *NumDeps =
+ CGF.EmitLoadOfScalar(BaseAddrLVal, E->getExprLoc());
+ LValue NumLVal = CGF.MakeAddrLValue(
+ CGF.CreateMemTemp(C.getUIntPtrType(), "depobj.size.addr"),
+ C.getUIntPtrType());
+ CGF.InitTempAlloca(NumLVal.getAddress(CGF),
+ llvm::ConstantInt::get(CGF.IntPtrTy, 0));
+ llvm::Value *PrevVal = CGF.EmitLoadOfScalar(NumLVal, E->getExprLoc());
+ llvm::Value *Add = CGF.Builder.CreateNUWAdd(PrevVal, NumDeps);
+ CGF.EmitStoreOfScalar(Add, NumLVal);
+ SizeLVals.push_back(NumLVal);
+ }
+ }
+ for (unsigned I = 0, E = SizeLVals.size(); I < E; ++I) {
+ llvm::Value *Size =
+ CGF.EmitLoadOfScalar(SizeLVals[I], Data.DepExprs[I]->getExprLoc());
+ Sizes.push_back(Size);
+ }
+ return Sizes;
+}
+
+static void emitDepobjElements(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
+ LValue PosLVal,
+ const OMPTaskDataTy::DependData &Data,
+ Address DependenciesArray) {
+ assert(Data.DepKind == OMPC_DEPEND_depobj &&
+ "Expected depobj dependecy kind.");
+ ASTContext &C = CGF.getContext();
+ QualType FlagsTy;
+ getDependTypes(C, KmpDependInfoTy, FlagsTy);
+ RecordDecl *KmpDependInfoRD =
+ cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
+ QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
+ llvm::Type *KmpDependInfoPtrT = CGF.ConvertTypeForMem(KmpDependInfoPtrTy);
+ llvm::Value *ElSize = CGF.getTypeSize(KmpDependInfoTy);
+ {
+ OMPIteratorGeneratorScope IteratorScope(
+ CGF, cast_or_null<OMPIteratorExpr>(
+ Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
+ : nullptr));
+ for (unsigned I = 0, End = Data.DepExprs.size(); I < End; ++I) {
+ const Expr *E = Data.DepExprs[I];
+ LValue DepobjLVal = CGF.EmitLValue(E->IgnoreParenImpCasts());
+ LValue Base = CGF.EmitLoadOfPointerLValue(
+ DepobjLVal.getAddress(CGF),
+ C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
+ Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Base.getAddress(CGF), KmpDependInfoPtrT);
+ Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
+ Base.getTBAAInfo());
+
+ // Get number of elements in a single depobj.
+ llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
+ Addr.getPointer(),
+ llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
+ LValue NumDepsBase = CGF.MakeAddrLValue(
+ Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
+ Base.getBaseInfo(), Base.getTBAAInfo());
+ // NumDeps = deps[i].base_addr;
+ LValue BaseAddrLVal = CGF.EmitLValueForField(
+ NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
+ llvm::Value *NumDeps =
+ CGF.EmitLoadOfScalar(BaseAddrLVal, E->getExprLoc());
+
+ // memcopy dependency data.
+ llvm::Value *Size = CGF.Builder.CreateNUWMul(
+ ElSize,
+ CGF.Builder.CreateIntCast(NumDeps, CGF.SizeTy, /*isSigned=*/false));
+ llvm::Value *Pos = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
+ Address DepAddr =
+ Address(CGF.Builder.CreateGEP(DependenciesArray.getPointer(), Pos),
+ DependenciesArray.getAlignment());
+ CGF.Builder.CreateMemCpy(DepAddr, Base.getAddress(CGF), Size);
+
+ // Increase pos.
+ // pos += size;
+ llvm::Value *Add = CGF.Builder.CreateNUWAdd(Pos, NumDeps);
+ CGF.EmitStoreOfScalar(Add, PosLVal);
+ }
+ }
+}
+
+std::pair<llvm::Value *, Address> CGOpenMPRuntime::emitDependClause(
+ CodeGenFunction &CGF, ArrayRef<OMPTaskDataTy::DependData> Dependencies,
+ SourceLocation Loc) {
+ if (llvm::all_of(Dependencies, [](const OMPTaskDataTy::DependData &D) {
+ return D.DepExprs.empty();
+ }))
+ return std::make_pair(nullptr, Address::invalid());
+ // Process list of dependencies.
+ ASTContext &C = CGM.getContext();
+ Address DependenciesArray = Address::invalid();
+ llvm::Value *NumOfElements = nullptr;
+ unsigned NumDependencies = std::accumulate(
+ Dependencies.begin(), Dependencies.end(), 0,
+ [](unsigned V, const OMPTaskDataTy::DependData &D) {
+ return D.DepKind == OMPC_DEPEND_depobj
+ ? V
+ : (V + (D.IteratorExpr ? 0 : D.DepExprs.size()));
+ });
+ QualType FlagsTy;
+ getDependTypes(C, KmpDependInfoTy, FlagsTy);
+ bool HasDepobjDeps = false;
+ bool HasRegularWithIterators = false;
+ llvm::Value *NumOfDepobjElements = llvm::ConstantInt::get(CGF.IntPtrTy, 0);
+ llvm::Value *NumOfRegularWithIterators =
+ llvm::ConstantInt::get(CGF.IntPtrTy, 1);
+ // Calculate number of depobj dependecies and regular deps with the iterators.
+ for (const OMPTaskDataTy::DependData &D : Dependencies) {
+ if (D.DepKind == OMPC_DEPEND_depobj) {
+ SmallVector<llvm::Value *, 4> Sizes =
+ emitDepobjElementsSizes(CGF, KmpDependInfoTy, D);
+ for (llvm::Value *Size : Sizes) {
+ NumOfDepobjElements =
+ CGF.Builder.CreateNUWAdd(NumOfDepobjElements, Size);
+ }
+ HasDepobjDeps = true;
+ continue;
+ }
+ // Include number of iterations, if any.
+ if (const auto *IE = cast_or_null<OMPIteratorExpr>(D.IteratorExpr)) {
+ for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
+ llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
+ Sz = CGF.Builder.CreateIntCast(Sz, CGF.IntPtrTy, /*isSigned=*/false);
+ NumOfRegularWithIterators =
+ CGF.Builder.CreateNUWMul(NumOfRegularWithIterators, Sz);
+ }
+ HasRegularWithIterators = true;
+ continue;
+ }
+ }
+
+ QualType KmpDependInfoArrayTy;
+ if (HasDepobjDeps || HasRegularWithIterators) {
+ NumOfElements = llvm::ConstantInt::get(CGM.IntPtrTy, NumDependencies,
+ /*isSigned=*/false);
+ if (HasDepobjDeps) {
+ NumOfElements =
+ CGF.Builder.CreateNUWAdd(NumOfDepobjElements, NumOfElements);
+ }
+ if (HasRegularWithIterators) {
+ NumOfElements =
+ CGF.Builder.CreateNUWAdd(NumOfRegularWithIterators, NumOfElements);
+ }
+ OpaqueValueExpr OVE(Loc,
+ C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0),
+ VK_RValue);
+ CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE,
+ RValue::get(NumOfElements));
+ KmpDependInfoArrayTy =
+ C.getVariableArrayType(KmpDependInfoTy, &OVE, ArrayType::Normal,
+ /*IndexTypeQuals=*/0, SourceRange(Loc, Loc));
+ // CGF.EmitVariablyModifiedType(KmpDependInfoArrayTy);
+ // Properly emit variable-sized array.
+ auto *PD = ImplicitParamDecl::Create(C, KmpDependInfoArrayTy,
+ ImplicitParamDecl::Other);
+ CGF.EmitVarDecl(*PD);
+ DependenciesArray = CGF.GetAddrOfLocalVar(PD);
+ NumOfElements = CGF.Builder.CreateIntCast(NumOfElements, CGF.Int32Ty,
+ /*isSigned=*/false);
+ } else {
+ KmpDependInfoArrayTy = C.getConstantArrayType(
+ KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies), nullptr,
+ ArrayType::Normal, /*IndexTypeQuals=*/0);
+ DependenciesArray =
+ CGF.CreateMemTemp(KmpDependInfoArrayTy, ".dep.arr.addr");
+ DependenciesArray = CGF.Builder.CreateConstArrayGEP(DependenciesArray, 0);
+ NumOfElements = llvm::ConstantInt::get(CGM.Int32Ty, NumDependencies,
+ /*isSigned=*/false);
+ }
+ unsigned Pos = 0;
+ for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
+ if (Dependencies[I].DepKind == OMPC_DEPEND_depobj ||
+ Dependencies[I].IteratorExpr)
+ continue;
+ emitDependData(CGF, KmpDependInfoTy, &Pos, Dependencies[I],
+ DependenciesArray);
+ }
+ // Copy regular dependecies with iterators.
+ LValue PosLVal = CGF.MakeAddrLValue(
+ CGF.CreateMemTemp(C.getSizeType(), "dep.counter.addr"), C.getSizeType());
+ CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Pos), PosLVal);
+ for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
+ if (Dependencies[I].DepKind == OMPC_DEPEND_depobj ||
+ !Dependencies[I].IteratorExpr)
+ continue;
+ emitDependData(CGF, KmpDependInfoTy, &PosLVal, Dependencies[I],
+ DependenciesArray);
+ }
+ // Copy final depobj arrays without iterators.
+ if (HasDepobjDeps) {
+ for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
+ if (Dependencies[I].DepKind != OMPC_DEPEND_depobj)
+ continue;
+ emitDepobjElements(CGF, KmpDependInfoTy, PosLVal, Dependencies[I],
+ DependenciesArray);
+ }
+ }
+ DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ DependenciesArray, CGF.VoidPtrTy);
+ return std::make_pair(NumOfElements, DependenciesArray);
+}
+
+Address CGOpenMPRuntime::emitDepobjDependClause(
+ CodeGenFunction &CGF, const OMPTaskDataTy::DependData &Dependencies,
+ SourceLocation Loc) {
+ if (Dependencies.DepExprs.empty())
+ return Address::invalid();
+ // Process list of dependencies.
+ ASTContext &C = CGM.getContext();
+ Address DependenciesArray = Address::invalid();
+ unsigned NumDependencies = Dependencies.DepExprs.size();
+ QualType FlagsTy;
+ getDependTypes(C, KmpDependInfoTy, FlagsTy);
+ RecordDecl *KmpDependInfoRD =
+ cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
+
+ llvm::Value *Size;
+ // Define type kmp_depend_info[<Dependencies.size()>];
+ // For depobj reserve one extra element to store the number of elements.
+ // It is required to handle depobj(x) update(in) construct.
+ // kmp_depend_info[<Dependencies.size()>] deps;
+ llvm::Value *NumDepsVal;
+ CharUnits Align = C.getTypeAlignInChars(KmpDependInfoTy);
+ if (const auto *IE =
+ cast_or_null<OMPIteratorExpr>(Dependencies.IteratorExpr)) {
+ NumDepsVal = llvm::ConstantInt::get(CGF.SizeTy, 1);
+ for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
+ llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
+ Sz = CGF.Builder.CreateIntCast(Sz, CGF.SizeTy, /*isSigned=*/false);
+ NumDepsVal = CGF.Builder.CreateNUWMul(NumDepsVal, Sz);
+ }
+ Size = CGF.Builder.CreateNUWAdd(llvm::ConstantInt::get(CGF.SizeTy, 1),
+ NumDepsVal);
+ CharUnits SizeInBytes =
+ C.getTypeSizeInChars(KmpDependInfoTy).alignTo(Align);
+ llvm::Value *RecSize = CGM.getSize(SizeInBytes);
+ Size = CGF.Builder.CreateNUWMul(Size, RecSize);
+ NumDepsVal =
+ CGF.Builder.CreateIntCast(NumDepsVal, CGF.IntPtrTy, /*isSigned=*/false);
+ } else {
+ QualType KmpDependInfoArrayTy = C.getConstantArrayType(
+ KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies + 1),
+ nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
+ CharUnits Sz = C.getTypeSizeInChars(KmpDependInfoArrayTy);
+ Size = CGM.getSize(Sz.alignTo(Align));
+ NumDepsVal = llvm::ConstantInt::get(CGF.IntPtrTy, NumDependencies);
+ }
+ // Need to allocate on the dynamic memory.
+ llvm::Value *ThreadID = getThreadID(CGF, Loc);
+ // Use default allocator.
+ llvm::Value *Allocator = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
+ llvm::Value *Args[] = {ThreadID, Size, Allocator};
+
+ llvm::Value *Addr =
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_alloc),
+ Args, ".dep.arr.addr");
+ Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Addr, CGF.ConvertTypeForMem(KmpDependInfoTy)->getPointerTo());
+ DependenciesArray = Address(Addr, Align);
+ // Write number of elements in the first element of array for depobj.
+ LValue Base = CGF.MakeAddrLValue(DependenciesArray, KmpDependInfoTy);
+ // deps[i].base_addr = NumDependencies;
+ LValue BaseAddrLVal = CGF.EmitLValueForField(
+ Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
+ CGF.EmitStoreOfScalar(NumDepsVal, BaseAddrLVal);
+ llvm::PointerUnion<unsigned *, LValue *> Pos;
+ unsigned Idx = 1;
+ LValue PosLVal;
+ if (Dependencies.IteratorExpr) {
+ PosLVal = CGF.MakeAddrLValue(
+ CGF.CreateMemTemp(C.getSizeType(), "iterator.counter.addr"),
+ C.getSizeType());
+ CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Idx), PosLVal,
+ /*IsInit=*/true);
+ Pos = &PosLVal;
+ } else {
+ Pos = &Idx;
+ }
+ emitDependData(CGF, KmpDependInfoTy, Pos, Dependencies, DependenciesArray);
+ DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.Builder.CreateConstGEP(DependenciesArray, 1), CGF.VoidPtrTy);
+ return DependenciesArray;
+}
+
+void CGOpenMPRuntime::emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
+ SourceLocation Loc) {
+ ASTContext &C = CGM.getContext();
+ QualType FlagsTy;
+ getDependTypes(C, KmpDependInfoTy, FlagsTy);
+ LValue Base = CGF.EmitLoadOfPointerLValue(
+ DepobjLVal.getAddress(CGF),
+ C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
+ QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
+ Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy));
+ llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
+ Addr.getPointer(),
+ llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
+ DepObjAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(DepObjAddr,
+ CGF.VoidPtrTy);
+ llvm::Value *ThreadID = getThreadID(CGF, Loc);
+ // Use default allocator.
+ llvm::Value *Allocator = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
+ llvm::Value *Args[] = {ThreadID, DepObjAddr, Allocator};
+
+ // _kmpc_free(gtid, addr, nullptr);
+ (void)CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_free),
+ Args);
+}
+
+void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
+ OpenMPDependClauseKind NewDepKind,
+ SourceLocation Loc) {
+ ASTContext &C = CGM.getContext();
+ QualType FlagsTy;
+ getDependTypes(C, KmpDependInfoTy, FlagsTy);
+ RecordDecl *KmpDependInfoRD =
+ cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
+ llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
+ llvm::Value *NumDeps;
+ LValue Base;
+ std::tie(NumDeps, Base) = getDepobjElements(CGF, DepobjLVal, Loc);
+
+ Address Begin = Base.getAddress(CGF);
+ // Cast from pointer to array type to pointer to single element.
+ llvm::Value *End = CGF.Builder.CreateGEP(Begin.getPointer(), NumDeps);
+ // The basic structure here is a while-do loop.
+ llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.body");
+ llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.done");
+ llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
+ CGF.EmitBlock(BodyBB);
+ llvm::PHINode *ElementPHI =
+ CGF.Builder.CreatePHI(Begin.getType(), 2, "omp.elementPast");
+ ElementPHI->addIncoming(Begin.getPointer(), EntryBB);
+ Begin = Address(ElementPHI, Begin.getAlignment());
+ Base = CGF.MakeAddrLValue(Begin, KmpDependInfoTy, Base.getBaseInfo(),
+ Base.getTBAAInfo());
+ // deps[i].flags = NewDepKind;
+ RTLDependenceKindTy DepKind = translateDependencyKind(NewDepKind);
+ LValue FlagsLVal = CGF.EmitLValueForField(
+ Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
+ CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
+ FlagsLVal);
+
+ // Shift the address forward by one element.
+ Address ElementNext =
+ CGF.Builder.CreateConstGEP(Begin, /*Index=*/1, "omp.elementNext");
+ ElementPHI->addIncoming(ElementNext.getPointer(),
+ CGF.Builder.GetInsertBlock());
+ llvm::Value *IsEmpty =
+ CGF.Builder.CreateICmpEQ(ElementNext.getPointer(), End, "omp.isempty");
+ CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
+ // Done.
+ CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
+}
+
void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction,
@@ -5174,94 +5082,11 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *NewTaskNewTaskTTy = Result.NewTaskNewTaskTTy;
LValue TDBase = Result.TDBase;
const RecordDecl *KmpTaskTQTyRD = Result.KmpTaskTQTyRD;
- ASTContext &C = CGM.getContext();
// Process list of dependences.
Address DependenciesArray = Address::invalid();
- unsigned NumDependencies = Data.Dependences.size();
- if (NumDependencies) {
- // Dependence kind for RTL.
- enum RTLDependenceKindTy { DepIn = 0x01, DepInOut = 0x3, DepMutexInOutSet = 0x4 };
- enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
- RecordDecl *KmpDependInfoRD;
- QualType FlagsTy =
- C.getIntTypeForBitwidth(C.getTypeSize(C.BoolTy), /*Signed=*/false);
- llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
- if (KmpDependInfoTy.isNull()) {
- KmpDependInfoRD = C.buildImplicitRecord("kmp_depend_info");
- KmpDependInfoRD->startDefinition();
- addFieldToRecordDecl(C, KmpDependInfoRD, C.getIntPtrType());
- addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
- addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
- KmpDependInfoRD->completeDefinition();
- KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
- } else {
- KmpDependInfoRD = cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
- }
- // Define type kmp_depend_info[<Dependences.size()>];
- QualType KmpDependInfoArrayTy = C.getConstantArrayType(
- KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies),
- nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
- // kmp_depend_info[<Dependences.size()>] deps;
- DependenciesArray =
- CGF.CreateMemTemp(KmpDependInfoArrayTy, ".dep.arr.addr");
- for (unsigned I = 0; I < NumDependencies; ++I) {
- const Expr *E = Data.Dependences[I].second;
- LValue Addr = CGF.EmitLValue(E);
- llvm::Value *Size;
- QualType Ty = E->getType();
- if (const auto *ASE =
- dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
- LValue UpAddrLVal =
- CGF.EmitOMPArraySectionExpr(ASE, /*IsLowerBound=*/false);
- llvm::Value *UpAddr = CGF.Builder.CreateConstGEP1_32(
- UpAddrLVal.getPointer(CGF), /*Idx0=*/1);
- llvm::Value *LowIntPtr =
- CGF.Builder.CreatePtrToInt(Addr.getPointer(CGF), CGM.SizeTy);
- llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGM.SizeTy);
- Size = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
- } else {
- Size = CGF.getTypeSize(Ty);
- }
- LValue Base = CGF.MakeAddrLValue(
- CGF.Builder.CreateConstArrayGEP(DependenciesArray, I),
- KmpDependInfoTy);
- // deps[i].base_addr = &<Dependences[i].second>;
- LValue BaseAddrLVal = CGF.EmitLValueForField(
- Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
- CGF.EmitStoreOfScalar(
- CGF.Builder.CreatePtrToInt(Addr.getPointer(CGF), CGF.IntPtrTy),
- BaseAddrLVal);
- // deps[i].len = sizeof(<Dependences[i].second>);
- LValue LenLVal = CGF.EmitLValueForField(
- Base, *std::next(KmpDependInfoRD->field_begin(), Len));
- CGF.EmitStoreOfScalar(Size, LenLVal);
- // deps[i].flags = <Dependences[i].first>;
- RTLDependenceKindTy DepKind;
- switch (Data.Dependences[I].first) {
- case OMPC_DEPEND_in:
- DepKind = DepIn;
- break;
- // Out and InOut dependencies must use the same code.
- case OMPC_DEPEND_out:
- case OMPC_DEPEND_inout:
- DepKind = DepInOut;
- break;
- case OMPC_DEPEND_mutexinoutset:
- DepKind = DepMutexInOutSet;
- break;
- case OMPC_DEPEND_source:
- case OMPC_DEPEND_sink:
- case OMPC_DEPEND_unknown:
- llvm_unreachable("Unknown task dependence type");
- }
- LValue FlagsLVal = CGF.EmitLValueForField(
- Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
- CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
- FlagsLVal);
- }
- DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateConstArrayGEP(DependenciesArray, 0), CGF.VoidPtrTy);
- }
+ llvm::Value *NumOfElements;
+ std::tie(NumOfElements, DependenciesArray) =
+ emitDependClause(CGF, Data.Dependences, Loc);
// NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
// libcall.
@@ -5273,28 +5098,30 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
llvm::Value *TaskArgs[] = { UpLoc, ThreadID, NewTask };
llvm::Value *DepTaskArgs[7];
- if (NumDependencies) {
+ if (!Data.Dependences.empty()) {
DepTaskArgs[0] = UpLoc;
DepTaskArgs[1] = ThreadID;
DepTaskArgs[2] = NewTask;
- DepTaskArgs[3] = CGF.Builder.getInt32(NumDependencies);
+ DepTaskArgs[3] = NumOfElements;
DepTaskArgs[4] = DependenciesArray.getPointer();
DepTaskArgs[5] = CGF.Builder.getInt32(0);
DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
}
- auto &&ThenCodeGen = [this, &Data, TDBase, KmpTaskTQTyRD, NumDependencies,
- &TaskArgs,
+ auto &&ThenCodeGen = [this, &Data, TDBase, KmpTaskTQTyRD, &TaskArgs,
&DepTaskArgs](CodeGenFunction &CGF, PrePostActionTy &) {
if (!Data.Tied) {
auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
LValue PartIdLVal = CGF.EmitLValueForField(TDBase, *PartIdFI);
CGF.EmitStoreOfScalar(CGF.Builder.getInt32(0), PartIdLVal);
}
- if (NumDependencies) {
+ if (!Data.Dependences.empty()) {
CGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__kmpc_omp_task_with_deps), DepTaskArgs);
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_omp_task_with_deps),
+ DepTaskArgs);
} else {
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task),
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_omp_task),
TaskArgs);
}
// Check if parent region is untied and build return for untied task;
@@ -5304,26 +5131,27 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
};
llvm::Value *DepWaitTaskArgs[6];
- if (NumDependencies) {
+ if (!Data.Dependences.empty()) {
DepWaitTaskArgs[0] = UpLoc;
DepWaitTaskArgs[1] = ThreadID;
- DepWaitTaskArgs[2] = CGF.Builder.getInt32(NumDependencies);
+ DepWaitTaskArgs[2] = NumOfElements;
DepWaitTaskArgs[3] = DependenciesArray.getPointer();
DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
}
- auto &&ElseCodeGen = [&TaskArgs, ThreadID, NewTaskNewTaskTTy, TaskEntry,
- NumDependencies, &DepWaitTaskArgs,
+ auto &M = CGM.getModule();
+ auto &&ElseCodeGen = [this, &M, &TaskArgs, ThreadID, NewTaskNewTaskTTy,
+ TaskEntry, &Data, &DepWaitTaskArgs,
Loc](CodeGenFunction &CGF, PrePostActionTy &) {
- CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
CodeGenFunction::RunCleanupsScope LocalScope(CGF);
// Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
// kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
// ndeps_noalias, kmp_depend_info_t *noalias_dep_list); if dependence info
// is specified.
- if (NumDependencies)
- CGF.EmitRuntimeCall(RT.createRuntimeFunction(OMPRTL__kmpc_omp_wait_deps),
- DepWaitTaskArgs);
+ if (!Data.Dependences.empty())
+ CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_omp_wait_deps),
+ DepWaitTaskArgs);
// Call proxy_task_entry(gtid, new_task);
auto &&CodeGen = [TaskEntry, ThreadID, NewTaskNewTaskTTy,
Loc](CodeGenFunction &CGF, PrePostActionTy &Action) {
@@ -5338,9 +5166,12 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
// Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
// kmp_task_t *new_task);
RegionCodeGenTy RCG(CodeGen);
- CommonActionTy Action(
- RT.createRuntimeFunction(OMPRTL__kmpc_omp_task_begin_if0), TaskArgs,
- RT.createRuntimeFunction(OMPRTL__kmpc_omp_task_complete_if0), TaskArgs);
+ CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
+ M, OMPRTL___kmpc_omp_task_begin_if0),
+ TaskArgs,
+ OMPBuilder.getOrCreateRuntimeFunction(
+ M, OMPRTL___kmpc_omp_task_complete_if0),
+ TaskArgs);
RCG.setAction(Action);
RCG(CGF);
};
@@ -5434,7 +5265,9 @@ void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
Result.TaskDupFn ? CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
Result.TaskDupFn, CGF.VoidPtrTy)
: llvm::ConstantPointerNull::get(CGF.VoidPtrTy)};
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_taskloop), TaskArgs);
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_taskloop),
+ TaskArgs);
}
/// Emit reduction operation for each element of array (required for
@@ -5776,8 +5609,9 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
Lock // kmp_critical_name *&<lock>
};
llvm::Value *Res = CGF.EmitRuntimeCall(
- createRuntimeFunction(WithNowait ? OMPRTL__kmpc_reduce_nowait
- : OMPRTL__kmpc_reduce),
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(),
+ WithNowait ? OMPRTL___kmpc_reduce_nowait : OMPRTL___kmpc_reduce),
Args);
// 5. Build switch(res)
@@ -5818,8 +5652,9 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
RegionCodeGenTy RCG(CodeGen);
CommonActionTy Action(
nullptr, llvm::None,
- createRuntimeFunction(WithNowait ? OMPRTL__kmpc_end_reduce_nowait
- : OMPRTL__kmpc_end_reduce),
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), WithNowait ? OMPRTL___kmpc_end_reduce_nowait
+ : OMPRTL___kmpc_end_reduce),
EndArgs);
RCG.setAction(Action);
RCG(CGF);
@@ -5942,7 +5777,8 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
Lock // kmp_critical_name *&<lock>
};
CommonActionTy Action(nullptr, llvm::None,
- createRuntimeFunction(OMPRTL__kmpc_end_reduce),
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_end_reduce),
EndArgs);
AtomicRCG.setAction(Action);
AtomicRCG(CGF);
@@ -5969,12 +5805,12 @@ static std::string generateUniqueName(CodeGenModule &CGM, StringRef Prefix,
{D->isLocalVarDeclOrParm() ? D->getName() : CGM.getMangledName(D)});
Out << Prefix << Name << "_"
<< D->getCanonicalDecl()->getBeginLoc().getRawEncoding();
- return Out.str();
+ return std::string(Out.str());
}
/// Emits reduction initializer function:
/// \code
-/// void @.red_init(void* %arg) {
+/// void @.red_init(void* %arg, void* %orig) {
/// %0 = bitcast void* %arg to <type>*
/// store <type> <init>, <type>* %0
/// ret void
@@ -5984,10 +5820,15 @@ static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N) {
ASTContext &C = CGM.getContext();
+ QualType VoidPtrTy = C.VoidPtrTy;
+ VoidPtrTy.addRestrict();
FunctionArgList Args;
- ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
+ ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, VoidPtrTy,
ImplicitParamDecl::Other);
+ ImplicitParamDecl ParamOrig(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, VoidPtrTy,
+ ImplicitParamDecl::Other);
Args.emplace_back(&Param);
+ Args.emplace_back(&ParamOrig);
const auto &FnInfo =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
@@ -6012,28 +5853,25 @@ static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
CGM.getContext().getSizeType(), Loc);
}
RCG.emitAggregateType(CGF, N, Size);
- LValue SharedLVal;
+ LValue OrigLVal;
// If initializer uses initializer from declare reduction construct, emit a
// pointer to the address of the original reduction item (reuired by reduction
// initializer)
if (RCG.usesReductionInitializer(N)) {
- Address SharedAddr =
- CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
- CGF, CGM.getContext().VoidPtrTy,
- generateUniqueName(CGM, "reduction", RCG.getRefExpr(N)));
+ Address SharedAddr = CGF.GetAddrOfLocalVar(&ParamOrig);
SharedAddr = CGF.EmitLoadOfPointer(
SharedAddr,
CGM.getContext().VoidPtrTy.castAs<PointerType>()->getTypePtr());
- SharedLVal = CGF.MakeAddrLValue(SharedAddr, CGM.getContext().VoidPtrTy);
+ OrigLVal = CGF.MakeAddrLValue(SharedAddr, CGM.getContext().VoidPtrTy);
} else {
- SharedLVal = CGF.MakeNaturalAlignAddrLValue(
+ OrigLVal = CGF.MakeNaturalAlignAddrLValue(
llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
CGM.getContext().VoidPtrTy);
}
// Emit the initializer:
// %0 = bitcast void* %arg to <type>*
// store <type> <init>, <type>* %0
- RCG.emitInitialization(CGF, N, PrivateAddr, SharedLVal,
+ RCG.emitInitialization(CGF, N, PrivateAddr, OrigLVal,
[](CodeGenFunction &) { return false; });
CGF.FinishFunction();
return Fn;
@@ -6173,18 +6011,20 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
return nullptr;
// Build typedef struct:
- // kmp_task_red_input {
+ // kmp_taskred_input {
// void *reduce_shar; // shared reduction item
+ // void *reduce_orig; // original reduction item used for initialization
// size_t reduce_size; // size of data item
// void *reduce_init; // data initialization routine
// void *reduce_fini; // data finalization routine
// void *reduce_comb; // data combiner routine
// kmp_task_red_flags_t flags; // flags for additional info from compiler
- // } kmp_task_red_input_t;
+ // } kmp_taskred_input_t;
ASTContext &C = CGM.getContext();
- RecordDecl *RD = C.buildImplicitRecord("kmp_task_red_input_t");
+ RecordDecl *RD = C.buildImplicitRecord("kmp_taskred_input_t");
RD->startDefinition();
const FieldDecl *SharedFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
+ const FieldDecl *OrigFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
const FieldDecl *SizeFD = addFieldToRecordDecl(C, RD, C.getSizeType());
const FieldDecl *InitFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
const FieldDecl *FiniFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
@@ -6199,8 +6039,8 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
RDType, ArraySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
// kmp_task_red_input_t .rd_input.[Size];
Address TaskRedInput = CGF.CreateMemTemp(ArrayRDType, ".rd_input.");
- ReductionCodeGen RCG(Data.ReductionVars, Data.ReductionCopies,
- Data.ReductionOps);
+ ReductionCodeGen RCG(Data.ReductionVars, Data.ReductionOrigs,
+ Data.ReductionCopies, Data.ReductionOps);
for (unsigned Cnt = 0; Cnt < Size; ++Cnt) {
// kmp_task_red_input_t &ElemLVal = .rd_input.[Cnt];
llvm::Value *Idxs[] = {llvm::ConstantInt::get(CGM.SizeTy, /*V=*/0),
@@ -6212,20 +6052,24 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
LValue ElemLVal = CGF.MakeNaturalAlignAddrLValue(GEP, RDType);
// ElemLVal.reduce_shar = &Shareds[Cnt];
LValue SharedLVal = CGF.EmitLValueForField(ElemLVal, SharedFD);
- RCG.emitSharedLValue(CGF, Cnt);
+ RCG.emitSharedOrigLValue(CGF, Cnt);
llvm::Value *CastedShared =
CGF.EmitCastToVoidPtr(RCG.getSharedLValue(Cnt).getPointer(CGF));
CGF.EmitStoreOfScalar(CastedShared, SharedLVal);
+ // ElemLVal.reduce_orig = &Origs[Cnt];
+ LValue OrigLVal = CGF.EmitLValueForField(ElemLVal, OrigFD);
+ llvm::Value *CastedOrig =
+ CGF.EmitCastToVoidPtr(RCG.getOrigLValue(Cnt).getPointer(CGF));
+ CGF.EmitStoreOfScalar(CastedOrig, OrigLVal);
RCG.emitAggregateType(CGF, Cnt);
llvm::Value *SizeValInChars;
llvm::Value *SizeVal;
std::tie(SizeValInChars, SizeVal) = RCG.getSizes(Cnt);
- // We use delayed creation/initialization for VLAs, array sections and
- // custom reduction initializations. It is required because runtime does not
- // provide the way to pass the sizes of VLAs/array sections to
- // initializer/combiner/finalizer functions and does not pass the pointer to
- // original reduction item to the initializer. Instead threadprivate global
- // variables are used to store these values and use them in the functions.
+ // We use delayed creation/initialization for VLAs and array sections. It is
+ // required because runtime does not provide the way to pass the sizes of
+ // VLAs/array sections to initializer/combiner/finalizer functions. Instead
+ // threadprivate global variables are used to store these values and use
+ // them in the functions.
bool DelayedCreation = !!SizeVal;
SizeValInChars = CGF.Builder.CreateIntCast(SizeValInChars, CGM.SizeTy,
/*isSigned=*/false);
@@ -6236,7 +6080,6 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
llvm::Value *InitAddr =
CGF.EmitCastToVoidPtr(emitReduceInitFunction(CGM, Loc, RCG, Cnt));
CGF.EmitStoreOfScalar(InitAddr, InitLVal);
- DelayedCreation = DelayedCreation || RCG.usesReductionInitializer(Cnt);
// ElemLVal.reduce_fini = fini;
LValue FiniLVal = CGF.EmitLValueForField(ElemLVal, FiniFD);
llvm::Value *Fini = emitReduceFiniFunction(CGM, Loc, RCG, Cnt);
@@ -6260,16 +6103,52 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
CGF.EmitNullInitialization(FlagsLVal.getAddress(CGF),
FlagsLVal.getType());
}
- // Build call void *__kmpc_task_reduction_init(int gtid, int num_data, void
- // *data);
+ if (Data.IsReductionWithTaskMod) {
+ // Build call void *__kmpc_taskred_modifier_init(ident_t *loc, int gtid, int
+ // is_ws, int num, void *data);
+ llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc);
+ llvm::Value *GTid = CGF.Builder.CreateIntCast(getThreadID(CGF, Loc),
+ CGM.IntTy, /*isSigned=*/true);
+ llvm::Value *Args[] = {
+ IdentTLoc, GTid,
+ llvm::ConstantInt::get(CGM.IntTy, Data.IsWorksharingReduction ? 1 : 0,
+ /*isSigned=*/true),
+ llvm::ConstantInt::get(CGM.IntTy, Size, /*isSigned=*/true),
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ TaskRedInput.getPointer(), CGM.VoidPtrTy)};
+ return CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_taskred_modifier_init),
+ Args);
+ }
+ // Build call void *__kmpc_taskred_init(int gtid, int num_data, void *data);
llvm::Value *Args[] = {
CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy,
/*isSigned=*/true),
llvm::ConstantInt::get(CGM.IntTy, Size, /*isSigned=*/true),
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TaskRedInput.getPointer(),
CGM.VoidPtrTy)};
- return CGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__kmpc_task_reduction_init), Args);
+ return CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_taskred_init),
+ Args);
+}
+
+void CGOpenMPRuntime::emitTaskReductionFini(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ bool IsWorksharingReduction) {
+ // Build call void *__kmpc_taskred_modifier_init(ident_t *loc, int gtid, int
+ // is_ws, int num, void *data);
+ llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc);
+ llvm::Value *GTid = CGF.Builder.CreateIntCast(getThreadID(CGF, Loc),
+ CGM.IntTy, /*isSigned=*/true);
+ llvm::Value *Args[] = {IdentTLoc, GTid,
+ llvm::ConstantInt::get(CGM.IntTy,
+ IsWorksharingReduction ? 1 : 0,
+ /*isSigned=*/true)};
+ (void)CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_task_reduction_modifier_fini),
+ Args);
}
void CGOpenMPRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
@@ -6287,16 +6166,6 @@ void CGOpenMPRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
CGF.Builder.CreateStore(SizeVal, SizeAddr, /*IsVolatile=*/false);
}
- // Store address of the original reduction item if custom initializer is used.
- if (RCG.usesReductionInitializer(N)) {
- Address SharedAddr = getAddrOfArtificialThreadPrivate(
- CGF, CGM.getContext().VoidPtrTy,
- generateUniqueName(CGM, "reduction", RCG.getRefExpr(N)));
- CGF.Builder.CreateStore(
- CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- RCG.getSharedLValue(N).getPointer(CGF), CGM.VoidPtrTy),
- SharedAddr, /*IsVolatile=*/false);
- }
}
Address CGOpenMPRuntime::getTaskReductionItem(CodeGenFunction &CGF,
@@ -6313,7 +6182,9 @@ Address CGOpenMPRuntime::getTaskReductionItem(CodeGenFunction &CGF,
SharedLVal.getPointer(CGF), CGM.VoidPtrTy)};
return Address(
CGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__kmpc_task_reduction_get_th_data), Args),
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_task_reduction_get_th_data),
+ Args),
SharedLVal.getAlignment());
}
@@ -6321,11 +6192,19 @@ void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
SourceLocation Loc) {
if (!CGF.HaveInsertPoint())
return;
- // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
- // global_tid);
- llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
- // Ignore return result until untied tasks are supported.
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskwait), Args);
+
+ if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
+ OMPBuilder.CreateTaskwait(CGF.Builder);
+ } else {
+ // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
+ // global_tid);
+ llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
+ // Ignore return result until untied tasks are supported.
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_omp_taskwait),
+ Args);
+ }
+
if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
Region->emitUntiedSwitch(CGF);
}
@@ -6382,7 +6261,9 @@ void CGOpenMPRuntime::emitCancellationPointCall(
CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
// Ignore return result until untied tasks are supported.
llvm::Value *Result = CGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__kmpc_cancellationpoint), Args);
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_cancellationpoint),
+ Args);
// if (__kmpc_cancellationpoint()) {
// exit from construct;
// }
@@ -6407,17 +6288,18 @@ void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
return;
// Build call kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
// kmp_int32 cncl_kind);
+ auto &M = CGM.getModule();
if (auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
- auto &&ThenGen = [Loc, CancelRegion, OMPRegionInfo](CodeGenFunction &CGF,
- PrePostActionTy &) {
+ auto &&ThenGen = [this, &M, Loc, CancelRegion,
+ OMPRegionInfo](CodeGenFunction &CGF, PrePostActionTy &) {
CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
llvm::Value *Args[] = {
RT.emitUpdateLocation(CGF, Loc), RT.getThreadID(CGF, Loc),
CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
// Ignore return result until untied tasks are supported.
llvm::Value *Result = CGF.EmitRuntimeCall(
- RT.createRuntimeFunction(OMPRTL__kmpc_cancel), Args);
+ OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_cancel), Args);
// if (__kmpc_cancel()) {
// exit from construct;
// }
@@ -6442,16 +6324,106 @@ void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
}
}
+namespace {
+/// Cleanup action for uses_allocators support.
+class OMPUsesAllocatorsActionTy final : public PrePostActionTy {
+ ArrayRef<std::pair<const Expr *, const Expr *>> Allocators;
+
+public:
+ OMPUsesAllocatorsActionTy(
+ ArrayRef<std::pair<const Expr *, const Expr *>> Allocators)
+ : Allocators(Allocators) {}
+ void Enter(CodeGenFunction &CGF) override {
+ if (!CGF.HaveInsertPoint())
+ return;
+ for (const auto &AllocatorData : Allocators) {
+ CGF.CGM.getOpenMPRuntime().emitUsesAllocatorsInit(
+ CGF, AllocatorData.first, AllocatorData.second);
+ }
+ }
+ void Exit(CodeGenFunction &CGF) override {
+ if (!CGF.HaveInsertPoint())
+ return;
+ for (const auto &AllocatorData : Allocators) {
+ CGF.CGM.getOpenMPRuntime().emitUsesAllocatorsFini(CGF,
+ AllocatorData.first);
+ }
+ }
+};
+} // namespace
+
void CGOpenMPRuntime::emitTargetOutlinedFunction(
const OMPExecutableDirective &D, StringRef ParentName,
llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
assert(!ParentName.empty() && "Invalid target region parent name!");
HasEmittedTargetRegion = true;
+ SmallVector<std::pair<const Expr *, const Expr *>, 4> Allocators;
+ for (const auto *C : D.getClausesOfKind<OMPUsesAllocatorsClause>()) {
+ for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
+ const OMPUsesAllocatorsClause::Data D = C->getAllocatorData(I);
+ if (!D.AllocatorTraits)
+ continue;
+ Allocators.emplace_back(D.Allocator, D.AllocatorTraits);
+ }
+ }
+ OMPUsesAllocatorsActionTy UsesAllocatorAction(Allocators);
+ CodeGen.setAction(UsesAllocatorAction);
emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
IsOffloadEntry, CodeGen);
}
+void CGOpenMPRuntime::emitUsesAllocatorsInit(CodeGenFunction &CGF,
+ const Expr *Allocator,
+ const Expr *AllocatorTraits) {
+ llvm::Value *ThreadId = getThreadID(CGF, Allocator->getExprLoc());
+ ThreadId = CGF.Builder.CreateIntCast(ThreadId, CGF.IntTy, /*isSigned=*/true);
+ // Use default memspace handle.
+ llvm::Value *MemSpaceHandle = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
+ llvm::Value *NumTraits = llvm::ConstantInt::get(
+ CGF.IntTy, cast<ConstantArrayType>(
+ AllocatorTraits->getType()->getAsArrayTypeUnsafe())
+ ->getSize()
+ .getLimitedValue());
+ LValue AllocatorTraitsLVal = CGF.EmitLValue(AllocatorTraits);
+ Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ AllocatorTraitsLVal.getAddress(CGF), CGF.VoidPtrPtrTy);
+ AllocatorTraitsLVal = CGF.MakeAddrLValue(Addr, CGF.getContext().VoidPtrTy,
+ AllocatorTraitsLVal.getBaseInfo(),
+ AllocatorTraitsLVal.getTBAAInfo());
+ llvm::Value *Traits =
+ CGF.EmitLoadOfScalar(AllocatorTraitsLVal, AllocatorTraits->getExprLoc());
+
+ llvm::Value *AllocatorVal =
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_init_allocator),
+ {ThreadId, MemSpaceHandle, NumTraits, Traits});
+ // Store to allocator.
+ CGF.EmitVarDecl(*cast<VarDecl>(
+ cast<DeclRefExpr>(Allocator->IgnoreParenImpCasts())->getDecl()));
+ LValue AllocatorLVal = CGF.EmitLValue(Allocator->IgnoreParenImpCasts());
+ AllocatorVal =
+ CGF.EmitScalarConversion(AllocatorVal, CGF.getContext().VoidPtrTy,
+ Allocator->getType(), Allocator->getExprLoc());
+ CGF.EmitStoreOfScalar(AllocatorVal, AllocatorLVal);
+}
+
+void CGOpenMPRuntime::emitUsesAllocatorsFini(CodeGenFunction &CGF,
+ const Expr *Allocator) {
+ llvm::Value *ThreadId = getThreadID(CGF, Allocator->getExprLoc());
+ ThreadId = CGF.Builder.CreateIntCast(ThreadId, CGF.IntTy, /*isSigned=*/true);
+ LValue AllocatorLVal = CGF.EmitLValue(Allocator->IgnoreParenImpCasts());
+ llvm::Value *AllocatorVal =
+ CGF.EmitLoadOfScalar(AllocatorLVal, Allocator->getExprLoc());
+ AllocatorVal = CGF.EmitScalarConversion(AllocatorVal, Allocator->getType(),
+ CGF.getContext().VoidPtrTy,
+ Allocator->getExprLoc());
+ (void)CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
+ OMPRTL___kmpc_destroy_allocator),
+ {ThreadId, AllocatorVal});
+}
+
void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
const OMPExecutableDirective &D, StringRef ParentName,
llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
@@ -6483,7 +6455,7 @@ void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen, EntryFnName);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
- OutlinedFn = CGF.GenerateOpenMPCapturedStmtFunction(CS);
+ OutlinedFn = CGF.GenerateOpenMPCapturedStmtFunction(CS, D.getBeginLoc());
// If this target outline function is not an offload entry, we don't need to
// register it.
@@ -6669,6 +6641,8 @@ emitNumTeamsForTargetDirective(CodeGenFunction &CGF,
case OMPD_taskgroup:
case OMPD_atomic:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_teams:
case OMPD_target_data:
case OMPD_target_exit_data:
@@ -6684,6 +6658,8 @@ emitNumTeamsForTargetDirective(CodeGenFunction &CGF,
case OMPD_target_update:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
@@ -6697,6 +6673,8 @@ emitNumTeamsForTargetDirective(CodeGenFunction &CGF,
case OMPD_requires:
case OMPD_unknown:
break;
+ default:
+ break;
}
llvm_unreachable("Unexpected directive kind.");
}
@@ -6980,6 +6958,8 @@ emitNumThreadsForTargetDirective(CodeGenFunction &CGF,
case OMPD_taskgroup:
case OMPD_atomic:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_teams:
case OMPD_target_data:
case OMPD_target_exit_data:
@@ -6995,6 +6975,8 @@ emitNumThreadsForTargetDirective(CodeGenFunction &CGF,
case OMPD_target_update:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
@@ -7008,6 +6990,8 @@ emitNumThreadsForTargetDirective(CodeGenFunction &CGF,
case OMPD_requires:
case OMPD_unknown:
break;
+ default:
+ break;
}
llvm_unreachable("Unsupported directive kind.");
}
@@ -7044,7 +7028,7 @@ public:
OMP_MAP_TARGET_PARAM = 0x20,
/// Signal that the runtime library has to return the device pointer
/// in the current position for the data being mapped. Used when we have the
- /// use_device_ptr clause.
+ /// use_device_ptr or use_device_addr clause.
OMP_MAP_RETURN_PARAM = 0x40,
/// This flag signals that the reference being passed is a pointer to
/// private data.
@@ -7112,26 +7096,30 @@ private:
ArrayRef<OpenMPMapModifierKind> MapModifiers;
bool ReturnDevicePointer = false;
bool IsImplicit = false;
+ bool ForDeviceAddr = false;
MapInfo() = default;
MapInfo(
OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
OpenMPMapClauseKind MapType,
- ArrayRef<OpenMPMapModifierKind> MapModifiers,
- bool ReturnDevicePointer, bool IsImplicit)
+ ArrayRef<OpenMPMapModifierKind> MapModifiers, bool ReturnDevicePointer,
+ bool IsImplicit, bool ForDeviceAddr = false)
: Components(Components), MapType(MapType), MapModifiers(MapModifiers),
- ReturnDevicePointer(ReturnDevicePointer), IsImplicit(IsImplicit) {}
+ ReturnDevicePointer(ReturnDevicePointer), IsImplicit(IsImplicit),
+ ForDeviceAddr(ForDeviceAddr) {}
};
- /// If use_device_ptr is used on a pointer which is a struct member and there
- /// is no map information about it, then emission of that entry is deferred
- /// until the whole struct has been processed.
+ /// If use_device_ptr or use_device_addr is used on a decl which is a struct
+ /// member and there is no map information about it, then emission of that
+ /// entry is deferred until the whole struct has been processed.
struct DeferredDevicePtrEntryTy {
const Expr *IE = nullptr;
const ValueDecl *VD = nullptr;
+ bool ForDeviceAddr = false;
- DeferredDevicePtrEntryTy(const Expr *IE, const ValueDecl *VD)
- : IE(IE), VD(VD) {}
+ DeferredDevicePtrEntryTy(const Expr *IE, const ValueDecl *VD,
+ bool ForDeviceAddr)
+ : IE(IE), VD(VD), ForDeviceAddr(ForDeviceAddr) {}
};
/// The target directive from where the mappable clauses were extracted. It
@@ -7158,6 +7146,20 @@ private:
llvm::Value *getExprTypeSize(const Expr *E) const {
QualType ExprTy = E->getType().getCanonicalType();
+ // Calculate the size for array shaping expression.
+ if (const auto *OAE = dyn_cast<OMPArrayShapingExpr>(E)) {
+ llvm::Value *Size =
+ CGF.getTypeSize(OAE->getBase()->getType()->getPointeeType());
+ for (const Expr *SE : OAE->getDimensions()) {
+ llvm::Value *Sz = CGF.EmitScalarExpr(SE);
+ Sz = CGF.EmitScalarConversion(Sz, SE->getType(),
+ CGF.getContext().getSizeType(),
+ SE->getExprLoc());
+ Size = CGF.Builder.CreateNUWMul(Size, Sz);
+ }
+ return Size;
+ }
+
// Reference types are ignored for mapping purposes.
if (const auto *RefTy = ExprTy->getAs<ReferenceType>())
ExprTy = RefTy->getPointeeType().getCanonicalType();
@@ -7173,7 +7175,7 @@ private:
// If there is no length associated with the expression and lower bound is
// not specified too, that means we are using the whole length of the
// base.
- if (!OAE->getLength() && OAE->getColonLoc().isValid() &&
+ if (!OAE->getLength() && OAE->getColonLocFirst().isValid() &&
!OAE->getLowerBound())
return CGF.getTypeSize(BaseTy);
@@ -7188,7 +7190,7 @@ private:
// If we don't have a length at this point, that is because we have an
// array section with a single element.
- if (!OAE->getLength() && OAE->getColonLoc().isInvalid())
+ if (!OAE->getLength() && OAE->getColonLocFirst().isInvalid())
return ElemSize;
if (const Expr *LenExpr = OAE->getLength()) {
@@ -7198,7 +7200,7 @@ private:
LenExpr->getExprLoc());
return CGF.Builder.CreateNUWMul(LengthVal, ElemSize);
}
- assert(!OAE->getLength() && OAE->getColonLoc().isValid() &&
+ assert(!OAE->getLength() && OAE->getColonLocFirst().isValid() &&
OAE->getLowerBound() && "expected array_section[lb:].");
// Size = sizetype - lb * elemtype;
llvm::Value *LengthVal = CGF.getTypeSize(BaseTy);
@@ -7271,7 +7273,7 @@ private:
return false;
// An array section with no colon always refer to a single element.
- if (OASE->getColonLoc().isInvalid())
+ if (OASE->getColonLocFirst().isInvalid())
return false;
const Expr *Length = OASE->getLength();
@@ -7305,13 +7307,12 @@ private:
/// \a IsFirstComponent should be set to true if the provided set of
/// components is the first associated with a capture.
void generateInfoForComponentList(
- OpenMPMapClauseKind MapType,
- ArrayRef<OpenMPMapModifierKind> MapModifiers,
+ OpenMPMapClauseKind MapType, ArrayRef<OpenMPMapModifierKind> MapModifiers,
OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
MapValuesArrayTy &Sizes, MapFlagsArrayTy &Types,
StructRangeInfoTy &PartialStruct, bool IsFirstComponentList,
- bool IsImplicit,
+ bool IsImplicit, bool ForDeviceAddr = false,
ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
OverlappedElements = llvm::None) const {
// The following summarizes what has to be generated for each map and the
@@ -7489,6 +7490,7 @@ private:
const Expr *AssocExpr = I->getAssociatedExpression();
const auto *AE = dyn_cast<ArraySubscriptExpr>(AssocExpr);
const auto *OASE = dyn_cast<OMPArraySectionExpr>(AssocExpr);
+ const auto *OAShE = dyn_cast<OMPArrayShapingExpr>(AssocExpr);
if (isa<MemberExpr>(AssocExpr)) {
// The base is the 'this' pointer. The content of the pointer is going
@@ -7498,6 +7500,11 @@ private:
(OASE &&
isa<CXXThisExpr>(OASE->getBase()->IgnoreParenImpCasts()))) {
BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress(CGF);
+ } else if (OAShE &&
+ isa<CXXThisExpr>(OAShE->getBase()->IgnoreParenCasts())) {
+ BP = Address(
+ CGF.EmitScalarExpr(OAShE->getBase()),
+ CGF.getContext().getTypeAlignInChars(OAShE->getBase()->getType()));
} else {
// The base is the reference to the variable.
// BP = &Var.
@@ -7580,29 +7587,44 @@ private:
// types.
const auto *OASE =
dyn_cast<OMPArraySectionExpr>(I->getAssociatedExpression());
+ const auto *OAShE =
+ dyn_cast<OMPArrayShapingExpr>(I->getAssociatedExpression());
+ const auto *UO = dyn_cast<UnaryOperator>(I->getAssociatedExpression());
+ const auto *BO = dyn_cast<BinaryOperator>(I->getAssociatedExpression());
bool IsPointer =
+ OAShE ||
(OASE && OMPArraySectionExpr::getBaseOriginalType(OASE)
.getCanonicalType()
->isAnyPointerType()) ||
I->getAssociatedExpression()->getType()->isAnyPointerType();
+ bool IsNonDerefPointer = IsPointer && !UO && !BO;
- if (Next == CE || IsPointer || IsFinalArraySection) {
+ if (Next == CE || IsNonDerefPointer || IsFinalArraySection) {
// If this is not the last component, we expect the pointer to be
// associated with an array expression or member expression.
assert((Next == CE ||
isa<MemberExpr>(Next->getAssociatedExpression()) ||
isa<ArraySubscriptExpr>(Next->getAssociatedExpression()) ||
- isa<OMPArraySectionExpr>(Next->getAssociatedExpression())) &&
+ isa<OMPArraySectionExpr>(Next->getAssociatedExpression()) ||
+ isa<UnaryOperator>(Next->getAssociatedExpression()) ||
+ isa<BinaryOperator>(Next->getAssociatedExpression())) &&
"Unexpected expression");
- Address LB = CGF.EmitOMPSharedLValue(I->getAssociatedExpression())
- .getAddress(CGF);
+ Address LB = Address::invalid();
+ if (OAShE) {
+ LB = Address(CGF.EmitScalarExpr(OAShE->getBase()),
+ CGF.getContext().getTypeAlignInChars(
+ OAShE->getBase()->getType()));
+ } else {
+ LB = CGF.EmitOMPSharedLValue(I->getAssociatedExpression())
+ .getAddress(CGF);
+ }
// If this component is a pointer inside the base struct then we don't
// need to create any entry for it - it will be combined with the object
// it is pointing to into a single PTR_AND_OBJ entry.
- bool IsMemberPointer =
- IsPointer && EncounteredME &&
+ bool IsMemberPointerOrAddr =
+ (IsPointer || ForDeviceAddr) && EncounteredME &&
(dyn_cast<MemberExpr>(I->getAssociatedExpression()) ==
EncounteredME);
if (!OverlappedElements.empty()) {
@@ -7669,7 +7691,7 @@ private:
break;
}
llvm::Value *Size = getExprTypeSize(I->getAssociatedExpression());
- if (!IsMemberPointer) {
+ if (!IsMemberPointerOrAddr) {
BasePointers.push_back(BP.getPointer());
Pointers.push_back(LB.getPointer());
Sizes.push_back(
@@ -7708,13 +7730,20 @@ private:
// mapped member. If the parent is "*this", then the value declaration
// is nullptr.
if (EncounteredME) {
- const auto *FD = dyn_cast<FieldDecl>(EncounteredME->getMemberDecl());
+ const auto *FD = cast<FieldDecl>(EncounteredME->getMemberDecl());
unsigned FieldIndex = FD->getFieldIndex();
// Update info about the lowest and highest elements for this struct
if (!PartialStruct.Base.isValid()) {
PartialStruct.LowestElem = {FieldIndex, LB};
- PartialStruct.HighestElem = {FieldIndex, LB};
+ if (IsFinalArraySection) {
+ Address HB =
+ CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false)
+ .getAddress(CGF);
+ PartialStruct.HighestElem = {FieldIndex, HB};
+ } else {
+ PartialStruct.HighestElem = {FieldIndex, LB};
+ }
PartialStruct.Base = BP;
} else if (FieldIndex < PartialStruct.LowestElem.first) {
PartialStruct.LowestElem = {FieldIndex, LB};
@@ -7851,6 +7880,19 @@ public:
for (const auto *D : C->varlists())
FirstPrivateDecls.try_emplace(
cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl()), C->isImplicit());
+ // Extract implicit firstprivates from uses_allocators clauses.
+ for (const auto *C : Dir.getClausesOfKind<OMPUsesAllocatorsClause>()) {
+ for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
+ OMPUsesAllocatorsClause::Data D = C->getAllocatorData(I);
+ if (const auto *DRE = dyn_cast_or_null<DeclRefExpr>(D.AllocatorTraits))
+ FirstPrivateDecls.try_emplace(cast<VarDecl>(DRE->getDecl()),
+ /*Implicit=*/true);
+ else if (const auto *VD = dyn_cast<VarDecl>(
+ cast<DeclRefExpr>(D.Allocator->IgnoreParenImpCasts())
+ ->getDecl()))
+ FirstPrivateDecls.try_emplace(VD, /*Implicit=*/true);
+ }
+ }
// Extract device pointer clause information.
for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
for (auto L : C->component_lists())
@@ -7910,17 +7952,18 @@ public:
// Helper function to fill the information map for the different supported
// clauses.
- auto &&InfoGen = [&Info](
- const ValueDecl *D,
- OMPClauseMappableExprCommon::MappableExprComponentListRef L,
- OpenMPMapClauseKind MapType,
- ArrayRef<OpenMPMapModifierKind> MapModifiers,
- bool ReturnDevicePointer, bool IsImplicit) {
- const ValueDecl *VD =
- D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
- Info[VD].emplace_back(L, MapType, MapModifiers, ReturnDevicePointer,
- IsImplicit);
- };
+ auto &&InfoGen =
+ [&Info](const ValueDecl *D,
+ OMPClauseMappableExprCommon::MappableExprComponentListRef L,
+ OpenMPMapClauseKind MapType,
+ ArrayRef<OpenMPMapModifierKind> MapModifiers,
+ bool ReturnDevicePointer, bool IsImplicit,
+ bool ForDeviceAddr = false) {
+ const ValueDecl *VD =
+ D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
+ Info[VD].emplace_back(L, MapType, MapModifiers, ReturnDevicePointer,
+ IsImplicit, ForDeviceAddr);
+ };
assert(CurDir.is<const OMPExecutableDirective *>() &&
"Expect a executable directive");
@@ -7990,7 +8033,7 @@ public:
// partial struct.
InfoGen(nullptr, L.second, OMPC_MAP_unknown, llvm::None,
/*ReturnDevicePointer=*/false, C->isImplicit());
- DeferredInfo[nullptr].emplace_back(IE, VD);
+ DeferredInfo[nullptr].emplace_back(IE, VD, /*ForDeviceAddr=*/false);
} else {
llvm::Value *Ptr =
CGF.EmitLoadOfScalar(CGF.EmitLValue(IE), IE->getExprLoc());
@@ -8002,6 +8045,70 @@ public:
}
}
+ // Look at the use_device_addr clause information and mark the existing map
+ // entries as such. If there is no map information for an entry in the
+ // use_device_addr list, we create one with map type 'alloc' and zero size
+ // section. It is the user fault if that was not mapped before. If there is
+ // no map information and the pointer is a struct member, then we defer the
+ // emission of that entry until the whole struct has been processed.
+ llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
+ for (const auto *C :
+ CurExecDir->getClausesOfKind<OMPUseDeviceAddrClause>()) {
+ for (const auto L : C->component_lists()) {
+ assert(!L.second.empty() && "Not expecting empty list of components!");
+ const ValueDecl *VD = L.second.back().getAssociatedDeclaration();
+ if (!Processed.insert(VD).second)
+ continue;
+ VD = cast<ValueDecl>(VD->getCanonicalDecl());
+ const Expr *IE = L.second.back().getAssociatedExpression();
+ // If the first component is a member expression, we have to look into
+ // 'this', which maps to null in the map of map information. Otherwise
+ // look directly for the information.
+ auto It = Info.find(isa<MemberExpr>(IE) ? nullptr : VD);
+
+ // We potentially have map information for this declaration already.
+ // Look for the first set of components that refer to it.
+ if (It != Info.end()) {
+ auto *CI = llvm::find_if(It->second, [VD](const MapInfo &MI) {
+ return MI.Components.back().getAssociatedDeclaration() == VD;
+ });
+ // If we found a map entry, signal that the pointer has to be returned
+ // and move on to the next declaration.
+ if (CI != It->second.end()) {
+ CI->ReturnDevicePointer = true;
+ continue;
+ }
+ }
+
+ // We didn't find any match in our map information - generate a zero
+ // size array section - if the pointer is a struct member we defer this
+ // action until the whole struct has been processed.
+ if (isa<MemberExpr>(IE)) {
+ // Insert the pointer into Info to be processed by
+ // generateInfoForComponentList. Because it is a member pointer
+ // without a pointee, no entry will be generated for it, therefore
+ // we need to generate one after the whole struct has been processed.
+ // Nonetheless, generateInfoForComponentList must be called to take
+ // the pointer into account for the calculation of the range of the
+ // partial struct.
+ InfoGen(nullptr, L.second, OMPC_MAP_unknown, llvm::None,
+ /*ReturnDevicePointer=*/false, C->isImplicit(),
+ /*ForDeviceAddr=*/true);
+ DeferredInfo[nullptr].emplace_back(IE, VD, /*ForDeviceAddr=*/true);
+ } else {
+ llvm::Value *Ptr;
+ if (IE->isGLValue())
+ Ptr = CGF.EmitLValue(IE).getPointer(CGF);
+ else
+ Ptr = CGF.EmitScalarExpr(IE);
+ BasePointers.emplace_back(Ptr, VD);
+ Pointers.push_back(Ptr);
+ Sizes.push_back(llvm::Constant::getNullValue(CGF.Int64Ty));
+ Types.push_back(OMP_MAP_RETURN_PARAM | OMP_MAP_TARGET_PARAM);
+ }
+ }
+ }
+
for (const auto &M : Info) {
// We need to know when we generate information for the first component
// associated with a capture, because the mapping flags depend on it.
@@ -8020,10 +8127,10 @@ public:
// Remember the current base pointer index.
unsigned CurrentBasePointersIdx = CurBasePointers.size();
- generateInfoForComponentList(L.MapType, L.MapModifiers, L.Components,
- CurBasePointers, CurPointers, CurSizes,
- CurTypes, PartialStruct,
- IsFirstComponentList, L.IsImplicit);
+ generateInfoForComponentList(
+ L.MapType, L.MapModifiers, L.Components, CurBasePointers,
+ CurPointers, CurSizes, CurTypes, PartialStruct,
+ IsFirstComponentList, L.IsImplicit, L.ForDeviceAddr);
// If this entry relates with a device pointer, set the relevant
// declaration and add the 'return pointer' flag.
@@ -8043,21 +8150,35 @@ public:
}
// Append any pending zero-length pointers which are struct members and
- // used with use_device_ptr.
+ // used with use_device_ptr or use_device_addr.
auto CI = DeferredInfo.find(M.first);
if (CI != DeferredInfo.end()) {
for (const DeferredDevicePtrEntryTy &L : CI->second) {
- llvm::Value *BasePtr = this->CGF.EmitLValue(L.IE).getPointer(CGF);
- llvm::Value *Ptr = this->CGF.EmitLoadOfScalar(
- this->CGF.EmitLValue(L.IE), L.IE->getExprLoc());
+ llvm::Value *BasePtr;
+ llvm::Value *Ptr;
+ if (L.ForDeviceAddr) {
+ if (L.IE->isGLValue())
+ Ptr = this->CGF.EmitLValue(L.IE).getPointer(CGF);
+ else
+ Ptr = this->CGF.EmitScalarExpr(L.IE);
+ BasePtr = Ptr;
+ // Entry is RETURN_PARAM. Also, set the placeholder value
+ // MEMBER_OF=FFFF so that the entry is later updated with the
+ // correct value of MEMBER_OF.
+ CurTypes.push_back(OMP_MAP_RETURN_PARAM | OMP_MAP_MEMBER_OF);
+ } else {
+ BasePtr = this->CGF.EmitLValue(L.IE).getPointer(CGF);
+ Ptr = this->CGF.EmitLoadOfScalar(this->CGF.EmitLValue(L.IE),
+ L.IE->getExprLoc());
+ // Entry is PTR_AND_OBJ and RETURN_PARAM. Also, set the placeholder
+ // value MEMBER_OF=FFFF so that the entry is later updated with the
+ // correct value of MEMBER_OF.
+ CurTypes.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_RETURN_PARAM |
+ OMP_MAP_MEMBER_OF);
+ }
CurBasePointers.emplace_back(BasePtr, L.VD);
CurPointers.push_back(Ptr);
CurSizes.push_back(llvm::Constant::getNullValue(this->CGF.Int64Ty));
- // Entry is PTR_AND_OBJ and RETURN_PARAM. Also, set the placeholder
- // value MEMBER_OF=FFFF so that the entry is later updated with the
- // correct value of MEMBER_OF.
- CurTypes.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_RETURN_PARAM |
- OMP_MAP_MEMBER_OF);
}
}
@@ -8126,10 +8247,10 @@ public:
for (const MapInfo &L : M.second) {
assert(!L.Components.empty() &&
"Not expecting declaration with no component lists.");
- generateInfoForComponentList(L.MapType, L.MapModifiers, L.Components,
- CurBasePointers, CurPointers, CurSizes,
- CurTypes, PartialStruct,
- IsFirstComponentList, L.IsImplicit);
+ generateInfoForComponentList(
+ L.MapType, L.MapModifiers, L.Components, CurBasePointers,
+ CurPointers, CurSizes, CurTypes, PartialStruct,
+ IsFirstComponentList, L.IsImplicit, L.ForDeviceAddr);
IsFirstComponentList = false;
}
@@ -8395,10 +8516,10 @@ public:
ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
OverlappedComponents = Pair.getSecond();
bool IsFirstComponentList = true;
- generateInfoForComponentList(MapType, MapModifiers, Components,
- BasePointers, Pointers, Sizes, Types,
- PartialStruct, IsFirstComponentList,
- IsImplicit, OverlappedComponents);
+ generateInfoForComponentList(
+ MapType, MapModifiers, Components, BasePointers, Pointers, Sizes,
+ Types, PartialStruct, IsFirstComponentList, IsImplicit,
+ /*ForDeviceAddr=*/false, OverlappedComponents);
}
// Go through other elements without overlapped elements.
bool IsFirstComponentList = OverlappedData.empty();
@@ -8759,6 +8880,8 @@ getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
case OMPD_taskgroup:
case OMPD_atomic:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_teams:
case OMPD_target_data:
case OMPD_target_exit_data:
@@ -8774,6 +8897,8 @@ getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
case OMPD_target_update:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
@@ -8786,6 +8911,7 @@ getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
case OMPD_parallel_master_taskloop_simd:
case OMPD_requires:
case OMPD_unknown:
+ default:
llvm_unreachable("Unexpected directive.");
}
}
@@ -8935,7 +9061,9 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
// pre-existing components.
llvm::Value *OffloadingArgs[] = {Handle};
llvm::Value *PreviousSize = MapperCGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__tgt_mapper_num_components), OffloadingArgs);
+ OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
+ OMPRTL___tgt_mapper_num_components),
+ OffloadingArgs);
llvm::Value *ShiftedPreviousSize = MapperCGF.Builder.CreateShl(
PreviousSize,
MapperCGF.Builder.getInt64(MappableExprsHandler::getFlagMemberOffset()));
@@ -9041,7 +9169,8 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
llvm::Value *OffloadingArgs[] = {Handle, CurBaseArg, CurBeginArg,
CurSizeArg, CurMapType};
MapperCGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__tgt_push_mapper_component),
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___tgt_push_mapper_component),
OffloadingArgs);
}
@@ -9085,8 +9214,9 @@ void CGOpenMPRuntime::emitUDMapperArrayInitOrDel(
// Evaluate if this is an array section.
llvm::BasicBlock *IsDeleteBB =
- MapperCGF.createBasicBlock("omp.array" + Prefix + ".evaldelete");
- llvm::BasicBlock *BodyBB = MapperCGF.createBasicBlock("omp.array" + Prefix);
+ MapperCGF.createBasicBlock(getName({"omp.array", Prefix, ".evaldelete"}));
+ llvm::BasicBlock *BodyBB =
+ MapperCGF.createBasicBlock(getName({"omp.array", Prefix}));
llvm::Value *IsArray = MapperCGF.Builder.CreateICmpSGE(
Size, MapperCGF.Builder.getInt64(1), "omp.arrayinit.isarray");
MapperCGF.Builder.CreateCondBr(IsArray, IsDeleteBB, ExitBB);
@@ -9099,10 +9229,10 @@ void CGOpenMPRuntime::emitUDMapperArrayInitOrDel(
llvm::Value *DeleteCond;
if (IsInit) {
DeleteCond = MapperCGF.Builder.CreateIsNull(
- DeleteBit, "omp.array" + Prefix + ".delete");
+ DeleteBit, getName({"omp.array", Prefix, ".delete"}));
} else {
DeleteCond = MapperCGF.Builder.CreateIsNotNull(
- DeleteBit, "omp.array" + Prefix + ".delete");
+ DeleteBit, getName({"omp.array", Prefix, ".delete"}));
}
MapperCGF.Builder.CreateCondBr(DeleteCond, BodyBB, ExitBB);
@@ -9121,7 +9251,9 @@ void CGOpenMPRuntime::emitUDMapperArrayInitOrDel(
// data structure.
llvm::Value *OffloadingArgs[] = {Handle, Base, Begin, ArraySize, MapTypeArg};
MapperCGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__tgt_push_mapper_component), OffloadingArgs);
+ OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
+ OMPRTL___tgt_push_mapper_component),
+ OffloadingArgs);
}
void CGOpenMPRuntime::emitTargetNumIterationsCall(
@@ -9143,7 +9275,9 @@ void CGOpenMPRuntime::emitTargetNumIterationsCall(
if (llvm::Value *NumIterations = SizeEmitter(CGF, *LD)) {
llvm::Value *Args[] = {DeviceID, NumIterations};
CGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__kmpc_push_target_tripcount), Args);
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_push_target_tripcount),
+ Args);
}
};
emitInlinedDirective(CGF, OMPD_unknown, CodeGen);
@@ -9152,7 +9286,7 @@ void CGOpenMPRuntime::emitTargetNumIterationsCall(
void CGOpenMPRuntime::emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
- const Expr *Device,
+ llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter) {
@@ -9176,6 +9310,16 @@ void CGOpenMPRuntime::emitTargetCall(
auto &&ThenGen = [this, Device, OutlinedFn, OutlinedFnID, &D, &InputInfo,
&MapTypesArray, &CS, RequiresOuterTask, &CapturedVars,
SizeEmitter](CodeGenFunction &CGF, PrePostActionTy &) {
+ if (Device.getInt() == OMPC_DEVICE_ancestor) {
+ // Reverse offloading is not supported, so just execute on the host.
+ if (RequiresOuterTask) {
+ CapturedVars.clear();
+ CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
+ }
+ emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
+ return;
+ }
+
// On top of the arrays that were filled up, the target offloading call
// takes as arguments the device id as well as the host pointer. The host
// pointer is used by the runtime library to identify the current target
@@ -9190,9 +9334,13 @@ void CGOpenMPRuntime::emitTargetCall(
// Emit device ID if any.
llvm::Value *DeviceID;
- if (Device) {
- DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
- CGF.Int64Ty, /*isSigned=*/true);
+ if (Device.getPointer()) {
+ assert((Device.getInt() == OMPC_DEVICE_unknown ||
+ Device.getInt() == OMPC_DEVICE_device_num) &&
+ "Expected device_num modifier.");
+ llvm::Value *DevVal = CGF.EmitScalarExpr(Device.getPointer());
+ DeviceID =
+ CGF.Builder.CreateIntCast(DevVal, CGF.Int64Ty, /*isSigned=*/true);
} else {
DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
}
@@ -9256,8 +9404,9 @@ void CGOpenMPRuntime::emitTargetCall(
NumTeams,
NumThreads};
Return = CGF.EmitRuntimeCall(
- createRuntimeFunction(HasNowait ? OMPRTL__tgt_target_teams_nowait
- : OMPRTL__tgt_target_teams),
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), HasNowait ? OMPRTL___tgt_target_teams_nowait
+ : OMPRTL___tgt_target_teams),
OffloadingArgs);
} else {
llvm::Value *OffloadingArgs[] = {DeviceID,
@@ -9268,8 +9417,9 @@ void CGOpenMPRuntime::emitTargetCall(
InputInfo.SizesArray.getPointer(),
MapTypesArray};
Return = CGF.EmitRuntimeCall(
- createRuntimeFunction(HasNowait ? OMPRTL__tgt_target_nowait
- : OMPRTL__tgt_target),
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(),
+ HasNowait ? OMPRTL___tgt_target_nowait : OMPRTL___tgt_target),
OffloadingArgs);
}
@@ -9521,6 +9671,8 @@ void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
case OMPD_taskgroup:
case OMPD_atomic:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_teams:
case OMPD_target_data:
case OMPD_target_exit_data:
@@ -9536,6 +9688,8 @@ void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
case OMPD_target_update:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
@@ -9548,6 +9702,7 @@ void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
case OMPD_parallel_master_taskloop_simd:
case OMPD_requires:
case OMPD_unknown:
+ default:
llvm_unreachable("Unknown target directive for OpenMP device codegen.");
}
return;
@@ -9774,22 +9929,40 @@ void CGOpenMPRuntime::adjustTargetSpecificDataForLambdas(
" Expected target-based directive.");
}
-void CGOpenMPRuntime::checkArchForUnifiedAddressing(
- const OMPRequiresDecl *D) {
+void CGOpenMPRuntime::processRequiresDirective(const OMPRequiresDecl *D) {
for (const OMPClause *Clause : D->clauselists()) {
if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
HasRequiresUnifiedSharedMemory = true;
- break;
+ } else if (const auto *AC =
+ dyn_cast<OMPAtomicDefaultMemOrderClause>(Clause)) {
+ switch (AC->getAtomicDefaultMemOrderKind()) {
+ case OMPC_ATOMIC_DEFAULT_MEM_ORDER_acq_rel:
+ RequiresAtomicOrdering = llvm::AtomicOrdering::AcquireRelease;
+ break;
+ case OMPC_ATOMIC_DEFAULT_MEM_ORDER_seq_cst:
+ RequiresAtomicOrdering = llvm::AtomicOrdering::SequentiallyConsistent;
+ break;
+ case OMPC_ATOMIC_DEFAULT_MEM_ORDER_relaxed:
+ RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;
+ break;
+ case OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown:
+ break;
+ }
}
}
}
+llvm::AtomicOrdering CGOpenMPRuntime::getDefaultMemoryOrdering() const {
+ return RequiresAtomicOrdering;
+}
+
bool CGOpenMPRuntime::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
LangAS &AS) {
if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
return false;
const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
switch(A->getAllocatorType()) {
+ case OMPAllocateDeclAttr::OMPNullMemAlloc:
case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
// Not supported, fallback to the default mem space.
case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
@@ -9865,7 +10038,7 @@ llvm::Function *CGOpenMPRuntime::emitRequiresDirectiveRegFun() {
const auto &FI = CGM.getTypes().arrangeNullaryFunction();
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
std::string ReqName = getName({"omp_offloading", "requires_reg"});
- RequiresRegFn = CGM.CreateGlobalInitOrDestructFunction(FTy, ReqName, FI);
+ RequiresRegFn = CGM.CreateGlobalInitOrCleanUpFunction(FTy, ReqName, FI);
CGF.StartFunction(GlobalDecl(), C.VoidTy, RequiresRegFn, FI, {});
OpenMPOffloadingRequiresDirFlags Flags = OMP_REQ_NONE;
// TODO: check for other requires clauses.
@@ -9880,8 +10053,9 @@ llvm::Function *CGOpenMPRuntime::emitRequiresDirectiveRegFun() {
"Target or declare target region expected.");
if (HasRequiresUnifiedSharedMemory)
Flags = OMP_REQ_UNIFIED_SHARED_MEMORY;
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_register_requires),
- llvm::ConstantInt::get(CGM.Int64Ty, Flags));
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___tgt_register_requires),
+ llvm::ConstantInt::get(CGM.Int64Ty, Flags));
CGF.FinishFunction();
}
return RequiresRegFn;
@@ -9907,7 +10081,8 @@ void CGOpenMPRuntime::emitTeamsCall(CodeGenFunction &CGF,
RealArgs.append(std::begin(Args), std::end(Args));
RealArgs.append(CapturedVars.begin(), CapturedVars.end());
- llvm::FunctionCallee RTLFn = createRuntimeFunction(OMPRTL__kmpc_fork_teams);
+ llvm::FunctionCallee RTLFn = OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_fork_teams);
CGF.EmitRuntimeCall(RTLFn, RealArgs);
}
@@ -9935,7 +10110,8 @@ void CGOpenMPRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
// Build call __kmpc_push_num_teamss(&loc, global_tid, num_teams, thread_limit)
llvm::Value *PushNumTeamsArgs[] = {RTLoc, getThreadID(CGF, Loc), NumTeamsVal,
ThreadLimitVal};
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_num_teams),
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_push_num_teams),
PushNumTeamsArgs);
}
@@ -9989,7 +10165,8 @@ void CGOpenMPRuntime::emitTargetDataCalls(
llvm::Value *OffloadingArgs[] = {
DeviceID, PointerNum, BasePointersArrayArg,
PointersArrayArg, SizesArrayArg, MapTypesArrayArg};
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_target_data_begin),
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___tgt_target_data_begin),
OffloadingArgs);
// If device pointer privatization is required, emit the body of the region
@@ -10025,7 +10202,8 @@ void CGOpenMPRuntime::emitTargetDataCalls(
llvm::Value *OffloadingArgs[] = {
DeviceID, PointerNum, BasePointersArrayArg,
PointersArrayArg, SizesArrayArg, MapTypesArrayArg};
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_target_data_end),
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___tgt_target_data_end),
OffloadingArgs);
};
@@ -10105,19 +10283,19 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
// Select the right runtime function call for each expected standalone
// directive.
const bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
- OpenMPRTLFunction RTLFn;
+ RuntimeFunction RTLFn;
switch (D.getDirectiveKind()) {
case OMPD_target_enter_data:
- RTLFn = HasNowait ? OMPRTL__tgt_target_data_begin_nowait
- : OMPRTL__tgt_target_data_begin;
+ RTLFn = HasNowait ? OMPRTL___tgt_target_data_begin_nowait
+ : OMPRTL___tgt_target_data_begin;
break;
case OMPD_target_exit_data:
- RTLFn = HasNowait ? OMPRTL__tgt_target_data_end_nowait
- : OMPRTL__tgt_target_data_end;
+ RTLFn = HasNowait ? OMPRTL___tgt_target_data_end_nowait
+ : OMPRTL___tgt_target_data_end;
break;
case OMPD_target_update:
- RTLFn = HasNowait ? OMPRTL__tgt_target_data_update_nowait
- : OMPRTL__tgt_target_data_update;
+ RTLFn = HasNowait ? OMPRTL___tgt_target_data_update_nowait
+ : OMPRTL___tgt_target_data_update;
break;
case OMPD_parallel:
case OMPD_for:
@@ -10144,6 +10322,8 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
case OMPD_taskgroup:
case OMPD_atomic:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_teams:
case OMPD_target_data:
case OMPD_distribute:
@@ -10156,6 +10336,8 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
case OMPD_teams_distribute_parallel_for_simd:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
@@ -10178,10 +10360,13 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
case OMPD_target_parallel_for_simd:
case OMPD_requires:
case OMPD_unknown:
+ default:
llvm_unreachable("Unexpected standalone target data directive.");
break;
}
- CGF.EmitRuntimeCall(createRuntimeFunction(RTLFn), OffloadingArgs);
+ CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), RTLFn),
+ OffloadingArgs);
};
auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray](
@@ -10343,7 +10528,7 @@ emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn,
break;
case Linear:
Out << 'l';
- if (!!ParamAttr.StrideOrArg)
+ if (ParamAttr.StrideOrArg != 1)
Out << ParamAttr.StrideOrArg;
break;
case Uniform:
@@ -10420,7 +10605,7 @@ static bool getAArch64PBV(QualType QT, ASTContext &C) {
/// as defined by `LS(P)` in 3.2.1 of the AAVFABI.
/// TODO: Add support for references, section 3.2.1, item 1.
static unsigned getAArch64LS(QualType QT, ParamKindTy Kind, ASTContext &C) {
- if (getAArch64MTV(QT, Kind) && QT.getCanonicalType()->isPointerType()) {
+ if (!getAArch64MTV(QT, Kind) && QT.getCanonicalType()->isPointerType()) {
QualType PTy = QT.getCanonicalType()->getPointeeType();
if (getAArch64PBV(PTy, C))
return C.getTypeSize(PTy);
@@ -10483,7 +10668,7 @@ static std::string mangleVectorParameters(ArrayRef<ParamAttrTy> ParamAttrs) {
Out << 'l';
// Don't print the step value if it is not present or if it is
// equal to 1.
- if (!!ParamAttr.StrideOrArg && ParamAttr.StrideOrArg != 1)
+ if (ParamAttr.StrideOrArg != 1)
Out << ParamAttr.StrideOrArg;
break;
case Uniform:
@@ -10498,7 +10683,7 @@ static std::string mangleVectorParameters(ArrayRef<ParamAttrTy> ParamAttrs) {
Out << 'a' << ParamAttr.Alignment;
}
- return Out.str();
+ return std::string(Out.str());
}
// Function used to add the attribute. The parameter `VLEN` is
@@ -10721,15 +10906,24 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
for (const Expr *E : Attr->linears()) {
E = E->IgnoreParenImpCasts();
unsigned Pos;
+ // Rescaling factor needed to compute the linear parameter
+ // value in the mangled name.
+ unsigned PtrRescalingFactor = 1;
if (isa<CXXThisExpr>(E)) {
Pos = ParamPositions[FD];
} else {
const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
->getCanonicalDecl();
Pos = ParamPositions[PVD];
+ if (auto *P = dyn_cast<PointerType>(PVD->getType()))
+ PtrRescalingFactor = CGM.getContext()
+ .getTypeSizeInChars(P->getPointeeType())
+ .getQuantity();
}
ParamAttrTy &ParamAttr = ParamAttrs[Pos];
ParamAttr.Kind = Linear;
+ // Assuming a stride of 1, for `linear` without modifiers.
+ ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(1);
if (*SI) {
Expr::EvalResult Result;
if (!(*SI)->EvaluateAsInt(Result, C, Expr::SE_AllowSideEffects)) {
@@ -10745,6 +10939,11 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
ParamAttr.StrideOrArg = Result.Val.getInt();
}
}
+ // If we are using a linear clause on a pointer, we need to
+ // rescale the value of linear_step with the byte size of the
+ // pointee type.
+ if (Linear == ParamAttr.Kind)
+ ParamAttr.StrideOrArg = ParamAttr.StrideOrArg * PtrRescalingFactor;
++SI;
++MI;
}
@@ -10837,10 +11036,9 @@ void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
// dims.upper = num_iterations;
LValue UpperLVal = CGF.EmitLValueForField(
DimsLVal, *std::next(RD->field_begin(), UpperFD));
- llvm::Value *NumIterVal =
- CGF.EmitScalarConversion(CGF.EmitScalarExpr(NumIterations[I]),
- D.getNumIterations()->getType(), Int64Ty,
- D.getNumIterations()->getExprLoc());
+ llvm::Value *NumIterVal = CGF.EmitScalarConversion(
+ CGF.EmitScalarExpr(NumIterations[I]), NumIterations[I]->getType(),
+ Int64Ty, NumIterations[I]->getExprLoc());
CGF.EmitStoreOfScalar(NumIterVal, UpperLVal);
// dims.stride = 1;
LValue StrideLVal = CGF.EmitLValueForField(
@@ -10859,13 +11057,13 @@ void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
CGF.Builder.CreateConstArrayGEP(DimsAddr, 0).getPointer(),
CGM.VoidPtrTy)};
- llvm::FunctionCallee RTLFn =
- createRuntimeFunction(OMPRTL__kmpc_doacross_init);
+ llvm::FunctionCallee RTLFn = OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_doacross_init);
CGF.EmitRuntimeCall(RTLFn, Args);
llvm::Value *FiniArgs[DoacrossCleanupTy::DoacrossFinArgs] = {
emitUpdateLocation(CGF, D.getEndLoc()), getThreadID(CGF, D.getEndLoc())};
- llvm::FunctionCallee FiniRTLFn =
- createRuntimeFunction(OMPRTL__kmpc_doacross_fini);
+ llvm::FunctionCallee FiniRTLFn = OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_doacross_fini);
CGF.EHStack.pushCleanup<DoacrossCleanupTy>(NormalAndEHCleanup, FiniRTLFn,
llvm::makeArrayRef(FiniArgs));
}
@@ -10893,10 +11091,12 @@ void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
CGF.Builder.CreateConstArrayGEP(CntAddr, 0).getPointer()};
llvm::FunctionCallee RTLFn;
if (C->getDependencyKind() == OMPC_DEPEND_source) {
- RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_post);
+ RTLFn = OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
+ OMPRTL___kmpc_doacross_post);
} else {
assert(C->getDependencyKind() == OMPC_DEPEND_sink);
- RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_wait);
+ RTLFn = OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
+ OMPRTL___kmpc_doacross_wait);
}
CGF.EmitRuntimeCall(RTLFn, Args);
}
@@ -10969,7 +11169,8 @@ Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
return Address::invalid();
const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
// Use the default allocation.
- if (AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc &&
+ if ((AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc ||
+ AA->getAllocatorType() == OMPAllocateDeclAttr::OMPNullMemAlloc) &&
!AA->getAllocator())
return Address::invalid();
llvm::Value *Size;
@@ -10999,296 +11200,23 @@ Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
llvm::Value *Args[] = {ThreadID, Size, Allocator};
llvm::Value *Addr =
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_alloc), Args,
- CVD->getName() + ".void.addr");
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_alloc),
+ Args, getName({CVD->getName(), ".void.addr"}));
llvm::Value *FiniArgs[OMPAllocateCleanupTy::CleanupArgs] = {ThreadID, Addr,
Allocator};
- llvm::FunctionCallee FiniRTLFn = createRuntimeFunction(OMPRTL__kmpc_free);
+ llvm::FunctionCallee FiniRTLFn = OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_free);
CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(NormalAndEHCleanup, FiniRTLFn,
llvm::makeArrayRef(FiniArgs));
Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
Addr,
CGF.ConvertTypeForMem(CGM.getContext().getPointerType(CVD->getType())),
- CVD->getName() + ".addr");
+ getName({CVD->getName(), ".addr"}));
return Address(Addr, Align);
}
-namespace {
-using OMPContextSelectorData =
- OpenMPCtxSelectorData<ArrayRef<StringRef>, llvm::APSInt>;
-using CompleteOMPContextSelectorData = SmallVector<OMPContextSelectorData, 4>;
-} // anonymous namespace
-
-/// Checks current context and returns true if it matches the context selector.
-template <OpenMPContextSelectorSetKind CtxSet, OpenMPContextSelectorKind Ctx,
- typename... Arguments>
-static bool checkContext(const OMPContextSelectorData &Data,
- Arguments... Params) {
- assert(Data.CtxSet != OMP_CTX_SET_unknown && Data.Ctx != OMP_CTX_unknown &&
- "Unknown context selector or context selector set.");
- return false;
-}
-
-/// Checks for implementation={vendor(<vendor>)} context selector.
-/// \returns true iff <vendor>="llvm", false otherwise.
-template <>
-bool checkContext<OMP_CTX_SET_implementation, OMP_CTX_vendor>(
- const OMPContextSelectorData &Data) {
- return llvm::all_of(Data.Names,
- [](StringRef S) { return !S.compare_lower("llvm"); });
-}
-
-/// Checks for device={kind(<kind>)} context selector.
-/// \returns true if <kind>="host" and compilation is for host.
-/// true if <kind>="nohost" and compilation is for device.
-/// true if <kind>="cpu" and compilation is for Arm, X86 or PPC CPU.
-/// true if <kind>="gpu" and compilation is for NVPTX or AMDGCN.
-/// false otherwise.
-template <>
-bool checkContext<OMP_CTX_SET_device, OMP_CTX_kind, CodeGenModule &>(
- const OMPContextSelectorData &Data, CodeGenModule &CGM) {
- for (StringRef Name : Data.Names) {
- if (!Name.compare_lower("host")) {
- if (CGM.getLangOpts().OpenMPIsDevice)
- return false;
- continue;
- }
- if (!Name.compare_lower("nohost")) {
- if (!CGM.getLangOpts().OpenMPIsDevice)
- return false;
- continue;
- }
- switch (CGM.getTriple().getArch()) {
- case llvm::Triple::arm:
- case llvm::Triple::armeb:
- case llvm::Triple::aarch64:
- case llvm::Triple::aarch64_be:
- case llvm::Triple::aarch64_32:
- case llvm::Triple::ppc:
- case llvm::Triple::ppc64:
- case llvm::Triple::ppc64le:
- case llvm::Triple::x86:
- case llvm::Triple::x86_64:
- if (Name.compare_lower("cpu"))
- return false;
- break;
- case llvm::Triple::amdgcn:
- case llvm::Triple::nvptx:
- case llvm::Triple::nvptx64:
- if (Name.compare_lower("gpu"))
- return false;
- break;
- case llvm::Triple::UnknownArch:
- case llvm::Triple::arc:
- case llvm::Triple::avr:
- case llvm::Triple::bpfel:
- case llvm::Triple::bpfeb:
- case llvm::Triple::hexagon:
- case llvm::Triple::mips:
- case llvm::Triple::mipsel:
- case llvm::Triple::mips64:
- case llvm::Triple::mips64el:
- case llvm::Triple::msp430:
- case llvm::Triple::r600:
- case llvm::Triple::riscv32:
- case llvm::Triple::riscv64:
- case llvm::Triple::sparc:
- case llvm::Triple::sparcv9:
- case llvm::Triple::sparcel:
- case llvm::Triple::systemz:
- case llvm::Triple::tce:
- case llvm::Triple::tcele:
- case llvm::Triple::thumb:
- case llvm::Triple::thumbeb:
- case llvm::Triple::xcore:
- case llvm::Triple::le32:
- case llvm::Triple::le64:
- case llvm::Triple::amdil:
- case llvm::Triple::amdil64:
- case llvm::Triple::hsail:
- case llvm::Triple::hsail64:
- case llvm::Triple::spir:
- case llvm::Triple::spir64:
- case llvm::Triple::kalimba:
- case llvm::Triple::shave:
- case llvm::Triple::lanai:
- case llvm::Triple::wasm32:
- case llvm::Triple::wasm64:
- case llvm::Triple::renderscript32:
- case llvm::Triple::renderscript64:
- case llvm::Triple::ve:
- return false;
- }
- }
- return true;
-}
-
-static bool matchesContext(CodeGenModule &CGM,
- const CompleteOMPContextSelectorData &ContextData) {
- for (const OMPContextSelectorData &Data : ContextData) {
- switch (Data.Ctx) {
- case OMP_CTX_vendor:
- assert(Data.CtxSet == OMP_CTX_SET_implementation &&
- "Expected implementation context selector set.");
- if (!checkContext<OMP_CTX_SET_implementation, OMP_CTX_vendor>(Data))
- return false;
- break;
- case OMP_CTX_kind:
- assert(Data.CtxSet == OMP_CTX_SET_device &&
- "Expected device context selector set.");
- if (!checkContext<OMP_CTX_SET_device, OMP_CTX_kind, CodeGenModule &>(Data,
- CGM))
- return false;
- break;
- case OMP_CTX_unknown:
- llvm_unreachable("Unknown context selector kind.");
- }
- }
- return true;
-}
-
-static CompleteOMPContextSelectorData
-translateAttrToContextSelectorData(ASTContext &C,
- const OMPDeclareVariantAttr *A) {
- CompleteOMPContextSelectorData Data;
- for (unsigned I = 0, E = A->scores_size(); I < E; ++I) {
- Data.emplace_back();
- auto CtxSet = static_cast<OpenMPContextSelectorSetKind>(
- *std::next(A->ctxSelectorSets_begin(), I));
- auto Ctx = static_cast<OpenMPContextSelectorKind>(
- *std::next(A->ctxSelectors_begin(), I));
- Data.back().CtxSet = CtxSet;
- Data.back().Ctx = Ctx;
- const Expr *Score = *std::next(A->scores_begin(), I);
- Data.back().Score = Score->EvaluateKnownConstInt(C);
- switch (Ctx) {
- case OMP_CTX_vendor:
- assert(CtxSet == OMP_CTX_SET_implementation &&
- "Expected implementation context selector set.");
- Data.back().Names =
- llvm::makeArrayRef(A->implVendors_begin(), A->implVendors_end());
- break;
- case OMP_CTX_kind:
- assert(CtxSet == OMP_CTX_SET_device &&
- "Expected device context selector set.");
- Data.back().Names =
- llvm::makeArrayRef(A->deviceKinds_begin(), A->deviceKinds_end());
- break;
- case OMP_CTX_unknown:
- llvm_unreachable("Unknown context selector kind.");
- }
- }
- return Data;
-}
-
-static bool isStrictSubset(const CompleteOMPContextSelectorData &LHS,
- const CompleteOMPContextSelectorData &RHS) {
- llvm::SmallDenseMap<std::pair<int, int>, llvm::StringSet<>, 4> RHSData;
- for (const OMPContextSelectorData &D : RHS) {
- auto &Pair = RHSData.FindAndConstruct(std::make_pair(D.CtxSet, D.Ctx));
- Pair.getSecond().insert(D.Names.begin(), D.Names.end());
- }
- bool AllSetsAreEqual = true;
- for (const OMPContextSelectorData &D : LHS) {
- auto It = RHSData.find(std::make_pair(D.CtxSet, D.Ctx));
- if (It == RHSData.end())
- return false;
- if (D.Names.size() > It->getSecond().size())
- return false;
- if (llvm::set_union(It->getSecond(), D.Names))
- return false;
- AllSetsAreEqual =
- AllSetsAreEqual && (D.Names.size() == It->getSecond().size());
- }
-
- return LHS.size() != RHS.size() || !AllSetsAreEqual;
-}
-
-static bool greaterCtxScore(const CompleteOMPContextSelectorData &LHS,
- const CompleteOMPContextSelectorData &RHS) {
- // Score is calculated as sum of all scores + 1.
- llvm::APSInt LHSScore(llvm::APInt(64, 1), /*isUnsigned=*/false);
- bool RHSIsSubsetOfLHS = isStrictSubset(RHS, LHS);
- if (RHSIsSubsetOfLHS) {
- LHSScore = llvm::APSInt::get(0);
- } else {
- for (const OMPContextSelectorData &Data : LHS) {
- if (Data.Score.getBitWidth() > LHSScore.getBitWidth()) {
- LHSScore = LHSScore.extend(Data.Score.getBitWidth()) + Data.Score;
- } else if (Data.Score.getBitWidth() < LHSScore.getBitWidth()) {
- LHSScore += Data.Score.extend(LHSScore.getBitWidth());
- } else {
- LHSScore += Data.Score;
- }
- }
- }
- llvm::APSInt RHSScore(llvm::APInt(64, 1), /*isUnsigned=*/false);
- if (!RHSIsSubsetOfLHS && isStrictSubset(LHS, RHS)) {
- RHSScore = llvm::APSInt::get(0);
- } else {
- for (const OMPContextSelectorData &Data : RHS) {
- if (Data.Score.getBitWidth() > RHSScore.getBitWidth()) {
- RHSScore = RHSScore.extend(Data.Score.getBitWidth()) + Data.Score;
- } else if (Data.Score.getBitWidth() < RHSScore.getBitWidth()) {
- RHSScore += Data.Score.extend(RHSScore.getBitWidth());
- } else {
- RHSScore += Data.Score;
- }
- }
- }
- return llvm::APSInt::compareValues(LHSScore, RHSScore) >= 0;
-}
-
-/// Finds the variant function that matches current context with its context
-/// selector.
-static const FunctionDecl *getDeclareVariantFunction(CodeGenModule &CGM,
- const FunctionDecl *FD) {
- if (!FD->hasAttrs() || !FD->hasAttr<OMPDeclareVariantAttr>())
- return FD;
- // Iterate through all DeclareVariant attributes and check context selectors.
- const OMPDeclareVariantAttr *TopMostAttr = nullptr;
- CompleteOMPContextSelectorData TopMostData;
- for (const auto *A : FD->specific_attrs<OMPDeclareVariantAttr>()) {
- CompleteOMPContextSelectorData Data =
- translateAttrToContextSelectorData(CGM.getContext(), A);
- if (!matchesContext(CGM, Data))
- continue;
- // If the attribute matches the context, find the attribute with the highest
- // score.
- if (!TopMostAttr || !greaterCtxScore(TopMostData, Data)) {
- TopMostAttr = A;
- TopMostData.swap(Data);
- }
- }
- if (!TopMostAttr)
- return FD;
- return cast<FunctionDecl>(
- cast<DeclRefExpr>(TopMostAttr->getVariantFuncRef()->IgnoreParenImpCasts())
- ->getDecl());
-}
-
-bool CGOpenMPRuntime::emitDeclareVariant(GlobalDecl GD, bool IsForDefinition) {
- const auto *D = cast<FunctionDecl>(GD.getDecl());
- // If the original function is defined already, use its definition.
- StringRef MangledName = CGM.getMangledName(GD);
- llvm::GlobalValue *Orig = CGM.GetGlobalValue(MangledName);
- if (Orig && !Orig->isDeclaration())
- return false;
- const FunctionDecl *NewFD = getDeclareVariantFunction(CGM, D);
- // Emit original function if it does not have declare variant attribute or the
- // context does not match.
- if (NewFD == D)
- return false;
- GlobalDecl NewGD = GD.getWithDecl(NewFD);
- if (tryEmitDeclareVariant(NewGD, GD, Orig, IsForDefinition)) {
- DeferredVariantFunction.erase(D);
- return true;
- }
- DeferredVariantFunction.insert(std::make_pair(D, std::make_pair(NewGD, GD)));
- return true;
-}
-
CGOpenMPRuntime::NontemporalDeclsRAII::NontemporalDeclsRAII(
CodeGenModule &CGM, const OMPLoopDirective &S)
: CGM(CGM), NeedToPush(S.hasClausesOfKind<OMPNontemporalClause>()) {
@@ -11329,17 +11257,101 @@ bool CGOpenMPRuntime::isNontemporalDecl(const ValueDecl *VD) const {
[VD](const NontemporalDeclsSet &Set) { return Set.count(VD) > 0; });
}
+void CGOpenMPRuntime::LastprivateConditionalRAII::tryToDisableInnerAnalysis(
+ const OMPExecutableDirective &S,
+ llvm::DenseSet<CanonicalDeclPtr<const Decl>> &NeedToAddForLPCsAsDisabled)
+ const {
+ llvm::DenseSet<CanonicalDeclPtr<const Decl>> NeedToCheckForLPCs;
+ // Vars in target/task regions must be excluded completely.
+ if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()) ||
+ isOpenMPTaskingDirective(S.getDirectiveKind())) {
+ SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
+ getOpenMPCaptureRegions(CaptureRegions, S.getDirectiveKind());
+ const CapturedStmt *CS = S.getCapturedStmt(CaptureRegions.front());
+ for (const CapturedStmt::Capture &Cap : CS->captures()) {
+ if (Cap.capturesVariable() || Cap.capturesVariableByCopy())
+ NeedToCheckForLPCs.insert(Cap.getCapturedVar());
+ }
+ }
+ // Exclude vars in private clauses.
+ for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
+ for (const Expr *Ref : C->varlists()) {
+ if (!Ref->getType()->isScalarType())
+ continue;
+ const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
+ if (!DRE)
+ continue;
+ NeedToCheckForLPCs.insert(DRE->getDecl());
+ }
+ }
+ for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
+ for (const Expr *Ref : C->varlists()) {
+ if (!Ref->getType()->isScalarType())
+ continue;
+ const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
+ if (!DRE)
+ continue;
+ NeedToCheckForLPCs.insert(DRE->getDecl());
+ }
+ }
+ for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
+ for (const Expr *Ref : C->varlists()) {
+ if (!Ref->getType()->isScalarType())
+ continue;
+ const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
+ if (!DRE)
+ continue;
+ NeedToCheckForLPCs.insert(DRE->getDecl());
+ }
+ }
+ for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
+ for (const Expr *Ref : C->varlists()) {
+ if (!Ref->getType()->isScalarType())
+ continue;
+ const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
+ if (!DRE)
+ continue;
+ NeedToCheckForLPCs.insert(DRE->getDecl());
+ }
+ }
+ for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) {
+ for (const Expr *Ref : C->varlists()) {
+ if (!Ref->getType()->isScalarType())
+ continue;
+ const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
+ if (!DRE)
+ continue;
+ NeedToCheckForLPCs.insert(DRE->getDecl());
+ }
+ }
+ for (const Decl *VD : NeedToCheckForLPCs) {
+ for (const LastprivateConditionalData &Data :
+ llvm::reverse(CGM.getOpenMPRuntime().LastprivateConditionalStack)) {
+ if (Data.DeclToUniqueName.count(VD) > 0) {
+ if (!Data.Disabled)
+ NeedToAddForLPCsAsDisabled.insert(VD);
+ break;
+ }
+ }
+ }
+}
+
CGOpenMPRuntime::LastprivateConditionalRAII::LastprivateConditionalRAII(
CodeGenFunction &CGF, const OMPExecutableDirective &S, LValue IVLVal)
: CGM(CGF.CGM),
- NeedToPush(llvm::any_of(S.getClausesOfKind<OMPLastprivateClause>(),
- [](const OMPLastprivateClause *C) {
- return C->getKind() ==
- OMPC_LASTPRIVATE_conditional;
- })) {
+ Action((CGM.getLangOpts().OpenMP >= 50 &&
+ llvm::any_of(S.getClausesOfKind<OMPLastprivateClause>(),
+ [](const OMPLastprivateClause *C) {
+ return C->getKind() ==
+ OMPC_LASTPRIVATE_conditional;
+ }))
+ ? ActionToDo::PushAsLastprivateConditional
+ : ActionToDo::DoNotPush) {
assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.");
- if (!NeedToPush)
+ if (CGM.getLangOpts().OpenMP < 50 || Action == ActionToDo::DoNotPush)
return;
+ assert(Action == ActionToDo::PushAsLastprivateConditional &&
+ "Expected a push action.");
LastprivateConditionalData &Data =
CGM.getOpenMPRuntime().LastprivateConditionalStack.emplace_back();
for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
@@ -11347,107 +11359,136 @@ CGOpenMPRuntime::LastprivateConditionalRAII::LastprivateConditionalRAII(
continue;
for (const Expr *Ref : C->varlists()) {
- Data.DeclToUniqeName.try_emplace(
+ Data.DeclToUniqueName.insert(std::make_pair(
cast<DeclRefExpr>(Ref->IgnoreParenImpCasts())->getDecl(),
- generateUniqueName(CGM, "pl_cond", Ref));
+ SmallString<16>(generateUniqueName(CGM, "pl_cond", Ref))));
}
}
Data.IVLVal = IVLVal;
- // In simd only mode or for simd directives no need to generate threadprivate
- // references for the loop iteration counter, we can use the original one
- // since outlining cannot happen in simd regions.
- if (CGF.getLangOpts().OpenMPSimd ||
- isOpenMPSimdDirective(S.getDirectiveKind())) {
- Data.UseOriginalIV = true;
+ Data.Fn = CGF.CurFn;
+}
+
+CGOpenMPRuntime::LastprivateConditionalRAII::LastprivateConditionalRAII(
+ CodeGenFunction &CGF, const OMPExecutableDirective &S)
+ : CGM(CGF.CGM), Action(ActionToDo::DoNotPush) {
+ assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.");
+ if (CGM.getLangOpts().OpenMP < 50)
return;
+ llvm::DenseSet<CanonicalDeclPtr<const Decl>> NeedToAddForLPCsAsDisabled;
+ tryToDisableInnerAnalysis(S, NeedToAddForLPCsAsDisabled);
+ if (!NeedToAddForLPCsAsDisabled.empty()) {
+ Action = ActionToDo::DisableLastprivateConditional;
+ LastprivateConditionalData &Data =
+ CGM.getOpenMPRuntime().LastprivateConditionalStack.emplace_back();
+ for (const Decl *VD : NeedToAddForLPCsAsDisabled)
+ Data.DeclToUniqueName.insert(std::make_pair(VD, SmallString<16>()));
+ Data.Fn = CGF.CurFn;
+ Data.Disabled = true;
}
- llvm::SmallString<16> Buffer;
- llvm::raw_svector_ostream OS(Buffer);
- PresumedLoc PLoc =
- CGM.getContext().getSourceManager().getPresumedLoc(S.getBeginLoc());
- assert(PLoc.isValid() && "Source location is expected to be always valid.");
+}
- llvm::sys::fs::UniqueID ID;
- if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
- CGM.getDiags().Report(diag::err_cannot_open_file)
- << PLoc.getFilename() << EC.message();
- OS << "$pl_cond_" << ID.getDevice() << "_" << ID.getFile() << "_"
- << PLoc.getLine() << "_" << PLoc.getColumn() << "$iv";
- Data.IVName = OS.str();
+CGOpenMPRuntime::LastprivateConditionalRAII
+CGOpenMPRuntime::LastprivateConditionalRAII::disable(
+ CodeGenFunction &CGF, const OMPExecutableDirective &S) {
+ return LastprivateConditionalRAII(CGF, S);
}
CGOpenMPRuntime::LastprivateConditionalRAII::~LastprivateConditionalRAII() {
- if (!NeedToPush)
+ if (CGM.getLangOpts().OpenMP < 50)
return;
- CGM.getOpenMPRuntime().LastprivateConditionalStack.pop_back();
+ if (Action == ActionToDo::DisableLastprivateConditional) {
+ assert(CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled &&
+ "Expected list of disabled private vars.");
+ CGM.getOpenMPRuntime().LastprivateConditionalStack.pop_back();
+ }
+ if (Action == ActionToDo::PushAsLastprivateConditional) {
+ assert(
+ !CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled &&
+ "Expected list of lastprivate conditional vars.");
+ CGM.getOpenMPRuntime().LastprivateConditionalStack.pop_back();
+ }
}
-void CGOpenMPRuntime::initLastprivateConditionalCounter(
- CodeGenFunction &CGF, const OMPExecutableDirective &S) {
- if (CGM.getLangOpts().OpenMPSimd ||
- !llvm::any_of(S.getClausesOfKind<OMPLastprivateClause>(),
- [](const OMPLastprivateClause *C) {
- return C->getKind() == OMPC_LASTPRIVATE_conditional;
- }))
- return;
- const CGOpenMPRuntime::LastprivateConditionalData &Data =
- LastprivateConditionalStack.back();
- if (Data.UseOriginalIV)
- return;
- // Global loop counter. Required to handle inner parallel-for regions.
- // global_iv = iv;
- Address GlobIVAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
- CGF, Data.IVLVal.getType(), Data.IVName);
- LValue GlobIVLVal = CGF.MakeAddrLValue(GlobIVAddr, Data.IVLVal.getType());
- llvm::Value *IVVal = CGF.EmitLoadOfScalar(Data.IVLVal, S.getBeginLoc());
- CGF.EmitStoreOfScalar(IVVal, GlobIVLVal);
+Address CGOpenMPRuntime::emitLastprivateConditionalInit(CodeGenFunction &CGF,
+ const VarDecl *VD) {
+ ASTContext &C = CGM.getContext();
+ auto I = LastprivateConditionalToTypes.find(CGF.CurFn);
+ if (I == LastprivateConditionalToTypes.end())
+ I = LastprivateConditionalToTypes.try_emplace(CGF.CurFn).first;
+ QualType NewType;
+ const FieldDecl *VDField;
+ const FieldDecl *FiredField;
+ LValue BaseLVal;
+ auto VI = I->getSecond().find(VD);
+ if (VI == I->getSecond().end()) {
+ RecordDecl *RD = C.buildImplicitRecord("lasprivate.conditional");
+ RD->startDefinition();
+ VDField = addFieldToRecordDecl(C, RD, VD->getType().getNonReferenceType());
+ FiredField = addFieldToRecordDecl(C, RD, C.CharTy);
+ RD->completeDefinition();
+ NewType = C.getRecordType(RD);
+ Address Addr = CGF.CreateMemTemp(NewType, C.getDeclAlign(VD), VD->getName());
+ BaseLVal = CGF.MakeAddrLValue(Addr, NewType, AlignmentSource::Decl);
+ I->getSecond().try_emplace(VD, NewType, VDField, FiredField, BaseLVal);
+ } else {
+ NewType = std::get<0>(VI->getSecond());
+ VDField = std::get<1>(VI->getSecond());
+ FiredField = std::get<2>(VI->getSecond());
+ BaseLVal = std::get<3>(VI->getSecond());
+ }
+ LValue FiredLVal =
+ CGF.EmitLValueForField(BaseLVal, FiredField);
+ CGF.EmitStoreOfScalar(
+ llvm::ConstantInt::getNullValue(CGF.ConvertTypeForMem(C.CharTy)),
+ FiredLVal);
+ return CGF.EmitLValueForField(BaseLVal, VDField).getAddress(CGF);
}
namespace {
/// Checks if the lastprivate conditional variable is referenced in LHS.
class LastprivateConditionalRefChecker final
: public ConstStmtVisitor<LastprivateConditionalRefChecker, bool> {
- CodeGenFunction &CGF;
ArrayRef<CGOpenMPRuntime::LastprivateConditionalData> LPM;
const Expr *FoundE = nullptr;
const Decl *FoundD = nullptr;
StringRef UniqueDeclName;
LValue IVLVal;
- StringRef IVName;
+ llvm::Function *FoundFn = nullptr;
SourceLocation Loc;
- bool UseOriginalIV = false;
public:
bool VisitDeclRefExpr(const DeclRefExpr *E) {
for (const CGOpenMPRuntime::LastprivateConditionalData &D :
llvm::reverse(LPM)) {
- auto It = D.DeclToUniqeName.find(E->getDecl());
- if (It == D.DeclToUniqeName.end())
+ auto It = D.DeclToUniqueName.find(E->getDecl());
+ if (It == D.DeclToUniqueName.end())
continue;
+ if (D.Disabled)
+ return false;
FoundE = E;
FoundD = E->getDecl()->getCanonicalDecl();
- UniqueDeclName = It->getSecond();
+ UniqueDeclName = It->second;
IVLVal = D.IVLVal;
- IVName = D.IVName;
- UseOriginalIV = D.UseOriginalIV;
+ FoundFn = D.Fn;
break;
}
return FoundE == E;
}
bool VisitMemberExpr(const MemberExpr *E) {
- if (!CGF.IsWrappedCXXThis(E->getBase()))
+ if (!CodeGenFunction::IsWrappedCXXThis(E->getBase()))
return false;
for (const CGOpenMPRuntime::LastprivateConditionalData &D :
llvm::reverse(LPM)) {
- auto It = D.DeclToUniqeName.find(E->getMemberDecl());
- if (It == D.DeclToUniqeName.end())
+ auto It = D.DeclToUniqueName.find(E->getMemberDecl());
+ if (It == D.DeclToUniqueName.end())
continue;
+ if (D.Disabled)
+ return false;
FoundE = E;
FoundD = E->getMemberDecl()->getCanonicalDecl();
- UniqueDeclName = It->getSecond();
+ UniqueDeclName = It->second;
IVLVal = D.IVLVal;
- IVName = D.IVName;
- UseOriginalIV = D.UseOriginalIV;
+ FoundFn = D.Fn;
break;
}
return FoundE == E;
@@ -11465,62 +11506,41 @@ public:
return false;
}
explicit LastprivateConditionalRefChecker(
- CodeGenFunction &CGF,
ArrayRef<CGOpenMPRuntime::LastprivateConditionalData> LPM)
- : CGF(CGF), LPM(LPM) {}
- std::tuple<const Expr *, const Decl *, StringRef, LValue, StringRef, bool>
+ : LPM(LPM) {}
+ std::tuple<const Expr *, const Decl *, StringRef, LValue, llvm::Function *>
getFoundData() const {
- return std::make_tuple(FoundE, FoundD, UniqueDeclName, IVLVal, IVName,
- UseOriginalIV);
+ return std::make_tuple(FoundE, FoundD, UniqueDeclName, IVLVal, FoundFn);
}
};
} // namespace
-void CGOpenMPRuntime::checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
- const Expr *LHS) {
- if (CGF.getLangOpts().OpenMP < 50)
- return;
- LastprivateConditionalRefChecker Checker(CGF, LastprivateConditionalStack);
- if (!Checker.Visit(LHS))
- return;
- const Expr *FoundE;
- const Decl *FoundD;
- StringRef UniqueDeclName;
- LValue IVLVal;
- StringRef IVName;
- bool UseOriginalIV;
- std::tie(FoundE, FoundD, UniqueDeclName, IVLVal, IVName, UseOriginalIV) =
- Checker.getFoundData();
-
+void CGOpenMPRuntime::emitLastprivateConditionalUpdate(CodeGenFunction &CGF,
+ LValue IVLVal,
+ StringRef UniqueDeclName,
+ LValue LVal,
+ SourceLocation Loc) {
// Last updated loop counter for the lastprivate conditional var.
// int<xx> last_iv = 0;
llvm::Type *LLIVTy = CGF.ConvertTypeForMem(IVLVal.getType());
llvm::Constant *LastIV =
- getOrCreateInternalVariable(LLIVTy, UniqueDeclName + "$iv");
+ getOrCreateInternalVariable(LLIVTy, getName({UniqueDeclName, "iv"}));
cast<llvm::GlobalVariable>(LastIV)->setAlignment(
IVLVal.getAlignment().getAsAlign());
LValue LastIVLVal = CGF.MakeNaturalAlignAddrLValue(LastIV, IVLVal.getType());
- // Private address of the lastprivate conditional in the current context.
- // priv_a
- LValue LVal = CGF.EmitLValue(FoundE);
// Last value of the lastprivate conditional.
// decltype(priv_a) last_a;
llvm::Constant *Last = getOrCreateInternalVariable(
- LVal.getAddress(CGF).getElementType(), UniqueDeclName);
+ CGF.ConvertTypeForMem(LVal.getType()), UniqueDeclName);
cast<llvm::GlobalVariable>(Last)->setAlignment(
LVal.getAlignment().getAsAlign());
LValue LastLVal =
CGF.MakeAddrLValue(Last, LVal.getType(), LVal.getAlignment());
// Global loop counter. Required to handle inner parallel-for regions.
- // global_iv
- if (!UseOriginalIV) {
- Address IVAddr =
- getAddrOfArtificialThreadPrivate(CGF, IVLVal.getType(), IVName);
- IVLVal = CGF.MakeAddrLValue(IVAddr, IVLVal.getType());
- }
- llvm::Value *IVVal = CGF.EmitLoadOfScalar(IVLVal, FoundE->getExprLoc());
+ // iv
+ llvm::Value *IVVal = CGF.EmitLoadOfScalar(IVLVal, Loc);
// #pragma omp critical(a)
// if (last_iv <= iv) {
@@ -11528,11 +11548,10 @@ void CGOpenMPRuntime::checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
// last_a = priv_a;
// }
auto &&CodeGen = [&LastIVLVal, &IVLVal, IVVal, &LVal, &LastLVal,
- FoundE](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ Loc](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
- llvm::Value *LastIVVal =
- CGF.EmitLoadOfScalar(LastIVLVal, FoundE->getExprLoc());
- // (last_iv <= global_iv) ? Check if the variable is updated and store new
+ llvm::Value *LastIVVal = CGF.EmitLoadOfScalar(LastIVLVal, Loc);
+ // (last_iv <= iv) ? Check if the variable is updated and store new
// value in global var.
llvm::Value *CmpRes;
if (IVLVal.getType()->isSignedIntegerType()) {
@@ -11548,19 +11567,18 @@ void CGOpenMPRuntime::checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
// {
CGF.EmitBlock(ThenBB);
- // last_iv = global_iv;
+ // last_iv = iv;
CGF.EmitStoreOfScalar(IVVal, LastIVLVal);
// last_a = priv_a;
switch (CGF.getEvaluationKind(LVal.getType())) {
case TEK_Scalar: {
- llvm::Value *PrivVal = CGF.EmitLoadOfScalar(LVal, FoundE->getExprLoc());
+ llvm::Value *PrivVal = CGF.EmitLoadOfScalar(LVal, Loc);
CGF.EmitStoreOfScalar(PrivVal, LastLVal);
break;
}
case TEK_Complex: {
- CodeGenFunction::ComplexPairTy PrivVal =
- CGF.EmitLoadOfComplex(LVal, FoundE->getExprLoc());
+ CodeGenFunction::ComplexPairTy PrivVal = CGF.EmitLoadOfComplex(LVal, Loc);
CGF.EmitStoreOfComplex(PrivVal, LastLVal, /*isInit=*/false);
break;
}
@@ -11580,7 +11598,100 @@ void CGOpenMPRuntime::checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
RegionCodeGenTy ThenRCG(CodeGen);
ThenRCG(CGF);
} else {
- emitCriticalRegion(CGF, UniqueDeclName, CodeGen, FoundE->getExprLoc());
+ emitCriticalRegion(CGF, UniqueDeclName, CodeGen, Loc);
+ }
+}
+
+void CGOpenMPRuntime::checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
+ const Expr *LHS) {
+ if (CGF.getLangOpts().OpenMP < 50 || LastprivateConditionalStack.empty())
+ return;
+ LastprivateConditionalRefChecker Checker(LastprivateConditionalStack);
+ if (!Checker.Visit(LHS))
+ return;
+ const Expr *FoundE;
+ const Decl *FoundD;
+ StringRef UniqueDeclName;
+ LValue IVLVal;
+ llvm::Function *FoundFn;
+ std::tie(FoundE, FoundD, UniqueDeclName, IVLVal, FoundFn) =
+ Checker.getFoundData();
+ if (FoundFn != CGF.CurFn) {
+ // Special codegen for inner parallel regions.
+ // ((struct.lastprivate.conditional*)&priv_a)->Fired = 1;
+ auto It = LastprivateConditionalToTypes[FoundFn].find(FoundD);
+ assert(It != LastprivateConditionalToTypes[FoundFn].end() &&
+ "Lastprivate conditional is not found in outer region.");
+ QualType StructTy = std::get<0>(It->getSecond());
+ const FieldDecl* FiredDecl = std::get<2>(It->getSecond());
+ LValue PrivLVal = CGF.EmitLValue(FoundE);
+ Address StructAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ PrivLVal.getAddress(CGF),
+ CGF.ConvertTypeForMem(CGF.getContext().getPointerType(StructTy)));
+ LValue BaseLVal =
+ CGF.MakeAddrLValue(StructAddr, StructTy, AlignmentSource::Decl);
+ LValue FiredLVal = CGF.EmitLValueForField(BaseLVal, FiredDecl);
+ CGF.EmitAtomicStore(RValue::get(llvm::ConstantInt::get(
+ CGF.ConvertTypeForMem(FiredDecl->getType()), 1)),
+ FiredLVal, llvm::AtomicOrdering::Unordered,
+ /*IsVolatile=*/true, /*isInit=*/false);
+ return;
+ }
+
+ // Private address of the lastprivate conditional in the current context.
+ // priv_a
+ LValue LVal = CGF.EmitLValue(FoundE);
+ emitLastprivateConditionalUpdate(CGF, IVLVal, UniqueDeclName, LVal,
+ FoundE->getExprLoc());
+}
+
+void CGOpenMPRuntime::checkAndEmitSharedLastprivateConditional(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls) {
+ if (CGF.getLangOpts().OpenMP < 50 || LastprivateConditionalStack.empty())
+ return;
+ auto Range = llvm::reverse(LastprivateConditionalStack);
+ auto It = llvm::find_if(
+ Range, [](const LastprivateConditionalData &D) { return !D.Disabled; });
+ if (It == Range.end() || It->Fn != CGF.CurFn)
+ return;
+ auto LPCI = LastprivateConditionalToTypes.find(It->Fn);
+ assert(LPCI != LastprivateConditionalToTypes.end() &&
+ "Lastprivates must be registered already.");
+ SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
+ getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
+ const CapturedStmt *CS = D.getCapturedStmt(CaptureRegions.back());
+ for (const auto &Pair : It->DeclToUniqueName) {
+ const auto *VD = cast<VarDecl>(Pair.first->getCanonicalDecl());
+ if (!CS->capturesVariable(VD) || IgnoredDecls.count(VD) > 0)
+ continue;
+ auto I = LPCI->getSecond().find(Pair.first);
+ assert(I != LPCI->getSecond().end() &&
+ "Lastprivate must be rehistered already.");
+ // bool Cmp = priv_a.Fired != 0;
+ LValue BaseLVal = std::get<3>(I->getSecond());
+ LValue FiredLVal =
+ CGF.EmitLValueForField(BaseLVal, std::get<2>(I->getSecond()));
+ llvm::Value *Res = CGF.EmitLoadOfScalar(FiredLVal, D.getBeginLoc());
+ llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Res);
+ llvm::BasicBlock *ThenBB = CGF.createBasicBlock("lpc.then");
+ llvm::BasicBlock *DoneBB = CGF.createBasicBlock("lpc.done");
+ // if (Cmp) {
+ CGF.Builder.CreateCondBr(Cmp, ThenBB, DoneBB);
+ CGF.EmitBlock(ThenBB);
+ Address Addr = CGF.GetAddrOfLocalVar(VD);
+ LValue LVal;
+ if (VD->getType()->isReferenceType())
+ LVal = CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
+ AlignmentSource::Decl);
+ else
+ LVal = CGF.MakeAddrLValue(Addr, VD->getType().getNonReferenceType(),
+ AlignmentSource::Decl);
+ emitLastprivateConditionalUpdate(CGF, It->IVLVal, Pair.second, LVal,
+ D.getBeginLoc());
+ auto AL = ApplyDebugLocation::CreateArtificial(CGF);
+ CGF.EmitBlock(DoneBB, /*IsFinal=*/true);
+ // }
}
}
@@ -11589,10 +11700,10 @@ void CGOpenMPRuntime::emitLastprivateConditionalFinalUpdate(
SourceLocation Loc) {
if (CGF.getLangOpts().OpenMP < 50)
return;
- auto It = LastprivateConditionalStack.back().DeclToUniqeName.find(VD);
- assert(It != LastprivateConditionalStack.back().DeclToUniqeName.end() &&
+ auto It = LastprivateConditionalStack.back().DeclToUniqueName.find(VD);
+ assert(It != LastprivateConditionalStack.back().DeclToUniqueName.end() &&
"Unknown lastprivate conditional variable.");
- StringRef UniqueName = It->getSecond();
+ StringRef UniqueName = It->second;
llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(UniqueName);
// The variable was not updated in the region - exit.
if (!GV)
@@ -11750,7 +11861,8 @@ Address CGOpenMPSIMDRuntime::getAddrOfArtificialThreadPrivate(
void CGOpenMPSIMDRuntime::emitFlush(CodeGenFunction &CGF,
ArrayRef<const Expr *> Vars,
- SourceLocation Loc) {
+ SourceLocation Loc,
+ llvm::AtomicOrdering AO) {
llvm_unreachable("Not supported in SIMD-only mode");
}
@@ -11785,6 +11897,12 @@ llvm::Value *CGOpenMPSIMDRuntime::emitTaskReductionInit(
llvm_unreachable("Not supported in SIMD-only mode");
}
+void CGOpenMPSIMDRuntime::emitTaskReductionFini(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ bool IsWorksharingReduction) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
void CGOpenMPSIMDRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
SourceLocation Loc,
ReductionCodeGen &RCG,
@@ -11826,7 +11944,7 @@ void CGOpenMPSIMDRuntime::emitTargetOutlinedFunction(
void CGOpenMPSIMDRuntime::emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
- const Expr *Device,
+ llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter) {
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.h b/clang/lib/CodeGen/CGOpenMPRuntime.h
index 8159f5e8b790..eb22f155f5ef 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.h
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.h
@@ -20,12 +20,15 @@
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
+#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/AtomicOrdering.h"
namespace llvm {
class ArrayType;
@@ -35,6 +38,7 @@ class GlobalVariable;
class StructType;
class Type;
class Value;
+class OpenMPIRBuilder;
} // namespace llvm
namespace clang {
@@ -80,11 +84,10 @@ public:
template <typename Callable>
RegionCodeGenTy(
Callable &&CodeGen,
- typename std::enable_if<
- !std::is_same<typename std::remove_reference<Callable>::type,
- RegionCodeGenTy>::value>::type * = nullptr)
+ std::enable_if_t<!std::is_same<std::remove_reference_t<Callable>,
+ RegionCodeGenTy>::value> * = nullptr)
: CodeGen(reinterpret_cast<intptr_t>(&CodeGen)),
- Callback(CallbackFn<typename std::remove_reference<Callable>::type>),
+ Callback(CallbackFn<std::remove_reference_t<Callable>>),
PrePostAction(nullptr) {}
void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; }
void operator()(CodeGenFunction &CGF) const;
@@ -99,9 +102,18 @@ struct OMPTaskDataTy final {
SmallVector<const Expr *, 4> LastprivateVars;
SmallVector<const Expr *, 4> LastprivateCopies;
SmallVector<const Expr *, 4> ReductionVars;
+ SmallVector<const Expr *, 4> ReductionOrigs;
SmallVector<const Expr *, 4> ReductionCopies;
SmallVector<const Expr *, 4> ReductionOps;
- SmallVector<std::pair<OpenMPDependClauseKind, const Expr *>, 4> Dependences;
+ struct DependData {
+ OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
+ const Expr *IteratorExpr = nullptr;
+ SmallVector<const Expr *, 4> DepExprs;
+ explicit DependData() = default;
+ DependData(OpenMPDependClauseKind DepKind, const Expr *IteratorExpr)
+ : DepKind(DepKind), IteratorExpr(IteratorExpr) {}
+ };
+ SmallVector<DependData, 4> Dependences;
llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule;
llvm::PointerIntPair<llvm::Value *, 1, bool> Priority;
@@ -109,6 +121,8 @@ struct OMPTaskDataTy final {
unsigned NumberOfParts = 0;
bool Tied = true;
bool Nogroup = false;
+ bool IsReductionWithTaskMod = false;
+ bool IsWorksharingReduction = false;
};
/// Class intended to support codegen of all kind of the reduction clauses.
@@ -116,20 +130,26 @@ class ReductionCodeGen {
private:
/// Data required for codegen of reduction clauses.
struct ReductionData {
- /// Reference to the original shared item.
+ /// Reference to the item shared between tasks to reduce into.
+ const Expr *Shared = nullptr;
+ /// Reference to the original item.
const Expr *Ref = nullptr;
/// Helper expression for generation of private copy.
const Expr *Private = nullptr;
/// Helper expression for generation reduction operation.
const Expr *ReductionOp = nullptr;
- ReductionData(const Expr *Ref, const Expr *Private, const Expr *ReductionOp)
- : Ref(Ref), Private(Private), ReductionOp(ReductionOp) {}
+ ReductionData(const Expr *Shared, const Expr *Ref, const Expr *Private,
+ const Expr *ReductionOp)
+ : Shared(Shared), Ref(Ref), Private(Private), ReductionOp(ReductionOp) {
+ }
};
/// List of reduction-based clauses.
SmallVector<ReductionData, 4> ClausesData;
- /// List of addresses of original shared variables/expressions.
+ /// List of addresses of shared variables/expressions.
SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses;
+ /// List of addresses of original variables/expressions.
+ SmallVector<std::pair<LValue, LValue>, 4> OrigAddresses;
/// Sizes of the reduction items in chars.
SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes;
/// Base declarations for the reduction items.
@@ -149,12 +169,12 @@ private:
const OMPDeclareReductionDecl *DRD);
public:
- ReductionCodeGen(ArrayRef<const Expr *> Shareds,
+ ReductionCodeGen(ArrayRef<const Expr *> Shareds, ArrayRef<const Expr *> Origs,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> ReductionOps);
- /// Emits lvalue for a reduction item.
+ /// Emits lvalue for the shared and original reduction item.
/// \param N Number of the reduction item.
- void emitSharedLValue(CodeGenFunction &CGF, unsigned N);
+ void emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N);
/// Emits the code for the variable-modified type, if required.
/// \param N Number of the reduction item.
void emitAggregateType(CodeGenFunction &CGF, unsigned N);
@@ -186,6 +206,8 @@ public:
Address PrivateAddr);
/// Returns LValue for the reduction item.
LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; }
+ /// Returns LValue for the original reduction item.
+ LValue getOrigLValue(unsigned N) const { return OrigAddresses[N].first; }
/// Returns the size of the reduction item (in chars and total number of
/// elements in the item), or nullptr, if the size is a constant.
std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const {
@@ -230,26 +252,42 @@ public:
/// Also, stores the expression for the private loop counter and it
/// threaprivate name.
struct LastprivateConditionalData {
- llvm::SmallDenseMap<CanonicalDeclPtr<const Decl>, SmallString<16>>
- DeclToUniqeName;
+ llvm::MapVector<CanonicalDeclPtr<const Decl>, SmallString<16>>
+ DeclToUniqueName;
LValue IVLVal;
- SmallString<16> IVName;
- /// True if original lvalue for loop counter can be used in codegen (simd
- /// region or simd only mode) and no need to create threadprivate
- /// references.
- bool UseOriginalIV = false;
+ llvm::Function *Fn = nullptr;
+ bool Disabled = false;
};
/// Manages list of lastprivate conditional decls for the specified directive.
class LastprivateConditionalRAII {
+ enum class ActionToDo {
+ DoNotPush,
+ PushAsLastprivateConditional,
+ DisableLastprivateConditional,
+ };
CodeGenModule &CGM;
- const bool NeedToPush;
+ ActionToDo Action = ActionToDo::DoNotPush;
+
+ /// Check and try to disable analysis of inner regions for changes in
+ /// lastprivate conditional.
+ void tryToDisableInnerAnalysis(const OMPExecutableDirective &S,
+ llvm::DenseSet<CanonicalDeclPtr<const Decl>>
+ &NeedToAddForLPCsAsDisabled) const;
- public:
LastprivateConditionalRAII(CodeGenFunction &CGF,
- const OMPExecutableDirective &S, LValue IVLVal);
+ const OMPExecutableDirective &S);
+
+ public:
+ explicit LastprivateConditionalRAII(CodeGenFunction &CGF,
+ const OMPExecutableDirective &S,
+ LValue IVLVal);
+ static LastprivateConditionalRAII disable(CodeGenFunction &CGF,
+ const OMPExecutableDirective &S);
~LastprivateConditionalRAII();
};
+ llvm::OpenMPIRBuilder &getOMPBuilder() { return OMPBuilder; }
+
protected:
CodeGenModule &CGM;
StringRef FirstSeparator, Separator;
@@ -319,17 +357,6 @@ protected:
/// default location.
virtual unsigned getDefaultLocationReserved2Flags() const { return 0; }
- /// Tries to emit declare variant function for \p OldGD from \p NewGD.
- /// \param OrigAddr LLVM IR value for \p OldGD.
- /// \param IsForDefinition true, if requested emission for the definition of
- /// \p OldGD.
- /// \returns true, was able to emit a definition function for \p OldGD, which
- /// points to \p NewGD.
- virtual bool tryEmitDeclareVariant(const GlobalDecl &NewGD,
- const GlobalDecl &OldGD,
- llvm::GlobalValue *OrigAddr,
- bool IsForDefinition);
-
/// Returns default flags for the barriers depending on the directive, for
/// which this barier is going to be emitted.
static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind);
@@ -345,6 +372,8 @@ protected:
llvm::Value *getCriticalRegionLock(StringRef CriticalName);
private:
+ /// An OpenMP-IR-Builder instance.
+ llvm::OpenMPIRBuilder OMPBuilder;
/// Default const ident_t object used for initialization of all other
/// ident_t objects.
llvm::Constant *DefaultOpenMPPSource = nullptr;
@@ -392,6 +421,13 @@ private:
llvm::DenseMap<llvm::Function *,
SmallVector<const OMPDeclareMapperDecl *, 4>>;
FunctionUDMMapTy FunctionUDMMap;
+ /// Maps local variables marked as lastprivate conditional to their internal
+ /// types.
+ llvm::DenseMap<llvm::Function *,
+ llvm::DenseMap<CanonicalDeclPtr<const Decl>,
+ std::tuple<QualType, const FieldDecl *,
+ const FieldDecl *, LValue>>>
+ LastprivateConditionalToTypes;
/// Type kmp_critical_name, originally defined as typedef kmp_int32
/// kmp_critical_name[8];
llvm::ArrayType *KmpCriticalNameTy;
@@ -428,6 +464,16 @@ private:
/// } flags;
/// } kmp_depend_info_t;
QualType KmpDependInfoTy;
+ /// Type typedef struct kmp_task_affinity_info {
+ /// kmp_intptr_t base_addr;
+ /// size_t len;
+ /// struct {
+ /// bool flag1 : 1;
+ /// bool flag2 : 1;
+ /// kmp_int32 reserved : 30;
+ /// } flags;
+ /// } kmp_task_affinity_info_t;
+ QualType KmpTaskAffinityInfoTy;
/// struct kmp_dim { // loop bounds info casted to kmp_int64
/// kmp_int64 lo; // lower
/// kmp_int64 up; // upper
@@ -664,12 +710,6 @@ private:
/// must be emitted.
llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables;
- /// Mapping of the original functions to their variants and original global
- /// decl.
- llvm::MapVector<CanonicalDeclPtr<const FunctionDecl>,
- std::pair<GlobalDecl, GlobalDecl>>
- DeferredVariantFunction;
-
using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>;
/// Stack for list of declarations in current context marked as nontemporal.
/// The set is the union of all current stack elements.
@@ -684,6 +724,9 @@ private:
/// directive is present.
bool HasRequiresUnifiedSharedMemory = false;
+ /// Atomic ordering from the omp requires directive.
+ llvm::AtomicOrdering RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;
+
/// Flag for keeping track of weather a target region has been emitted.
bool HasEmittedTargetRegion = false;
@@ -710,11 +753,6 @@ private:
/// Returns pointer to kmpc_micro type.
llvm::Type *getKmpc_MicroPointerTy();
- /// Returns specified OpenMP runtime function.
- /// \param Function OpenMP runtime function.
- /// \return Specified function.
- llvm::FunctionCallee createRuntimeFunction(unsigned Function);
-
/// Returns __kmpc_for_static_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize,
@@ -826,6 +864,19 @@ private:
const OMPLoopDirective &D)>
SizeEmitter);
+ /// Emit update for lastprivate conditional data.
+ void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal,
+ StringRef UniqueDeclName, LValue LVal,
+ SourceLocation Loc);
+
+ /// Returns the number of the elements and the address of the depobj
+ /// dependency array.
+ /// \return Number of elements in depobj array and the pointer to the array of
+ /// dependencies.
+ std::pair<llvm::Value *, LValue> getDepobjElements(CodeGenFunction &CGF,
+ LValue DepobjLVal,
+ SourceLocation Loc);
+
public:
explicit CGOpenMPRuntime(CodeGenModule &CGM)
: CGOpenMPRuntime(CGM, ".", ".") {}
@@ -1220,7 +1271,7 @@ public:
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
- SourceLocation Loc);
+ SourceLocation Loc, llvm::AtomicOrdering AO);
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
@@ -1381,18 +1432,34 @@ public:
/// should be emitted for reduction:
/// \code
///
- /// _task_red_item_t red_data[n];
+ /// _taskred_item_t red_data[n];
/// ...
- /// red_data[i].shar = &origs[i];
+ /// red_data[i].shar = &shareds[i];
+ /// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
- /// void* tg1 = __kmpc_task_reduction_init(gtid, n, red_data);
+ /// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
/// \endcode
+ /// For reduction clause with task modifier it emits the next call:
+ /// \code
///
+ /// _taskred_item_t red_data[n];
+ /// ...
+ /// red_data[i].shar = &shareds[i];
+ /// red_data[i].orig = &origs[i];
+ /// red_data[i].size = sizeof(origs[i]);
+ /// red_data[i].f_init = (void*)RedInit<i>;
+ /// red_data[i].f_fini = (void*)RedDest<i>;
+ /// red_data[i].f_comb = (void*)RedOp<i>;
+ /// red_data[i].flags = <Flag_i>;
+ /// ...
+ /// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
+ /// red_data);
+ /// \endcode
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
@@ -1403,11 +1470,16 @@ public:
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data);
+ /// Emits the following code for reduction clause with task modifier:
+ /// \code
+ /// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
+ /// \endcode
+ virtual void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
+ bool IsWorksharingReduction);
+
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
- /// initializer/combiner/finalizer functions + emits threadprivate variable to
- /// store the pointer to the original reduction item for the custom
- /// initializer defined by declare reduction construct.
+ /// initializer/combiner/finalizer functions.
/// \param RCG Allows to reuse an existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
@@ -1467,16 +1539,16 @@ public:
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
- /// target directive, or null if no device clause is used.
+  /// target directive (or null if no device clause is used), and the device modifier.
/// \param SizeEmitter Callback to emit number of iterations for loop-based
/// directives.
- virtual void
- emitTargetCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
- llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID,
- const Expr *IfCond, const Expr *Device,
- llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
- const OMPLoopDirective &D)>
- SizeEmitter);
+ virtual void emitTargetCall(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
+ llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
+ llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
+ const OMPLoopDirective &D)>
+ SizeEmitter);
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
@@ -1675,7 +1747,10 @@ public:
/// Perform check on requires decl to ensure that target architecture
/// supports unified addressing
- virtual void checkArchForUnifiedAddressing(const OMPRequiresDecl *D);
+ virtual void processRequiresDirective(const OMPRequiresDecl *D);
+
+ /// Gets default memory ordering as specified in requires directive.
+ llvm::AtomicOrdering getDefaultMemoryOrdering() const;
/// Checks if the variable has associated OMPAllocateDeclAttr attribute with
/// the predefined allocator and translates it into the corresponding address
@@ -1685,17 +1760,13 @@ public:
/// Return whether the unified_shared_memory has been specified.
bool hasRequiresUnifiedSharedMemory() const;
- /// Emits the definition of the declare variant function.
- virtual bool emitDeclareVariant(GlobalDecl GD, bool IsForDefinition);
-
/// Checks if the \p VD variable is marked as nontemporal declaration in
/// current context.
bool isNontemporalDecl(const ValueDecl *VD) const;
- /// Initializes global counter for lastprivate conditional.
- virtual void
- initLastprivateConditionalCounter(CodeGenFunction &CGF,
- const OMPExecutableDirective &S);
+ /// Create specialized alloca to handle lastprivate conditionals.
+ Address emitLastprivateConditionalInit(CodeGenFunction &CGF,
+ const VarDecl *VD);
/// Checks if the provided \p LVal is lastprivate conditional and emits the
/// code to update the value of the original variable.
@@ -1713,6 +1784,30 @@ public:
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
const Expr *LHS);
+ /// Checks if the lastprivate conditional was updated in inner region and
+ /// writes the value.
+ /// \code
+ /// lastprivate(conditional: a)
+ /// ...
+ /// <type> a;bool Fired = false;
+ /// #pragma omp ... shared(a)
+ /// {
+ /// lp_a = ...;
+ /// Fired = true;
+ /// }
+ /// if (Fired) {
+ /// #pragma omp critical(a)
+ /// if (last_iv_a <= iv) {
+ /// last_iv_a = iv;
+ /// global_a = lp_a;
+ /// }
+ /// Fired = false;
+ /// }
+ /// \endcode
+ virtual void checkAndEmitSharedLastprivateConditional(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls);
+
/// Gets the address of the global copy used for lastprivate conditional
/// update, if any.
/// \param PrivLVal LValue for the private copy.
@@ -1721,6 +1816,41 @@ public:
LValue PrivLVal,
const VarDecl *VD,
SourceLocation Loc);
+
+  /// Emits list of dependencies based on the provided data (array of
+ /// dependence/expression pairs).
+ /// \returns Pointer to the first element of the array casted to VoidPtr type.
+ std::pair<llvm::Value *, Address>
+ emitDependClause(CodeGenFunction &CGF,
+ ArrayRef<OMPTaskDataTy::DependData> Dependencies,
+ SourceLocation Loc);
+
+  /// Emits list of dependencies based on the provided data (array of
+  /// dependence/expression pairs) for depobj construct. In this case, the
+  /// variable is allocated dynamically. \returns Pointer to the first
+ /// element of the array casted to VoidPtr type.
+ Address emitDepobjDependClause(CodeGenFunction &CGF,
+ const OMPTaskDataTy::DependData &Dependencies,
+ SourceLocation Loc);
+
+ /// Emits the code to destroy the dependency object provided in depobj
+ /// directive.
+ void emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
+ SourceLocation Loc);
+
+ /// Updates the dependency kind in the specified depobj object.
+ /// \param DepobjLVal LValue for the main depobj object.
+ /// \param NewDepKind New dependency kind.
+ void emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
+ OpenMPDependClauseKind NewDepKind, SourceLocation Loc);
+
+ /// Initializes user defined allocators specified in the uses_allocators
+ /// clauses.
+ void emitUsesAllocatorsInit(CodeGenFunction &CGF, const Expr *Allocator,
+ const Expr *AllocatorTraits);
+
+ /// Destroys user defined allocators specified in the uses_allocators clause.
+ void emitUsesAllocatorsFini(CodeGenFunction &CGF, const Expr *Allocator);
};
/// Class supports emissionof SIMD-only code.
@@ -1985,7 +2115,7 @@ public:
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
- SourceLocation Loc) override;
+ SourceLocation Loc, llvm::AtomicOrdering AO) override;
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
@@ -2107,18 +2237,34 @@ public:
/// should be emitted for reduction:
/// \code
///
- /// _task_red_item_t red_data[n];
+ /// _taskred_item_t red_data[n];
/// ...
- /// red_data[i].shar = &origs[i];
+ /// red_data[i].shar = &shareds[i];
+ /// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
- /// void* tg1 = __kmpc_task_reduction_init(gtid, n, red_data);
+ /// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
/// \endcode
+ /// For reduction clause with task modifier it emits the next call:
+ /// \code
///
+ /// _taskred_item_t red_data[n];
+ /// ...
+ /// red_data[i].shar = &shareds[i];
+ /// red_data[i].orig = &origs[i];
+ /// red_data[i].size = sizeof(origs[i]);
+ /// red_data[i].f_init = (void*)RedInit<i>;
+ /// red_data[i].f_fini = (void*)RedDest<i>;
+ /// red_data[i].f_comb = (void*)RedOp<i>;
+ /// red_data[i].flags = <Flag_i>;
+ /// ...
+ /// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
+ /// red_data);
+ /// \endcode
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
@@ -2128,6 +2274,13 @@ public:
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data) override;
+ /// Emits the following code for reduction clause with task modifier:
+ /// \code
+ /// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
+ /// \endcode
+ void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
+ bool IsWorksharingReduction) override;
+
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions + emits threadprivate variable to
@@ -2191,14 +2344,14 @@ public:
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
- /// target directive, or null if no device clause is used.
- void
- emitTargetCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
- llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID,
- const Expr *IfCond, const Expr *Device,
- llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
- const OMPLoopDirective &D)>
- SizeEmitter) override;
+ /// target directive, or null if no device clause is used and device modifier.
+ void emitTargetCall(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
+ llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
+ llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
+ const OMPLoopDirective &D)>
+ SizeEmitter) override;
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
index d00d84b79cfe..cbd443134e7a 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
@@ -38,11 +38,9 @@ enum OpenMPRTLFunctionNVPTX {
/// Call to void __kmpc_spmd_kernel_deinit_v2(int16_t RequiresOMPRuntime);
OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2,
/// Call to void __kmpc_kernel_prepare_parallel(void
- /// *outlined_function, int16_t
- /// IsOMPRuntimeInitialized);
+ /// *outlined_function);
OMPRTL_NVPTX__kmpc_kernel_prepare_parallel,
- /// Call to bool __kmpc_kernel_parallel(void **outlined_function,
- /// int16_t IsOMPRuntimeInitialized);
+ /// Call to bool __kmpc_kernel_parallel(void **outlined_function);
OMPRTL_NVPTX__kmpc_kernel_parallel,
/// Call to void __kmpc_kernel_end_parallel();
OMPRTL_NVPTX__kmpc_kernel_end_parallel,
@@ -85,6 +83,9 @@ enum OpenMPRTLFunctionNVPTX {
/// Call to void* __kmpc_data_sharing_coalesced_push_stack(size_t size,
/// int16_t UseSharedMemory);
OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack,
+ /// Call to void* __kmpc_data_sharing_push_stack(size_t size, int16_t
+ /// UseSharedMemory);
+ OMPRTL_NVPTX__kmpc_data_sharing_push_stack,
/// Call to void __kmpc_data_sharing_pop_stack(void *a);
OMPRTL_NVPTX__kmpc_data_sharing_pop_stack,
/// Call to void __kmpc_begin_sharing_variables(void ***args,
@@ -341,8 +342,7 @@ class CheckVarsEscapingDeclContext final
if (!Attr)
return;
if (((Attr->getCaptureKind() != OMPC_map) &&
- !isOpenMPPrivate(
- static_cast<OpenMPClauseKind>(Attr->getCaptureKind()))) ||
+ !isOpenMPPrivate(Attr->getCaptureKind())) ||
((Attr->getCaptureKind() == OMPC_map) &&
!FD->getType()->isAnyPointerType()))
return;
@@ -786,6 +786,8 @@ static bool hasNestedSPMDDirective(ASTContext &Ctx,
case OMPD_taskgroup:
case OMPD_atomic:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_teams:
case OMPD_target_data:
case OMPD_target_exit_data:
@@ -801,6 +803,8 @@ static bool hasNestedSPMDDirective(ASTContext &Ctx,
case OMPD_target_update:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
@@ -813,6 +817,7 @@ static bool hasNestedSPMDDirective(ASTContext &Ctx,
case OMPD_parallel_master_taskloop_simd:
case OMPD_requires:
case OMPD_unknown:
+ default:
llvm_unreachable("Unexpected directive.");
}
}
@@ -862,6 +867,8 @@ static bool supportsSPMDExecutionMode(ASTContext &Ctx,
case OMPD_taskgroup:
case OMPD_atomic:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_teams:
case OMPD_target_data:
case OMPD_target_exit_data:
@@ -877,6 +884,8 @@ static bool supportsSPMDExecutionMode(ASTContext &Ctx,
case OMPD_target_update:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
@@ -889,6 +898,7 @@ static bool supportsSPMDExecutionMode(ASTContext &Ctx,
case OMPD_parallel_master_taskloop_simd:
case OMPD_requires:
case OMPD_unknown:
+ default:
break;
}
llvm_unreachable(
@@ -1031,6 +1041,8 @@ static bool hasNestedLightweightDirective(ASTContext &Ctx,
case OMPD_taskgroup:
case OMPD_atomic:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_teams:
case OMPD_target_data:
case OMPD_target_exit_data:
@@ -1046,6 +1058,8 @@ static bool hasNestedLightweightDirective(ASTContext &Ctx,
case OMPD_target_update:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
@@ -1058,6 +1072,7 @@ static bool hasNestedLightweightDirective(ASTContext &Ctx,
case OMPD_parallel_master_taskloop_simd:
case OMPD_requires:
case OMPD_unknown:
+ default:
llvm_unreachable("Unexpected directive.");
}
}
@@ -1113,6 +1128,8 @@ static bool supportsLightweightRuntime(ASTContext &Ctx,
case OMPD_taskgroup:
case OMPD_atomic:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_teams:
case OMPD_target_data:
case OMPD_target_exit_data:
@@ -1128,6 +1145,8 @@ static bool supportsLightweightRuntime(ASTContext &Ctx,
case OMPD_target_update:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_declare_reduction:
@@ -1140,6 +1159,7 @@ static bool supportsLightweightRuntime(ASTContext &Ctx,
case OMPD_parallel_master_taskloop_simd:
case OMPD_requires:
case OMPD_unknown:
+ default:
break;
}
llvm_unreachable(
@@ -1444,8 +1464,7 @@ void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
CGF.InitTempAlloca(WorkFn, llvm::Constant::getNullValue(CGF.Int8PtrTy));
// TODO: Optimize runtime initialization and pass in correct value.
- llvm::Value *Args[] = {WorkFn.getPointer(),
- /*RequiresOMPRuntime=*/Bld.getInt16(1)};
+ llvm::Value *Args[] = {WorkFn.getPointer()};
llvm::Value *Ret = CGF.EmitRuntimeCall(
createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_parallel), Args);
Bld.CreateStore(Bld.CreateZExt(Ret, CGF.Int8Ty), ExecStatus);
@@ -1573,17 +1592,16 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
}
case OMPRTL_NVPTX__kmpc_kernel_prepare_parallel: {
/// Build void __kmpc_kernel_prepare_parallel(
- /// void *outlined_function, int16_t IsOMPRuntimeInitialized);
- llvm::Type *TypeParams[] = {CGM.Int8PtrTy, CGM.Int16Ty};
+ /// void *outlined_function);
+ llvm::Type *TypeParams[] = {CGM.Int8PtrTy};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_prepare_parallel");
break;
}
case OMPRTL_NVPTX__kmpc_kernel_parallel: {
- /// Build bool __kmpc_kernel_parallel(void **outlined_function,
- /// int16_t IsOMPRuntimeInitialized);
- llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy, CGM.Int16Ty};
+ /// Build bool __kmpc_kernel_parallel(void **outlined_function);
+ llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy};
llvm::Type *RetTy = CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
auto *FnTy =
llvm::FunctionType::get(RetTy, TypeParams, /*isVarArg*/ false);
@@ -1738,6 +1756,16 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
FnTy, /*Name=*/"__kmpc_data_sharing_coalesced_push_stack");
break;
}
+ case OMPRTL_NVPTX__kmpc_data_sharing_push_stack: {
+ // Build void *__kmpc_data_sharing_push_stack(size_t size, int16_t
+ // UseSharedMemory);
+ llvm::Type *TypeParams[] = {CGM.SizeTy, CGM.Int16Ty};
+ auto *FnTy =
+ llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(
+ FnTy, /*Name=*/"__kmpc_data_sharing_push_stack");
+ break;
+ }
case OMPRTL_NVPTX__kmpc_data_sharing_pop_stack: {
// Build void __kmpc_data_sharing_pop_stack(void *a);
llvm::Type *TypeParams[] = {CGM.VoidPtrTy};
@@ -1915,19 +1943,6 @@ unsigned CGOpenMPRuntimeNVPTX::getDefaultLocationReserved2Flags() const {
llvm_unreachable("Unknown flags are requested.");
}
-bool CGOpenMPRuntimeNVPTX::tryEmitDeclareVariant(const GlobalDecl &NewGD,
- const GlobalDecl &OldGD,
- llvm::GlobalValue *OrigAddr,
- bool IsForDefinition) {
- // Emit the function in OldGD with the body from NewGD, if NewGD is defined.
- auto *NewFD = cast<FunctionDecl>(NewGD.getDecl());
- if (NewFD->isDefined()) {
- CGM.emitOpenMPDeviceFunctionRedefinition(OldGD, NewGD, OrigAddr);
- return true;
- }
- return false;
-}
-
CGOpenMPRuntimeNVPTX::CGOpenMPRuntimeNVPTX(CodeGenModule &CGM)
: CGOpenMPRuntime(CGM, "_", "$") {
if (!CGM.getLangOpts().OpenMPIsDevice)
@@ -2208,7 +2223,7 @@ void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
GlobalRecCastAddr = Phi;
I->getSecond().GlobalRecordAddr = Phi;
I->getSecond().IsInSPMDModeFlag = IsSPMD;
- } else if (IsInTTDRegion) {
+ } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
assert(GlobalizedRecords.back().Records.size() < 2 &&
"Expected less than 2 globalized records: one for target and one "
"for teams.");
@@ -2281,12 +2296,16 @@ void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
} else {
// TODO: allow the usage of shared memory to be controlled by
// the user, for now, default to global.
+ bool UseSharedMemory =
+ IsInTTDRegion && GlobalRecordSize <= SharedMemorySize;
llvm::Value *GlobalRecordSizeArg[] = {
llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize),
- CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
+ CGF.Builder.getInt16(UseSharedMemory ? 1 : 0)};
llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
createNVPTXRuntimeFunction(
- OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
+ IsInTTDRegion
+ ? OMPRTL_NVPTX__kmpc_data_sharing_push_stack
+ : OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
GlobalRecordSizeArg);
GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
GlobalRecValue, GlobalRecPtrTy);
@@ -2433,7 +2452,7 @@ void CGOpenMPRuntimeNVPTX::emitGenericVarsEpilog(CodeGenFunction &CGF,
OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
CGF.EmitCastToVoidPtr(I->getSecond().GlobalRecordAddr));
CGF.EmitBlock(ExitBB);
- } else if (IsInTTDRegion) {
+ } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
assert(GlobalizedRecords.back().RegionCounter > 0 &&
"region counter must be > 0.");
--GlobalizedRecords.back().RegionCounter;
@@ -2546,7 +2565,7 @@ void CGOpenMPRuntimeNVPTX::emitNonSPMDParallelCall(
llvm::Value *ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
// Prepare for parallel region. Indicate the outlined function.
- llvm::Value *Args[] = {ID, /*RequiresOMPRuntime=*/Bld.getInt16(1)};
+ llvm::Value *Args[] = {ID};
CGF.EmitRuntimeCall(
createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_prepare_parallel),
Args);
@@ -4754,6 +4773,7 @@ Address CGOpenMPRuntimeNVPTX::getAddressOfLocalVariable(CodeGenFunction &CGF,
switch (A->getAllocatorType()) {
// Use the default allocator here as by default local vars are
// threadlocal.
+ case OMPAllocateDeclAttr::OMPNullMemAlloc:
case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
case OMPAllocateDeclAttr::OMPThreadMemAlloc:
case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
@@ -4920,6 +4940,7 @@ bool CGOpenMPRuntimeNVPTX::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
return false;
const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
switch(A->getAllocatorType()) {
+ case OMPAllocateDeclAttr::OMPNullMemAlloc:
case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
// Not supported, fallback to the default mem space.
case OMPAllocateDeclAttr::OMPThreadMemAlloc:
@@ -4962,7 +4983,7 @@ static CudaArch getCudaArch(CodeGenModule &CGM) {
/// Check to see if target architecture supports unified addressing which is
/// a restriction for OpenMP requires clause "unified_shared_memory".
-void CGOpenMPRuntimeNVPTX::checkArchForUnifiedAddressing(
+void CGOpenMPRuntimeNVPTX::processRequiresDirective(
const OMPRequiresDecl *D) {
for (const OMPClause *Clause : D->clauselists()) {
if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
@@ -4990,6 +5011,7 @@ void CGOpenMPRuntimeNVPTX::checkArchForUnifiedAddressing(
case CudaArch::SM_70:
case CudaArch::SM_72:
case CudaArch::SM_75:
+ case CudaArch::SM_80:
case CudaArch::GFX600:
case CudaArch::GFX601:
case CudaArch::GFX700:
@@ -5010,6 +5032,7 @@ void CGOpenMPRuntimeNVPTX::checkArchForUnifiedAddressing(
case CudaArch::GFX1010:
case CudaArch::GFX1011:
case CudaArch::GFX1012:
+ case CudaArch::GFX1030:
case CudaArch::UNKNOWN:
break;
case CudaArch::LAST:
@@ -5017,7 +5040,7 @@ void CGOpenMPRuntimeNVPTX::checkArchForUnifiedAddressing(
}
}
}
- CGOpenMPRuntime::checkArchForUnifiedAddressing(D);
+ CGOpenMPRuntime::processRequiresDirective(D);
}
/// Get number of SMs and number of blocks per SM.
@@ -5047,6 +5070,7 @@ static std::pair<unsigned, unsigned> getSMsBlocksPerSM(CodeGenModule &CGM) {
case CudaArch::SM_70:
case CudaArch::SM_72:
case CudaArch::SM_75:
+ case CudaArch::SM_80:
return {84, 32};
case CudaArch::GFX600:
case CudaArch::GFX601:
@@ -5068,6 +5092,7 @@ static std::pair<unsigned, unsigned> getSMsBlocksPerSM(CodeGenModule &CGM) {
case CudaArch::GFX1010:
case CudaArch::GFX1011:
case CudaArch::GFX1012:
+ case CudaArch::GFX1030:
case CudaArch::UNKNOWN:
break;
case CudaArch::LAST:
@@ -5077,7 +5102,8 @@ static std::pair<unsigned, unsigned> getSMsBlocksPerSM(CodeGenModule &CGM) {
}
void CGOpenMPRuntimeNVPTX::clear() {
- if (!GlobalizedRecords.empty()) {
+ if (!GlobalizedRecords.empty() &&
+ !CGM.getLangOpts().OpenMPCUDATargetParallel) {
ASTContext &C = CGM.getContext();
llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> GlobalRecs;
llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> SharedRecs;
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.h b/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
index 4159af0a622f..c52ae43817c7 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
@@ -193,18 +193,6 @@ protected:
/// Full/Lightweight runtime mode. Used for better optimization.
unsigned getDefaultLocationReserved2Flags() const override;
- /// Tries to emit declare variant function for \p OldGD from \p NewGD.
- /// \param OrigAddr LLVM IR value for \p OldGD.
- /// \param IsForDefinition true, if requested emission for the definition of
- /// \p OldGD.
- /// \returns true, was able to emit a definition function for \p OldGD, which
- /// points to \p NewGD.
- /// NVPTX backend does not support global aliases, so just use the function,
- /// emitted for \p NewGD instead of \p OldGD.
- bool tryEmitDeclareVariant(const GlobalDecl &NewGD, const GlobalDecl &OldGD,
- llvm::GlobalValue *OrigAddr,
- bool IsForDefinition) override;
-
public:
explicit CGOpenMPRuntimeNVPTX(CodeGenModule &CGM);
void clear() override;
@@ -395,7 +383,7 @@ public:
/// Perform check on requires decl to ensure that target architecture
/// supports unified addressing
- void checkArchForUnifiedAddressing(const OMPRequiresDecl *D) override;
+ void processRequiresDirective(const OMPRequiresDecl *D) override;
/// Returns default address space for the constant firstprivates, __constant__
/// address space by default.
diff --git a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 4de64a32f2ac..4e5d1d3f16f6 100644
--- a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -385,7 +385,8 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
Run = FieldEnd;
continue;
}
- llvm::Type *Type = Types.ConvertTypeForMem(Field->getType());
+ llvm::Type *Type =
+ Types.ConvertTypeForMem(Field->getType(), /*ForBitFields=*/true);
// If we don't have a run yet, or don't live within the previous run's
// allocated storage then we allocate some storage and start a new run.
if (Run == FieldEnd || BitOffset >= Tail) {
@@ -405,15 +406,17 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
return;
}
- // Check if OffsetInRecord is better as a single field run. When OffsetInRecord
- // has legal integer width, and its bitfield offset is naturally aligned, it
- // is better to make the bitfield a separate storage component so as it can be
- // accessed directly with lower cost.
+ // Check if OffsetInRecord (the size in bits of the current run) is better
+ // as a single field run. When OffsetInRecord has legal integer width, and
+ // its bitfield offset is naturally aligned, it is better to make the
+ // bitfield a separate storage component so as it can be accessed directly
+ // with lower cost.
auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord,
uint64_t StartBitOffset) {
if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
return false;
- if (!DataLayout.isLegalInteger(OffsetInRecord))
+ if (OffsetInRecord < 8 || !llvm::isPowerOf2_64(OffsetInRecord) ||
+ !DataLayout.fitsInLegalInteger(OffsetInRecord))
return false;
+ // Make sure StartBitOffset is naturally aligned if it is treated as an
// IType integer.
@@ -729,8 +732,8 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageOffset);
}
-CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
- llvm::StructType *Ty) {
+std::unique_ptr<CGRecordLayout>
+CodeGenTypes::ComputeRecordLayout(const RecordDecl *D, llvm::StructType *Ty) {
CGRecordLowering Builder(*this, D, /*Packed=*/false);
Builder.lower(/*NonVirtualBaseType=*/false);
@@ -757,9 +760,9 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
// but we may need to recursively layout D while laying D out as a base type.
Ty->setBody(Builder.FieldTypes, Builder.Packed);
- CGRecordLayout *RL =
- new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
- Builder.IsZeroInitializableAsBase);
+ auto RL = std::make_unique<CGRecordLayout>(
+ Ty, BaseTy, (bool)Builder.IsZeroInitializable,
+ (bool)Builder.IsZeroInitializableAsBase);
RL->NonVirtualBases.swap(Builder.NonVirtualBases);
RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index 138459c68dbf..672909849bb7 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "CGDebugInfo.h"
+#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
@@ -18,12 +19,14 @@
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
+#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
using namespace CodeGen;
@@ -246,6 +249,12 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
case Stmt::OMPFlushDirectiveClass:
EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
break;
+ case Stmt::OMPDepobjDirectiveClass:
+ EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
+ break;
+ case Stmt::OMPScanDirectiveClass:
+ EmitOMPScanDirective(cast<OMPScanDirective>(*S));
+ break;
case Stmt::OMPOrderedDirectiveClass:
EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
break;
@@ -601,6 +610,13 @@ void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
}
void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
+ bool nomerge = false;
+ for (const auto *A : S.getAttrs())
+ if (A->getKind() == attr::NoMerge) {
+ nomerge = true;
+ break;
+ }
+ SaveAndRestore<bool> save_nomerge(InNoMergeAttributedStmt, nomerge);
EmitStmt(S.getSubStmt(), S.getAttrs());
}
@@ -721,8 +737,8 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
EmitBlock(LoopHeader.getBlock());
const SourceRange &R = S.getSourceRange();
- LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), WhileAttrs,
- SourceLocToDebugLoc(R.getBegin()),
+ LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
+ WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
SourceLocToDebugLoc(R.getEnd()));
// Create an exit block for when the condition fails, which will
@@ -823,7 +839,7 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S,
EmitBlock(LoopCond.getBlock());
const SourceRange &R = S.getSourceRange();
- LoopStack.push(LoopBody, CGM.getContext(), DoAttrs,
+ LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
SourceLocToDebugLoc(R.getBegin()),
SourceLocToDebugLoc(R.getEnd()));
@@ -881,7 +897,7 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
EmitBlock(CondBlock);
const SourceRange &R = S.getSourceRange();
- LoopStack.push(CondBlock, CGM.getContext(), ForAttrs,
+ LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
SourceLocToDebugLoc(R.getBegin()),
SourceLocToDebugLoc(R.getEnd()));
@@ -982,7 +998,7 @@ CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
EmitBlock(CondBlock);
const SourceRange &R = S.getSourceRange();
- LoopStack.push(CondBlock, CGM.getContext(), ForAttrs,
+ LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
SourceLocToDebugLoc(R.getBegin()),
SourceLocToDebugLoc(R.getEnd()));
@@ -1054,6 +1070,19 @@ void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
EmitBranchThroughCleanup(ReturnBlock);
}
+namespace {
+// RAII struct used to save and restore a return statement's result expression.
+struct SaveRetExprRAII {
+ SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
+ : OldRetExpr(CGF.RetExpr), CGF(CGF) {
+ CGF.RetExpr = RetExpr;
+ }
+ ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
+ const Expr *OldRetExpr;
+ CodeGenFunction &CGF;
+};
+} // namespace
+
/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void. Fun stuff :).
@@ -1079,20 +1108,28 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
// Emit the result value, even if unused, to evaluate the side effects.
const Expr *RV = S.getRetValue();
- // Treat block literals in a return expression as if they appeared
- // in their own scope. This permits a small, easily-implemented
- // exception to our over-conservative rules about not jumping to
- // statements following block literals with non-trivial cleanups.
- RunCleanupsScope cleanupScope(*this);
- if (const FullExpr *fe = dyn_cast_or_null<FullExpr>(RV)) {
- enterFullExpression(fe);
- RV = fe->getSubExpr();
- }
+ // Record the result expression of the return statement. The recorded
+ // expression is used to determine whether a block capture's lifetime should
+ // end at the end of the full expression as opposed to the end of the scope
+ // enclosing the block expression.
+ //
+ // This permits a small, easily-implemented exception to our over-conservative
+ // rules about not jumping to statements following block literals with
+ // non-trivial cleanups.
+ SaveRetExprRAII SaveRetExpr(RV, *this);
+ RunCleanupsScope cleanupScope(*this);
+ if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
+ RV = EWC->getSubExpr();
// FIXME: Clean this up by using an LValue for ReturnTemp,
// EmitStoreThroughLValue, and EmitAnyExpr.
- if (getLangOpts().ElideConstructors &&
- S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) {
+ // Check if the NRVO candidate was not globalized in OpenMP mode.
+ if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
+ S.getNRVOCandidate()->isNRVOVariable() &&
+ (!getLangOpts().OpenMP ||
+ !CGM.getOpenMPRuntime()
+ .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
+ .isValid())) {
// Apply the named return value optimization for this return statement,
// which means doing nothing: the appropriate result has already been
// constructed into the NRVO variable.
@@ -2091,8 +2128,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Update largest vector width for any vector types.
if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
- LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
- VT->getPrimitiveSizeInBits().getFixedSize());
+ LargestVectorWidth =
+ std::max((uint64_t)LargestVectorWidth,
+ VT->getPrimitiveSizeInBits().getKnownMinSize());
} else {
ArgTypes.push_back(Dest.getAddress(*this).getType());
Args.push_back(Dest.getPointer(*this));
@@ -2116,8 +2154,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Update largest vector width for any vector types.
if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
- LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
- VT->getPrimitiveSizeInBits().getFixedSize());
+ LargestVectorWidth =
+ std::max((uint64_t)LargestVectorWidth,
+ VT->getPrimitiveSizeInBits().getKnownMinSize());
if (Info.allowsRegister())
InOutConstraints += llvm::utostr(i);
else
@@ -2203,21 +2242,15 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Update largest vector width for any vector types.
if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
- LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
- VT->getPrimitiveSizeInBits().getFixedSize());
+ LargestVectorWidth =
+ std::max((uint64_t)LargestVectorWidth,
+ VT->getPrimitiveSizeInBits().getKnownMinSize());
ArgTypes.push_back(Arg->getType());
Args.push_back(Arg);
Constraints += InputConstraint;
}
- // Append the "input" part of inout constraints last.
- for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
- ArgTypes.push_back(InOutArgTypes[i]);
- Args.push_back(InOutArgs[i]);
- }
- Constraints += InOutConstraints;
-
// Labels
SmallVector<llvm::BasicBlock *, 16> Transfer;
llvm::BasicBlock *Fallthrough = nullptr;
@@ -2225,7 +2258,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
IsGCCAsmGoto = GS->isAsmGoto();
if (IsGCCAsmGoto) {
- for (auto *E : GS->labels()) {
+ for (const auto *E : GS->labels()) {
JumpDest Dest = getJumpDestForLabel(E->getLabel());
Transfer.push_back(Dest.getBlock());
llvm::BlockAddress *BA =
@@ -2236,19 +2269,31 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
Constraints += ',';
Constraints += 'X';
}
- StringRef Name = "asm.fallthrough";
- Fallthrough = createBasicBlock(Name);
+ Fallthrough = createBasicBlock("asm.fallthrough");
}
}
+ // Append the "input" part of inout constraints last.
+ for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
+ ArgTypes.push_back(InOutArgTypes[i]);
+ Args.push_back(InOutArgs[i]);
+ }
+ Constraints += InOutConstraints;
+
// Clobbers
for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
StringRef Clobber = S.getClobber(i);
if (Clobber == "memory")
ReadOnly = ReadNone = false;
- else if (Clobber != "cc")
+ else if (Clobber != "cc") {
Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
+ if (CGM.getCodeGenOpts().StackClashProtector &&
+ getTarget().isSPRegName(Clobber)) {
+ CGM.getDiags().Report(S.getAsmLoc(),
+ diag::warn_stack_clash_protection_inline_asm);
+ }
+ }
if (!Constraints.empty())
Constraints += ',';
@@ -2287,9 +2332,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
if (IsGCCAsmGoto) {
llvm::CallBrInst *Result =
Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
+ EmitBlock(Fallthrough);
UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly,
ReadNone, S, ResultRegTypes, *this, RegResults);
- EmitBlock(Fallthrough);
} else {
llvm::CallInst *Result =
Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp
index dc3899f0e4ea..cfd5eda8cc80 100644
--- a/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -18,14 +18,22 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
+#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
+#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PrettyStackTrace.h"
+#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/Support/AtomicOrdering.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;
+static const VarDecl *getBaseDecl(const Expr *Ref);
+
namespace {
/// Lexical scope for OpenMP executable constructs, that handles correct codegen
/// for captured expressions.
@@ -53,7 +61,8 @@ class OMPLexicalScope : public CodeGenFunction::LexicalScope {
static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
return CGF.LambdaCaptureFields.lookup(VD) ||
(CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
- (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl));
+ (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
+ cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
}
public:
@@ -214,6 +223,12 @@ public:
if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
CGF.EmitVarDecl(*OED);
}
+ } else if (const auto *UDP = dyn_cast<OMPUseDeviceAddrClause>(C)) {
+ for (const Expr *E : UDP->varlists()) {
+ const Decl *D = getBaseDecl(E);
+ if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
+ CGF.EmitVarDecl(*OED);
+ }
}
}
if (!isOpenMPSimdDirective(S.getDirectiveKind()))
@@ -365,26 +380,28 @@ static QualType getCanonicalParamType(ASTContext &C, QualType T) {
}
namespace {
- /// Contains required data for proper outlined function codegen.
- struct FunctionOptions {
- /// Captured statement for which the function is generated.
- const CapturedStmt *S = nullptr;
- /// true if cast to/from UIntPtr is required for variables captured by
- /// value.
- const bool UIntPtrCastRequired = true;
- /// true if only casted arguments must be registered as local args or VLA
- /// sizes.
- const bool RegisterCastedArgsOnly = false;
- /// Name of the generated function.
- const StringRef FunctionName;
- explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired,
- bool RegisterCastedArgsOnly,
- StringRef FunctionName)
- : S(S), UIntPtrCastRequired(UIntPtrCastRequired),
- RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly),
- FunctionName(FunctionName) {}
- };
-}
+/// Contains required data for proper outlined function codegen.
+struct FunctionOptions {
+ /// Captured statement for which the function is generated.
+ const CapturedStmt *S = nullptr;
+ /// true if cast to/from UIntPtr is required for variables captured by
+ /// value.
+ const bool UIntPtrCastRequired = true;
+ /// true if only casted arguments must be registered as local args or VLA
+ /// sizes.
+ const bool RegisterCastedArgsOnly = false;
+ /// Name of the generated function.
+ const StringRef FunctionName;
+ /// Location of the non-debug version of the outlined function.
+ SourceLocation Loc;
+ explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired,
+ bool RegisterCastedArgsOnly, StringRef FunctionName,
+ SourceLocation Loc)
+ : S(S), UIntPtrCastRequired(UIntPtrCastRequired),
+ RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly),
+ FunctionName(FunctionName), Loc(Loc) {}
+};
+} // namespace
static llvm::Function *emitOutlinedFunctionPrologue(
CodeGenFunction &CGF, FunctionArgList &Args,
@@ -485,7 +502,9 @@ static llvm::Function *emitOutlinedFunctionPrologue(
// Generate the function.
CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
- FO.S->getBeginLoc(), CD->getBody()->getBeginLoc());
+ FO.UIntPtrCastRequired ? FO.Loc : FO.S->getBeginLoc(),
+ FO.UIntPtrCastRequired ? FO.Loc
+ : CD->getBody()->getBeginLoc());
unsigned Cnt = CD->getContextParamPosition();
I = FO.S->captures().begin();
for (const FieldDecl *FD : RD->fields()) {
@@ -560,7 +579,8 @@ static llvm::Function *emitOutlinedFunctionPrologue(
}
llvm::Function *
-CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
+CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
+ SourceLocation Loc) {
assert(
CapturedStmtInfo &&
"CapturedStmtInfo should be set when generating the captured function");
@@ -577,7 +597,7 @@ CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
if (NeedWrapperFunction)
Out << "_debug__";
FunctionOptions FO(&S, !NeedWrapperFunction, /*RegisterCastedArgsOnly=*/false,
- Out.str());
+ Out.str(), Loc);
llvm::Function *F = emitOutlinedFunctionPrologue(*this, Args, LocalAddrs,
VLASizes, CXXThisValue, FO);
CodeGenFunction::OMPPrivateScope LocalScope(*this);
@@ -600,7 +620,7 @@ CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
FunctionOptions WrapperFO(&S, /*UIntPtrCastRequired=*/true,
/*RegisterCastedArgsOnly=*/true,
- CapturedStmtInfo->getHelperName());
+ CapturedStmtInfo->getHelperName(), Loc);
CodeGenFunction WrapperCGF(CGM, /*suppressNewContext=*/true);
WrapperCGF.CapturedStmtInfo = CapturedStmtInfo;
Args.clear();
@@ -632,8 +652,7 @@ CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
}
CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType()));
}
- CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, S.getBeginLoc(),
- F, CallArgs);
+ CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, Loc, F, CallArgs);
WrapperCGF.FinishFunction();
return WrapperF;
}
@@ -747,11 +766,12 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
getLangOpts().OpenMPIsDevice &&
isOpenMPTargetExecutionDirective(D.getDirectiveKind());
bool FirstprivateIsLastprivate = false;
- llvm::DenseSet<const VarDecl *> Lastprivates;
+ llvm::DenseMap<const VarDecl *, OpenMPLastprivateModifier> Lastprivates;
for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
for (const auto *D : C->varlists())
- Lastprivates.insert(
- cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
+ Lastprivates.try_emplace(
+ cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl(),
+ C->getKind());
}
llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
@@ -761,8 +781,8 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
bool MustEmitFirstprivateCopy =
CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown;
for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
- auto IRef = C->varlist_begin();
- auto InitsRef = C->inits().begin();
+ const auto *IRef = C->varlist_begin();
+ const auto *InitsRef = C->inits().begin();
for (const Expr *IInit : C->private_copies()) {
const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
bool ThisFirstprivateIsLastprivate =
@@ -853,14 +873,34 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
});
} else {
Address OriginalAddr = OriginalLVal.getAddress(*this);
- IsRegistered = PrivateScope.addPrivate(
- OrigVD, [this, VDInit, OriginalAddr, VD]() {
+ IsRegistered =
+ PrivateScope.addPrivate(OrigVD, [this, VDInit, OriginalAddr, VD,
+ ThisFirstprivateIsLastprivate,
+ OrigVD, &Lastprivates, IRef]() {
// Emit private VarDecl with copy init.
// Remap temp VDInit variable to the address of the original
// variable (for proper handling of captured global variables).
setAddrOfLocalVar(VDInit, OriginalAddr);
EmitDecl(*VD);
LocalDeclMap.erase(VDInit);
+ if (ThisFirstprivateIsLastprivate &&
+ Lastprivates[OrigVD->getCanonicalDecl()] ==
+ OMPC_LASTPRIVATE_conditional) {
+ // Create/init special variable for lastprivate conditionals.
+ Address VDAddr =
+ CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
+ *this, OrigVD);
+ llvm::Value *V = EmitLoadOfScalar(
+ MakeAddrLValue(GetAddrOfLocalVar(VD), (*IRef)->getType(),
+ AlignmentSource::Decl),
+ (*IRef)->getExprLoc());
+ EmitStoreOfScalar(V,
+ MakeAddrLValue(VDAddr, (*IRef)->getType(),
+ AlignmentSource::Decl));
+ LocalDeclMap.erase(VD);
+ setAddrOfLocalVar(VD, VDAddr);
+ return VDAddr;
+ }
return GetAddrOfLocalVar(VD);
});
}
@@ -990,8 +1030,8 @@ bool CodeGenFunction::EmitOMPLastprivateClauseInit(
if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
!getLangOpts().OpenMPSimd)
break;
- auto IRef = C->varlist_begin();
- auto IDestRef = C->destination_exprs().begin();
+ const auto *IRef = C->varlist_begin();
+ const auto *IDestRef = C->destination_exprs().begin();
for (const Expr *IInit : C->private_copies()) {
// Keep the address of the original variable for future update at the end
// of the loop.
@@ -1013,7 +1053,15 @@ bool CodeGenFunction::EmitOMPLastprivateClauseInit(
// for 'firstprivate' clause.
if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
- bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD]() {
+ bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD, C,
+ OrigVD]() {
+ if (C->getKind() == OMPC_LASTPRIVATE_conditional) {
+ Address VDAddr =
+ CGM.getOpenMPRuntime().emitLastprivateConditionalInit(*this,
+ OrigVD);
+ setAddrOfLocalVar(VD, VDAddr);
+ return VDAddr;
+ }
// Emit private VarDecl with copy init.
EmitDecl(*VD);
return GetAddrOfLocalVar(VD);
@@ -1099,7 +1147,7 @@ void CodeGenFunction::EmitOMPLastprivateClauseFinal(
if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>())
PrivateAddr =
Address(Builder.CreateLoad(PrivateAddr),
- getNaturalTypeAlignment(RefTy->getPointeeType()));
+ CGM.getNaturalTypeAlignment(RefTy->getPointeeType()));
// Store the last value to the private copy in the last iteration.
if (C->getKind() == OMPC_LASTPRIVATE_conditional)
CGM.getOpenMPRuntime().emitLastprivateConditionalFinalUpdate(
@@ -1122,7 +1170,7 @@ void CodeGenFunction::EmitOMPLastprivateClauseFinal(
void CodeGenFunction::EmitOMPReductionClauseInit(
const OMPExecutableDirective &D,
- CodeGenFunction::OMPPrivateScope &PrivateScope) {
+ CodeGenFunction::OMPPrivateScope &PrivateScope, bool ForInscan) {
if (!HaveInsertPoint())
return;
SmallVector<const Expr *, 4> Shareds;
@@ -1130,32 +1178,36 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
SmallVector<const Expr *, 4> ReductionOps;
SmallVector<const Expr *, 4> LHSs;
SmallVector<const Expr *, 4> RHSs;
+ OMPTaskDataTy Data;
+ SmallVector<const Expr *, 4> TaskLHSs;
+ SmallVector<const Expr *, 4> TaskRHSs;
for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
- auto IPriv = C->privates().begin();
- auto IRed = C->reduction_ops().begin();
- auto ILHS = C->lhs_exprs().begin();
- auto IRHS = C->rhs_exprs().begin();
- for (const Expr *Ref : C->varlists()) {
- Shareds.emplace_back(Ref);
- Privates.emplace_back(*IPriv);
- ReductionOps.emplace_back(*IRed);
- LHSs.emplace_back(*ILHS);
- RHSs.emplace_back(*IRHS);
- std::advance(IPriv, 1);
- std::advance(IRed, 1);
- std::advance(ILHS, 1);
- std::advance(IRHS, 1);
+ if (ForInscan != (C->getModifier() == OMPC_REDUCTION_inscan))
+ continue;
+ Shareds.append(C->varlist_begin(), C->varlist_end());
+ Privates.append(C->privates().begin(), C->privates().end());
+ ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
+ LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
+ RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
+ if (C->getModifier() == OMPC_REDUCTION_task) {
+ Data.ReductionVars.append(C->privates().begin(), C->privates().end());
+ Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
+ Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
+ Data.ReductionOps.append(C->reduction_ops().begin(),
+ C->reduction_ops().end());
+ TaskLHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
+ TaskRHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
}
}
- ReductionCodeGen RedCG(Shareds, Privates, ReductionOps);
+ ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
unsigned Count = 0;
- auto ILHS = LHSs.begin();
- auto IRHS = RHSs.begin();
- auto IPriv = Privates.begin();
+ auto *ILHS = LHSs.begin();
+ auto *IRHS = RHSs.begin();
+ auto *IPriv = Privates.begin();
for (const Expr *IRef : Shareds) {
const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
// Emit private VarDecl with reduction init.
- RedCG.emitSharedLValue(*this, Count);
+ RedCG.emitSharedOrigLValue(*this, Count);
RedCG.emitAggregateType(*this, Count);
AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
@@ -1222,6 +1274,118 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
++IPriv;
++Count;
}
+ if (!Data.ReductionVars.empty()) {
+ Data.IsReductionWithTaskMod = true;
+ Data.IsWorksharingReduction =
+ isOpenMPWorksharingDirective(D.getDirectiveKind());
+ llvm::Value *ReductionDesc = CGM.getOpenMPRuntime().emitTaskReductionInit(
+ *this, D.getBeginLoc(), TaskLHSs, TaskRHSs, Data);
+ const Expr *TaskRedRef = nullptr;
+ switch (D.getDirectiveKind()) {
+ case OMPD_parallel:
+ TaskRedRef = cast<OMPParallelDirective>(D).getTaskReductionRefExpr();
+ break;
+ case OMPD_for:
+ TaskRedRef = cast<OMPForDirective>(D).getTaskReductionRefExpr();
+ break;
+ case OMPD_sections:
+ TaskRedRef = cast<OMPSectionsDirective>(D).getTaskReductionRefExpr();
+ break;
+ case OMPD_parallel_for:
+ TaskRedRef = cast<OMPParallelForDirective>(D).getTaskReductionRefExpr();
+ break;
+ case OMPD_parallel_master:
+ TaskRedRef =
+ cast<OMPParallelMasterDirective>(D).getTaskReductionRefExpr();
+ break;
+ case OMPD_parallel_sections:
+ TaskRedRef =
+ cast<OMPParallelSectionsDirective>(D).getTaskReductionRefExpr();
+ break;
+ case OMPD_target_parallel:
+ TaskRedRef =
+ cast<OMPTargetParallelDirective>(D).getTaskReductionRefExpr();
+ break;
+ case OMPD_target_parallel_for:
+ TaskRedRef =
+ cast<OMPTargetParallelForDirective>(D).getTaskReductionRefExpr();
+ break;
+ case OMPD_distribute_parallel_for:
+ TaskRedRef =
+ cast<OMPDistributeParallelForDirective>(D).getTaskReductionRefExpr();
+ break;
+ case OMPD_teams_distribute_parallel_for:
+ TaskRedRef = cast<OMPTeamsDistributeParallelForDirective>(D)
+ .getTaskReductionRefExpr();
+ break;
+ case OMPD_target_teams_distribute_parallel_for:
+ TaskRedRef = cast<OMPTargetTeamsDistributeParallelForDirective>(D)
+ .getTaskReductionRefExpr();
+ break;
+ case OMPD_simd:
+ case OMPD_for_simd:
+ case OMPD_section:
+ case OMPD_single:
+ case OMPD_master:
+ case OMPD_critical:
+ case OMPD_parallel_for_simd:
+ case OMPD_task:
+ case OMPD_taskyield:
+ case OMPD_barrier:
+ case OMPD_taskwait:
+ case OMPD_taskgroup:
+ case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
+ case OMPD_ordered:
+ case OMPD_atomic:
+ case OMPD_teams:
+ case OMPD_target:
+ case OMPD_cancellation_point:
+ case OMPD_cancel:
+ case OMPD_target_data:
+ case OMPD_target_enter_data:
+ case OMPD_target_exit_data:
+ case OMPD_taskloop:
+ case OMPD_taskloop_simd:
+ case OMPD_master_taskloop:
+ case OMPD_master_taskloop_simd:
+ case OMPD_parallel_master_taskloop:
+ case OMPD_parallel_master_taskloop_simd:
+ case OMPD_distribute:
+ case OMPD_target_update:
+ case OMPD_distribute_parallel_for_simd:
+ case OMPD_distribute_simd:
+ case OMPD_target_parallel_for_simd:
+ case OMPD_target_simd:
+ case OMPD_teams_distribute:
+ case OMPD_teams_distribute_simd:
+ case OMPD_teams_distribute_parallel_for_simd:
+ case OMPD_target_teams:
+ case OMPD_target_teams_distribute:
+ case OMPD_target_teams_distribute_parallel_for_simd:
+ case OMPD_target_teams_distribute_simd:
+ case OMPD_declare_target:
+ case OMPD_end_declare_target:
+ case OMPD_threadprivate:
+ case OMPD_allocate:
+ case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
+ case OMPD_declare_simd:
+ case OMPD_requires:
+ case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
+ case OMPD_unknown:
+ default:
+ llvm_unreachable("Unexpected directive with task reductions.");
+ }
+
+ const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(TaskRedRef)->getDecl());
+ EmitVarDecl(*VD);
+ EmitStoreOfScalar(ReductionDesc, GetAddrOfLocalVar(VD),
+ /*Volatile=*/false, TaskRedRef->getType());
+ }
}
void CodeGenFunction::EmitOMPReductionClauseFinal(
@@ -1233,14 +1397,25 @@ void CodeGenFunction::EmitOMPReductionClauseFinal(
llvm::SmallVector<const Expr *, 8> RHSExprs;
llvm::SmallVector<const Expr *, 8> ReductionOps;
bool HasAtLeastOneReduction = false;
+ bool IsReductionWithTaskMod = false;
for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
+ // Do not emit for inscan reductions.
+ if (C->getModifier() == OMPC_REDUCTION_inscan)
+ continue;
HasAtLeastOneReduction = true;
Privates.append(C->privates().begin(), C->privates().end());
LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
+ IsReductionWithTaskMod =
+ IsReductionWithTaskMod || C->getModifier() == OMPC_REDUCTION_task;
}
if (HasAtLeastOneReduction) {
+ if (IsReductionWithTaskMod) {
+ CGM.getOpenMPRuntime().emitTaskReductionFini(
+ *this, D.getBeginLoc(),
+ isOpenMPWorksharingDirective(D.getDirectiveKind()));
+ }
bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
isOpenMPParallelDirective(D.getDirectiveKind()) ||
ReductionKind == OMPD_simd;
@@ -1288,6 +1463,63 @@ typedef llvm::function_ref<void(CodeGenFunction &,
CodeGenBoundParametersTy;
} // anonymous namespace
+static void
+checkForLastprivateConditionalUpdate(CodeGenFunction &CGF,
+ const OMPExecutableDirective &S) {
+ if (CGF.getLangOpts().OpenMP < 50)
+ return;
+ llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> PrivateDecls;
+ for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
+ for (const Expr *Ref : C->varlists()) {
+ if (!Ref->getType()->isScalarType())
+ continue;
+ const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
+ if (!DRE)
+ continue;
+ PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
+ CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
+ }
+ }
+ for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
+ for (const Expr *Ref : C->varlists()) {
+ if (!Ref->getType()->isScalarType())
+ continue;
+ const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
+ if (!DRE)
+ continue;
+ PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
+ CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
+ }
+ }
+ for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) {
+ for (const Expr *Ref : C->varlists()) {
+ if (!Ref->getType()->isScalarType())
+ continue;
+ const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
+ if (!DRE)
+ continue;
+ PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
+ CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
+ }
+ }
+ // Privates should not be analyzed since they are not captured at all.
+ // Task reductions may be skipped - tasks are ignored.
+ // Firstprivates do not return value but may be passed by reference - no need
+ // to check for updated lastprivate conditional.
+ for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
+ for (const Expr *Ref : C->varlists()) {
+ if (!Ref->getType()->isScalarType())
+ continue;
+ const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
+ if (!DRE)
+ continue;
+ PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
+ }
+ }
+ CGF.CGM.getOpenMPRuntime().checkAndEmitSharedLastprivateConditional(
+ CGF, S, PrivateDecls);
+}
+
static void emitCommonOMPParallelDirective(
CodeGenFunction &CGF, const OMPExecutableDirective &S,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
@@ -1334,9 +1566,97 @@ static void emitEmptyBoundParameters(CodeGenFunction &,
const OMPExecutableDirective &,
llvm::SmallVectorImpl<llvm::Value *> &) {}
-void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
+Address CodeGenFunction::OMPBuilderCBHelpers::getAddressOfLocalVariable(
+ CodeGenFunction &CGF, const VarDecl *VD) {
+ CodeGenModule &CGM = CGF.CGM;
+ auto &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
+
+ if (!VD)
+ return Address::invalid();
+ const VarDecl *CVD = VD->getCanonicalDecl();
+ if (!CVD->hasAttr<OMPAllocateDeclAttr>())
+ return Address::invalid();
+ const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
+ // Use the default allocation.
+ if (AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc &&
+ !AA->getAllocator())
+ return Address::invalid();
+ llvm::Value *Size;
+ CharUnits Align = CGM.getContext().getDeclAlign(CVD);
+ if (CVD->getType()->isVariablyModifiedType()) {
+ Size = CGF.getTypeSize(CVD->getType());
+ // Align the size: ((size + align - 1) / align) * align
+ Size = CGF.Builder.CreateNUWAdd(
+ Size, CGM.getSize(Align - CharUnits::fromQuantity(1)));
+ Size = CGF.Builder.CreateUDiv(Size, CGM.getSize(Align));
+ Size = CGF.Builder.CreateNUWMul(Size, CGM.getSize(Align));
+ } else {
+ CharUnits Sz = CGM.getContext().getTypeSizeInChars(CVD->getType());
+ Size = CGM.getSize(Sz.alignTo(Align));
+ }
+
+ assert(AA->getAllocator() &&
+ "Expected allocator expression for non-default allocator.");
+ llvm::Value *Allocator = CGF.EmitScalarExpr(AA->getAllocator());
+ // According to the standard, the original allocator type is an enum (integer).
+ // Convert to pointer type, if required.
+ if (Allocator->getType()->isIntegerTy())
+ Allocator = CGF.Builder.CreateIntToPtr(Allocator, CGM.VoidPtrTy);
+ else if (Allocator->getType()->isPointerTy())
+ Allocator = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Allocator,
+ CGM.VoidPtrTy);
+
+ llvm::Value *Addr = OMPBuilder.CreateOMPAlloc(
+ CGF.Builder, Size, Allocator,
+ getNameWithSeparators({CVD->getName(), ".void.addr"}, ".", "."));
+ llvm::CallInst *FreeCI =
+ OMPBuilder.CreateOMPFree(CGF.Builder, Addr, Allocator);
+
+ CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(NormalAndEHCleanup, FreeCI);
+ Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Addr,
+ CGF.ConvertTypeForMem(CGM.getContext().getPointerType(CVD->getType())),
+ getNameWithSeparators({CVD->getName(), ".addr"}, ".", "."));
+ return Address(Addr, Align);
+}
+
+Address CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
+ CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr,
+ SourceLocation Loc) {
+ CodeGenModule &CGM = CGF.CGM;
+ if (CGM.getLangOpts().OpenMPUseTLS &&
+ CGM.getContext().getTargetInfo().isTLSSupported())
+ return VDAddr;
+
+ llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
+
+ llvm::Type *VarTy = VDAddr.getElementType();
+ llvm::Value *Data =
+ CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.Int8PtrTy);
+ llvm::ConstantInt *Size = CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy));
+ std::string Suffix = getNameWithSeparators({"cache", ""});
+ llvm::Twine CacheName = Twine(CGM.getMangledName(VD)).concat(Suffix);
+
+ llvm::CallInst *ThreadPrivateCacheCall =
+ OMPBuilder.CreateCachedThreadPrivate(CGF.Builder, Data, Size, CacheName);
+
+ return Address(ThreadPrivateCacheCall, VDAddr.getAlignment());
+}
- if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) {
+std::string CodeGenFunction::OMPBuilderCBHelpers::getNameWithSeparators(
+ ArrayRef<StringRef> Parts, StringRef FirstSeparator, StringRef Separator) {
+ SmallString<128> Buffer;
+ llvm::raw_svector_ostream OS(Buffer);
+ StringRef Sep = FirstSeparator;
+ for (StringRef Part : Parts) {
+ OS << Sep << Part;
+ Sep = Separator;
+ }
+ return OS.str().str();
+}
+void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
+ if (CGM.getLangOpts().OpenMPIRBuilder) {
+ llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
// Check if we have any if clause associated with the directive.
llvm::Value *IfCond = nullptr;
if (const auto *C = S.getSingleClause<OMPIfClause>())
@@ -1357,15 +1677,7 @@ void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
// The cleanup callback that finalizes all variabels at the given location,
// thus calls destructors etc.
auto FiniCB = [this](InsertPointTy IP) {
- CGBuilderTy::InsertPointGuard IPG(Builder);
- assert(IP.getBlock()->end() != IP.getPoint() &&
- "OpenMP IR Builder should cause terminated block!");
- llvm::BasicBlock *IPBB = IP.getBlock();
- llvm::BasicBlock *DestBB = IPBB->splitBasicBlock(IP.getPoint());
- IPBB->getTerminator()->eraseFromParent();
- Builder.SetInsertPoint(IPBB);
- CodeGenFunction::JumpDest Dest = getJumpDestInCurrentScope(DestBB);
- EmitBranchThroughCleanup(Dest);
+ OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
};
// Privatization callback that performs appropriate action for
@@ -1387,32 +1699,17 @@ void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
auto BodyGenCB = [ParallelRegionBodyStmt,
this](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
llvm::BasicBlock &ContinuationBB) {
- auto OldAllocaIP = AllocaInsertPt;
- AllocaInsertPt = &*AllocaIP.getPoint();
-
- auto OldReturnBlock = ReturnBlock;
- ReturnBlock = getJumpDestInCurrentScope(&ContinuationBB);
-
- llvm::BasicBlock *CodeGenIPBB = CodeGenIP.getBlock();
- CodeGenIPBB->splitBasicBlock(CodeGenIP.getPoint());
- llvm::Instruction *CodeGenIPBBTI = CodeGenIPBB->getTerminator();
- CodeGenIPBBTI->removeFromParent();
-
- Builder.SetInsertPoint(CodeGenIPBB);
-
- EmitStmt(ParallelRegionBodyStmt);
-
- Builder.Insert(CodeGenIPBBTI);
-
- AllocaInsertPt = OldAllocaIP;
- ReturnBlock = OldReturnBlock;
+ OMPBuilderCBHelpers::OutlinedRegionBodyRAII ORB(*this, AllocaIP,
+ ContinuationBB);
+ OMPBuilderCBHelpers::EmitOMPRegionBody(*this, ParallelRegionBodyStmt,
+ CodeGenIP, ContinuationBB);
};
CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
- Builder.restoreIP(OMPBuilder->CreateParallel(Builder, BodyGenCB, PrivCB,
- FiniCB, IfCond, NumThreads,
- ProcBind, S.hasCancel()));
+ Builder.restoreIP(OMPBuilder.CreateParallel(Builder, BodyGenCB, PrivCB,
+ FiniCB, IfCond, NumThreads,
+ ProcBind, S.hasCancel()));
return;
}
@@ -1436,10 +1733,16 @@ void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
CGF.EmitStmt(S.getCapturedStmt(OMPD_parallel)->getCapturedStmt());
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
};
- emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen,
- emitEmptyBoundParameters);
- emitPostUpdateForReductionClause(*this, S,
- [](CodeGenFunction &) { return nullptr; });
+ {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
+ emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen,
+ emitEmptyBoundParameters);
+ emitPostUpdateForReductionClause(*this, S,
+ [](CodeGenFunction &) { return nullptr; });
+ }
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, S);
}
static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop,
@@ -1506,6 +1809,27 @@ void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
getProfileCount(D.getBody()));
EmitBlock(NextBB);
}
+
+ OMPPrivateScope InscanScope(*this);
+ EmitOMPReductionClauseInit(D, InscanScope, /*ForInscan=*/true);
+ bool IsInscanRegion = InscanScope.Privatize();
+ if (IsInscanRegion) {
+ // Need to remember the block before and after scan directive
+ // to dispatch them correctly depending on the clause used in
+ // this directive, inclusive or exclusive. For inclusive scan the natural
+ // order of the blocks is used, for exclusive clause the blocks must be
+ // executed in reverse order.
+ OMPBeforeScanBlock = createBasicBlock("omp.before.scan.bb");
+ OMPAfterScanBlock = createBasicBlock("omp.after.scan.bb");
+ // No need to allocate inscan exit block, in simd mode it is selected in the
+ // codegen for the scan directive.
+ if (D.getDirectiveKind() != OMPD_simd && !getLangOpts().OpenMPSimd)
+ OMPScanExitBlock = createBasicBlock("omp.exit.inscan.bb");
+ OMPScanDispatch = createBasicBlock("omp.inscan.dispatch");
+ EmitBranch(OMPScanDispatch);
+ EmitBlock(OMPBeforeScanBlock);
+ }
+
// Emit loop variables for C++ range loops.
const Stmt *Body =
D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
@@ -1515,13 +1839,17 @@ void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
Body, /*TryImperfectlyNestedLoops=*/true),
D.getCollapsedNumber());
+ // Jump to the dispatcher at the end of the loop body.
+ if (IsInscanRegion)
+ EmitBranch(OMPScanExitBlock);
+
// The end (updates/cleanups).
EmitBlock(Continue.getBlock());
BreakContinueStack.pop_back();
}
void CodeGenFunction::EmitOMPInnerLoop(
- const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
+ const OMPExecutableDirective &S, bool RequiresCleanup, const Expr *LoopCond,
const Expr *IncExpr,
const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
const llvm::function_ref<void(CodeGenFunction &)> PostIncGen) {
@@ -1531,8 +1859,19 @@ void CodeGenFunction::EmitOMPInnerLoop(
auto CondBlock = createBasicBlock("omp.inner.for.cond");
EmitBlock(CondBlock);
const SourceRange R = S.getSourceRange();
- LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
- SourceLocToDebugLoc(R.getEnd()));
+
+ // If attributes are attached, push to the basic block with them.
+ const auto &OMPED = cast<OMPExecutableDirective>(S);
+ const CapturedStmt *ICS = OMPED.getInnermostCapturedStmt();
+ const Stmt *SS = ICS->getCapturedStmt();
+ const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(SS);
+ if (AS)
+ LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(),
+ AS->getAttrs(), SourceLocToDebugLoc(R.getBegin()),
+ SourceLocToDebugLoc(R.getEnd()));
+ else
+ LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
+ SourceLocToDebugLoc(R.getEnd()));
// If there are any cleanups between here and the loop-exit scope,
// create a block to stage a loop exit along.
@@ -1671,7 +2010,7 @@ static void emitAlignedClause(CodeGenFunction &CGF,
"alignment is not power of 2");
if (Alignment != 0) {
llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
- CGF.EmitAlignmentAssumption(
+ CGF.emitAlignmentAssumption(
PtrValue, E, /*No second loc needed*/ SourceLocation(),
llvm::ConstantInt::get(CGF.getLLVMContext(), Alignment));
}
@@ -1835,6 +2174,18 @@ void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D,
LoopStack.setParallel(!IsMonotonic);
LoopStack.setVectorizeEnable();
emitSimdlenSafelenClause(*this, D, IsMonotonic);
+ if (const auto *C = D.getSingleClause<OMPOrderClause>())
+ if (C->getKind() == OMPC_ORDER_concurrent)
+ LoopStack.setParallel(/*Enable=*/true);
+ if ((D.getDirectiveKind() == OMPD_simd ||
+ (getLangOpts().OpenMPSimd &&
+ isOpenMPSimdDirective(D.getDirectiveKind()))) &&
+ llvm::any_of(D.getClausesOfKind<OMPReductionClause>(),
+ [](const OMPReductionClause *C) {
+ return C->getModifier() == OMPC_REDUCTION_inscan;
+ }))
+ // Disable parallel access in case of prefix sum.
+ LoopStack.setParallel(/*Enable=*/false);
}
void CodeGenFunction::EmitOMPSimdFinal(
@@ -1886,7 +2237,6 @@ void CodeGenFunction::EmitOMPSimdFinal(
static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF,
const OMPLoopDirective &S,
CodeGenFunction::JumpDest LoopExit) {
- CGF.CGM.getOpenMPRuntime().initLastprivateConditionalCounter(CGF, S);
CGF.EmitOMPLoopBody(S, LoopExit);
CGF.EmitStopPoint(&S);
}
@@ -1917,12 +2267,14 @@ static void emitCommonSimdLoop(CodeGenFunction &CGF, const OMPLoopDirective &S,
BodyCodeGen(CGF);
};
const Expr *IfCond = nullptr;
- for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
- if (CGF.getLangOpts().OpenMP >= 50 &&
- (C->getNameModifier() == OMPD_unknown ||
- C->getNameModifier() == OMPD_simd)) {
- IfCond = C->getCondition();
- break;
+ if (isOpenMPSimdDirective(S.getDirectiveKind())) {
+ for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
+ if (CGF.getLangOpts().OpenMP >= 50 &&
+ (C->getNameModifier() == OMPD_unknown ||
+ C->getNameModifier() == OMPD_simd)) {
+ IfCond = C->getCondition();
+ break;
+ }
}
}
if (IfCond) {
@@ -2007,10 +2359,8 @@ static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S,
CGF.EmitOMPInnerLoop(
S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
[&S](CodeGenFunction &CGF) {
- CGF.CGM.getOpenMPRuntime().initLastprivateConditionalCounter(
- CGF, S);
- CGF.EmitOMPLoopBody(S, CodeGenFunction::JumpDest());
- CGF.EmitStopPoint(&S);
+ emitOMPLoopBodyWithStopPoint(CGF, S,
+ CodeGenFunction::JumpDest());
},
[](CodeGenFunction &) {});
});
@@ -2031,11 +2381,19 @@ static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S,
}
void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
+ ParentLoopDirectiveForScanRegion ScanRegion(*this, S);
+ OMPFirstScanLoop = true;
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
emitOMPSimdRegion(CGF, S, Action);
};
- OMPLexicalScope Scope(*this, S, OMPD_unknown);
- CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
+ {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
+ OMPLexicalScope Scope(*this, S, OMPD_unknown);
+ CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
+ }
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, S);
}
void CodeGenFunction::EmitOMPOuterLoop(
@@ -2103,10 +2461,14 @@ void CodeGenFunction::EmitOMPOuterLoop(
[&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) {
// Generate !llvm.loop.parallel metadata for loads and stores for loops
// with dynamic/guided scheduling and without ordered clause.
- if (!isOpenMPSimdDirective(S.getDirectiveKind()))
+ if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
CGF.LoopStack.setParallel(!IsMonotonic);
- else
+ if (const auto *C = S.getSingleClause<OMPOrderClause>())
+ if (C->getKind() == OMPC_ORDER_concurrent)
+ CGF.LoopStack.setParallel(/*Enable=*/true);
+ } else {
CGF.EmitOMPSimdInit(S, IsMonotonic);
+ }
},
[&S, &LoopArgs, LoopExit, &CodeGenLoop, IVSize, IVSigned, &CodeGenOrdered,
&LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
@@ -2612,6 +2974,14 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
bool StaticChunkedOne = RT.isStaticChunked(ScheduleKind.Schedule,
/* Chunked */ Chunk != nullptr) && HasChunkSizeOne &&
isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
+ bool IsMonotonic =
+ Ordered ||
+ ((ScheduleKind.Schedule == OMPC_SCHEDULE_static ||
+ ScheduleKind.Schedule == OMPC_SCHEDULE_unknown) &&
+ !(ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
+ ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic)) ||
+ ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
+ ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
if ((RT.isStaticNonchunked(ScheduleKind.Schedule,
/* Chunked */ Chunk != nullptr) ||
StaticChunkedOne) &&
@@ -2620,9 +2990,13 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
emitCommonSimdLoop(
*this, S,
- [&S](CodeGenFunction &CGF, PrePostActionTy &) {
- if (isOpenMPSimdDirective(S.getDirectiveKind()))
- CGF.EmitOMPSimdInit(S, /*IsMonotonic=*/true);
+ [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) {
+ if (isOpenMPSimdDirective(S.getDirectiveKind())) {
+ CGF.EmitOMPSimdInit(S, IsMonotonic);
+ } else if (const auto *C = S.getSingleClause<OMPOrderClause>()) {
+ if (C->getKind() == OMPC_ORDER_concurrent)
+ CGF.LoopStack.setParallel(/*Enable=*/true);
+ }
},
[IVSize, IVSigned, Ordered, IL, LB, UB, ST, StaticChunkedOne, Chunk,
&S, ScheduleKind, LoopExit,
@@ -2663,10 +3037,7 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
: S.getCond(),
StaticChunkedOne ? S.getDistInc() : S.getInc(),
[&S, LoopExit](CodeGenFunction &CGF) {
- CGF.CGM.getOpenMPRuntime()
- .initLastprivateConditionalCounter(CGF, S);
- CGF.EmitOMPLoopBody(S, LoopExit);
- CGF.EmitStopPoint(&S);
+ emitOMPLoopBodyWithStopPoint(CGF, S, LoopExit);
},
[](CodeGenFunction &) {});
});
@@ -2678,11 +3049,6 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
};
OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
} else {
- const bool IsMonotonic =
- Ordered || ScheduleKind.Schedule == OMPC_SCHEDULE_static ||
- ScheduleKind.Schedule == OMPC_SCHEDULE_unknown ||
- ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
- ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
// Emit the outer loop, which requests its work chunk [LB..UB] from
// runtime and runs the inner loop to process it.
const OMPLoopArguments LoopArguments(
@@ -2755,16 +3121,233 @@ emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S,
return {LBVal, UBVal};
}
+/// Emits the code for the directive with inscan reductions.
+/// The code is the following:
+/// \code
+/// size num_iters = <num_iters>;
+/// <type> buffer[num_iters];
+/// #pragma omp ...
+/// for (i: 0..<num_iters>) {
+/// <input phase>;
+/// buffer[i] = red;
+/// }
+/// for (int k = 0; k != ceil(log2(num_iters)); ++k)
+/// for (size cnt = last_iter; cnt >= pow(2, k); --k)
+/// buffer[i] op= buffer[i-pow(2,k)];
+/// #pragma omp ...
+/// for (0..<num_iters>) {
+/// red = InclusiveScan ? buffer[i] : buffer[i-1];
+/// <scan phase>;
+/// }
+/// \endcode
+static void emitScanBasedDirective(
+ CodeGenFunction &CGF, const OMPLoopDirective &S,
+ llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen,
+ llvm::function_ref<void(CodeGenFunction &)> FirstGen,
+ llvm::function_ref<void(CodeGenFunction &)> SecondGen) {
+ llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
+ NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
+ SmallVector<const Expr *, 4> Shareds;
+ SmallVector<const Expr *, 4> Privates;
+ SmallVector<const Expr *, 4> ReductionOps;
+ SmallVector<const Expr *, 4> LHSs;
+ SmallVector<const Expr *, 4> RHSs;
+ SmallVector<const Expr *, 4> CopyOps;
+ SmallVector<const Expr *, 4> CopyArrayTemps;
+ SmallVector<const Expr *, 4> CopyArrayElems;
+ for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
+ assert(C->getModifier() == OMPC_REDUCTION_inscan &&
+ "Only inscan reductions are expected.");
+ Shareds.append(C->varlist_begin(), C->varlist_end());
+ Privates.append(C->privates().begin(), C->privates().end());
+ ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
+ LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
+ RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
+ CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
+ CopyArrayTemps.append(C->copy_array_temps().begin(),
+ C->copy_array_temps().end());
+ CopyArrayElems.append(C->copy_array_elems().begin(),
+ C->copy_array_elems().end());
+ }
+ {
+ // Emit buffers for each reduction variables.
+ // ReductionCodeGen is required to emit correctly the code for array
+ // reductions.
+ ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
+ unsigned Count = 0;
+ auto *ITA = CopyArrayTemps.begin();
+ for (const Expr *IRef : Privates) {
+ const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
+ // Emit variably modified arrays, used for arrays/array sections
+ // reductions.
+ if (PrivateVD->getType()->isVariablyModifiedType()) {
+ RedCG.emitSharedOrigLValue(CGF, Count);
+ RedCG.emitAggregateType(CGF, Count);
+ }
+ CodeGenFunction::OpaqueValueMapping DimMapping(
+ CGF,
+ cast<OpaqueValueExpr>(
+ cast<VariableArrayType>((*ITA)->getType()->getAsArrayTypeUnsafe())
+ ->getSizeExpr()),
+ RValue::get(OMPScanNumIterations));
+ // Emit temp buffer.
+ CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(*ITA)->getDecl()));
+ ++ITA;
+ ++Count;
+ }
+ }
+ CodeGenFunction::ParentLoopDirectiveForScanRegion ScanRegion(CGF, S);
+ {
+ // Emit loop with input phase:
+ // #pragma omp ...
+ // for (i: 0..<num_iters>) {
+ // <input phase>;
+ // buffer[i] = red;
+ // }
+ CGF.OMPFirstScanLoop = true;
+ CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
+ FirstGen(CGF);
+ }
+ // Emit prefix reduction:
+ // for (int k = 0; k <= ceil(log2(n)); ++k)
+ llvm::BasicBlock *InputBB = CGF.Builder.GetInsertBlock();
+ llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.outer.log.scan.body");
+ llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.outer.log.scan.exit");
+ llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::log2, CGF.DoubleTy);
+ llvm::Value *Arg =
+ CGF.Builder.CreateUIToFP(OMPScanNumIterations, CGF.DoubleTy);
+ llvm::Value *LogVal = CGF.EmitNounwindRuntimeCall(F, Arg);
+ F = CGF.CGM.getIntrinsic(llvm::Intrinsic::ceil, CGF.DoubleTy);
+ LogVal = CGF.EmitNounwindRuntimeCall(F, LogVal);
+ LogVal = CGF.Builder.CreateFPToUI(LogVal, CGF.IntTy);
+ llvm::Value *NMin1 = CGF.Builder.CreateNUWSub(
+ OMPScanNumIterations, llvm::ConstantInt::get(CGF.SizeTy, 1));
+ auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getBeginLoc());
+ CGF.EmitBlock(LoopBB);
+ auto *Counter = CGF.Builder.CreatePHI(CGF.IntTy, 2);
+ // size pow2k = 1;
+ auto *Pow2K = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
+ Counter->addIncoming(llvm::ConstantInt::get(CGF.IntTy, 0), InputBB);
+ Pow2K->addIncoming(llvm::ConstantInt::get(CGF.SizeTy, 1), InputBB);
+ // for (size i = n - 1; i >= 2 ^ k; --i)
+ // tmp[i] op= tmp[i-pow2k];
+ llvm::BasicBlock *InnerLoopBB =
+ CGF.createBasicBlock("omp.inner.log.scan.body");
+ llvm::BasicBlock *InnerExitBB =
+ CGF.createBasicBlock("omp.inner.log.scan.exit");
+ llvm::Value *CmpI = CGF.Builder.CreateICmpUGE(NMin1, Pow2K);
+ CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
+ CGF.EmitBlock(InnerLoopBB);
+ auto *IVal = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
+ IVal->addIncoming(NMin1, LoopBB);
+ {
+ CodeGenFunction::OMPPrivateScope PrivScope(CGF);
+ auto *ILHS = LHSs.begin();
+ auto *IRHS = RHSs.begin();
+ for (const Expr *CopyArrayElem : CopyArrayElems) {
+ const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
+ const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
+ Address LHSAddr = Address::invalid();
+ {
+ CodeGenFunction::OpaqueValueMapping IdxMapping(
+ CGF,
+ cast<OpaqueValueExpr>(
+ cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
+ RValue::get(IVal));
+ LHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
+ }
+ PrivScope.addPrivate(LHSVD, [LHSAddr]() { return LHSAddr; });
+ Address RHSAddr = Address::invalid();
+ {
+ llvm::Value *OffsetIVal = CGF.Builder.CreateNUWSub(IVal, Pow2K);
+ CodeGenFunction::OpaqueValueMapping IdxMapping(
+ CGF,
+ cast<OpaqueValueExpr>(
+ cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
+ RValue::get(OffsetIVal));
+ RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
+ }
+ PrivScope.addPrivate(RHSVD, [RHSAddr]() { return RHSAddr; });
+ ++ILHS;
+ ++IRHS;
+ }
+ PrivScope.Privatize();
+ CGF.CGM.getOpenMPRuntime().emitReduction(
+ CGF, S.getEndLoc(), Privates, LHSs, RHSs, ReductionOps,
+ {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_unknown});
+ }
+ llvm::Value *NextIVal =
+ CGF.Builder.CreateNUWSub(IVal, llvm::ConstantInt::get(CGF.SizeTy, 1));
+ IVal->addIncoming(NextIVal, CGF.Builder.GetInsertBlock());
+ CmpI = CGF.Builder.CreateICmpUGE(NextIVal, Pow2K);
+ CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
+ CGF.EmitBlock(InnerExitBB);
+ llvm::Value *Next =
+ CGF.Builder.CreateNUWAdd(Counter, llvm::ConstantInt::get(CGF.IntTy, 1));
+ Counter->addIncoming(Next, CGF.Builder.GetInsertBlock());
+ // pow2k <<= 1;
+ llvm::Value *NextPow2K = CGF.Builder.CreateShl(Pow2K, 1, "", /*HasNUW=*/true);
+ Pow2K->addIncoming(NextPow2K, CGF.Builder.GetInsertBlock());
+ llvm::Value *Cmp = CGF.Builder.CreateICmpNE(Next, LogVal);
+ CGF.Builder.CreateCondBr(Cmp, LoopBB, ExitBB);
+ auto DL1 = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getEndLoc());
+ CGF.EmitBlock(ExitBB);
+
+ CGF.OMPFirstScanLoop = false;
+ SecondGen(CGF);
+}
+
+static bool emitWorksharingDirective(CodeGenFunction &CGF,
+ const OMPLoopDirective &S,
+ bool HasCancel) {
+ bool HasLastprivates;
+ if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
+ [](const OMPReductionClause *C) {
+ return C->getModifier() == OMPC_REDUCTION_inscan;
+ })) {
+ const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
+ CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
+ OMPLoopScope LoopScope(CGF, S);
+ return CGF.EmitScalarExpr(S.getNumIterations());
+ };
+ const auto &&FirstGen = [&S, HasCancel](CodeGenFunction &CGF) {
+ CodeGenFunction::OMPCancelStackRAII CancelRegion(
+ CGF, S.getDirectiveKind(), HasCancel);
+ (void)CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
+ emitForLoopBounds,
+ emitDispatchForLoopBounds);
+ // Emit an implicit barrier at the end.
+ CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getBeginLoc(),
+ OMPD_for);
+ };
+ const auto &&SecondGen = [&S, HasCancel,
+ &HasLastprivates](CodeGenFunction &CGF) {
+ CodeGenFunction::OMPCancelStackRAII CancelRegion(
+ CGF, S.getDirectiveKind(), HasCancel);
+ HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
+ emitForLoopBounds,
+ emitDispatchForLoopBounds);
+ };
+ emitScanBasedDirective(CGF, S, NumIteratorsGen, FirstGen, SecondGen);
+ } else {
+ CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(),
+ HasCancel);
+ HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
+ emitForLoopBounds,
+ emitDispatchForLoopBounds);
+ }
+ return HasLastprivates;
+}
+
void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
bool HasLastprivates = false;
auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
PrePostActionTy &) {
- OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel());
- HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
- emitForLoopBounds,
- emitDispatchForLoopBounds);
+ HasLastprivates = emitWorksharingDirective(CGF, S, S.hasCancel());
};
{
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
OMPLexicalScope Scope(*this, S, OMPD_unknown);
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen,
S.hasCancel());
@@ -2773,17 +3356,19 @@ void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
// Emit an implicit barrier at the end.
if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, S);
}
void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
bool HasLastprivates = false;
auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
PrePostActionTy &) {
- HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
- emitForLoopBounds,
- emitDispatchForLoopBounds);
+ HasLastprivates = emitWorksharingDirective(CGF, S, /*HasCancel=*/false);
};
{
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
OMPLexicalScope Scope(*this, S, OMPD_unknown);
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
}
@@ -2791,6 +3376,8 @@ void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
// Emit an implicit barrier at the end.
if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, S);
}
static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
@@ -2808,7 +3395,7 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
bool HasLastprivates = false;
auto &&CodeGen = [&S, CapturedStmt, CS,
&HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) {
- ASTContext &C = CGF.getContext();
+ const ASTContext &C = CGF.getContext();
QualType KmpInt32Ty =
C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
// Emit helper vars inits.
@@ -2830,11 +3417,13 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
OpaqueValueExpr UBRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue);
CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
// Generate condition for loop.
- BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
- OK_Ordinary, S.getBeginLoc(), FPOptions());
+ BinaryOperator *Cond = BinaryOperator::Create(
+ C, &IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue, OK_Ordinary,
+ S.getBeginLoc(), FPOptionsOverride());
// Increment for loop counter.
- UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary,
- S.getBeginLoc(), true);
+ UnaryOperator *Inc = UnaryOperator::Create(
+ C, &IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary,
+ S.getBeginLoc(), true, FPOptionsOverride());
auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) {
// Iterate through all sections and emit a switch construct:
// switch (IV) {
@@ -2847,7 +3436,6 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
// break;
// }
// .omp.sections.exit:
- CGF.CGM.getOpenMPRuntime().initLastprivateConditionalCounter(CGF, S);
llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
llvm::SwitchInst *SwitchStmt =
CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getBeginLoc()),
@@ -2905,7 +3493,7 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
// IV = LB;
CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getBeginLoc()), IV);
// while (idx <= UB) { BODY; ++idx; }
- CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
+ CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, Cond, Inc, BodyGen,
[](CodeGenFunction &) {});
// Tell the runtime we are done.
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
@@ -2949,6 +3537,8 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
{
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
OMPLexicalScope Scope(*this, S, OMPD_unknown);
EmitSections(S);
}
@@ -2957,6 +3547,8 @@ void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(),
OMPD_sections);
}
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, S);
}
void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
@@ -2995,6 +3587,8 @@ void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
};
{
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
OMPLexicalScope Scope(*this, S, OMPD_unknown);
CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getBeginLoc(),
CopyprivateVars, DestExprs,
@@ -3007,6 +3601,8 @@ void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
*this, S.getBeginLoc(),
S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single);
}
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, S);
}
static void emitMaster(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
@@ -3018,11 +3614,75 @@ static void emitMaster(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
}
void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
+ if (CGM.getLangOpts().OpenMPIRBuilder) {
+ llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
+ using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
+
+ const CapturedStmt *CS = S.getInnermostCapturedStmt();
+ const Stmt *MasterRegionBodyStmt = CS->getCapturedStmt();
+
+ auto FiniCB = [this](InsertPointTy IP) {
+ OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
+ };
+
+ auto BodyGenCB = [MasterRegionBodyStmt, this](InsertPointTy AllocaIP,
+ InsertPointTy CodeGenIP,
+ llvm::BasicBlock &FiniBB) {
+ OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
+ OMPBuilderCBHelpers::EmitOMPRegionBody(*this, MasterRegionBodyStmt,
+ CodeGenIP, FiniBB);
+ };
+
+ CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
+ Builder.restoreIP(OMPBuilder.CreateMaster(Builder, BodyGenCB, FiniCB));
+
+ return;
+ }
OMPLexicalScope Scope(*this, S, OMPD_unknown);
emitMaster(*this, S);
}
void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
+ if (CGM.getLangOpts().OpenMPIRBuilder) {
+ llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
+ using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
+
+ const CapturedStmt *CS = S.getInnermostCapturedStmt();
+ const Stmt *CriticalRegionBodyStmt = CS->getCapturedStmt();
+ const Expr *Hint = nullptr;
+ if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
+ Hint = HintClause->getHint();
+
+ // TODO: This is slightly different from what's currently being done in
+ // clang. Fix the Int32Ty to IntPtrTy (pointer width size) when everything
+ // about typing is final.
+ llvm::Value *HintInst = nullptr;
+ if (Hint)
+ HintInst =
+ Builder.CreateIntCast(EmitScalarExpr(Hint), CGM.Int32Ty, false);
+
+ auto FiniCB = [this](InsertPointTy IP) {
+ OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
+ };
+
+ auto BodyGenCB = [CriticalRegionBodyStmt, this](InsertPointTy AllocaIP,
+ InsertPointTy CodeGenIP,
+ llvm::BasicBlock &FiniBB) {
+ OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
+ OMPBuilderCBHelpers::EmitOMPRegionBody(*this, CriticalRegionBodyStmt,
+ CodeGenIP, FiniBB);
+ };
+
+ CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
+ Builder.restoreIP(OMPBuilder.CreateCritical(
+ Builder, BodyGenCB, FiniCB, S.getDirectiveName().getAsString(),
+ HintInst));
+
+ return;
+ }
+
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
@@ -3042,12 +3702,16 @@ void CodeGenFunction::EmitOMPParallelForDirective(
// directives: 'parallel' with 'for' directive.
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
- OMPCancelStackRAII CancelRegion(CGF, OMPD_parallel_for, S.hasCancel());
- CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
- emitDispatchForLoopBounds);
+ (void)emitWorksharingDirective(CGF, S, S.hasCancel());
};
- emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
- emitEmptyBoundParameters);
+ {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
+ emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
+ emitEmptyBoundParameters);
+ }
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, S);
}
void CodeGenFunction::EmitOMPParallelForSimdDirective(
@@ -3056,11 +3720,16 @@ void CodeGenFunction::EmitOMPParallelForSimdDirective(
// directives: 'parallel' with 'for' directive.
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
- CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
- emitDispatchForLoopBounds);
+ (void)emitWorksharingDirective(CGF, S, /*HasCancel=*/false);
};
- emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen,
- emitEmptyBoundParameters);
+ {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
+ emitCommonOMPParallelDirective(*this, S, OMPD_for_simd, CodeGen,
+ emitEmptyBoundParameters);
+ }
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, S);
}
void CodeGenFunction::EmitOMPParallelMasterDirective(
@@ -3086,10 +3755,16 @@ void CodeGenFunction::EmitOMPParallelMasterDirective(
emitMaster(CGF, S);
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
};
- emitCommonOMPParallelDirective(*this, S, OMPD_master, CodeGen,
- emitEmptyBoundParameters);
- emitPostUpdateForReductionClause(*this, S,
- [](CodeGenFunction &) { return nullptr; });
+ {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
+ emitCommonOMPParallelDirective(*this, S, OMPD_master, CodeGen,
+ emitEmptyBoundParameters);
+ emitPostUpdateForReductionClause(*this, S,
+ [](CodeGenFunction &) { return nullptr; });
+ }
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, S);
}
void CodeGenFunction::EmitOMPParallelSectionsDirective(
@@ -3100,8 +3775,14 @@ void CodeGenFunction::EmitOMPParallelSectionsDirective(
Action.Enter(CGF);
CGF.EmitSections(S);
};
- emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen,
- emitEmptyBoundParameters);
+ {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
+ emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen,
+ emitEmptyBoundParameters);
+ }
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, S);
}
void CodeGenFunction::EmitOMPTaskBasedDirective(
@@ -3188,33 +3869,28 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
SmallVector<const Expr *, 4> LHSs;
SmallVector<const Expr *, 4> RHSs;
for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
- auto IPriv = C->privates().begin();
- auto IRed = C->reduction_ops().begin();
- auto ILHS = C->lhs_exprs().begin();
- auto IRHS = C->rhs_exprs().begin();
- for (const Expr *Ref : C->varlists()) {
- Data.ReductionVars.emplace_back(Ref);
- Data.ReductionCopies.emplace_back(*IPriv);
- Data.ReductionOps.emplace_back(*IRed);
- LHSs.emplace_back(*ILHS);
- RHSs.emplace_back(*IRHS);
- std::advance(IPriv, 1);
- std::advance(IRed, 1);
- std::advance(ILHS, 1);
- std::advance(IRHS, 1);
- }
+ Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
+ Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
+ Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
+ Data.ReductionOps.append(C->reduction_ops().begin(),
+ C->reduction_ops().end());
+ LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
+ RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
}
Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit(
*this, S.getBeginLoc(), LHSs, RHSs, Data);
// Build list of dependences.
- for (const auto *C : S.getClausesOfKind<OMPDependClause>())
- for (const Expr *IRef : C->varlists())
- Data.Dependences.emplace_back(C->getDependencyKind(), IRef);
+ for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
+ OMPTaskDataTy::DependData &DD =
+ Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
+ DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
+ }
auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs,
CapturedRegion](CodeGenFunction &CGF,
PrePostActionTy &Action) {
// Set proper addresses for generated private copies.
OMPPrivateScope Scope(CGF);
+ llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> FirstprivatePtrs;
if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
!Data.LastprivateVars.empty()) {
llvm::FunctionType *CopyFnTy = llvm::FunctionType::get(
@@ -3241,6 +3917,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
".firstpriv.ptr.addr");
PrivatePtrs.emplace_back(VD, PrivatePtr);
+ FirstprivatePtrs.emplace_back(VD, PrivatePtr);
CallArgs.push_back(PrivatePtr.getPointer());
}
for (const Expr *E : Data.LastprivateVars) {
@@ -3271,13 +3948,21 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
}
}
if (Data.Reductions) {
+ OMPPrivateScope FirstprivateScope(CGF);
+ for (const auto &Pair : FirstprivatePtrs) {
+ Address Replacement(CGF.Builder.CreateLoad(Pair.second),
+ CGF.getContext().getDeclAlign(Pair.first));
+ FirstprivateScope.addPrivate(Pair.first,
+ [Replacement]() { return Replacement; });
+ }
+ (void)FirstprivateScope.Privatize();
OMPLexicalScope LexScope(CGF, S, CapturedRegion);
- ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionCopies,
- Data.ReductionOps);
+ ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionVars,
+ Data.ReductionCopies, Data.ReductionOps);
llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(9)));
for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
- RedCG.emitSharedLValue(CGF, Cnt);
+ RedCG.emitSharedOrigLValue(CGF, Cnt);
RedCG.emitAggregateType(CGF, Cnt);
// FIXME: This must removed once the runtime library is fixed.
// Emit required threadprivate variables for
@@ -3322,9 +4007,9 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
// privatized earlier.
OMPPrivateScope InRedScope(CGF);
if (!InRedVars.empty()) {
- ReductionCodeGen RedCG(InRedVars, InRedPrivs, InRedOps);
+ ReductionCodeGen RedCG(InRedVars, InRedVars, InRedPrivs, InRedOps);
for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
- RedCG.emitSharedLValue(CGF, Cnt);
+ RedCG.emitSharedOrigLValue(CGF, Cnt);
RedCG.emitAggregateType(CGF, Cnt);
// The taskgroup descriptor variable is always implicit firstprivate and
// privatized already during processing of the firstprivates.
@@ -3333,9 +4018,13 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
// initializer/combiner/finalizer.
CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
RedCG, Cnt);
- llvm::Value *ReductionsPtr =
- CGF.EmitLoadOfScalar(CGF.EmitLValue(TaskgroupDescriptors[Cnt]),
- TaskgroupDescriptors[Cnt]->getExprLoc());
+ llvm::Value *ReductionsPtr;
+ if (const Expr *TRExpr = TaskgroupDescriptors[Cnt]) {
+ ReductionsPtr = CGF.EmitLoadOfScalar(CGF.EmitLValue(TRExpr),
+ TRExpr->getExprLoc());
+ } else {
+ ReductionsPtr = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
+ }
Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
Replacement = Address(
@@ -3448,9 +4137,11 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
}
(void)TargetScope.Privatize();
// Build list of dependences.
- for (const auto *C : S.getClausesOfKind<OMPDependClause>())
- for (const Expr *IRef : C->varlists())
- Data.Dependences.emplace_back(C->getDependencyKind(), IRef);
+ for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
+ OMPTaskDataTy::DependData &DD =
+ Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
+ DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
+ }
auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD,
&InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) {
// Set proper addresses for generated private copies.
@@ -3537,6 +4228,8 @@ void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
SharedsTy, CapturedStruct, IfCond,
Data);
};
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
EmitOMPTaskBasedDirective(S, OMPD_task, BodyGen, TaskGen, Data);
}
@@ -3562,21 +4255,13 @@ void CodeGenFunction::EmitOMPTaskgroupDirective(
SmallVector<const Expr *, 4> RHSs;
OMPTaskDataTy Data;
for (const auto *C : S.getClausesOfKind<OMPTaskReductionClause>()) {
- auto IPriv = C->privates().begin();
- auto IRed = C->reduction_ops().begin();
- auto ILHS = C->lhs_exprs().begin();
- auto IRHS = C->rhs_exprs().begin();
- for (const Expr *Ref : C->varlists()) {
- Data.ReductionVars.emplace_back(Ref);
- Data.ReductionCopies.emplace_back(*IPriv);
- Data.ReductionOps.emplace_back(*IRed);
- LHSs.emplace_back(*ILHS);
- RHSs.emplace_back(*IRHS);
- std::advance(IPriv, 1);
- std::advance(IRed, 1);
- std::advance(ILHS, 1);
- std::advance(IRHS, 1);
- }
+ Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
+ Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
+ Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
+ Data.ReductionOps.append(C->reduction_ops().begin(),
+ C->reduction_ops().end());
+ LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
+ RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
}
llvm::Value *ReductionDesc =
CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(CGF, S.getBeginLoc(),
@@ -3593,6 +4278,9 @@ void CodeGenFunction::EmitOMPTaskgroupDirective(
}
void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
+ llvm::AtomicOrdering AO = S.getSingleClause<OMPFlushClause>()
+ ? llvm::AtomicOrdering::NotAtomic
+ : llvm::AtomicOrdering::AcquireRelease;
CGM.getOpenMPRuntime().emitFlush(
*this,
[&S]() -> ArrayRef<const Expr *> {
@@ -3601,7 +4289,233 @@ void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
FlushClause->varlist_end());
return llvm::None;
}(),
- S.getBeginLoc());
+ S.getBeginLoc(), AO);
+}
+
+void CodeGenFunction::EmitOMPDepobjDirective(const OMPDepobjDirective &S) {
+ const auto *DO = S.getSingleClause<OMPDepobjClause>();
+ LValue DOLVal = EmitLValue(DO->getDepobj());
+ if (const auto *DC = S.getSingleClause<OMPDependClause>()) {
+ OMPTaskDataTy::DependData Dependencies(DC->getDependencyKind(),
+ DC->getModifier());
+ Dependencies.DepExprs.append(DC->varlist_begin(), DC->varlist_end());
+ Address DepAddr = CGM.getOpenMPRuntime().emitDepobjDependClause(
+ *this, Dependencies, DC->getBeginLoc());
+ EmitStoreOfScalar(DepAddr.getPointer(), DOLVal);
+ return;
+ }
+ if (const auto *DC = S.getSingleClause<OMPDestroyClause>()) {
+ CGM.getOpenMPRuntime().emitDestroyClause(*this, DOLVal, DC->getBeginLoc());
+ return;
+ }
+ if (const auto *UC = S.getSingleClause<OMPUpdateClause>()) {
+ CGM.getOpenMPRuntime().emitUpdateClause(
+ *this, DOLVal, UC->getDependencyKind(), UC->getBeginLoc());
+ return;
+ }
+}
+
+void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) {
+ if (!OMPParentLoopDirectiveForScan)
+ return;
+ const OMPExecutableDirective &ParentDir = *OMPParentLoopDirectiveForScan;
+ bool IsInclusive = S.hasClausesOfKind<OMPInclusiveClause>();
+ SmallVector<const Expr *, 4> Shareds;
+ SmallVector<const Expr *, 4> Privates;
+ SmallVector<const Expr *, 4> LHSs;
+ SmallVector<const Expr *, 4> RHSs;
+ SmallVector<const Expr *, 4> ReductionOps;
+ SmallVector<const Expr *, 4> CopyOps;
+ SmallVector<const Expr *, 4> CopyArrayTemps;
+ SmallVector<const Expr *, 4> CopyArrayElems;
+ for (const auto *C : ParentDir.getClausesOfKind<OMPReductionClause>()) {
+ if (C->getModifier() != OMPC_REDUCTION_inscan)
+ continue;
+ Shareds.append(C->varlist_begin(), C->varlist_end());
+ Privates.append(C->privates().begin(), C->privates().end());
+ LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
+ RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
+ ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
+ CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
+ CopyArrayTemps.append(C->copy_array_temps().begin(),
+ C->copy_array_temps().end());
+ CopyArrayElems.append(C->copy_array_elems().begin(),
+ C->copy_array_elems().end());
+ }
+ if (ParentDir.getDirectiveKind() == OMPD_simd ||
+ (getLangOpts().OpenMPSimd &&
+ isOpenMPSimdDirective(ParentDir.getDirectiveKind()))) {
+ // For simd directive and simd-based directives in simd only mode, use the
+ // following codegen:
+ // int x = 0;
+ // #pragma omp simd reduction(inscan, +: x)
+ // for (..) {
+ // <first part>
+ // #pragma omp scan inclusive(x)
+ // <second part>
+ // }
+ // is transformed to:
+ // int x = 0;
+ // for (..) {
+ // int x_priv = 0;
+ // <first part>
+ // x = x_priv + x;
+ // x_priv = x;
+ // <second part>
+ // }
+ // and
+ // int x = 0;
+ // #pragma omp simd reduction(inscan, +: x)
+ // for (..) {
+ // <first part>
+ // #pragma omp scan exclusive(x)
+ // <second part>
+ // }
+ // to
+ // int x = 0;
+ // for (..) {
+ // int x_priv = 0;
+ // <second part>
+ // int temp = x;
+ // x = x_priv + x;
+ // x_priv = temp;
+ // <first part>
+ // }
+ llvm::BasicBlock *OMPScanReduce = createBasicBlock("omp.inscan.reduce");
+ EmitBranch(IsInclusive
+ ? OMPScanReduce
+ : BreakContinueStack.back().ContinueBlock.getBlock());
+ EmitBlock(OMPScanDispatch);
+ {
+ // New scope for correct construction/destruction of temp variables for
+ // exclusive scan.
+ LexicalScope Scope(*this, S.getSourceRange());
+ EmitBranch(IsInclusive ? OMPBeforeScanBlock : OMPAfterScanBlock);
+ EmitBlock(OMPScanReduce);
+ if (!IsInclusive) {
+ // Create temp var and copy LHS value to this temp value.
+ // TMP = LHS;
+ for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
+ const Expr *PrivateExpr = Privates[I];
+ const Expr *TempExpr = CopyArrayTemps[I];
+ EmitAutoVarDecl(
+ *cast<VarDecl>(cast<DeclRefExpr>(TempExpr)->getDecl()));
+ LValue DestLVal = EmitLValue(TempExpr);
+ LValue SrcLVal = EmitLValue(LHSs[I]);
+ EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
+ SrcLVal.getAddress(*this),
+ cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
+ CopyOps[I]);
+ }
+ }
+ CGM.getOpenMPRuntime().emitReduction(
+ *this, ParentDir.getEndLoc(), Privates, LHSs, RHSs, ReductionOps,
+ {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_simd});
+ for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
+ const Expr *PrivateExpr = Privates[I];
+ LValue DestLVal;
+ LValue SrcLVal;
+ if (IsInclusive) {
+ DestLVal = EmitLValue(RHSs[I]);
+ SrcLVal = EmitLValue(LHSs[I]);
+ } else {
+ const Expr *TempExpr = CopyArrayTemps[I];
+ DestLVal = EmitLValue(RHSs[I]);
+ SrcLVal = EmitLValue(TempExpr);
+ }
+ EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
+ SrcLVal.getAddress(*this),
+ cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
+ CopyOps[I]);
+ }
+ }
+ EmitBranch(IsInclusive ? OMPAfterScanBlock : OMPBeforeScanBlock);
+ OMPScanExitBlock = IsInclusive
+ ? BreakContinueStack.back().ContinueBlock.getBlock()
+ : OMPScanReduce;
+ EmitBlock(OMPAfterScanBlock);
+ return;
+ }
+ if (!IsInclusive) {
+ EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
+ EmitBlock(OMPScanExitBlock);
+ }
+ if (OMPFirstScanLoop) {
+ // Emit buffer[i] = red; at the end of the input phase.
+ const auto *IVExpr = cast<OMPLoopDirective>(ParentDir)
+ .getIterationVariable()
+ ->IgnoreParenImpCasts();
+ LValue IdxLVal = EmitLValue(IVExpr);
+ llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc());
+ IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false);
+ for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
+ const Expr *PrivateExpr = Privates[I];
+ const Expr *OrigExpr = Shareds[I];
+ const Expr *CopyArrayElem = CopyArrayElems[I];
+ OpaqueValueMapping IdxMapping(
+ *this,
+ cast<OpaqueValueExpr>(
+ cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
+ RValue::get(IdxVal));
+ LValue DestLVal = EmitLValue(CopyArrayElem);
+ LValue SrcLVal = EmitLValue(OrigExpr);
+ EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
+ SrcLVal.getAddress(*this),
+ cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
+ CopyOps[I]);
+ }
+ }
+ EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
+ if (IsInclusive) {
+ EmitBlock(OMPScanExitBlock);
+ EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
+ }
+ EmitBlock(OMPScanDispatch);
+ if (!OMPFirstScanLoop) {
+ // Emit red = buffer[i]; at the entrance to the scan phase.
+ const auto *IVExpr = cast<OMPLoopDirective>(ParentDir)
+ .getIterationVariable()
+ ->IgnoreParenImpCasts();
+ LValue IdxLVal = EmitLValue(IVExpr);
+ llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc());
+ IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false);
+ llvm::BasicBlock *ExclusiveExitBB = nullptr;
+ if (!IsInclusive) {
+ llvm::BasicBlock *ContBB = createBasicBlock("omp.exclusive.dec");
+ ExclusiveExitBB = createBasicBlock("omp.exclusive.copy.exit");
+ llvm::Value *Cmp = Builder.CreateIsNull(IdxVal);
+ Builder.CreateCondBr(Cmp, ExclusiveExitBB, ContBB);
+ EmitBlock(ContBB);
+ // Use idx - 1 iteration for exclusive scan.
+ IdxVal = Builder.CreateNUWSub(IdxVal, llvm::ConstantInt::get(SizeTy, 1));
+ }
+ for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
+ const Expr *PrivateExpr = Privates[I];
+ const Expr *OrigExpr = Shareds[I];
+ const Expr *CopyArrayElem = CopyArrayElems[I];
+ OpaqueValueMapping IdxMapping(
+ *this,
+ cast<OpaqueValueExpr>(
+ cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
+ RValue::get(IdxVal));
+ LValue SrcLVal = EmitLValue(CopyArrayElem);
+ LValue DestLVal = EmitLValue(OrigExpr);
+ EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
+ SrcLVal.getAddress(*this),
+ cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
+ CopyOps[I]);
+ }
+ if (!IsInclusive) {
+ EmitBlock(ExclusiveExitBB);
+ }
+ }
+ EmitBranch((OMPFirstScanLoop == IsInclusive) ? OMPBeforeScanBlock
+ : OMPAfterScanBlock);
+ EmitBlock(OMPAfterScanBlock);
}
void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
@@ -3790,7 +4704,7 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
});
EmitBlock(LoopExit.getBlock());
// Tell the runtime we are done.
- RT.emitForStaticFinish(*this, S.getBeginLoc(), S.getDirectiveKind());
+ RT.emitForStaticFinish(*this, S.getEndLoc(), S.getDirectiveKind());
} else {
// Emit the outer loop, which requests its work chunk [LB..UB] from
// runtime and runs the inner loop to process it.
@@ -3843,11 +4757,12 @@ void CodeGenFunction::EmitOMPDistributeDirective(
}
static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
- const CapturedStmt *S) {
+ const CapturedStmt *S,
+ SourceLocation Loc) {
CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
CGF.CapturedStmtInfo = &CapStmtInfo;
- llvm::Function *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S);
+ llvm::Function *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S, Loc);
Fn->setDoesNotRecurse();
return Fn;
}
@@ -3867,7 +4782,8 @@ void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
if (C) {
llvm::SmallVector<llvm::Value *, 16> CapturedVars;
CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
- llvm::Function *OutlinedFn = emitOutlinedOrderedFunction(CGM, CS);
+ llvm::Function *OutlinedFn =
+ emitOutlinedOrderedFunction(CGM, CS, S.getBeginLoc());
CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(),
OutlinedFn, CapturedVars);
} else {
@@ -3918,16 +4834,22 @@ convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
return ComplexVal;
}
-static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst,
+static void emitSimpleAtomicStore(CodeGenFunction &CGF, llvm::AtomicOrdering AO,
LValue LVal, RValue RVal) {
- if (LVal.isGlobalReg()) {
+ if (LVal.isGlobalReg())
CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
- } else {
- CGF.EmitAtomicStore(RVal, LVal,
- IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
- : llvm::AtomicOrdering::Monotonic,
- LVal.isVolatile(), /*isInit=*/false);
- }
+ else
+ CGF.EmitAtomicStore(RVal, LVal, AO, LVal.isVolatile(), /*isInit=*/false);
+}
+
+static RValue emitSimpleAtomicLoad(CodeGenFunction &CGF,
+ llvm::AtomicOrdering AO, LValue LVal,
+ SourceLocation Loc) {
+ if (LVal.isGlobalReg())
+ return CGF.EmitLoadOfLValue(LVal, Loc);
+ return CGF.EmitAtomicLoad(
+ LVal, Loc, llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO),
+ LVal.isVolatile());
}
void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal,
@@ -3948,7 +4870,7 @@ void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal,
}
}
-static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
+static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, llvm::AtomicOrdering AO,
const Expr *X, const Expr *V,
SourceLocation Loc) {
// v = x;
@@ -3956,34 +4878,54 @@ static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
LValue XLValue = CGF.EmitLValue(X);
LValue VLValue = CGF.EmitLValue(V);
- RValue Res = XLValue.isGlobalReg()
- ? CGF.EmitLoadOfLValue(XLValue, Loc)
- : CGF.EmitAtomicLoad(
- XLValue, Loc,
- IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
- : llvm::AtomicOrdering::Monotonic,
- XLValue.isVolatile());
- // OpenMP, 2.12.6, atomic Construct
- // Any atomic construct with a seq_cst clause forces the atomically
- // performed operation to include an implicit flush operation without a
- // list.
- if (IsSeqCst)
- CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
+ RValue Res = emitSimpleAtomicLoad(CGF, AO, XLValue, Loc);
+ // OpenMP, 2.17.7, atomic Construct
+ // If the read or capture clause is specified and the acquire, acq_rel, or
+ // seq_cst clause is specified then the strong flush on exit from the atomic
+ // operation is also an acquire flush.
+ switch (AO) {
+ case llvm::AtomicOrdering::Acquire:
+ case llvm::AtomicOrdering::AcquireRelease:
+ case llvm::AtomicOrdering::SequentiallyConsistent:
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
+ llvm::AtomicOrdering::Acquire);
+ break;
+ case llvm::AtomicOrdering::Monotonic:
+ case llvm::AtomicOrdering::Release:
+ break;
+ case llvm::AtomicOrdering::NotAtomic:
+ case llvm::AtomicOrdering::Unordered:
+ llvm_unreachable("Unexpected ordering.");
+ }
CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc);
+ CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V);
}
-static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
- const Expr *X, const Expr *E,
- SourceLocation Loc) {
+static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF,
+ llvm::AtomicOrdering AO, const Expr *X,
+ const Expr *E, SourceLocation Loc) {
// x = expr;
assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
- emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
- // OpenMP, 2.12.6, atomic Construct
- // Any atomic construct with a seq_cst clause forces the atomically
- // performed operation to include an implicit flush operation without a
- // list.
- if (IsSeqCst)
- CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
+ emitSimpleAtomicStore(CGF, AO, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
+ CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
+ // OpenMP, 2.17.7, atomic Construct
+ // If the write, update, or capture clause is specified and the release,
+ // acq_rel, or seq_cst clause is specified then the strong flush on entry to
+ // the atomic operation is also a release flush.
+ switch (AO) {
+ case llvm::AtomicOrdering::Release:
+ case llvm::AtomicOrdering::AcquireRelease:
+ case llvm::AtomicOrdering::SequentiallyConsistent:
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
+ llvm::AtomicOrdering::Release);
+ break;
+ case llvm::AtomicOrdering::Acquire:
+ case llvm::AtomicOrdering::Monotonic:
+ break;
+ case llvm::AtomicOrdering::NotAtomic:
+ case llvm::AtomicOrdering::Unordered:
+ llvm_unreachable("Unexpected ordering.");
+ }
}
static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
@@ -4104,10 +5046,10 @@ std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
return Res;
}
-static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
- const Expr *X, const Expr *E,
- const Expr *UE, bool IsXLHSInRHSPart,
- SourceLocation Loc) {
+static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF,
+ llvm::AtomicOrdering AO, const Expr *X,
+ const Expr *E, const Expr *UE,
+ bool IsXLHSInRHSPart, SourceLocation Loc) {
assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
"Update expr in 'atomic update' must be a binary operator.");
const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
@@ -4120,9 +5062,6 @@ static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
LValue XLValue = CGF.EmitLValue(X);
RValue ExprRValue = CGF.EmitAnyExpr(E);
- llvm::AtomicOrdering AO = IsSeqCst
- ? llvm::AtomicOrdering::SequentiallyConsistent
- : llvm::AtomicOrdering::Monotonic;
const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
@@ -4134,12 +5073,25 @@ static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
};
(void)CGF.EmitOMPAtomicSimpleUpdateExpr(
XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
- // OpenMP, 2.12.6, atomic Construct
- // Any atomic construct with a seq_cst clause forces the atomically
- // performed operation to include an implicit flush operation without a
- // list.
- if (IsSeqCst)
- CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
+ CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
+ // OpenMP, 2.17.7, atomic Construct
+ // If the write, update, or capture clause is specified and the release,
+ // acq_rel, or seq_cst clause is specified then the strong flush on entry to
+ // the atomic operation is also a release flush.
+ switch (AO) {
+ case llvm::AtomicOrdering::Release:
+ case llvm::AtomicOrdering::AcquireRelease:
+ case llvm::AtomicOrdering::SequentiallyConsistent:
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
+ llvm::AtomicOrdering::Release);
+ break;
+ case llvm::AtomicOrdering::Acquire:
+ case llvm::AtomicOrdering::Monotonic:
+ break;
+ case llvm::AtomicOrdering::NotAtomic:
+ case llvm::AtomicOrdering::Unordered:
+ llvm_unreachable("Unexpected ordering.");
+ }
}
static RValue convertToType(CodeGenFunction &CGF, RValue Value,
@@ -4159,7 +5111,8 @@ static RValue convertToType(CodeGenFunction &CGF, RValue Value,
llvm_unreachable("Must be a scalar or complex.");
}
-static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
+static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF,
+ llvm::AtomicOrdering AO,
bool IsPostfixUpdate, const Expr *V,
const Expr *X, const Expr *E,
const Expr *UE, bool IsXLHSInRHSPart,
@@ -4170,9 +5123,6 @@ static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
LValue VLValue = CGF.EmitLValue(V);
LValue XLValue = CGF.EmitLValue(X);
RValue ExprRValue = CGF.EmitAnyExpr(E);
- llvm::AtomicOrdering AO = IsSeqCst
- ? llvm::AtomicOrdering::SequentiallyConsistent
- : llvm::AtomicOrdering::Monotonic;
QualType NewVValType;
if (UE) {
// 'x' is updated with some additional value.
@@ -4200,6 +5150,7 @@ static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
};
auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
+ CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
if (Res.first) {
// 'atomicrmw' instruction was generated.
if (IsPostfixUpdate) {
@@ -4226,6 +5177,7 @@ static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
Loc, Gen);
+ CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
if (Res.first) {
// 'atomicrmw' instruction was generated.
NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
@@ -4233,32 +5185,54 @@ static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
}
// Emit post-update store to 'v' of old/new 'x' value.
CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc);
- // OpenMP, 2.12.6, atomic Construct
- // Any atomic construct with a seq_cst clause forces the atomically
- // performed operation to include an implicit flush operation without a
- // list.
- if (IsSeqCst)
- CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
+ CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V);
+ // OpenMP, 2.17.7, atomic Construct
+ // If the write, update, or capture clause is specified and the release,
+ // acq_rel, or seq_cst clause is specified then the strong flush on entry to
+ // the atomic operation is also a release flush.
+ // If the read or capture clause is specified and the acquire, acq_rel, or
+ // seq_cst clause is specified then the strong flush on exit from the atomic
+ // operation is also an acquire flush.
+ switch (AO) {
+ case llvm::AtomicOrdering::Release:
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
+ llvm::AtomicOrdering::Release);
+ break;
+ case llvm::AtomicOrdering::Acquire:
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
+ llvm::AtomicOrdering::Acquire);
+ break;
+ case llvm::AtomicOrdering::AcquireRelease:
+ case llvm::AtomicOrdering::SequentiallyConsistent:
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
+ llvm::AtomicOrdering::AcquireRelease);
+ break;
+ case llvm::AtomicOrdering::Monotonic:
+ break;
+ case llvm::AtomicOrdering::NotAtomic:
+ case llvm::AtomicOrdering::Unordered:
+ llvm_unreachable("Unexpected ordering.");
+ }
}
static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
- bool IsSeqCst, bool IsPostfixUpdate,
+ llvm::AtomicOrdering AO, bool IsPostfixUpdate,
const Expr *X, const Expr *V, const Expr *E,
const Expr *UE, bool IsXLHSInRHSPart,
SourceLocation Loc) {
switch (Kind) {
case OMPC_read:
- emitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
+ emitOMPAtomicReadExpr(CGF, AO, X, V, Loc);
break;
case OMPC_write:
- emitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc);
+ emitOMPAtomicWriteExpr(CGF, AO, X, E, Loc);
break;
case OMPC_unknown:
case OMPC_update:
- emitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
+ emitOMPAtomicUpdateExpr(CGF, AO, X, E, UE, IsXLHSInRHSPart, Loc);
break;
case OMPC_capture:
- emitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE,
+ emitOMPAtomicCaptureExpr(CGF, AO, IsPostfixUpdate, V, X, E, UE,
IsXLHSInRHSPart, Loc);
break;
case OMPC_if:
@@ -4277,12 +5251,17 @@ static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
case OMPC_collapse:
case OMPC_default:
case OMPC_seq_cst:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed:
case OMPC_shared:
case OMPC_linear:
case OMPC_aligned:
case OMPC_copyin:
case OMPC_copyprivate:
case OMPC_flush:
+ case OMPC_depobj:
case OMPC_proc_bind:
case OMPC_schedule:
case OMPC_ordered:
@@ -4308,6 +5287,7 @@ static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
case OMPC_to:
case OMPC_from:
case OMPC_use_device_ptr:
+ case OMPC_use_device_addr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
@@ -4317,38 +5297,76 @@ static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
case OMPC_device_type:
case OMPC_match:
case OMPC_nontemporal:
+ case OMPC_order:
+ case OMPC_destroy:
+ case OMPC_detach:
+ case OMPC_inclusive:
+ case OMPC_exclusive:
+ case OMPC_uses_allocators:
+ case OMPC_affinity:
+ default:
llvm_unreachable("Clause is not allowed in 'omp atomic'.");
}
}
void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
- bool IsSeqCst = S.getSingleClause<OMPSeqCstClause>();
+ llvm::AtomicOrdering AO = llvm::AtomicOrdering::Monotonic;
+ bool MemOrderingSpecified = false;
+ if (S.getSingleClause<OMPSeqCstClause>()) {
+ AO = llvm::AtomicOrdering::SequentiallyConsistent;
+ MemOrderingSpecified = true;
+ } else if (S.getSingleClause<OMPAcqRelClause>()) {
+ AO = llvm::AtomicOrdering::AcquireRelease;
+ MemOrderingSpecified = true;
+ } else if (S.getSingleClause<OMPAcquireClause>()) {
+ AO = llvm::AtomicOrdering::Acquire;
+ MemOrderingSpecified = true;
+ } else if (S.getSingleClause<OMPReleaseClause>()) {
+ AO = llvm::AtomicOrdering::Release;
+ MemOrderingSpecified = true;
+ } else if (S.getSingleClause<OMPRelaxedClause>()) {
+ AO = llvm::AtomicOrdering::Monotonic;
+ MemOrderingSpecified = true;
+ }
OpenMPClauseKind Kind = OMPC_unknown;
for (const OMPClause *C : S.clauses()) {
- // Find first clause (skip seq_cst clause, if it is first).
- if (C->getClauseKind() != OMPC_seq_cst) {
+ // Find first clause (skip seq_cst|acq_rel|aqcuire|release|relaxed clause,
+ // if it is first).
+ if (C->getClauseKind() != OMPC_seq_cst &&
+ C->getClauseKind() != OMPC_acq_rel &&
+ C->getClauseKind() != OMPC_acquire &&
+ C->getClauseKind() != OMPC_release &&
+ C->getClauseKind() != OMPC_relaxed) {
Kind = C->getClauseKind();
break;
}
}
-
- const Stmt *CS = S.getInnermostCapturedStmt()->IgnoreContainers();
- if (const auto *FE = dyn_cast<FullExpr>(CS))
- enterFullExpression(FE);
- // Processing for statements under 'atomic capture'.
- if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
- for (const Stmt *C : Compound->body()) {
- if (const auto *FE = dyn_cast<FullExpr>(C))
- enterFullExpression(FE);
+ if (!MemOrderingSpecified) {
+ llvm::AtomicOrdering DefaultOrder =
+ CGM.getOpenMPRuntime().getDefaultMemoryOrdering();
+ if (DefaultOrder == llvm::AtomicOrdering::Monotonic ||
+ DefaultOrder == llvm::AtomicOrdering::SequentiallyConsistent ||
+ (DefaultOrder == llvm::AtomicOrdering::AcquireRelease &&
+ Kind == OMPC_capture)) {
+ AO = DefaultOrder;
+ } else if (DefaultOrder == llvm::AtomicOrdering::AcquireRelease) {
+ if (Kind == OMPC_unknown || Kind == OMPC_update || Kind == OMPC_write) {
+ AO = llvm::AtomicOrdering::Release;
+ } else if (Kind == OMPC_read) {
+ assert(Kind == OMPC_read && "Unexpected atomic kind.");
+ AO = llvm::AtomicOrdering::Acquire;
+ }
}
}
- auto &&CodeGen = [&S, Kind, IsSeqCst, CS](CodeGenFunction &CGF,
+ const Stmt *CS = S.getInnermostCapturedStmt()->IgnoreContainers();
+
+ auto &&CodeGen = [&S, Kind, AO, CS](CodeGenFunction &CGF,
PrePostActionTy &) {
CGF.EmitStopPoint(CS);
- emitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
- S.getV(), S.getExpr(), S.getUpdateExpr(),
- S.isXLHSInRHSPart(), S.getBeginLoc());
+ emitOMPAtomicExpr(CGF, Kind, AO, S.isPostfixUpdate(), S.getX(), S.getV(),
+ S.getExpr(), S.getUpdateExpr(), S.isXLHSInRHSPart(),
+ S.getBeginLoc());
};
OMPLexicalScope Scope(*this, S, OMPD_unknown);
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
@@ -4370,6 +5388,8 @@ static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
return;
}
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(CGF, S);
llvm::Function *Fn = nullptr;
llvm::Constant *FnID = nullptr;
@@ -4384,9 +5404,10 @@ static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
}
// Check if we have any device clause associated with the directive.
- const Expr *Device = nullptr;
+ llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device(
+ nullptr, OMPC_DEVICE_unknown);
if (auto *C = S.getSingleClause<OMPDeviceClause>())
- Device = C->getDevice();
+ Device.setPointerAndInt(C->getDevice(), C->getModifier());
// Check if we have an if clause whose conditional always evaluates to false
// or if we do not have any targets specified. If so the target region is not
@@ -4856,7 +5877,8 @@ void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
break;
}
}
- if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) {
+ if (CGM.getLangOpts().OpenMPIRBuilder) {
+ llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
// TODO: This check is necessary as we only generate `omp parallel` through
// the OpenMPIRBuilder for now.
if (S.getCancelRegion() == OMPD_parallel) {
@@ -4865,7 +5887,7 @@ void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
IfCondition = EmitScalarExpr(IfCond,
/*IgnoreResultAssign=*/true);
return Builder.restoreIP(
- OMPBuilder->CreateCancel(Builder, IfCondition, S.getCancelRegion()));
+ OMPBuilder.CreateCancel(Builder, IfCondition, S.getCancelRegion()));
}
}
@@ -4876,7 +5898,8 @@ void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
CodeGenFunction::JumpDest
CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
if (Kind == OMPD_parallel || Kind == OMPD_task ||
- Kind == OMPD_target_parallel)
+ Kind == OMPD_target_parallel || Kind == OMPD_taskloop ||
+ Kind == OMPD_master_taskloop || Kind == OMPD_parallel_master_taskloop)
return ReturnBlock;
assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for ||
@@ -4888,9 +5911,8 @@ CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
}
void CodeGenFunction::EmitOMPUseDevicePtrClause(
- const OMPClause &NC, OMPPrivateScope &PrivateScope,
+ const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
- const auto &C = cast<OMPUseDevicePtrClause>(NC);
auto OrigVarIt = C.varlist_begin();
auto InitIt = C.inits().begin();
for (const Expr *PvtVarIt : C.private_copies()) {
@@ -4951,6 +5973,60 @@ void CodeGenFunction::EmitOMPUseDevicePtrClause(
}
}
+static const VarDecl *getBaseDecl(const Expr *Ref) {
+ const Expr *Base = Ref->IgnoreParenImpCasts();
+ while (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Base))
+ Base = OASE->getBase()->IgnoreParenImpCasts();
+ while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Base))
+ Base = ASE->getBase()->IgnoreParenImpCasts();
+ return cast<VarDecl>(cast<DeclRefExpr>(Base)->getDecl());
+}
+
+void CodeGenFunction::EmitOMPUseDeviceAddrClause(
+ const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
+ const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
+ llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
+ for (const Expr *Ref : C.varlists()) {
+ const VarDecl *OrigVD = getBaseDecl(Ref);
+ if (!Processed.insert(OrigVD).second)
+ continue;
+ // In order to identify the right initializer we need to match the
+ // declaration used by the mapping logic. In some cases we may get
+ // OMPCapturedExprDecl that refers to the original declaration.
+ const ValueDecl *MatchingVD = OrigVD;
+ if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
+ // OMPCapturedExprDecl are used to privative fields of the current
+ // structure.
+ const auto *ME = cast<MemberExpr>(OED->getInit());
+ assert(isa<CXXThisExpr>(ME->getBase()) &&
+ "Base should be the current struct!");
+ MatchingVD = ME->getMemberDecl();
+ }
+
+ // If we don't have information about the current list item, move on to
+ // the next one.
+ auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
+ if (InitAddrIt == CaptureDeviceAddrMap.end())
+ continue;
+
+ Address PrivAddr = InitAddrIt->getSecond();
+ // For declrefs and variable length array need to load the pointer for
+ // correct mapping, since the pointer to the data was passed to the runtime.
+ if (isa<DeclRefExpr>(Ref->IgnoreParenImpCasts()) ||
+ MatchingVD->getType()->isArrayType())
+ PrivAddr =
+ EmitLoadOfPointer(PrivAddr, getContext()
+ .getPointerType(OrigVD->getType())
+ ->castAs<PointerType>());
+ llvm::Type *RealTy =
+ ConvertTypeForMem(OrigVD->getType().getNonReferenceType())
+ ->getPointerTo();
+ PrivAddr = Builder.CreatePointerBitCastOrAddrSpaceCast(PrivAddr, RealTy);
+
+ (void)PrivateScope.addPrivate(OrigVD, [PrivAddr]() { return PrivAddr; });
+ }
+}
+
// Generate the instructions for '#pragma omp target data' directive.
void CodeGenFunction::EmitOMPTargetDataDirective(
const OMPTargetDataDirective &S) {
@@ -4995,9 +6071,13 @@ void CodeGenFunction::EmitOMPTargetDataDirective(
for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope,
Info.CaptureDeviceAddrMap);
+ for (const auto *C : S.getClausesOfKind<OMPUseDeviceAddrClause>())
+ CGF.EmitOMPUseDeviceAddrClause(*C, PrivateScope,
+ Info.CaptureDeviceAddrMap);
(void)PrivateScope.Privatize();
RCG(CGF);
} else {
+ OMPLexicalScope Scope(CGF, S, OMPD_unknown);
RCG(CGF);
}
};
@@ -5222,7 +6302,11 @@ void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
assert(isOpenMPTaskLoopDirective(S.getDirectiveKind()));
// Emit outlined function for task construct.
const CapturedStmt *CS = S.getCapturedStmt(OMPD_taskloop);
- Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
+ Address CapturedStruct = Address::invalid();
+ {
+ OMPLexicalScope Scope(*this, S, OMPD_taskloop, /*EmitPreInitStmt=*/false);
+ CapturedStruct = GenerateCapturedStmtArgument(*CS);
+ }
QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
const Expr *IfCond = nullptr;
for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
@@ -5322,8 +6406,8 @@ void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
CGF.EmitOMPInnerLoop(
S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
[&S](CodeGenFunction &CGF) {
- CGF.EmitOMPLoopBody(S, CodeGenFunction::JumpDest());
- CGF.EmitStopPoint(&S);
+ emitOMPLoopBodyWithStopPoint(CGF, S,
+ CodeGenFunction::JumpDest());
},
[](CodeGenFunction &) {});
});
@@ -5376,11 +6460,15 @@ void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
}
void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
EmitOMPTaskLoopBasedDirective(S);
}
void CodeGenFunction::EmitOMPTaskLoopSimdDirective(
const OMPTaskLoopSimdDirective &S) {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
OMPLexicalScope Scope(*this, S);
EmitOMPTaskLoopBasedDirective(S);
}
@@ -5391,6 +6479,8 @@ void CodeGenFunction::EmitOMPMasterTaskLoopDirective(
Action.Enter(CGF);
EmitOMPTaskLoopBasedDirective(S);
};
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
OMPLexicalScope Scope(*this, S, llvm::None, /*EmitPreInitStmt=*/false);
CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc());
}
@@ -5401,6 +6491,8 @@ void CodeGenFunction::EmitOMPMasterTaskLoopSimdDirective(
Action.Enter(CGF);
EmitOMPTaskLoopBasedDirective(S);
};
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
OMPLexicalScope Scope(*this, S);
CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc());
}
@@ -5413,10 +6505,12 @@ void CodeGenFunction::EmitOMPParallelMasterTaskLoopDirective(
Action.Enter(CGF);
CGF.EmitOMPTaskLoopBasedDirective(S);
};
- OMPLexicalScope Scope(CGF, S, llvm::None, /*EmitPreInitStmt=*/false);
+ OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false);
CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen,
S.getBeginLoc());
};
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop, CodeGen,
emitEmptyBoundParameters);
}
@@ -5433,6 +6527,8 @@ void CodeGenFunction::EmitOMPParallelMasterTaskLoopSimdDirective(
CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen,
S.getBeginLoc());
};
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop_simd, CodeGen,
emitEmptyBoundParameters);
}
@@ -5461,19 +6557,43 @@ void CodeGenFunction::EmitOMPTargetUpdateDirective(
void CodeGenFunction::EmitSimpleOMPExecutableDirective(
const OMPExecutableDirective &D) {
+ if (const auto *SD = dyn_cast<OMPScanDirective>(&D)) {
+ EmitOMPScanDirective(*SD);
+ return;
+ }
if (!D.hasAssociatedStmt() || !D.getAssociatedStmt())
return;
auto &&CodeGen = [&D](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ OMPPrivateScope GlobalsScope(CGF);
+ if (isOpenMPTaskingDirective(D.getDirectiveKind())) {
+ // Capture global firstprivates to avoid crash.
+ for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
+ for (const Expr *Ref : C->varlists()) {
+ const auto *DRE = cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
+ if (!DRE)
+ continue;
+ const auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
+ if (!VD || VD->hasLocalStorage())
+ continue;
+ if (!CGF.LocalDeclMap.count(VD)) {
+ LValue GlobLVal = CGF.EmitLValue(Ref);
+ GlobalsScope.addPrivate(
+ VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); });
+ }
+ }
+ }
+ }
if (isOpenMPSimdDirective(D.getDirectiveKind())) {
+ (void)GlobalsScope.Privatize();
+ ParentLoopDirectiveForScanRegion ScanRegion(CGF, D);
emitOMPSimdRegion(CGF, cast<OMPLoopDirective>(D), Action);
} else {
- OMPPrivateScope LoopGlobals(CGF);
if (const auto *LD = dyn_cast<OMPLoopDirective>(&D)) {
for (const Expr *E : LD->counters()) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) {
LValue GlobLVal = CGF.EmitLValue(E);
- LoopGlobals.addPrivate(
+ GlobalsScope.addPrivate(
VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); });
}
if (isa<OMPCapturedExprDecl>(VD)) {
@@ -5497,14 +6617,20 @@ void CodeGenFunction::EmitSimpleOMPExecutableDirective(
}
}
}
- LoopGlobals.Privatize();
+ (void)GlobalsScope.Privatize();
CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt());
}
};
- OMPSimdLexicalScope Scope(*this, D);
- CGM.getOpenMPRuntime().emitInlinedDirective(
- *this,
- isOpenMPSimdDirective(D.getDirectiveKind()) ? OMPD_simd
- : D.getDirectiveKind(),
- CodeGen);
+ {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, D);
+ OMPSimdLexicalScope Scope(*this, D);
+ CGM.getOpenMPRuntime().emitInlinedDirective(
+ *this,
+ isOpenMPSimdDirective(D.getDirectiveKind()) ? OMPD_simd
+ : D.getDirectiveKind(),
+ CodeGen);
+ }
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, D);
}
diff --git a/clang/lib/CodeGen/CGVTables.cpp b/clang/lib/CodeGen/CGVTables.cpp
index 59631e802373..65b3b0c5f53d 100644
--- a/clang/lib/CodeGen/CGVTables.cpp
+++ b/clang/lib/CodeGen/CGVTables.cpp
@@ -363,8 +363,10 @@ void CodeGenFunction::EmitCallAndReturnForThunk(llvm::FunctionCallee Callee,
: FPT->getReturnType();
ReturnValueSlot Slot;
if (!ResultType->isVoidType() &&
- CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect)
- Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());
+ (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect ||
+ hasAggregateEvaluationKind(ResultType)))
+ Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified(),
+ /*IsUnused=*/false, /*IsExternallyDestructed=*/true);
// Now emit our call.
llvm::CallBase *CallOrInvoke;
@@ -437,7 +439,8 @@ void CodeGenFunction::EmitMustTailThunk(GlobalDecl GD,
// Finish the function to maintain CodeGenFunction invariants.
// FIXME: Don't emit unreachable code.
EmitBlock(createBasicBlock());
- FinishFunction();
+
+ FinishThunk();
}
void CodeGenFunction::generateThunk(llvm::Function *Fn,
@@ -564,7 +567,7 @@ llvm::Constant *CodeGenVTables::maybeEmitThunk(GlobalDecl GD,
CGM.SetLLVMFunctionAttributesForDefinition(GD.getDecl(), ThunkFn);
// Thunks for variadic methods are special because in general variadic
- // arguments cannot be perferctly forwarded. In the general case, clang
+ // arguments cannot be perfectly forwarded. In the general case, clang
// implements such thunks by cloning the original function body. However, for
// thunks with no return adjustment on targets that support musttail, we can
// use musttail to perfectly forward the variadic arguments.
@@ -616,29 +619,178 @@ void CodeGenVTables::EmitThunks(GlobalDecl GD) {
maybeEmitThunk(GD, Thunk, /*ForVTable=*/false);
}
-void CodeGenVTables::addVTableComponent(
- ConstantArrayBuilder &builder, const VTableLayout &layout,
- unsigned idx, llvm::Constant *rtti, unsigned &nextVTableThunkIndex) {
- auto &component = layout.vtable_components()[idx];
+void CodeGenVTables::addRelativeComponent(ConstantArrayBuilder &builder,
+ llvm::Constant *component,
+ unsigned vtableAddressPoint,
+ bool vtableHasLocalLinkage,
+ bool isCompleteDtor) const {
+ // No need to get the offset of a nullptr.
+ if (component->isNullValue())
+ return builder.add(llvm::ConstantInt::get(CGM.Int32Ty, 0));
+
+ auto *globalVal =
+ cast<llvm::GlobalValue>(component->stripPointerCastsAndAliases());
+ llvm::Module &module = CGM.getModule();
+
+ // We don't want to copy the linkage of the vtable exactly because we still
+ // want the stub/proxy to be emitted for properly calculating the offset.
+ // Examples where there would be no symbol emitted are available_externally
+ // and private linkages.
+ auto stubLinkage = vtableHasLocalLinkage ? llvm::GlobalValue::InternalLinkage
+ : llvm::GlobalValue::ExternalLinkage;
+
+ llvm::Constant *target;
+ if (auto *func = dyn_cast<llvm::Function>(globalVal)) {
+ target = getOrCreateRelativeStub(func, stubLinkage, isCompleteDtor);
+ } else {
+ llvm::SmallString<16> rttiProxyName(globalVal->getName());
+ rttiProxyName.append(".rtti_proxy");
+
+ // The RTTI component may not always be emitted in the same linkage unit as
+ // the vtable. As a general case, we can make a dso_local proxy to the RTTI
+ // that points to the actual RTTI struct somewhere. This will result in a
+ // GOTPCREL relocation when taking the relative offset to the proxy.
+ llvm::GlobalVariable *proxy = module.getNamedGlobal(rttiProxyName);
+ if (!proxy) {
+ proxy = new llvm::GlobalVariable(module, globalVal->getType(),
+ /*isConstant=*/true, stubLinkage,
+ globalVal, rttiProxyName);
+ proxy->setDSOLocal(true);
+ proxy->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ if (!proxy->hasLocalLinkage()) {
+ proxy->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ proxy->setComdat(module.getOrInsertComdat(rttiProxyName));
+ }
+ }
+ target = proxy;
+ }
- auto addOffsetConstant = [&](CharUnits offset) {
- builder.add(llvm::ConstantExpr::getIntToPtr(
- llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity()),
- CGM.Int8PtrTy));
- };
+ builder.addRelativeOffsetToPosition(CGM.Int32Ty, target,
+ /*position=*/vtableAddressPoint);
+}
+
+llvm::Function *CodeGenVTables::getOrCreateRelativeStub(
+ llvm::Function *func, llvm::GlobalValue::LinkageTypes stubLinkage,
+ bool isCompleteDtor) const {
+ // A complete object destructor can later be substituted in the vtable for an
+ // appropriate base object destructor when optimizations are enabled. This can
+ // happen for child classes that don't have their own destructor. In the case
+ // where a parent virtual destructor is not guaranteed to be in the same
+ // linkage unit as the child vtable, it's possible for an external reference
+ // for this destructor to be substituted into the child vtable, preventing it
+ // from being in rodata. If this function is a complete virtual destructor, we
+ // can just force a stub to be emitted for it.
+ if (func->isDSOLocal() && !isCompleteDtor)
+ return func;
+
+ llvm::SmallString<16> stubName(func->getName());
+ stubName.append(".stub");
+
+ // Instead of taking the offset between the vtable and virtual function
+ // directly, we emit a dso_local stub that just contains a tail call to the
+ // original virtual function and take the offset between that and the
+ // vtable. We do this because there are some cases where the original
+ // function that would've been inserted into the vtable is not dso_local
+ // which may require some kind of dynamic relocation which prevents the
+ // vtable from being readonly. On x86_64, taking the offset between the
+ // function and the vtable gets lowered to the offset between the PLT entry
+ // for the function and the vtable which gives us a PLT32 reloc. On AArch64,
+ // right now only CALL26 and JUMP26 instructions generate PLT relocations,
+ // so we manifest them with stubs that are just jumps to the original
+ // function.
+ auto &module = CGM.getModule();
+ llvm::Function *stub = module.getFunction(stubName);
+ if (stub) {
+ assert(stub->isDSOLocal() &&
+ "The previous definition of this stub should've been dso_local.");
+ return stub;
+ }
+
+ stub = llvm::Function::Create(func->getFunctionType(), stubLinkage, stubName,
+ module);
+
+  // Propagate function attributes.
+ stub->setAttributes(func->getAttributes());
+
+ stub->setDSOLocal(true);
+ stub->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ if (!stub->hasLocalLinkage()) {
+ stub->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ stub->setComdat(module.getOrInsertComdat(stubName));
+ }
+
+ // Fill the stub with a tail call that will be optimized.
+ llvm::BasicBlock *block =
+ llvm::BasicBlock::Create(module.getContext(), "entry", stub);
+ llvm::IRBuilder<> block_builder(block);
+ llvm::SmallVector<llvm::Value *, 8> args;
+ for (auto &arg : stub->args())
+ args.push_back(&arg);
+ llvm::CallInst *call = block_builder.CreateCall(func, args);
+ call->setAttributes(func->getAttributes());
+ call->setTailCall();
+ if (call->getType()->isVoidTy())
+ block_builder.CreateRetVoid();
+ else
+ block_builder.CreateRet(call);
+
+ return stub;
+}
+
+bool CodeGenVTables::useRelativeLayout() const {
+ return CGM.getTarget().getCXXABI().isItaniumFamily() &&
+ CGM.getItaniumVTableContext().isRelativeLayout();
+}
+
+llvm::Type *CodeGenVTables::getVTableComponentType() const {
+ if (useRelativeLayout())
+ return CGM.Int32Ty;
+ return CGM.Int8PtrTy;
+}
+
+static void AddPointerLayoutOffset(const CodeGenModule &CGM,
+ ConstantArrayBuilder &builder,
+ CharUnits offset) {
+ builder.add(llvm::ConstantExpr::getIntToPtr(
+ llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity()),
+ CGM.Int8PtrTy));
+}
+
+static void AddRelativeLayoutOffset(const CodeGenModule &CGM,
+ ConstantArrayBuilder &builder,
+ CharUnits offset) {
+ builder.add(llvm::ConstantInt::get(CGM.Int32Ty, offset.getQuantity()));
+}
+
+void CodeGenVTables::addVTableComponent(ConstantArrayBuilder &builder,
+ const VTableLayout &layout,
+ unsigned componentIndex,
+ llvm::Constant *rtti,
+ unsigned &nextVTableThunkIndex,
+ unsigned vtableAddressPoint,
+ bool vtableHasLocalLinkage) {
+ auto &component = layout.vtable_components()[componentIndex];
+
+ auto addOffsetConstant =
+ useRelativeLayout() ? AddRelativeLayoutOffset : AddPointerLayoutOffset;
switch (component.getKind()) {
case VTableComponent::CK_VCallOffset:
- return addOffsetConstant(component.getVCallOffset());
+ return addOffsetConstant(CGM, builder, component.getVCallOffset());
case VTableComponent::CK_VBaseOffset:
- return addOffsetConstant(component.getVBaseOffset());
+ return addOffsetConstant(CGM, builder, component.getVBaseOffset());
case VTableComponent::CK_OffsetToTop:
- return addOffsetConstant(component.getOffsetToTop());
+ return addOffsetConstant(CGM, builder, component.getOffsetToTop());
case VTableComponent::CK_RTTI:
- return builder.add(llvm::ConstantExpr::getBitCast(rtti, CGM.Int8PtrTy));
+ if (useRelativeLayout())
+ return addRelativeComponent(builder, rtti, vtableAddressPoint,
+ vtableHasLocalLinkage,
+ /*isCompleteDtor=*/false);
+ else
+ return builder.add(llvm::ConstantExpr::getBitCast(rtti, CGM.Int8PtrTy));
case VTableComponent::CK_FunctionPointer:
case VTableComponent::CK_CompleteDtorPointer:
@@ -672,11 +824,21 @@ void CodeGenVTables::addVTableComponent(
? MD->hasAttr<CUDADeviceAttr>()
: (MD->hasAttr<CUDAHostAttr>() || !MD->hasAttr<CUDADeviceAttr>());
if (!CanEmitMethod)
- return builder.addNullPointer(CGM.Int8PtrTy);
+ return builder.add(llvm::ConstantExpr::getNullValue(CGM.Int8PtrTy));
// Method is acceptable, continue processing as usual.
}
auto getSpecialVirtualFn = [&](StringRef name) -> llvm::Constant * {
+ // FIXME(PR43094): When merging comdat groups, lld can select a local
+ // symbol as the signature symbol even though it cannot be accessed
+ // outside that symbol's TU. The relative vtables ABI would make
+ // __cxa_pure_virtual and __cxa_deleted_virtual local symbols, and
+ // depending on link order, the comdat groups could resolve to the one
+ // with the local symbol. As a temporary solution, fill these components
+ // with zero. We shouldn't be calling these in the first place anyway.
+ if (useRelativeLayout())
+ return llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
+
     // For NVPTX devices in OpenMP emit special functions as null pointers,
// otherwise linking ends up with unresolved references.
if (CGM.getLangOpts().OpenMP && CGM.getLangOpts().OpenMPIsDevice &&
@@ -697,19 +859,20 @@ void CodeGenVTables::addVTableComponent(
if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
if (!PureVirtualFn)
PureVirtualFn =
- getSpecialVirtualFn(CGM.getCXXABI().GetPureVirtualCallName());
+ getSpecialVirtualFn(CGM.getCXXABI().GetPureVirtualCallName());
fnPtr = PureVirtualFn;
// Deleted virtual member functions.
} else if (cast<CXXMethodDecl>(GD.getDecl())->isDeleted()) {
if (!DeletedVirtualFn)
DeletedVirtualFn =
- getSpecialVirtualFn(CGM.getCXXABI().GetDeletedVirtualCallName());
+ getSpecialVirtualFn(CGM.getCXXABI().GetDeletedVirtualCallName());
fnPtr = DeletedVirtualFn;
// Thunks.
} else if (nextVTableThunkIndex < layout.vtable_thunks().size() &&
- layout.vtable_thunks()[nextVTableThunkIndex].first == idx) {
+ layout.vtable_thunks()[nextVTableThunkIndex].first ==
+ componentIndex) {
auto &thunkInfo = layout.vtable_thunks()[nextVTableThunkIndex].second;
nextVTableThunkIndex++;
@@ -721,13 +884,19 @@ void CodeGenVTables::addVTableComponent(
fnPtr = CGM.GetAddrOfFunction(GD, fnTy, /*ForVTable=*/true);
}
- fnPtr = llvm::ConstantExpr::getBitCast(fnPtr, CGM.Int8PtrTy);
- builder.add(fnPtr);
- return;
+ if (useRelativeLayout()) {
+ return addRelativeComponent(
+ builder, fnPtr, vtableAddressPoint, vtableHasLocalLinkage,
+ component.getKind() == VTableComponent::CK_CompleteDtorPointer);
+ } else
+ return builder.add(llvm::ConstantExpr::getBitCast(fnPtr, CGM.Int8PtrTy));
}
case VTableComponent::CK_UnusedFunctionPointer:
- return builder.addNullPointer(CGM.Int8PtrTy);
+ if (useRelativeLayout())
+ return builder.add(llvm::ConstantExpr::getNullValue(CGM.Int32Ty));
+ else
+ return builder.addNullPointer(CGM.Int8PtrTy);
}
llvm_unreachable("Unexpected vtable component kind");
@@ -735,34 +904,41 @@ void CodeGenVTables::addVTableComponent(
llvm::Type *CodeGenVTables::getVTableType(const VTableLayout &layout) {
SmallVector<llvm::Type *, 4> tys;
- for (unsigned i = 0, e = layout.getNumVTables(); i != e; ++i) {
- tys.push_back(llvm::ArrayType::get(CGM.Int8PtrTy, layout.getVTableSize(i)));
- }
+ llvm::Type *componentType = getVTableComponentType();
+ for (unsigned i = 0, e = layout.getNumVTables(); i != e; ++i)
+ tys.push_back(llvm::ArrayType::get(componentType, layout.getVTableSize(i)));
return llvm::StructType::get(CGM.getLLVMContext(), tys);
}
void CodeGenVTables::createVTableInitializer(ConstantStructBuilder &builder,
const VTableLayout &layout,
- llvm::Constant *rtti) {
+ llvm::Constant *rtti,
+ bool vtableHasLocalLinkage) {
+ llvm::Type *componentType = getVTableComponentType();
+
+ const auto &addressPoints = layout.getAddressPointIndices();
unsigned nextVTableThunkIndex = 0;
- for (unsigned i = 0, e = layout.getNumVTables(); i != e; ++i) {
- auto vtableElem = builder.beginArray(CGM.Int8PtrTy);
- size_t thisIndex = layout.getVTableOffset(i);
- size_t nextIndex = thisIndex + layout.getVTableSize(i);
- for (unsigned i = thisIndex; i != nextIndex; ++i) {
- addVTableComponent(vtableElem, layout, i, rtti, nextVTableThunkIndex);
+ for (unsigned vtableIndex = 0, endIndex = layout.getNumVTables();
+ vtableIndex != endIndex; ++vtableIndex) {
+ auto vtableElem = builder.beginArray(componentType);
+
+ size_t vtableStart = layout.getVTableOffset(vtableIndex);
+ size_t vtableEnd = vtableStart + layout.getVTableSize(vtableIndex);
+ for (size_t componentIndex = vtableStart; componentIndex < vtableEnd;
+ ++componentIndex) {
+ addVTableComponent(vtableElem, layout, componentIndex, rtti,
+ nextVTableThunkIndex, addressPoints[vtableIndex],
+ vtableHasLocalLinkage);
}
vtableElem.finishAndAddTo(builder);
}
}
-llvm::GlobalVariable *
-CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
- const BaseSubobject &Base,
- bool BaseIsVirtual,
- llvm::GlobalVariable::LinkageTypes Linkage,
- VTableAddressPointsMapTy& AddressPoints) {
+llvm::GlobalVariable *CodeGenVTables::GenerateConstructionVTable(
+ const CXXRecordDecl *RD, const BaseSubobject &Base, bool BaseIsVirtual,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ VTableAddressPointsMapTy &AddressPoints) {
if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
DI->completeClassData(Base.getBase());
@@ -779,7 +955,15 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
cast<ItaniumMangleContext>(CGM.getCXXABI().getMangleContext())
.mangleCXXCtorVTable(RD, Base.getBaseOffset().getQuantity(),
Base.getBase(), Out);
- StringRef Name = OutName.str();
+ SmallString<256> Name(OutName);
+
+ bool UsingRelativeLayout = getItaniumVTableContext().isRelativeLayout();
+ bool VTableAliasExists =
+ UsingRelativeLayout && CGM.getModule().getNamedAlias(Name);
+ if (VTableAliasExists) {
+ // We previously made the vtable hidden and changed its name.
+ Name.append(".local");
+ }
llvm::Type *VTType = getVTableType(*VTLayout);
@@ -806,7 +990,8 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
// Create and set the initializer.
ConstantInitBuilder builder(CGM);
auto components = builder.beginStruct();
- createVTableInitializer(components, *VTLayout, RTTI);
+ createVTableInitializer(components, *VTLayout, RTTI,
+ VTable->hasLocalLinkage());
components.finishAndSetAsInitializer(VTable);
// Set properties only after the initializer has been set to ensure that the
@@ -816,9 +1001,68 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
CGM.EmitVTableTypeMetadata(RD, VTable, *VTLayout.get());
+ if (UsingRelativeLayout && !VTable->isDSOLocal())
+ GenerateRelativeVTableAlias(VTable, OutName);
+
return VTable;
}
+// If the VTable is not dso_local, then we will not be able to indicate that
+// the VTable does not need a relocation and move into rodata. This frequently
+// occurs for classes that should be made public from a DSO (like in
+// libc++). For cases like these, we can make the vtable hidden or
+// private and create a public alias with the same visibility and linkage as
+// the original vtable type.
+void CodeGenVTables::GenerateRelativeVTableAlias(llvm::GlobalVariable *VTable,
+ llvm::StringRef AliasNameRef) {
+ assert(getItaniumVTableContext().isRelativeLayout() &&
+ "Can only use this if the relative vtable ABI is used");
+ assert(!VTable->isDSOLocal() && "This should be called only if the vtable is "
+ "not guaranteed to be dso_local");
+
+  // If the vtable is available_externally, we shouldn't (and don't need to)
+  // generate an alias for it in the first place since the vtable won't
+  // actually be emitted in this compilation unit.
+ if (VTable->hasAvailableExternallyLinkage())
+ return;
+
+ // Create a new string in the event the alias is already the name of the
+  // vtable. Using the reference directly could lead to use of an uninitialized
+ // value in the module's StringMap.
+ llvm::SmallString<256> AliasName(AliasNameRef);
+ VTable->setName(AliasName + ".local");
+
+ auto Linkage = VTable->getLinkage();
+ assert(llvm::GlobalAlias::isValidLinkage(Linkage) &&
+ "Invalid vtable alias linkage");
+
+ llvm::GlobalAlias *VTableAlias = CGM.getModule().getNamedAlias(AliasName);
+ if (!VTableAlias) {
+ VTableAlias = llvm::GlobalAlias::create(VTable->getValueType(),
+ VTable->getAddressSpace(), Linkage,
+ AliasName, &CGM.getModule());
+ } else {
+ assert(VTableAlias->getValueType() == VTable->getValueType());
+ assert(VTableAlias->getLinkage() == Linkage);
+ }
+ VTableAlias->setVisibility(VTable->getVisibility());
+ VTableAlias->setUnnamedAddr(VTable->getUnnamedAddr());
+
+ // Both of these imply dso_local for the vtable.
+ if (!VTable->hasComdat()) {
+ // If this is in a comdat, then we shouldn't make the linkage private due to
+ // an issue in lld where private symbols can be used as the key symbol when
+    // choosing the prevalent group. This leads to "relocation refers to a
+ // symbol in a discarded section".
+ VTable->setLinkage(llvm::GlobalValue::PrivateLinkage);
+ } else {
+ // We should at least make this hidden since we don't want to expose it.
+ VTable->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ }
+
+ VTableAlias->setAliasee(VTable);
+}
+
static bool shouldEmitAvailableExternallyVTable(const CodeGenModule &CGM,
const CXXRecordDecl *RD) {
return CGM.getCodeGenOpts().OptimizationLevel > 0 &&
@@ -1011,6 +1255,26 @@ void CodeGenModule::EmitDeferredVTables() {
DeferredVTables.clear();
}
+bool CodeGenModule::HasLTOVisibilityPublicStd(const CXXRecordDecl *RD) {
+ if (!getCodeGenOpts().LTOVisibilityPublicStd)
+ return false;
+
+ const DeclContext *DC = RD;
+ while (1) {
+ auto *D = cast<Decl>(DC);
+ DC = DC->getParent();
+ if (isa<TranslationUnitDecl>(DC->getRedeclContext())) {
+ if (auto *ND = dyn_cast<NamespaceDecl>(D))
+ if (const IdentifierInfo *II = ND->getIdentifier())
+ if (II->isStr("std") || II->isStr("stdext"))
+ return true;
+ break;
+ }
+ }
+
+ return false;
+}
+
bool CodeGenModule::HasHiddenLTOVisibility(const CXXRecordDecl *RD) {
LinkageInfo LV = RD->getLinkageAndVisibility();
if (!isExternallyVisible(LV.getLinkage()))
@@ -1027,22 +1291,7 @@ bool CodeGenModule::HasHiddenLTOVisibility(const CXXRecordDecl *RD) {
return false;
}
- if (getCodeGenOpts().LTOVisibilityPublicStd) {
- const DeclContext *DC = RD;
- while (1) {
- auto *D = cast<Decl>(DC);
- DC = DC->getParent();
- if (isa<TranslationUnitDecl>(DC->getRedeclContext())) {
- if (auto *ND = dyn_cast<NamespaceDecl>(D))
- if (const IdentifierInfo *II = ND->getIdentifier())
- if (II->isStr("std") || II->isStr("stdext"))
- return false;
- break;
- }
- }
- }
-
- return true;
+ return !HasLTOVisibilityPublicStd(RD);
}
llvm::GlobalObject::VCallVisibility
@@ -1131,9 +1380,10 @@ void CodeGenModule::EmitVTableTypeMetadata(const CXXRecordDecl *RD,
}
}
- if (getCodeGenOpts().VirtualFunctionElimination) {
+ if (getCodeGenOpts().VirtualFunctionElimination ||
+ getCodeGenOpts().WholeProgramVTables) {
llvm::GlobalObject::VCallVisibility TypeVis = GetVCallVisibilityLevel(RD);
if (TypeVis != llvm::GlobalObject::VCallVisibilityPublic)
- VTable->addVCallVisibilityMetadata(TypeVis);
+ VTable->setVCallVisibilityMetadata(TypeVis);
}
}
diff --git a/clang/lib/CodeGen/CGVTables.h b/clang/lib/CodeGen/CGVTables.h
index a47841bfc6c3..bdfc075ee305 100644
--- a/clang/lib/CodeGen/CGVTables.h
+++ b/clang/lib/CodeGen/CGVTables.h
@@ -62,16 +62,39 @@ class CodeGenVTables {
bool ForVTable);
void addVTableComponent(ConstantArrayBuilder &builder,
- const VTableLayout &layout, unsigned idx,
- llvm::Constant *rtti,
- unsigned &nextVTableThunkIndex);
+ const VTableLayout &layout, unsigned componentIndex,
+ llvm::Constant *rtti, unsigned &nextVTableThunkIndex,
+ unsigned vtableAddressPoint,
+ bool vtableHasLocalLinkage);
+
+ /// Add a 32-bit offset to a component relative to the vtable when using the
+ /// relative vtables ABI. The array builder points to the start of the vtable.
+ void addRelativeComponent(ConstantArrayBuilder &builder,
+ llvm::Constant *component,
+ unsigned vtableAddressPoint,
+ bool vtableHasLocalLinkage,
+ bool isCompleteDtor) const;
+
+ /// Create a dso_local stub that will be used for a relative reference in the
+ /// relative vtable layout. This stub will just be a tail call to the original
+ /// function and propagate any function attributes from the original. If the
+ /// original function is already dso_local, the original is returned instead
+ /// and a stub is not created.
+ llvm::Function *
+ getOrCreateRelativeStub(llvm::Function *func,
+ llvm::GlobalValue::LinkageTypes stubLinkage,
+ bool isCompleteDtor) const;
+
+ bool useRelativeLayout() const;
+
+ llvm::Type *getVTableComponentType() const;
public:
/// Add vtable components for the given vtable layout to the given
/// global initializer.
void createVTableInitializer(ConstantStructBuilder &builder,
- const VTableLayout &layout,
- llvm::Constant *rtti);
+ const VTableLayout &layout, llvm::Constant *rtti,
+ bool vtableHasLocalLinkage);
CodeGenVTables(CodeGenModule &CGM);
@@ -124,6 +147,13 @@ public:
/// arrays of pointers, with one struct element for each vtable in the vtable
/// group.
llvm::Type *getVTableType(const VTableLayout &layout);
+
+ /// Generate a public facing alias for the vtable and make the vtable either
+ /// hidden or private. The alias will have the original linkage and visibility
+ /// of the vtable. This is used for cases under the relative vtables ABI
+ /// when a vtable may not be dso_local.
+ void GenerateRelativeVTableAlias(llvm::GlobalVariable *VTable,
+ llvm::StringRef AliasNameRef);
};
} // end namespace CodeGen
diff --git a/clang/lib/CodeGen/CGValue.h b/clang/lib/CodeGen/CGValue.h
index 9fd07bdb187d..70e6fed3f4f6 100644
--- a/clang/lib/CodeGen/CGValue.h
+++ b/clang/lib/CodeGen/CGValue.h
@@ -170,7 +170,8 @@ class LValue {
VectorElt, // This is a vector element l-value (V[i]), use getVector*
BitField, // This is a bitfield l-value, use getBitfield*.
ExtVectorElt, // This is an extended vector subset, use getExtVectorComp
- GlobalReg // This is a register l-value, use getGlobalReg()
+ GlobalReg, // This is a register l-value, use getGlobalReg()
+ MatrixElt // This is a matrix element, use getVector*
} LVType;
llvm::Value *V;
@@ -254,6 +255,7 @@ public:
bool isBitField() const { return LVType == BitField; }
bool isExtVectorElt() const { return LVType == ExtVectorElt; }
bool isGlobalReg() const { return LVType == GlobalReg; }
+ bool isMatrixElt() const { return LVType == MatrixElt; }
bool isVolatileQualified() const { return Quals.hasVolatile(); }
bool isRestrictQualified() const { return Quals.hasRestrict(); }
@@ -337,8 +339,26 @@ public:
Address getVectorAddress() const {
return Address(getVectorPointer(), getAlignment());
}
- llvm::Value *getVectorPointer() const { assert(isVectorElt()); return V; }
- llvm::Value *getVectorIdx() const { assert(isVectorElt()); return VectorIdx; }
+ llvm::Value *getVectorPointer() const {
+ assert(isVectorElt());
+ return V;
+ }
+ llvm::Value *getVectorIdx() const {
+ assert(isVectorElt());
+ return VectorIdx;
+ }
+
+ Address getMatrixAddress() const {
+ return Address(getMatrixPointer(), getAlignment());
+ }
+ llvm::Value *getMatrixPointer() const {
+ assert(isMatrixElt());
+ return V;
+ }
+ llvm::Value *getMatrixIdx() const {
+ assert(isMatrixElt());
+ return VectorIdx;
+ }
// extended vector elements.
Address getExtVectorAddress() const {
@@ -430,6 +450,18 @@ public:
return R;
}
+ static LValue MakeMatrixElt(Address matAddress, llvm::Value *Idx,
+ QualType type, LValueBaseInfo BaseInfo,
+ TBAAAccessInfo TBAAInfo) {
+ LValue R;
+ R.LVType = MatrixElt;
+ R.V = matAddress.getPointer();
+ R.VectorIdx = Idx;
+ R.Initialize(type, type.getQualifiers(), matAddress.getAlignment(),
+ BaseInfo, TBAAInfo);
+ return R;
+ }
+
RValue asAggregateRValue(CodeGenFunction &CGF) const {
return RValue::getAggregate(getAddress(CGF), isVolatileQualified());
}
diff --git a/clang/lib/CodeGen/CodeGenABITypes.cpp b/clang/lib/CodeGen/CodeGenABITypes.cpp
index 6b6a116cf259..d3a16a1d5acc 100644
--- a/clang/lib/CodeGen/CodeGenABITypes.cpp
+++ b/clang/lib/CodeGen/CodeGenABITypes.cpp
@@ -16,7 +16,9 @@
//===----------------------------------------------------------------------===//
#include "clang/CodeGen/CodeGenABITypes.h"
+#include "CGCXXABI.h"
#include "CGRecordLayout.h"
+#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Lex/HeaderSearchOptions.h"
@@ -25,6 +27,11 @@
using namespace clang;
using namespace CodeGen;
+void CodeGen::addDefaultFunctionDefinitionAttributes(CodeGenModule &CGM,
+ llvm::AttrBuilder &attrs) {
+ CGM.addDefaultFunctionDefinitionAttributes(attrs);
+}
+
const CGFunctionInfo &
CodeGen::arrangeObjCMessageSendSignature(CodeGenModule &CGM,
const ObjCMethodDecl *MD,
@@ -63,6 +70,30 @@ CodeGen::arrangeFreeFunctionCall(CodeGenModule &CGM,
info, {}, args);
}
+ImplicitCXXConstructorArgs
+CodeGen::getImplicitCXXConstructorArgs(CodeGenModule &CGM,
+ const CXXConstructorDecl *D) {
+ // We have to create a dummy CodeGenFunction here to pass to
+ // getImplicitConstructorArgs(). In some cases (base and delegating
+ // constructor calls), getImplicitConstructorArgs() can reach into the
+ // CodeGenFunction to find parameters of the calling constructor to pass on to
+ // the called constructor, but that can't happen here because we're asking for
+ // the args for a complete, non-delegating constructor call.
+ CodeGenFunction CGF(CGM, /* suppressNewContext= */ true);
+ CGCXXABI::AddedStructorArgs addedArgs =
+ CGM.getCXXABI().getImplicitConstructorArgs(CGF, D, Ctor_Complete,
+ /* ForVirtualBase= */ false,
+ /* Delegating= */ false);
+ ImplicitCXXConstructorArgs implicitArgs;
+ for (const auto &arg : addedArgs.Prefix) {
+ implicitArgs.Prefix.push_back(arg.Value);
+ }
+ for (const auto &arg : addedArgs.Suffix) {
+ implicitArgs.Suffix.push_back(arg.Value);
+ }
+ return implicitArgs;
+}
+
llvm::FunctionType *
CodeGen::convertFreeFunctionType(CodeGenModule &CGM, const FunctionDecl *FD) {
assert(FD != nullptr && "Expected a non-null function declaration!");
@@ -84,3 +115,16 @@ unsigned CodeGen::getLLVMFieldNumber(CodeGenModule &CGM,
const FieldDecl *FD) {
return CGM.getTypes().getCGRecordLayout(RD).getLLVMFieldNo(FD);
}
+
+llvm::Value *CodeGen::getCXXDestructorImplicitParam(
+ CodeGenModule &CGM, llvm::BasicBlock *InsertBlock,
+ llvm::BasicBlock::iterator InsertPoint, const CXXDestructorDecl *D,
+ CXXDtorType Type, bool ForVirtualBase, bool Delegating) {
+ CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
+ CGF.CurCodeDecl = D;
+ CGF.CurFuncDecl = D;
+ CGF.CurFn = InsertBlock->getParent();
+ CGF.Builder.SetInsertPoint(InsertBlock, InsertPoint);
+ return CGM.getCXXABI().getCXXDestructorImplicitParam(
+ CGF, D, Type, ForVirtualBase, Delegating);
+}
diff --git a/clang/lib/CodeGen/CodeGenAction.cpp b/clang/lib/CodeGen/CodeGenAction.cpp
index 7065e78f19a2..55925110708e 100644
--- a/clang/lib/CodeGen/CodeGenAction.cpp
+++ b/clang/lib/CodeGen/CodeGenAction.cpp
@@ -32,8 +32,8 @@
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/LLVMRemarkStreamer.h"
#include "llvm/IR/Module.h"
-#include "llvm/IR/RemarkStreamer.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/Linker/Linker.h"
#include "llvm/Pass.h"
@@ -86,15 +86,15 @@ namespace clang {
const CodeGenOptions CodeGenOpts) {
handleAllErrors(
std::move(E),
- [&](const RemarkSetupFileError &E) {
+ [&](const LLVMRemarkSetupFileError &E) {
Diags.Report(diag::err_cannot_open_file)
<< CodeGenOpts.OptRecordFile << E.message();
},
- [&](const RemarkSetupPatternError &E) {
+ [&](const LLVMRemarkSetupPatternError &E) {
Diags.Report(diag::err_drv_optimization_remark_pattern)
<< E.message() << CodeGenOpts.OptRecordPasses;
},
- [&](const RemarkSetupFormatError &E) {
+ [&](const LLVMRemarkSetupFormatError &E) {
Diags.Report(diag::err_drv_optimization_remark_format)
<< CodeGenOpts.OptRecordFormat;
});
@@ -246,7 +246,7 @@ namespace clang {
for (auto &LM : LinkModules) {
if (LM.PropagateAttrs)
for (Function &F : *LM.Module)
- Gen->CGM().AddDefaultFnAttrs(F);
+ Gen->CGM().addDefaultFunctionDefinitionAttributes(F);
CurLinkModule = LM.Module.get();
@@ -309,7 +309,7 @@ namespace clang {
CodeGenOpts, this));
Expected<std::unique_ptr<llvm::ToolOutputFile>> OptRecordFileOrErr =
- setupOptimizationRemarks(
+ setupLLVMOptimizationRemarks(
Ctx, CodeGenOpts.OptRecordFile, CodeGenOpts.OptRecordPasses,
CodeGenOpts.OptRecordFormat, CodeGenOpts.DiagnosticsWithHotness,
CodeGenOpts.DiagnosticsHotnessThreshold);
@@ -633,8 +633,9 @@ const FullSourceLoc BackendConsumer::getBestLocationFromDebugLoc(
void BackendConsumer::UnsupportedDiagHandler(
const llvm::DiagnosticInfoUnsupported &D) {
- // We only support errors.
- assert(D.getSeverity() == llvm::DS_Error);
+ // We only support warnings or errors.
+ assert(D.getSeverity() == llvm::DS_Error ||
+ D.getSeverity() == llvm::DS_Warning);
StringRef Filename;
unsigned Line, Column;
@@ -652,7 +653,11 @@ void BackendConsumer::UnsupportedDiagHandler(
DiagnosticPrinterRawOStream DP(MsgStream);
D.print(DP);
}
- Diags.Report(Loc, diag::err_fe_backend_unsupported) << MsgStream.str();
+
+ auto DiagType = D.getSeverity() == llvm::DS_Error
+ ? diag::err_fe_backend_unsupported
+ : diag::warn_fe_backend_unsupported;
+ Diags.Report(Loc, DiagType) << MsgStream.str();
if (BadDebugInfo)
// If we were not able to translate the file:line:col information
@@ -994,7 +999,7 @@ CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
std::unique_ptr<BackendConsumer> Result(new BackendConsumer(
BA, CI.getDiagnostics(), CI.getHeaderSearchOpts(),
CI.getPreprocessorOpts(), CI.getCodeGenOpts(), CI.getTargetOpts(),
- CI.getLangOpts(), CI.getFrontendOpts().ShowTimers, InFile,
+ CI.getLangOpts(), CI.getFrontendOpts().ShowTimers, std::string(InFile),
std::move(LinkModules), std::move(OS), *VMContext, CoverageInfo));
BEConsumer = Result.get();
@@ -1146,11 +1151,14 @@ void CodeGenAction::ExecuteAction() {
CI.getTargetOpts(), CI.getLangOpts(),
CI.getFrontendOpts().ShowTimers,
std::move(LinkModules), *VMContext, nullptr);
+ // PR44896: Force DiscardValueNames as false. DiscardValueNames cannot be
+ // true here because the valued names are needed for reading textual IR.
+ Ctx.setDiscardValueNames(false);
Ctx.setDiagnosticHandler(
std::make_unique<ClangDiagnosticHandler>(CodeGenOpts, &Result));
Expected<std::unique_ptr<llvm::ToolOutputFile>> OptRecordFileOrErr =
- setupOptimizationRemarks(
+ setupLLVMOptimizationRemarks(
Ctx, CodeGenOpts.OptRecordFile, CodeGenOpts.OptRecordPasses,
CodeGenOpts.OptRecordFormat, CodeGenOpts.DiagnosticsWithHotness,
CodeGenOpts.DiagnosticsHotnessThreshold);
diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp
index 2bf94f697e01..4a7c84562dee 100644
--- a/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -32,6 +32,7 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
+#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/FPEnv.h"
@@ -64,67 +65,36 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
: CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
CGBuilderInserterTy(this)),
- SanOpts(CGM.getLangOpts().Sanitize), DebugInfo(CGM.getModuleDebugInfo()),
- PGO(cgm), ShouldEmitLifetimeMarkers(shouldEmitLifetimeMarkers(
- CGM.getCodeGenOpts(), CGM.getLangOpts())) {
+ SanOpts(CGM.getLangOpts().Sanitize), CurFPFeatures(CGM.getLangOpts()),
+ DebugInfo(CGM.getModuleDebugInfo()), PGO(cgm),
+ ShouldEmitLifetimeMarkers(
+ shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
if (!suppressNewContext)
CGM.getCXXABI().getMangleContext().startNewFunction();
- llvm::FastMathFlags FMF;
- if (CGM.getLangOpts().FastMath)
- FMF.setFast();
- if (CGM.getLangOpts().FiniteMathOnly) {
- FMF.setNoNaNs();
- FMF.setNoInfs();
- }
- if (CGM.getCodeGenOpts().NoNaNsFPMath) {
- FMF.setNoNaNs();
- }
- if (CGM.getCodeGenOpts().NoSignedZeros) {
- FMF.setNoSignedZeros();
- }
- if (CGM.getCodeGenOpts().ReciprocalMath) {
- FMF.setAllowReciprocal();
- }
- if (CGM.getCodeGenOpts().Reassociate) {
- FMF.setAllowReassoc();
- }
- Builder.setFastMathFlags(FMF);
+ SetFastMathFlags(CurFPFeatures);
SetFPModel();
}
CodeGenFunction::~CodeGenFunction() {
assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
- // If there are any unclaimed block infos, go ahead and destroy them
- // now. This can happen if IR-gen gets clever and skips evaluating
- // something.
- if (FirstBlockInfo)
- destroyBlockInfos(FirstBlockInfo);
-
if (getLangOpts().OpenMP && CurFn)
CGM.getOpenMPRuntime().functionFinished(*this);
-}
-
-// Map the LangOption for rounding mode into
-// the corresponding enum in the IR.
-static llvm::fp::RoundingMode ToConstrainedRoundingMD(
- LangOptions::FPRoundingModeKind Kind) {
- switch (Kind) {
- case LangOptions::FPR_ToNearest: return llvm::fp::rmToNearest;
- case LangOptions::FPR_Downward: return llvm::fp::rmDownward;
- case LangOptions::FPR_Upward: return llvm::fp::rmUpward;
- case LangOptions::FPR_TowardZero: return llvm::fp::rmTowardZero;
- case LangOptions::FPR_Dynamic: return llvm::fp::rmDynamic;
- }
- llvm_unreachable("Unsupported FP RoundingMode");
+ // If we have an OpenMPIRBuilder we want to finalize functions (incl.
+ // outlining etc) at some point. Doing it once the function codegen is done
+ // seems to be a reasonable spot. We do it here, as opposed to the deletion
+ // time of the CodeGenModule, because we have to ensure the IR has not yet
+ // been "emitted" to the outside, thus, modifications are still sensible.
+ if (CGM.getLangOpts().OpenMPIRBuilder)
+ CGM.getOpenMPRuntime().getOMPBuilder().finalize();
}
// Map the LangOption for exception behavior into
// the corresponding enum in the IR.
-static llvm::fp::ExceptionBehavior ToConstrainedExceptMD(
- LangOptions::FPExceptionModeKind Kind) {
+llvm::fp::ExceptionBehavior
+clang::ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) {
switch (Kind) {
case LangOptions::FPE_Ignore: return llvm::fp::ebIgnore;
@@ -135,81 +105,79 @@ static llvm::fp::ExceptionBehavior ToConstrainedExceptMD(
}
void CodeGenFunction::SetFPModel() {
- auto fpRoundingMode = ToConstrainedRoundingMD(
- getLangOpts().getFPRoundingMode());
+ llvm::RoundingMode RM = getLangOpts().getFPRoundingMode();
auto fpExceptionBehavior = ToConstrainedExceptMD(
getLangOpts().getFPExceptionMode());
- if (fpExceptionBehavior == llvm::fp::ebIgnore &&
- fpRoundingMode == llvm::fp::rmToNearest)
- // Constrained intrinsics are not used.
- ;
- else {
- Builder.setIsFPConstrained(true);
- Builder.setDefaultConstrainedRounding(fpRoundingMode);
- Builder.setDefaultConstrainedExcept(fpExceptionBehavior);
- }
-}
-
-CharUnits CodeGenFunction::getNaturalPointeeTypeAlignment(QualType T,
- LValueBaseInfo *BaseInfo,
- TBAAAccessInfo *TBAAInfo) {
- return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
- /* forPointeeType= */ true);
-}
-
-CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
- LValueBaseInfo *BaseInfo,
- TBAAAccessInfo *TBAAInfo,
- bool forPointeeType) {
- if (TBAAInfo)
- *TBAAInfo = CGM.getTBAAAccessInfo(T);
-
- // Honor alignment typedef attributes even on incomplete types.
- // We also honor them straight for C++ class types, even as pointees;
- // there's an expressivity gap here.
- if (auto TT = T->getAs<TypedefType>()) {
- if (auto Align = TT->getDecl()->getMaxAlignment()) {
- if (BaseInfo)
- *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
- return getContext().toCharUnitsFromBits(Align);
- }
- }
+ Builder.setDefaultConstrainedRounding(RM);
+ Builder.setDefaultConstrainedExcept(fpExceptionBehavior);
+ Builder.setIsFPConstrained(fpExceptionBehavior != llvm::fp::ebIgnore ||
+ RM != llvm::RoundingMode::NearestTiesToEven);
+}
- if (BaseInfo)
- *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
+void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) {
+ llvm::FastMathFlags FMF;
+ FMF.setAllowReassoc(FPFeatures.getAllowFPReassociate());
+ FMF.setNoNaNs(FPFeatures.getNoHonorNaNs());
+ FMF.setNoInfs(FPFeatures.getNoHonorInfs());
+ FMF.setNoSignedZeros(FPFeatures.getNoSignedZero());
+ FMF.setAllowReciprocal(FPFeatures.getAllowReciprocal());
+ FMF.setApproxFunc(FPFeatures.getAllowApproxFunc());
+ FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
+ Builder.setFastMathFlags(FMF);
+}
- CharUnits Alignment;
- if (T->isIncompleteType()) {
- Alignment = CharUnits::One(); // Shouldn't be used, but pessimistic is best.
- } else {
- // For C++ class pointees, we don't know whether we're pointing at a
- // base or a complete object, so we generally need to use the
- // non-virtual alignment.
- const CXXRecordDecl *RD;
- if (forPointeeType && (RD = T->getAsCXXRecordDecl())) {
- Alignment = CGM.getClassPointerAlignment(RD);
- } else {
- Alignment = getContext().getTypeAlignInChars(T);
- if (T.getQualifiers().hasUnaligned())
- Alignment = CharUnits::One();
- }
+CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
+ FPOptions FPFeatures)
+ : CGF(CGF), OldFPFeatures(CGF.CurFPFeatures) {
+ CGF.CurFPFeatures = FPFeatures;
- // Cap to the global maximum type alignment unless the alignment
- // was somehow explicit on the type.
- if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
- if (Alignment.getQuantity() > MaxAlign &&
- !getContext().isAlignmentRequired(T))
- Alignment = CharUnits::fromQuantity(MaxAlign);
- }
- }
- return Alignment;
+ if (OldFPFeatures == FPFeatures)
+ return;
+
+ FMFGuard.emplace(CGF.Builder);
+
+ llvm::RoundingMode NewRoundingBehavior =
+ static_cast<llvm::RoundingMode>(FPFeatures.getRoundingMode());
+ CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior);
+ auto NewExceptionBehavior =
+ ToConstrainedExceptMD(static_cast<LangOptions::FPExceptionModeKind>(
+ FPFeatures.getFPExceptionMode()));
+ CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior);
+
+ CGF.SetFastMathFlags(FPFeatures);
+
+ assert((CGF.CurFuncDecl == nullptr || CGF.Builder.getIsFPConstrained() ||
+ isa<CXXConstructorDecl>(CGF.CurFuncDecl) ||
+ isa<CXXDestructorDecl>(CGF.CurFuncDecl) ||
+ (NewExceptionBehavior == llvm::fp::ebIgnore &&
+ NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
+ "FPConstrained should be enabled on entire function");
+
+ auto mergeFnAttrValue = [&](StringRef Name, bool Value) {
+ auto OldValue =
+ CGF.CurFn->getFnAttribute(Name).getValueAsString() == "true";
+ auto NewValue = OldValue & Value;
+ if (OldValue != NewValue)
+ CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue));
+ };
+ mergeFnAttrValue("no-infs-fp-math", FPFeatures.getNoHonorInfs());
+ mergeFnAttrValue("no-nans-fp-math", FPFeatures.getNoHonorNaNs());
+ mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero());
+ mergeFnAttrValue("unsafe-fp-math", FPFeatures.getAllowFPReassociate() &&
+ FPFeatures.getAllowReciprocal() &&
+ FPFeatures.getAllowApproxFunc() &&
+ FPFeatures.getNoSignedZero());
+}
+
+CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
+ CGF.CurFPFeatures = OldFPFeatures;
}
LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
LValueBaseInfo BaseInfo;
TBAAAccessInfo TBAAInfo;
- CharUnits Alignment = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
+ CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
TBAAInfo);
}
@@ -220,8 +188,8 @@ LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
LValueBaseInfo BaseInfo;
TBAAAccessInfo TBAAInfo;
- CharUnits Align = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
- /* forPointeeType= */ true);
+ CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
+ /* forPointeeType= */ true);
return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
}
@@ -259,11 +227,13 @@ TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
case Type::MemberPointer:
case Type::Vector:
case Type::ExtVector:
+ case Type::ConstantMatrix:
case Type::FunctionProto:
case Type::FunctionNoProto:
case Type::Enum:
case Type::ObjCObjectPointer:
case Type::Pipe:
+ case Type::ExtInt:
return TEK_Scalar;
// Complexes.
@@ -486,13 +456,15 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
// Scan function arguments for vector width.
for (llvm::Argument &A : CurFn->args())
if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
- LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
- VT->getPrimitiveSizeInBits().getFixedSize());
+ LargestVectorWidth =
+ std::max((uint64_t)LargestVectorWidth,
+ VT->getPrimitiveSizeInBits().getKnownMinSize());
// Update vector width based on return type.
if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
- LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
- VT->getPrimitiveSizeInBits().getFixedSize());
+ LargestVectorWidth =
+ std::max((uint64_t)LargestVectorWidth,
+ VT->getPrimitiveSizeInBits().getKnownMinSize());
// Add the required-vector-width attribute. This contains the max width from:
// 1. min-vector-width attribute used in the source program.
@@ -799,35 +771,54 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
SanOpts.Mask &= ~SanitizerKind::Null;
- if (D) {
- // Apply xray attributes to the function (as a string, for now)
- if (const auto *XRayAttr = D->getAttr<XRayInstrumentAttr>()) {
- if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
- XRayInstrKind::Function)) {
- if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction())
- Fn->addFnAttr("function-instrument", "xray-always");
- if (XRayAttr->neverXRayInstrument())
- Fn->addFnAttr("function-instrument", "xray-never");
- if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
- if (ShouldXRayInstrumentFunction())
- Fn->addFnAttr("xray-log-args",
- llvm::utostr(LogArgs->getArgumentCount()));
- }
- } else {
- if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
- Fn->addFnAttr(
- "xray-instruction-threshold",
- llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
+ // Apply xray attributes to the function (as a string, for now)
+ if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) {
+ if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
+ XRayInstrKind::FunctionEntry) ||
+ CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
+ XRayInstrKind::FunctionExit)) {
+ if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction())
+ Fn->addFnAttr("function-instrument", "xray-always");
+ if (XRayAttr->neverXRayInstrument())
+ Fn->addFnAttr("function-instrument", "xray-never");
+ if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
+ if (ShouldXRayInstrumentFunction())
+ Fn->addFnAttr("xray-log-args",
+ llvm::utostr(LogArgs->getArgumentCount()));
}
+ } else {
+ if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
+ Fn->addFnAttr(
+ "xray-instruction-threshold",
+ llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
+ }
- if (const auto *Attr = D->getAttr<PatchableFunctionEntryAttr>()) {
- // Attr->getStart is currently ignored.
- Fn->addFnAttr("patchable-function-entry",
- std::to_string(Attr->getCount()));
- } else if (unsigned Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount) {
- Fn->addFnAttr("patchable-function-entry",
- std::to_string(Count));
- }
+ if (ShouldXRayInstrumentFunction()) {
+ if (CGM.getCodeGenOpts().XRayIgnoreLoops)
+ Fn->addFnAttr("xray-ignore-loops");
+
+ if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
+ XRayInstrKind::FunctionExit))
+ Fn->addFnAttr("xray-skip-exit");
+
+ if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
+ XRayInstrKind::FunctionEntry))
+ Fn->addFnAttr("xray-skip-entry");
+ }
+
+ unsigned Count, Offset;
+ if (const auto *Attr =
+ D ? D->getAttr<PatchableFunctionEntryAttr>() : nullptr) {
+ Count = Attr->getCount();
+ Offset = Attr->getOffset();
+ } else {
+ Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
+ Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
+ }
+ if (Count && Offset <= Count) {
+ Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
+ if (Offset)
+ Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
}
// Add no-jump-tables value.
@@ -842,6 +833,9 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
if (CGM.getCodeGenOpts().ProfileSampleAccurate)
Fn->addFnAttr("profile-sample-accurate");
+ if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
+ Fn->addFnAttr("use-sample-profile");
+
if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
Fn->addFnAttr("cfi-canonical-jump-table");
@@ -889,14 +883,26 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
// If we're in C++ mode and the function name is "main", it is guaranteed
// to be norecurse by the standard (3.6.1.3 "The function main shall not be
// used within a program").
- if (getLangOpts().CPlusPlus)
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
- if (FD->isMain())
- Fn->addFnAttr(llvm::Attribute::NoRecurse);
-
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+ //
+ // OpenCL C 2.0 v2.2-11 s6.9.i:
+ // Recursion is not supported.
+ //
+ // SYCL v1.2.1 s3.10:
+ // kernels cannot include RTTI information, exception classes,
+ // recursive code, virtual functions or make use of C++ libraries that
+ // are not compiled for the device.
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+ if ((getLangOpts().CPlusPlus && FD->isMain()) || getLangOpts().OpenCL ||
+ getLangOpts().SYCLIsDevice ||
+ (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>()))
+ Fn->addFnAttr(llvm::Attribute::NoRecurse);
+ }
+
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+ Builder.setIsFPConstrained(FD->usesFPIntrin());
if (FD->usesFPIntrin())
Fn->addFnAttr(llvm::Attribute::StrictFP);
+ }
// If a custom alignment is used, force realigning to this alignment on
// any main function which certainly will need it.
@@ -1021,7 +1027,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
ReturnValuePointer = Address(Addr, getPointerAlign());
Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
- ReturnValue = Address(Addr, getNaturalTypeAlignment(RetTy));
+ ReturnValue = Address(Addr, CGM.getNaturalTypeAlignment(RetTy));
} else {
ReturnValue = CreateIRTemp(RetTy, "retval");
@@ -1978,6 +1984,7 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
case Type::Complex:
case Type::Vector:
case Type::ExtVector:
+ case Type::ConstantMatrix:
case Type::Record:
case Type::Enum:
case Type::Elaborated:
@@ -1986,6 +1993,7 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
+ case Type::ExtInt:
llvm_unreachable("type class is never variably-modified!");
case Type::Adjusted:
@@ -2141,21 +2149,47 @@ void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
protection.Inst->eraseFromParent();
}
-void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
+void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
QualType Ty, SourceLocation Loc,
SourceLocation AssumptionLoc,
llvm::Value *Alignment,
llvm::Value *OffsetValue) {
- llvm::Value *TheCheck;
- llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
- CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
+ if (Alignment->getType() != IntPtrTy)
+ Alignment =
+ Builder.CreateIntCast(Alignment, IntPtrTy, false, "casted.align");
+ if (OffsetValue && OffsetValue->getType() != IntPtrTy)
+ OffsetValue =
+ Builder.CreateIntCast(OffsetValue, IntPtrTy, true, "casted.offset");
+ llvm::Value *TheCheck = nullptr;
if (SanOpts.has(SanitizerKind::Alignment)) {
- EmitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
- OffsetValue, TheCheck, Assumption);
+ llvm::Value *PtrIntValue =
+ Builder.CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
+
+ if (OffsetValue) {
+ bool IsOffsetZero = false;
+ if (const auto *CI = dyn_cast<llvm::ConstantInt>(OffsetValue))
+ IsOffsetZero = CI->isZero();
+
+ if (!IsOffsetZero)
+ PtrIntValue = Builder.CreateSub(PtrIntValue, OffsetValue, "offsetptr");
+ }
+
+ llvm::Value *Zero = llvm::ConstantInt::get(IntPtrTy, 0);
+ llvm::Value *Mask =
+ Builder.CreateSub(Alignment, llvm::ConstantInt::get(IntPtrTy, 1));
+ llvm::Value *MaskedPtr = Builder.CreateAnd(PtrIntValue, Mask, "maskedptr");
+ TheCheck = Builder.CreateICmpEQ(MaskedPtr, Zero, "maskcond");
}
+ llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
+ CGM.getDataLayout(), PtrValue, Alignment, OffsetValue);
+
+ if (!SanOpts.has(SanitizerKind::Alignment))
+ return;
+ emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
+ OffsetValue, TheCheck, Assumption);
}
-void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
+void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
const Expr *E,
SourceLocation AssumptionLoc,
llvm::Value *Alignment,
@@ -2165,7 +2199,7 @@ void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
QualType Ty = E->getType();
SourceLocation Loc = E->getExprLoc();
- EmitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
+ emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
OffsetValue);
}
@@ -2319,8 +2353,7 @@ void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
SmallVector<StringRef, 1> ReqFeatures;
llvm::StringMap<bool> CalleeFeatureMap;
- CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap,
- GlobalDecl(TargetDecl));
+ CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
for (const auto &F : ParsedAttr.Features) {
if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
@@ -2433,13 +2466,13 @@ void CodeGenFunction::EmitMultiVersionResolver(
// Loc), the diagnostic will additionally point a "Note:" to this location.
// It should be the location where the __attribute__((assume_aligned))
// was written e.g.
-void CodeGenFunction::EmitAlignmentAssumptionCheck(
+void CodeGenFunction::emitAlignmentAssumptionCheck(
llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
SourceLocation SecondaryLoc, llvm::Value *Alignment,
llvm::Value *OffsetValue, llvm::Value *TheCheck,
llvm::Instruction *Assumption) {
assert(Assumption && isa<llvm::CallInst>(Assumption) &&
- cast<llvm::CallInst>(Assumption)->getCalledValue() ==
+ cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
llvm::Intrinsic::getDeclaration(
Builder.GetInsertBlock()->getParent()->getParent(),
llvm::Intrinsic::assume) &&
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index 3d8bc93eb965..d794f4f0fa81 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -26,6 +26,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
+#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/Type.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/CapturedStmt.h"
@@ -36,6 +37,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"
@@ -75,7 +77,11 @@ class ObjCAtTryStmt;
class ObjCAtThrowStmt;
class ObjCAtSynchronizedStmt;
class ObjCAutoreleasePoolStmt;
+class OMPUseDevicePtrClause;
+class OMPUseDeviceAddrClause;
class ReturnsNonNullAttr;
+class SVETypeFlags;
+class OMPExecutableDirective;
namespace analyze_os_log {
class OSLogBufferLayout;
@@ -118,6 +124,7 @@ enum TypeEvaluationKind {
SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 1) \
SANITIZER_CHECK(ImplicitConversion, implicit_conversion, 0) \
SANITIZER_CHECK(InvalidBuiltin, invalid_builtin, 0) \
+ SANITIZER_CHECK(InvalidObjCCast, invalid_objc_cast, 0) \
SANITIZER_CHECK(LoadInvalidValue, load_invalid_value, 0) \
SANITIZER_CHECK(MissingReturn, missing_return, 0) \
SANITIZER_CHECK(MulOverflow, mul_overflow, 0) \
@@ -258,6 +265,9 @@ public:
CodeGenModule &CGM; // Per-module state.
const TargetInfo &Target;
+ // For EH/SEH outlined funclets, this field points to parent's CGF
+ CodeGenFunction *ParentCGF = nullptr;
+
typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
LoopInfoStack LoopStack;
CGBuilderTy Builder;
@@ -332,6 +342,10 @@ public:
/// This is invalid if sret is not in use.
Address ReturnValuePointer = Address::invalid();
+ /// If a return statement is being visited, this holds the return statment's
+ /// result expression.
+ const Expr *RetExpr = nullptr;
+
/// Return true if a label was seen in the current scope.
bool hasLabelBeenSeenInCurrentScope() const {
if (CurLexicalScope)
@@ -485,6 +499,9 @@ public:
/// region.
bool IsInPreservedAIRegion = false;
+ /// True if the current statement has nomerge attribute.
+ bool InNoMergeAttributedStmt = false;
+
const CodeGen::CGBlockInfo *BlockInfo = nullptr;
llvm::Value *BlockPointer = nullptr;
@@ -533,9 +550,6 @@ public:
unsigned NextCleanupDestIndex = 1;
- /// FirstBlockInfo - The head of a singly-linked-list of block layouts.
- CGBlockInfo *FirstBlockInfo = nullptr;
-
/// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
llvm::BasicBlock *EHResumeBlock = nullptr;
@@ -560,11 +574,49 @@ public:
llvm::BasicBlock *getInvokeDestImpl();
+ /// Parent loop-based directive for scan directive.
+ const OMPExecutableDirective *OMPParentLoopDirectiveForScan = nullptr;
+ llvm::BasicBlock *OMPBeforeScanBlock = nullptr;
+ llvm::BasicBlock *OMPAfterScanBlock = nullptr;
+ llvm::BasicBlock *OMPScanExitBlock = nullptr;
+ llvm::BasicBlock *OMPScanDispatch = nullptr;
+ bool OMPFirstScanLoop = false;
+
+ /// Manages parent directive for scan directives.
+ class ParentLoopDirectiveForScanRegion {
+ CodeGenFunction &CGF;
+ const OMPExecutableDirective *ParentLoopDirectiveForScan;
+
+ public:
+ ParentLoopDirectiveForScanRegion(
+ CodeGenFunction &CGF,
+ const OMPExecutableDirective &ParentLoopDirectiveForScan)
+ : CGF(CGF),
+ ParentLoopDirectiveForScan(CGF.OMPParentLoopDirectiveForScan) {
+ CGF.OMPParentLoopDirectiveForScan = &ParentLoopDirectiveForScan;
+ }
+ ~ParentLoopDirectiveForScanRegion() {
+ CGF.OMPParentLoopDirectiveForScan = ParentLoopDirectiveForScan;
+ }
+ };
+
template <class T>
typename DominatingValue<T>::saved_type saveValueInCond(T value) {
return DominatingValue<T>::save(*this, value);
}
+ class CGFPOptionsRAII {
+ public:
+ CGFPOptionsRAII(CodeGenFunction &CGF, FPOptions FPFeatures);
+ ~CGFPOptionsRAII();
+
+ private:
+ CodeGenFunction &CGF;
+ FPOptions OldFPFeatures;
+ Optional<CGBuilderTy::FastMathFlagGuard> FMFGuard;
+ };
+ FPOptions CurFPFeatures;
+
public:
/// ObjCEHValueStack - Stack of Objective-C exception values, used for
/// rethrows.
@@ -1541,6 +1593,169 @@ public:
CallArgList OldCXXInheritedCtorInitExprArgs;
};
+ // Helper class for the OpenMP IR Builder. Allows reusability of code used for
+ // region body, and finalization codegen callbacks. This class will also
+ // contain privatization functions used by the privatization callbacks.
+ //
+ // TODO: this is a temporary class for things that are being moved out of
+ // CGOpenMPRuntime, new versions of current CodeGenFunction methods, or
+ // utility function for use with the OMPBuilder. Once that move to use the
+ // OMPBuilder is done, everything here will either become part of CodeGenFunc.
+ // directly, or a new helper class that will contain functions used by both
+ // this and the OMPBuilder
+
+ struct OMPBuilderCBHelpers {
+
+ // Static-only helper collection; never instantiated or copied.
+ OMPBuilderCBHelpers() = delete;
+ OMPBuilderCBHelpers(const OMPBuilderCBHelpers &) = delete;
+ OMPBuilderCBHelpers &operator=(const OMPBuilderCBHelpers &) = delete;
+
+ using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
+
+ /// Cleanup action for allocate support.
+ class OMPAllocateCleanupTy final : public EHScopeStack::Cleanup {
+
+ private:
+ llvm::CallInst *RTLFnCI;
+
+ public:
+ // Detaches the runtime call from its parent block; it is re-inserted
+ // at the cleanup point by Emit() below.
+ OMPAllocateCleanupTy(llvm::CallInst *RLFnCI) : RTLFnCI(RLFnCI) {
+ RLFnCI->removeFromParent();
+ }
+
+ void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
+ if (!CGF.HaveInsertPoint())
+ return;
+ CGF.Builder.Insert(RTLFnCI);
+ }
+ };
+
+ /// Returns address of the threadprivate variable for the current
+ /// thread. This also creates any necessary OMP runtime calls.
+ ///
+ /// \param VD VarDecl for Threadprivate variable.
+ /// \param VDAddr Address of the Vardecl
+ /// \param Loc The location where the barrier directive was encountered
+ static Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
+ const VarDecl *VD, Address VDAddr,
+ SourceLocation Loc);
+
+ /// Gets the OpenMP-specific address of the local variable \p VD.
+ static Address getAddressOfLocalVariable(CodeGenFunction &CGF,
+ const VarDecl *VD);
+ /// Get the platform-specific name separator.
+ /// \param Parts different parts of the final name that needs separation
+ /// \param FirstSeparator First separator used between the initial two
+ /// parts of the name.
+ /// \param Separator separator used between all of the rest consecutive
+ /// parts of the name
+ static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
+ StringRef FirstSeparator = ".",
+ StringRef Separator = ".");
+ /// Emit the Finalization for an OMP region
+ /// \param CGF The Codegen function this belongs to
+ /// \param IP Insertion point for generating the finalization code.
+ static void FinalizeOMPRegion(CodeGenFunction &CGF, InsertPointTy IP) {
+ CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
+ assert(IP.getBlock()->end() != IP.getPoint() &&
+ "OpenMP IR Builder should cause terminated block!");
+
+ llvm::BasicBlock *IPBB = IP.getBlock();
+ llvm::BasicBlock *DestBB = IPBB->getUniqueSuccessor();
+ assert(DestBB && "Finalization block should have one successor!");
+
+ // erase and replace with cleanup branch.
+ IPBB->getTerminator()->eraseFromParent();
+ CGF.Builder.SetInsertPoint(IPBB);
+ CodeGenFunction::JumpDest Dest = CGF.getJumpDestInCurrentScope(DestBB);
+ CGF.EmitBranchThroughCleanup(Dest);
+ }
+
+ /// Emit the body of an OMP region
+ /// \param CGF The Codegen function this belongs to
+ /// \param RegionBodyStmt The body statement for the OpenMP region being
+ /// generated
+ /// \param CodeGenIP Insertion point for generating the body code.
+ /// \param FiniBB The finalization basic block
+ static void EmitOMPRegionBody(CodeGenFunction &CGF,
+ const Stmt *RegionBodyStmt,
+ InsertPointTy CodeGenIP,
+ llvm::BasicBlock &FiniBB) {
+ llvm::BasicBlock *CodeGenIPBB = CodeGenIP.getBlock();
+ // Drop any existing terminator; the body is emitted in its place.
+ if (llvm::Instruction *CodeGenIPBBTI = CodeGenIPBB->getTerminator())
+ CodeGenIPBBTI->eraseFromParent();
+
+ CGF.Builder.SetInsertPoint(CodeGenIPBB);
+
+ CGF.EmitStmt(RegionBodyStmt);
+
+ // If the body left an active insert point, branch to finalization.
+ if (CGF.Builder.saveIP().isSet())
+ CGF.Builder.CreateBr(&FiniBB);
+ }
+
+ /// RAII for preserving necessary info during Outlined region body codegen.
+ /// Saves the alloca insertion point, builder insert point and return
+ /// block on construction; the destructor restores all three.
+ class OutlinedRegionBodyRAII {
+
+ llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
+ CodeGenFunction::JumpDest OldReturnBlock;
+ CGBuilderTy::InsertPoint IP;
+ CodeGenFunction &CGF;
+
+ public:
+ OutlinedRegionBodyRAII(CodeGenFunction &cgf, InsertPointTy &AllocaIP,
+ llvm::BasicBlock &RetBB)
+ : CGF(cgf) {
+ assert(AllocaIP.isSet() &&
+ "Must specify Insertion point for allocas of outlined function");
+ OldAllocaIP = CGF.AllocaInsertPt;
+ CGF.AllocaInsertPt = &*AllocaIP.getPoint();
+ IP = CGF.Builder.saveIP();
+
+ OldReturnBlock = CGF.ReturnBlock;
+ CGF.ReturnBlock = CGF.getJumpDestInCurrentScope(&RetBB);
+ }
+
+ ~OutlinedRegionBodyRAII() {
+ CGF.AllocaInsertPt = OldAllocaIP;
+ CGF.ReturnBlock = OldReturnBlock;
+ CGF.Builder.restoreIP(IP);
+ }
+ };
+
+ /// RAII for preserving necessary info during inlined region body codegen.
+ class InlinedRegionBodyRAII {
+
+ llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
+ CodeGenFunction &CGF;
+
+ public:
+ InlinedRegionBodyRAII(CodeGenFunction &cgf, InsertPointTy &AllocaIP,
+ llvm::BasicBlock &FiniBB)
+ : CGF(cgf) {
+ // Alloca insertion block should be in the entry block of the containing
+ // function so it expects an empty AllocaIP in which case will reuse the
+ // old alloca insertion point, or a new AllocaIP in the same block as
+ // the old one
+ assert((!AllocaIP.isSet() ||
+ CGF.AllocaInsertPt->getParent() == AllocaIP.getBlock()) &&
+ "Insertion point should be in the entry block of containing "
+ "function!");
+ OldAllocaIP = CGF.AllocaInsertPt;
+ if (AllocaIP.isSet())
+ CGF.AllocaInsertPt = &*AllocaIP.getPoint();
+
+ // TODO: Remove the call, after making sure the counter is not used by
+ // the EHStack.
+ // Since this is an inlined region, it should not modify the
+ // ReturnBlock, and should reuse the one for the enclosing outlined
+ // region. So, the JumpDest being returned by the function is discarded
+ (void)CGF.getJumpDestInCurrentScope(&FiniBB);
+ }
+
+ ~InlinedRegionBodyRAII() { CGF.AllocaInsertPt = OldAllocaIP; }
+ };
+ };
+
+
private:
/// CXXThisDecl - When generating code for a C++ member function,
/// this will hold the implicit 'this' declaration.
@@ -1772,7 +1987,6 @@ public:
/// information about the block, including the block invoke function, the
/// captured variables, etc.
llvm::Value *EmitBlockLiteral(const BlockExpr *);
- static void destroyBlockInfos(CGBlockInfo *info);
llvm::Function *GenerateBlockFunction(GlobalDecl GD,
const CGBlockInfo &Info,
@@ -2155,13 +2369,6 @@ public:
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T);
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T);
- CharUnits getNaturalTypeAlignment(QualType T,
- LValueBaseInfo *BaseInfo = nullptr,
- TBAAAccessInfo *TBAAInfo = nullptr,
- bool forPointeeType = false);
- CharUnits getNaturalPointeeTypeAlignment(QualType T,
- LValueBaseInfo *BaseInfo = nullptr,
- TBAAAccessInfo *TBAAInfo = nullptr);
Address EmitLoadOfReference(LValue RefLVal,
LValueBaseInfo *PointeeBaseInfo = nullptr,
@@ -2264,8 +2471,9 @@ public:
/// CreateAggTemp - Create a temporary memory object for the given
/// aggregate type.
- AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
- return AggValueSlot::forAddr(CreateMemTemp(T, Name),
+ AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp",
+ Address *Alloca = nullptr) {
+ return AggValueSlot::forAddr(CreateMemTemp(T, Name, Alloca),
T.getQualifiers(),
AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
@@ -2594,7 +2802,8 @@ public:
Address EmitCXXUuidofExpr(const CXXUuidofExpr *E);
/// Situations in which we might emit a check for the suitability of a
- /// pointer or glvalue.
+ /// pointer or glvalue. Needs to be kept in sync with ubsan_handlers.cpp in
+ /// compiler-rt.
enum TypeCheckKind {
/// Checking the operand of a load. Must be suitably sized and aligned.
TCK_Load,
@@ -2826,7 +3035,7 @@ public:
PeepholeProtection protectFromPeepholes(RValue rvalue);
void unprotectFromPeepholes(PeepholeProtection protection);
- void EmitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty,
+ void emitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty,
SourceLocation Loc,
SourceLocation AssumptionLoc,
llvm::Value *Alignment,
@@ -2834,13 +3043,14 @@ public:
llvm::Value *TheCheck,
llvm::Instruction *Assumption);
- void EmitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty,
+ void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty,
SourceLocation Loc, SourceLocation AssumptionLoc,
llvm::Value *Alignment,
llvm::Value *OffsetValue = nullptr);
- void EmitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E,
- SourceLocation AssumptionLoc, llvm::Value *Alignment,
+ void emitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E,
+ SourceLocation AssumptionLoc,
+ llvm::Value *Alignment,
llvm::Value *OffsetValue = nullptr);
//===--------------------------------------------------------------------===//
@@ -2983,7 +3193,8 @@ public:
llvm::Function *EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K);
llvm::Function *GenerateCapturedStmtFunction(const CapturedStmt &S);
Address GenerateCapturedStmtArgument(const CapturedStmt &S);
- llvm::Function *GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S);
+ llvm::Function *GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
+ SourceLocation Loc);
void GenerateOpenMPCapturedVars(const CapturedStmt &S,
SmallVectorImpl<llvm::Value *> &CapturedVars);
void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy,
@@ -3037,7 +3248,10 @@ public:
void EmitOMPPrivateClause(const OMPExecutableDirective &D,
OMPPrivateScope &PrivateScope);
void EmitOMPUseDevicePtrClause(
- const OMPClause &C, OMPPrivateScope &PrivateScope,
+ const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
+ const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap);
+ void EmitOMPUseDeviceAddrClause(
+ const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap);
/// Emit code for copyin clause in \a D directive. The next code is
/// generated at the start of outlined functions for directives:
@@ -3091,7 +3305,8 @@ public:
/// proper codegen in internal captured statement.
///
void EmitOMPReductionClauseInit(const OMPExecutableDirective &D,
- OMPPrivateScope &PrivateScope);
+ OMPPrivateScope &PrivateScope,
+ bool ForInscan = false);
/// Emit final update of reduction values to original variables at
/// the end of the directive.
///
@@ -3149,6 +3364,8 @@ public:
void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S);
void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S);
void EmitOMPFlushDirective(const OMPFlushDirective &S);
+ void EmitOMPDepobjDirective(const OMPDepobjDirective &S);
+ void EmitOMPScanDirective(const OMPScanDirective &S);
void EmitOMPOrderedDirective(const OMPOrderedDirective &S);
void EmitOMPAtomicDirective(const OMPAtomicDirective &S);
void EmitOMPTargetDirective(const OMPTargetDirective &S);
@@ -3250,8 +3467,8 @@ public:
/// \param PostIncGen Generator for post-increment code (required for ordered
/// loop directives).
void EmitOMPInnerLoop(
- const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
- const Expr *IncExpr,
+ const OMPExecutableDirective &S, bool RequiresCleanup,
+ const Expr *LoopCond, const Expr *IncExpr,
const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
const llvm::function_ref<void(CodeGenFunction &)> PostIncGen);
@@ -3517,6 +3734,7 @@ public:
LValue EmitUnaryOpLValue(const UnaryOperator *E);
LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
bool Accessed = false);
+ LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E);
LValue EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
bool IsLowerBound = true);
LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
@@ -3722,6 +3940,8 @@ public:
RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue);
+ RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E,
+ ReturnValueSlot ReturnValue);
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
const CallExpr *E, ReturnValueSlot ReturnValue);
@@ -3757,6 +3977,13 @@ public:
llvm::Value *EmitARMMVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
ReturnValueSlot ReturnValue,
llvm::Triple::ArchType Arch);
+ llvm::Value *EmitARMCDEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
+ ReturnValueSlot ReturnValue,
+ llvm::Triple::ArchType Arch);
+ llvm::Value *EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy,
+ QualType RTy);
+ llvm::Value *EmitCMSEClearRecord(llvm::Value *V, llvm::ArrayType *ATy,
+ QualType RTy);
llvm::Value *EmitCommonNeonBuiltinExpr(unsigned BuiltinID,
unsigned LLVMIntrinsic,
@@ -3775,12 +4002,62 @@ public:
SmallVectorImpl<llvm::Value*> &O,
const char *name,
unsigned shift = 0, bool rightshift = false);
+ llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx,
+ const llvm::ElementCount &Count);
llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
bool negateForRightShift);
llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt,
llvm::Type *Ty, bool usgn, const char *name);
llvm::Value *vectorWrapScalar16(llvm::Value *Op);
+ /// SVEBuiltinMemEltTy - Returns the memory element type for this memory
+ /// access builtin. Only required if it can't be inferred from the base
+ /// pointer operand.
+ llvm::Type *SVEBuiltinMemEltTy(SVETypeFlags TypeFlags);
+
+ SmallVector<llvm::Type *, 2> getSVEOverloadTypes(SVETypeFlags TypeFlags,
+ llvm::Type *ReturnType,
+ ArrayRef<llvm::Value *> Ops);
+ llvm::Type *getEltType(SVETypeFlags TypeFlags);
+ llvm::ScalableVectorType *getSVEType(const SVETypeFlags &TypeFlags);
+ llvm::ScalableVectorType *getSVEPredType(SVETypeFlags TypeFlags);
+ llvm::Value *EmitSVEAllTruePred(SVETypeFlags TypeFlags);
+ llvm::Value *EmitSVEDupX(llvm::Value *Scalar);
+ llvm::Value *EmitSVEDupX(llvm::Value *Scalar, llvm::Type *Ty);
+ llvm::Value *EmitSVEReinterpret(llvm::Value *Val, llvm::Type *Ty);
+ llvm::Value *EmitSVEPMull(SVETypeFlags TypeFlags,
+ llvm::SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned BuiltinID);
+ llvm::Value *EmitSVEMovl(SVETypeFlags TypeFlags,
+ llvm::ArrayRef<llvm::Value *> Ops,
+ unsigned BuiltinID);
+ llvm::Value *EmitSVEPredicateCast(llvm::Value *Pred,
+ llvm::ScalableVectorType *VTy);
+ llvm::Value *EmitSVEGatherLoad(SVETypeFlags TypeFlags,
+ llvm::SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned IntID);
+ llvm::Value *EmitSVEScatterStore(SVETypeFlags TypeFlags,
+ llvm::SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned IntID);
+ llvm::Value *EmitSVEMaskedLoad(const CallExpr *, llvm::Type *ReturnTy,
+ SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned BuiltinID, bool IsZExtReturn);
+ llvm::Value *EmitSVEMaskedStore(const CallExpr *,
+ SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned BuiltinID);
+ llvm::Value *EmitSVEPrefetchLoad(SVETypeFlags TypeFlags,
+ SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned BuiltinID);
+ llvm::Value *EmitSVEGatherPrefetch(SVETypeFlags TypeFlags,
+ SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned IntID);
+ llvm::Value *EmitSVEStructLoad(SVETypeFlags TypeFlags,
+ SmallVectorImpl<llvm::Value *> &Ops, unsigned IntID);
+ llvm::Value *EmitSVEStructStore(SVETypeFlags TypeFlags,
+ SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned IntID);
+ llvm::Value *EmitAArch64SVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+
llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
llvm::Triple::ArchType Arch);
llvm::Value *EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
@@ -3794,6 +4071,9 @@ public:
llvm::Value *EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
const CallExpr *E);
llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ bool ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope,
+ llvm::AtomicOrdering &AO,
+ llvm::SyncScope::ID &SSID);
private:
enum class MSVCIntrin;
@@ -3924,6 +4204,10 @@ public:
/// aggregate type into a temporary LValue.
LValue EmitAggExprToLValue(const Expr *E);
+ /// Build all the stores needed to initialize an aggregate at Dest with the
+ /// value Val.
+ void EmitAggregateStore(llvm::Value *Val, Address Dest, bool DestIsVolatile);
+
/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
/// make sure it survives garbage collection until this point.
void EmitExtendGCLifetime(llvm::Value *object);
@@ -3974,6 +4258,9 @@ public:
/// Call atexit() with function dtorStub.
void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub);
+ /// Call unatexit() with function dtorStub.
+ llvm::Value *unregisterGlobalDtorWithUnAtExit(llvm::Function *dtorStub);
+
/// Emit code in this function to perform a guarded variable
/// initialization. Guarded initializations are used when it's not
/// possible to prove that an initialization will be done exactly
@@ -3997,12 +4284,12 @@ public:
ArrayRef<llvm::Function *> CXXThreadLocals,
ConstantAddress Guard = ConstantAddress::invalid());
- /// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
+ /// GenerateCXXGlobalCleanUpFunc - Generates code for cleaning up global
/// variables.
- void GenerateCXXGlobalDtorsFunc(
+ void GenerateCXXGlobalCleanUpFunc(
llvm::Function *Fn,
const std::vector<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
- llvm::Constant *>> &DtorsAndObjects);
+ llvm::Constant *>> &DtorsOrStermFinalizers);
void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
const VarDecl *D,
@@ -4013,14 +4300,6 @@ public:
void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp);
- void enterFullExpression(const FullExpr *E) {
- if (const auto *EWC = dyn_cast<ExprWithCleanups>(E))
- if (EWC->getNumObjects() == 0)
- return;
- enterNonTrivialFullExpression(E);
- }
- void enterNonTrivialFullExpression(const FullExpr *E);
-
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true);
RValue EmitAtomicExpr(AtomicExpr *E);
@@ -4175,6 +4454,9 @@ public:
/// SetFPModel - Control floating point behavior via fp-model settings.
void SetFPModel();
+ /// Set the codegen fast-math flags.
+ void SetFastMathFlags(FPOptions FPFeatures);
+
private:
llvm::MDNode *getRangeForLoadFromType(QualType Ty);
void EmitReturnOfRValue(RValue RV, QualType Ty);
@@ -4195,7 +4477,7 @@ private:
///
/// \param AI - The first function argument of the expansion.
void ExpandTypeFromArgs(QualType Ty, LValue Dst,
- SmallVectorImpl<llvm::Value *>::iterator &AI);
+ llvm::Function::arg_iterator &AI);
/// ExpandTypeToArgs - Expand an CallArg \arg Arg, with the LLVM type for \arg
/// Ty, into individual arguments on the provided vector \arg IRCallArgs,
@@ -4411,10 +4693,15 @@ inline llvm::Value *DominatingLLVMValue::restore(CodeGenFunction &CGF,
// Otherwise, it should be an alloca instruction, as set up in save().
auto alloca = cast<llvm::AllocaInst>(value.getPointer());
- return CGF.Builder.CreateAlignedLoad(alloca, alloca->getAlignment());
+ return CGF.Builder.CreateAlignedLoad(alloca, alloca->getAlign());
}
} // end namespace CodeGen
+
+// Map the LangOption for floating point exception behavior into
+// the corresponding enum in the IR.
+llvm::fp::ExceptionBehavior
+ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind);
} // end namespace clang
#endif
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 57beda26677c..4ae8ce7e5ccf 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -38,6 +38,7 @@
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
@@ -83,6 +84,7 @@ static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
case TargetCXXABI::GenericMIPS:
case TargetCXXABI::GenericItanium:
case TargetCXXABI::WebAssembly:
+ case TargetCXXABI::XL:
return CreateItaniumCXXABI(CGM);
case TargetCXXABI::Microsoft:
return CreateMicrosoftCXXABI(CGM);
@@ -110,6 +112,7 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
HalfTy = llvm::Type::getHalfTy(LLVMContext);
+ BFloatTy = llvm::Type::getBFloatTy(LLVMContext);
FloatTy = llvm::Type::getFloatTy(LLVMContext);
DoubleTy = llvm::Type::getDoubleTy(LLVMContext);
PointerWidthInBits = C.getTargetInfo().getPointerWidth(0);
@@ -219,14 +222,6 @@ void CodeGenModule::createOpenMPRuntime() {
OpenMPRuntime.reset(new CGOpenMPRuntime(*this));
break;
}
-
- // The OpenMP-IR-Builder should eventually replace the above runtime codegens
- // but we are not there yet so they both reside in CGModule for now and the
- // OpenMP-IR-Builder is opt-in only.
- if (LangOpts.OpenMPIRBuilder) {
- OMPBuilder.reset(new llvm::OpenMPIRBuilder(TheModule));
- OMPBuilder->initialize();
- }
}
void CodeGenModule::createCUDARuntime() {
@@ -408,7 +403,7 @@ void CodeGenModule::Release() {
checkAliases();
emitMultiVersionFunctions();
EmitCXXGlobalInitFunc();
- EmitCXXGlobalDtorFunc();
+ EmitCXXGlobalCleanUpFunc();
registerGlobalDtorsWithAtExit();
EmitCXXThreadLocalInitFunc();
if (ObjCRuntime)
@@ -447,6 +442,10 @@ void CodeGenModule::Release() {
CodeGenFunction(*this).EmitCfiCheckStub();
}
emitAtAvailableLinkGuard();
+ if (Context.getTargetInfo().getTriple().isWasm() &&
+ !Context.getTargetInfo().getTriple().isOSEmscripten()) {
+ EmitMainVoidAlias();
+ }
emitLLVMUsed();
if (SanStats)
SanStats->finish();
@@ -483,6 +482,14 @@ void CodeGenModule::Release() {
getModule().addModuleFlag(llvm::Module::Max, "Dwarf Version",
CodeGenOpts.DwarfVersion);
}
+
+ if (Context.getLangOpts().SemanticInterposition)
+ // Require various optimization to respect semantic interposition.
+ getModule().setSemanticInterposition(1);
+ else if (Context.getLangOpts().ExplicitNoSemanticInterposition)
+ // Allow dso_local on applicable targets.
+ getModule().setSemanticInterposition(0);
+
if (CodeGenOpts.EmitCodeView) {
// Indicate that we want CodeView in the metadata.
getModule().addModuleFlag(llvm::Module::Warning, "CodeView", 1);
@@ -513,7 +520,7 @@ void CodeGenModule::Release() {
"StrictVTablePointersRequirement",
llvm::MDNode::get(VMContext, Ops));
}
- if (DebugInfo)
+ if (getModuleDebugInfo())
// We support a single version in the linked module. The LLVM
// parser will drop debug info with a different version number
// (and warn about it, too).
@@ -537,11 +544,26 @@ void CodeGenModule::Release() {
getModule().addModuleFlag(llvm::Module::Error, "min_enum_size", EnumWidth);
}
+ if (Arch == llvm::Triple::riscv32 || Arch == llvm::Triple::riscv64) {
+ StringRef ABIStr = Target.getABI();
+ llvm::LLVMContext &Ctx = TheModule.getContext();
+ getModule().addModuleFlag(llvm::Module::Error, "target-abi",
+ llvm::MDString::get(Ctx, ABIStr));
+ }
+
if (CodeGenOpts.SanitizeCfiCrossDso) {
// Indicate that we want cross-DSO control flow integrity checks.
getModule().addModuleFlag(llvm::Module::Override, "Cross-DSO CFI", 1);
}
+ if (CodeGenOpts.WholeProgramVTables) {
+ // Indicate whether VFE was enabled for this module, so that the
+ // vcall_visibility metadata added under whole program vtables is handled
+ // appropriately in the optimizer.
+ getModule().addModuleFlag(llvm::Module::Error, "Virtual Function Elim",
+ CodeGenOpts.VirtualFunctionElimination);
+ }
+
if (LangOpts.Sanitize.has(SanitizerKind::CFIICall)) {
getModule().addModuleFlag(llvm::Module::Override,
"CFI Canonical Jump Tables",
@@ -567,7 +589,8 @@ void CodeGenModule::Release() {
// floating point values to 0. (This corresponds to its "__CUDA_FTZ"
// property.)
getModule().addModuleFlag(llvm::Module::Override, "nvvm-reflect-ftz",
- CodeGenOpts.FlushDenorm ? 1 : 0);
+ CodeGenOpts.FP32DenormalMode.Output !=
+ llvm::DenormalMode::IEEE);
}
// Emit OpenCL specific module metadata: OpenCL/SPIR version.
@@ -623,8 +646,8 @@ void CodeGenModule::Release() {
if (getCodeGenOpts().EmitGcovArcs || getCodeGenOpts().EmitGcovNotes)
EmitCoverageFile();
- if (DebugInfo)
- DebugInfo->finalize();
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ DI->finalize();
if (getCodeGenOpts().EmitVersionIdentMetadata)
EmitVersionIdentMetadata();
@@ -632,7 +655,9 @@ void CodeGenModule::Release() {
if (!getCodeGenOpts().RecordCommandLine.empty())
EmitCommandLineMetadata();
- EmitTargetMetadata();
+ getTargetCodeGenInfo().emitTargetMetadata(*this, MangledDeclNames);
+
+ EmitBackendOptionsMetadata(getCodeGenOpts());
}
void CodeGenModule::EmitOpenCLMetadata() {
@@ -652,6 +677,19 @@ void CodeGenModule::EmitOpenCLMetadata() {
OCLVerMD->addOperand(llvm::MDNode::get(Ctx, OCLVerElts));
}
+/// Emit module flags that forward frontend codegen options to the backend.
+/// Currently only records the small-data limit for RISC-V targets; other
+/// architectures emit nothing.
+// NOTE(review): CodeGenOpts is passed by value, which copies the whole
+// options struct — a const reference would avoid that; confirm against the
+// declaration in CodeGenModule.h before changing.
+void CodeGenModule::EmitBackendOptionsMetadata(
+ const CodeGenOptions CodeGenOpts) {
+ switch (getTriple().getArch()) {
+ default:
+ break;
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64:
+ // Record the small-data limit as a module flag for the RISC-V backend.
+ getModule().addModuleFlag(llvm::Module::Error, "SmallDataLimit",
+ CodeGenOpts.SmallDataLimit);
+ break;
+ }
+}
+
void CodeGenModule::UpdateCompletedType(const TagDecl *TD) {
// Make sure that this type is translated.
Types.UpdateCompletedType(TD);
@@ -671,6 +709,19 @@ llvm::MDNode *CodeGenModule::getTBAATypeInfo(QualType QTy) {
TBAAAccessInfo CodeGenModule::getTBAAAccessInfo(QualType AccessType) {
if (!TBAA)
return TBAAAccessInfo();
+ if (getLangOpts().CUDAIsDevice) {
+ // As CUDA builtin surface/texture types are replaced, skip generating TBAA
+ // access info.
+ if (AccessType->isCUDADeviceBuiltinSurfaceType()) {
+ if (getTargetCodeGenInfo().getCUDADeviceBuiltinSurfaceDeviceType() !=
+ nullptr)
+ return TBAAAccessInfo();
+ } else if (AccessType->isCUDADeviceBuiltinTextureType()) {
+ if (getTargetCodeGenInfo().getCUDADeviceBuiltinTextureDeviceType() !=
+ nullptr)
+ return TBAAAccessInfo();
+ }
+ }
return TBAA->getAccessInfo(AccessType);
}
@@ -856,7 +907,7 @@ static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
if (isa<llvm::Function>(GV) && !CGOpts.NoPLT && RM == llvm::Reloc::Static)
return true;
- // Otherwise don't assue it is local.
+ // Otherwise don't assume it is local.
return false;
}
@@ -912,9 +963,9 @@ static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(StringRef S) {
.Case("local-exec", llvm::GlobalVariable::LocalExecTLSModel);
}
-static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(
- CodeGenOptions::TLSModel M) {
- switch (M) {
+llvm::GlobalVariable::ThreadLocalMode
+CodeGenModule::GetDefaultLLVMTLSModel() const {
+ switch (CodeGenOpts.getDefaultTLSModel()) {
case CodeGenOptions::GeneralDynamicTLSModel:
return llvm::GlobalVariable::GeneralDynamicTLSModel;
case CodeGenOptions::LocalDynamicTLSModel:
@@ -931,7 +982,7 @@ void CodeGenModule::setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const {
assert(D.getTLSKind() && "setting TLS mode on non-TLS var!");
llvm::GlobalValue::ThreadLocalMode TLM;
- TLM = GetLLVMTLSModel(CodeGenOpts.getDefaultTLSModel());
+ TLM = GetDefaultLLVMTLSModel();
// Override the TLS model if it is explicitly specified.
if (const TLSModelAttr *Attr = D.getAttr<TLSModelAttr>()) {
@@ -997,23 +1048,19 @@ static std::string getMangledNameImpl(const CodeGenModule &CGM, GlobalDecl GD,
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
MangleContext &MC = CGM.getCXXABI().getMangleContext();
- if (MC.shouldMangleDeclName(ND)) {
- llvm::raw_svector_ostream Out(Buffer);
- if (const auto *D = dyn_cast<CXXConstructorDecl>(ND))
- MC.mangleCXXCtor(D, GD.getCtorType(), Out);
- else if (const auto *D = dyn_cast<CXXDestructorDecl>(ND))
- MC.mangleCXXDtor(D, GD.getDtorType(), Out);
- else
- MC.mangleName(ND, Out);
- } else {
+ if (MC.shouldMangleDeclName(ND))
+ MC.mangleName(GD.getWithDecl(ND), Out);
+ else {
IdentifierInfo *II = ND->getIdentifier();
assert(II && "Attempt to mangle unnamed decl.");
const auto *FD = dyn_cast<FunctionDecl>(ND);
if (FD &&
FD->getType()->castAs<FunctionType>()->getCallConv() == CC_X86RegCall) {
- llvm::raw_svector_ostream Out(Buffer);
Out << "__regcall3__" << II->getName();
+ } else if (FD && FD->hasAttr<CUDAGlobalAttr>() &&
+ GD.getKernelReferenceKind() == KernelReferenceKind::Stub) {
+ Out << "__device_stub__" << II->getName();
} else {
Out << II->getName();
}
@@ -1036,7 +1083,7 @@ static std::string getMangledNameImpl(const CodeGenModule &CGM, GlobalDecl GD,
}
}
- return Out.str();
+ return std::string(Out.str());
}
void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD,
@@ -1101,11 +1148,25 @@ StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
const auto *ND = cast<NamedDecl>(GD.getDecl());
std::string MangledName = getMangledNameImpl(*this, GD, ND);
- // Adjust kernel stub mangling as we may need to be able to differentiate
- // them from the kernel itself (e.g., for HIP).
- if (auto *FD = dyn_cast<FunctionDecl>(GD.getDecl()))
- if (!getLangOpts().CUDAIsDevice && FD->hasAttr<CUDAGlobalAttr>())
- MangledName = getCUDARuntime().getDeviceStubName(MangledName);
+ // Ensure that either the host and device compilations use different ABIs
+ // (say, host compilation follows the MSVC ABI while device compilation
+ // follows the Itanium C++ ABI) or, if they follow the same ABI, that kernel
+ // names after mangling are the same after name stubbing. The latter check is
+ // very important, as the device kernel name mangled in the host compilation
+ // is used to resolve the device binaries to be executed. Inconsistent naming
+ // results in undefined behavior. Even though we cannot check the naming
+ // directly between host and device compilations, comparing the host- and
+ // device-mangling within the host compilation can help catch certain cases.
+ assert(!isa<FunctionDecl>(ND) || !ND->hasAttr<CUDAGlobalAttr>() ||
+ getLangOpts().CUDAIsDevice ||
+ (getContext().getAuxTargetInfo() &&
+ (getContext().getAuxTargetInfo()->getCXXABI() !=
+ getContext().getTargetInfo().getCXXABI())) ||
+ getCUDARuntime().getDeviceSideName(ND) ==
+ getMangledNameImpl(
+ *this,
+ GD.getWithKernelReferenceKind(KernelReferenceKind::Kernel),
+ ND));
auto Result = Manglings.insert(std::make_pair(MangledName, GD));
return MangledDeclNames[CanonicalGD] = Result.first->first();
@@ -1357,7 +1418,7 @@ void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
std::string typeName;
if (isPipe)
typeName = ty.getCanonicalType()
- ->getAs<PipeType>()
+ ->castAs<PipeType>()
->getElementType()
.getAsString(Policy);
else
@@ -1371,7 +1432,7 @@ void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
std::string baseTypeName;
if (isPipe)
baseTypeName = ty.getCanonicalType()
- ->getAs<PipeType>()
+ ->castAs<PipeType>()
->getElementType()
.getCanonicalType()
.getAsString(Policy);
@@ -1493,6 +1554,9 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
if (CodeGenOpts.UnwindTables)
B.addAttribute(llvm::Attribute::UWTable);
+ if (CodeGenOpts.StackClashProtector)
+ B.addAttribute("probe-stack", "inline-asm");
+
if (!hasUnwindExceptions(LangOpts))
B.addAttribute(llvm::Attribute::NoUnwind);
@@ -1840,9 +1904,16 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
else if (const auto *SA = FD->getAttr<SectionAttr>())
F->setSection(SA->getName());
+ // If we plan on emitting this inline builtin, we can't treat it as a builtin.
if (FD->isInlineBuiltinDeclaration()) {
- F->addAttribute(llvm::AttributeList::FunctionIndex,
- llvm::Attribute::NoBuiltin);
+ const FunctionDecl *FDBody;
+ bool HasBody = FD->hasBody(FDBody);
+ (void)HasBody;
+ assert(HasBody && "Inline builtin declarations should always have an "
+ "available body!");
+ if (shouldEmitFunction(FDBody))
+ F->addAttribute(llvm::AttributeList::FunctionIndex,
+ llvm::Attribute::NoBuiltin);
}
if (FD->isReplaceableGlobalAllocationFunction()) {
@@ -1850,15 +1921,6 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
// default, only if it is invoked by a new-expression or delete-expression.
F->addAttribute(llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoBuiltin);
-
- // A sane operator new returns a non-aliasing pointer.
- // FIXME: Also add NonNull attribute to the return value
- // for the non-nothrow forms?
- auto Kind = FD->getDeclName().getCXXOverloadedOperator();
- if (getCodeGenOpts().AssumeSaneOperatorNew &&
- (Kind == OO_New || Kind == OO_Array_New))
- F->addAttribute(llvm::AttributeList::ReturnIndex,
- llvm::Attribute::NoAlias);
}
if (isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD))
@@ -2375,13 +2437,8 @@ bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
return true;
}
-ConstantAddress CodeGenModule::GetAddrOfUuidDescriptor(
- const CXXUuidofExpr* E) {
- // Sema has verified that IIDSource has a __declspec(uuid()), and that its
- // well-formed.
- StringRef Uuid = E->getUuidStr();
- std::string Name = "_GUID_" + Uuid.lower();
- std::replace(Name.begin(), Name.end(), '-', '_');
+ConstantAddress CodeGenModule::GetAddrOfMSGuidDecl(const MSGuidDecl *GD) {
+ StringRef Name = getMangledName(GD);
// The UUID descriptor should be pointer aligned.
CharUnits Alignment = CharUnits::fromQuantity(PointerAlignInBytes);
@@ -2390,8 +2447,30 @@ ConstantAddress CodeGenModule::GetAddrOfUuidDescriptor(
if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
return ConstantAddress(GV, Alignment);
- llvm::Constant *Init = EmitUuidofInitializer(Uuid);
- assert(Init && "failed to initialize as constant");
+ ConstantEmitter Emitter(*this);
+ llvm::Constant *Init;
+
+ APValue &V = GD->getAsAPValue();
+ if (!V.isAbsent()) {
+ // If possible, emit the APValue version of the initializer. In particular,
+ // this gets the type of the constant right.
+ Init = Emitter.emitForInitializer(
+ GD->getAsAPValue(), GD->getType().getAddressSpace(), GD->getType());
+ } else {
+ // As a fallback, directly construct the constant.
+ // FIXME: This may get padding wrong under esoteric struct layout rules.
+ // MSVC appears to create a complete type 'struct __s_GUID' that it
+ // presumably uses to represent these constants.
+ MSGuidDecl::Parts Parts = GD->getParts();
+ llvm::Constant *Fields[4] = {
+ llvm::ConstantInt::get(Int32Ty, Parts.Part1),
+ llvm::ConstantInt::get(Int16Ty, Parts.Part2),
+ llvm::ConstantInt::get(Int16Ty, Parts.Part3),
+ llvm::ConstantDataArray::getRaw(
+ StringRef(reinterpret_cast<char *>(Parts.Part4And5), 8), 8,
+ Int8Ty)};
+ Init = llvm::ConstantStruct::getAnon(Fields);
+ }
auto *GV = new llvm::GlobalVariable(
getModule(), Init->getType(),
@@ -2399,7 +2478,16 @@ ConstantAddress CodeGenModule::GetAddrOfUuidDescriptor(
if (supportsCOMDAT())
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
setDSOLocal(GV);
- return ConstantAddress(GV, Alignment);
+
+ llvm::Constant *Addr = GV;
+ if (!V.isAbsent()) {
+ Emitter.finalize(GV);
+ } else {
+ llvm::Type *Ty = getTypes().ConvertTypeForMem(GD->getType());
+ Addr = llvm::ConstantExpr::getBitCast(
+ GV, Ty->getPointerTo(GV->getAddressSpace()));
+ }
+ return ConstantAddress(Addr, Alignment);
}
ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
@@ -2461,7 +2549,8 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
!Global->hasAttr<CUDAGlobalAttr>() &&
!Global->hasAttr<CUDAConstantAttr>() &&
!Global->hasAttr<CUDASharedAttr>() &&
- !(LangOpts.HIP && Global->hasAttr<HIPPinnedShadowAttr>()))
+ !Global->getType()->isCUDADeviceBuiltinSurfaceType() &&
+ !Global->getType()->isCUDADeviceBuiltinTextureType())
return;
} else {
// We need to emit host-side 'shadows' for all global
@@ -2554,11 +2643,6 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
return;
}
- // Check if this must be emitted as declare variant.
- if (LangOpts.OpenMP && isa<FunctionDecl>(Global) && OpenMPRuntime &&
- OpenMPRuntime->emitDeclareVariant(GD, /*IsForDefinition=*/false))
- return;
-
// If we're deferring emission of a C++ variable with an
// initializer, remember the order in which it appeared in the file.
if (getLangOpts().CPlusPlus && isa<VarDecl>(Global) &&
@@ -2741,8 +2825,8 @@ bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
// PR9614. Avoid cases where the source code is lying to us. An available
// externally function should have an equivalent function somewhere else,
- // but a function that calls itself is clearly not equivalent to the real
- // implementation.
+ // but a function that calls itself through asm label/`__builtin_` trickery is
+ // clearly not equivalent to the real implementation.
// This happens in glibc's btowc and in some configure checks.
return !isTriviallyRecursive(F);
}
@@ -2764,50 +2848,6 @@ void CodeGenModule::EmitMultiVersionFunctionDefinition(GlobalDecl GD,
EmitGlobalFunctionDefinition(GD, GV);
}
-void CodeGenModule::emitOpenMPDeviceFunctionRedefinition(
- GlobalDecl OldGD, GlobalDecl NewGD, llvm::GlobalValue *GV) {
- assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
- OpenMPRuntime && "Expected OpenMP device mode.");
- const auto *D = cast<FunctionDecl>(OldGD.getDecl());
-
- // Compute the function info and LLVM type.
- const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(OldGD);
- llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
-
- // Get or create the prototype for the function.
- if (!GV || (GV->getType()->getElementType() != Ty)) {
- GV = cast<llvm::GlobalValue>(GetOrCreateLLVMFunction(
- getMangledName(OldGD), Ty, GlobalDecl(), /*ForVTable=*/false,
- /*DontDefer=*/true, /*IsThunk=*/false, llvm::AttributeList(),
- ForDefinition));
- SetFunctionAttributes(OldGD, cast<llvm::Function>(GV),
- /*IsIncompleteFunction=*/false,
- /*IsThunk=*/false);
- }
- // We need to set linkage and visibility on the function before
- // generating code for it because various parts of IR generation
- // want to propagate this information down (e.g. to local static
- // declarations).
- auto *Fn = cast<llvm::Function>(GV);
- setFunctionLinkage(OldGD, Fn);
-
- // FIXME: this is redundant with part of
- // setFunctionDefinitionAttributes
- setGVProperties(Fn, OldGD);
-
- MaybeHandleStaticInExternC(D, Fn);
-
- maybeSetTrivialComdat(*D, *Fn);
-
- CodeGenFunction(*this).GenerateCode(NewGD, Fn, FI);
-
- setNonAliasAttributes(OldGD, Fn);
- SetLLVMFunctionAttributesForDefinition(D, Fn);
-
- if (D->hasAttr<AnnotateAttr>())
- AddGlobalAnnotations(D, Fn);
-}
-
void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
const auto *D = cast<ValueDecl>(GD.getDecl());
@@ -3122,14 +3162,9 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
EmitGlobal(GDDef);
}
}
- // Check if this must be emitted as declare variant and emit reference to
- // the the declare variant function.
- if (LangOpts.OpenMP && OpenMPRuntime)
- (void)OpenMPRuntime->emitDeclareVariant(GD, /*IsForDefinition=*/true);
if (FD->isMultiVersion()) {
- const auto *TA = FD->getAttr<TargetAttr>();
- if (TA && TA->isDefaultVersion())
+ if (FD->hasAttr<TargetAttr>())
UpdateMultiVersionNames(GD, FD);
if (!IsForDefinition)
return GetOrCreateMultiVersionResolver(GD, Ty, FD);
@@ -3169,7 +3204,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
}
if ((isa<llvm::Function>(Entry) || isa<llvm::GlobalAlias>(Entry)) &&
- (Entry->getType()->getElementType() == Ty)) {
+ (Entry->getValueType() == Ty)) {
return Entry;
}
@@ -3218,7 +3253,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
}
llvm::Constant *BC = llvm::ConstantExpr::getBitCast(
- F, Entry->getType()->getElementType()->getPointerTo());
+ F, Entry->getValueType()->getPointerTo());
addGlobalValReplacement(Entry, BC);
}
@@ -3277,7 +3312,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
// Make sure the result is of the requested type.
if (!IsIncompleteFunction) {
- assert(F->getType()->getElementType() == Ty);
+ assert(F->getFunctionType() == Ty);
return F;
}
@@ -3293,6 +3328,8 @@ llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
bool ForVTable,
bool DontDefer,
ForDefinition_t IsForDefinition) {
+ assert(!cast<FunctionDecl>(GD.getDecl())->isConsteval() &&
+ "consteval function should never be emitted");
// If there was no specific requested type, just convert it now.
if (!Ty) {
const auto *FD = cast<FunctionDecl>(GD.getDecl());
@@ -3568,7 +3605,7 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
llvm::Constant *Init = emitter.tryEmitForInitializer(*InitDecl);
if (Init) {
auto *InitType = Init->getType();
- if (GV->getType()->getElementType() != InitType) {
+ if (GV->getValueType() != InitType) {
// The type of the initializer does not match the definition.
// This happens when an initializer has a different type from
// the type of the global (because of padding at the end of a
@@ -3611,26 +3648,29 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
}
llvm::Constant *
-CodeGenModule::GetAddrOfGlobal(GlobalDecl GD,
- ForDefinition_t IsForDefinition) {
+CodeGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) {
const Decl *D = GD.getDecl();
+
if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
return getAddrOfCXXStructor(GD, /*FnInfo=*/nullptr, /*FnType=*/nullptr,
/*DontDefer=*/false, IsForDefinition);
- else if (isa<CXXMethodDecl>(D)) {
- auto FInfo = &getTypes().arrangeCXXMethodDeclaration(
- cast<CXXMethodDecl>(D));
+
+ if (isa<CXXMethodDecl>(D)) {
+ auto FInfo =
+ &getTypes().arrangeCXXMethodDeclaration(cast<CXXMethodDecl>(D));
auto Ty = getTypes().GetFunctionType(*FInfo);
return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
IsForDefinition);
- } else if (isa<FunctionDecl>(D)) {
+ }
+
+ if (isa<FunctionDecl>(D)) {
const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
IsForDefinition);
- } else
- return GetAddrOfGlobalVar(cast<VarDecl>(D), /*Ty=*/nullptr,
- IsForDefinition);
+ }
+
+ return GetAddrOfGlobalVar(cast<VarDecl>(D), /*Ty=*/nullptr, IsForDefinition);
}
llvm::GlobalVariable *CodeGenModule::CreateOrReplaceCXXRuntimeVariable(
@@ -3641,7 +3681,7 @@ llvm::GlobalVariable *CodeGenModule::CreateOrReplaceCXXRuntimeVariable(
if (GV) {
// Check if the variable has the right type.
- if (GV->getType()->getElementType() == Ty)
+ if (GV->getValueType() == Ty)
return GV;
// Because C++ name mangling, the only way we can end up with an already
@@ -3915,12 +3955,16 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
!getLangOpts().CUDAIsDevice &&
(D->hasAttr<CUDAConstantAttr>() || D->hasAttr<CUDADeviceAttr>() ||
D->hasAttr<CUDASharedAttr>());
+ bool IsCUDADeviceShadowVar =
+ getLangOpts().CUDAIsDevice &&
+ (D->getType()->isCUDADeviceBuiltinSurfaceType() ||
+ D->getType()->isCUDADeviceBuiltinTextureType());
// HIP pinned shadow of initialized host-side global variables are also
// left undefined.
- bool IsHIPPinnedShadowVar =
- getLangOpts().CUDAIsDevice && D->hasAttr<HIPPinnedShadowAttr>();
if (getLangOpts().CUDA &&
- (IsCUDASharedVar || IsCUDAShadowVar || IsHIPPinnedShadowVar))
+ (IsCUDASharedVar || IsCUDAShadowVar || IsCUDADeviceShadowVar))
+ Init = llvm::UndefValue::get(getTypes().ConvertType(ASTTy));
+ else if (D->hasAttr<LoaderUninitializedAttr>())
Init = llvm::UndefValue::get(getTypes().ConvertType(ASTTy));
else if (!InitExpr) {
// This is a tentative definition; tentative definitions are
@@ -3979,7 +4023,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
// "extern int x[];") and then a definition of a different type (e.g.
// "int x[10];"). This also happens when an initializer has a different type
// from the type of the global (this happens with unions).
- if (!GV || GV->getType()->getElementType() != InitType ||
+ if (!GV || GV->getValueType() != InitType ||
GV->getType()->getAddressSpace() !=
getContext().getTargetAddressSpace(GetGlobalVarAddressSpace(D))) {
@@ -4026,34 +4070,56 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
// global variables become internal definitions. These have to
// be internal in order to prevent name conflicts with global
// host variables with the same name in a different TUs.
- if (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
- D->hasAttr<HIPPinnedShadowAttr>()) {
+ if (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>()) {
Linkage = llvm::GlobalValue::InternalLinkage;
-
- // Shadow variables and their properties must be registered
- // with CUDA runtime.
- unsigned Flags = 0;
- if (!D->hasDefinition())
- Flags |= CGCUDARuntime::ExternDeviceVar;
- if (D->hasAttr<CUDAConstantAttr>())
- Flags |= CGCUDARuntime::ConstantDeviceVar;
- // Extern global variables will be registered in the TU where they are
- // defined.
+ // Shadow variables and their properties must be registered with CUDA
+ // runtime. Skip Extern global variables, which will be registered in
+ // the TU where they are defined.
if (!D->hasExternalStorage())
- getCUDARuntime().registerDeviceVar(D, *GV, Flags);
- } else if (D->hasAttr<CUDASharedAttr>())
+ getCUDARuntime().registerDeviceVar(D, *GV, !D->hasDefinition(),
+ D->hasAttr<CUDAConstantAttr>());
+ } else if (D->hasAttr<CUDASharedAttr>()) {
// __shared__ variables are odd. Shadows do get created, but
// they are not registered with the CUDA runtime, so they
// can't really be used to access their device-side
// counterparts. It's not clear yet whether it's nvcc's bug or
// a feature, but we've got to do the same for compatibility.
Linkage = llvm::GlobalValue::InternalLinkage;
+ } else if (D->getType()->isCUDADeviceBuiltinSurfaceType() ||
+ D->getType()->isCUDADeviceBuiltinTextureType()) {
+ // Builtin surfaces and textures and their template arguments are
+ // also registered with CUDA runtime.
+ Linkage = llvm::GlobalValue::InternalLinkage;
+ const ClassTemplateSpecializationDecl *TD =
+ cast<ClassTemplateSpecializationDecl>(
+ D->getType()->getAs<RecordType>()->getDecl());
+ const TemplateArgumentList &Args = TD->getTemplateArgs();
+ if (TD->hasAttr<CUDADeviceBuiltinSurfaceTypeAttr>()) {
+ assert(Args.size() == 2 &&
+ "Unexpected number of template arguments of CUDA device "
+ "builtin surface type.");
+ auto SurfType = Args[1].getAsIntegral();
+ if (!D->hasExternalStorage())
+ getCUDARuntime().registerDeviceSurf(D, *GV, !D->hasDefinition(),
+ SurfType.getSExtValue());
+ } else {
+ assert(Args.size() == 3 &&
+ "Unexpected number of template arguments of CUDA device "
+ "builtin texture type.");
+ auto TexType = Args[1].getAsIntegral();
+ auto Normalized = Args[2].getAsIntegral();
+ if (!D->hasExternalStorage())
+ getCUDARuntime().registerDeviceTex(D, *GV, !D->hasDefinition(),
+ TexType.getSExtValue(),
+ Normalized.getZExtValue());
+ }
+ }
}
}
- if (!IsHIPPinnedShadowVar)
- GV->setInitializer(Init);
- if (emitter) emitter->finalize(GV);
+ GV->setInitializer(Init);
+ if (emitter)
+ emitter->finalize(GV);
// If it is safe to mark the global 'constant', do so now.
GV->setConstant(!NeedsGlobalCtor && !NeedsGlobalDtor &&
@@ -4068,17 +4134,24 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
GV->setAlignment(getContext().getDeclAlign(D).getAsAlign());
- // On Darwin, if the normal linkage of a C++ thread_local variable is
- // LinkOnce or Weak, we keep the normal linkage to prevent multiple
- // copies within a linkage unit; otherwise, the backing variable has
- // internal linkage and all accesses should just be calls to the
- // Itanium-specified entry point, which has the normal linkage of the
- // variable. This is to preserve the ability to change the implementation
- // behind the scenes.
- if (!D->isStaticLocal() && D->getTLSKind() == VarDecl::TLS_Dynamic &&
+ // On Darwin, unlike other Itanium C++ ABI platforms, the thread-wrapper
+ // function is only defined alongside the variable, not also alongside
+ // callers. Normally, all accesses to a thread_local go through the
+ // thread-wrapper in order to ensure initialization has occurred, underlying
+ // variable will never be used other than the thread-wrapper, so it can be
+ // converted to internal linkage.
+ //
+ // However, if the variable has the 'constinit' attribute, it _can_ be
+ // referenced directly, without calling the thread-wrapper, so the linkage
+ // must not be changed.
+ //
+ // Additionally, if the variable isn't plain external linkage, e.g. if it's
+ // weak or linkonce, the de-duplication semantics are important to preserve,
+ // so we don't change the linkage.
+ if (D->getTLSKind() == VarDecl::TLS_Dynamic &&
+ Linkage == llvm::GlobalValue::ExternalLinkage &&
Context.getTargetInfo().getTriple().isOSDarwin() &&
- !llvm::GlobalVariable::isLinkOnceLinkage(Linkage) &&
- !llvm::GlobalVariable::isWeakLinkage(Linkage))
+ !D->hasAttr<ConstInitAttr>())
Linkage = llvm::GlobalValue::InternalLinkage;
GV->setLinkage(Linkage);
@@ -4421,11 +4494,6 @@ void CodeGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
llvm::GlobalValue *GV) {
- // Check if this must be emitted as declare variant.
- if (LangOpts.OpenMP && OpenMPRuntime &&
- OpenMPRuntime->emitDeclareVariant(GD, /*IsForDefinition=*/true))
- return;
-
const auto *D = cast<FunctionDecl>(GD.getDecl());
// Compute the function info and LLVM type.
@@ -4433,7 +4501,7 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
// Get or create the prototype for the function.
- if (!GV || (GV->getType()->getElementType() != Ty))
+ if (!GV || (GV->getValueType() != Ty))
GV = cast<llvm::GlobalValue>(GetAddrOfFunction(GD, Ty, /*ForVTable=*/false,
/*DontDefer=*/true,
ForDefinition));
@@ -4457,7 +4525,7 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
maybeSetTrivialComdat(*D, *Fn);
- CodeGenFunction(*this).GenerateCode(D, Fn, FI);
+ CodeGenFunction(*this).GenerateCode(GD, Fn, FI);
setNonAliasAttributes(GD, Fn);
SetLLVMFunctionAttributesForDefinition(D, Fn);
@@ -4509,8 +4577,9 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
}
// Create the new alias itself, but don't set a name yet.
+ unsigned AS = Aliasee->getType()->getPointerAddressSpace();
auto *GA =
- llvm::GlobalAlias::create(DeclTy, 0, LT, "", Aliasee, &getModule());
+ llvm::GlobalAlias::create(DeclTy, AS, LT, "", Aliasee, &getModule());
if (Entry) {
if (GA->getAliasee() == Entry) {
@@ -5258,6 +5327,11 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
if (D->isTemplated())
return;
+ // Consteval function shouldn't be emitted.
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->isConsteval())
+ return;
+
switch (D->getKind()) {
case Decl::CXXConversion:
case Decl::CXXMethod:
@@ -5293,17 +5367,17 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
break;
case Decl::ClassTemplateSpecialization: {
const auto *Spec = cast<ClassTemplateSpecializationDecl>(D);
- if (DebugInfo &&
- Spec->getSpecializationKind() == TSK_ExplicitInstantiationDefinition &&
- Spec->hasDefinition())
- DebugInfo->completeTemplateDefinition(*Spec);
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ if (Spec->getSpecializationKind() ==
+ TSK_ExplicitInstantiationDefinition &&
+ Spec->hasDefinition())
+ DI->completeTemplateDefinition(*Spec);
} LLVM_FALLTHROUGH;
case Decl::CXXRecord:
- if (DebugInfo) {
+ if (CGDebugInfo *DI = getModuleDebugInfo())
if (auto *ES = D->getASTContext().getExternalSource())
if (ES->hasExternalDefinitions(D) == ExternalASTSource::EK_Never)
- DebugInfo->completeUnusedClass(cast<CXXRecordDecl>(*D));
- }
+ DI->completeUnusedClass(cast<CXXRecordDecl>(*D));
// Emit any static data members, they may be definitions.
for (auto *I : cast<CXXRecordDecl>(D)->decls())
if (isa<VarDecl>(I) || isa<CXXRecordDecl>(I))
@@ -5324,15 +5398,15 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::Using: // using X; [C++]
if (CGDebugInfo *DI = getModuleDebugInfo())
DI->EmitUsingDecl(cast<UsingDecl>(*D));
- return;
+ break;
case Decl::NamespaceAlias:
if (CGDebugInfo *DI = getModuleDebugInfo())
DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(*D));
- return;
+ break;
case Decl::UsingDirective: // using namespace X; [C++]
if (CGDebugInfo *DI = getModuleDebugInfo())
DI->EmitUsingDirective(cast<UsingDirectiveDecl>(*D));
- return;
+ break;
case Decl::CXXConstructor:
getCXXABI().EmitCXXConstructors(cast<CXXConstructorDecl>(D));
break;
@@ -5515,10 +5589,10 @@ void CodeGenModule::AddDeferredUnusedCoverageMapping(Decl *D) {
case Decl::CXXConstructor:
case Decl::CXXDestructor: {
if (!cast<FunctionDecl>(D)->doesThisDeclarationHaveABody())
- return;
+ break;
SourceManager &SM = getContext().getSourceManager();
if (LimitedCoverage && SM.getMainFileID() != SM.getFileID(D->getBeginLoc()))
- return;
+ break;
auto I = DeferredEmptyCoverageMappingDecls.find(D);
if (I == DeferredEmptyCoverageMappingDecls.end())
DeferredEmptyCoverageMappingDecls[D] = true;
@@ -5584,6 +5658,17 @@ void CodeGenModule::EmitDeferredUnusedCoverageMappings() {
}
}
+void CodeGenModule::EmitMainVoidAlias() {
+ // In order to transition away from "__original_main" gracefully, emit an
+ // alias for "main" in the no-argument case so that libc can detect when
+ // new-style no-argument main is in used.
+ if (llvm::Function *F = getModule().getFunction("main")) {
+ if (!F->isDeclaration() && F->arg_size() == 0 && !F->isVarArg() &&
+ F->getReturnType()->isIntegerTy(Context.getTargetInfo().getIntWidth()))
+ addUsedGlobal(llvm::GlobalAlias::create("__main_void", F));
+ }
+}
+
/// Turns the given pointer into a constant.
static llvm::Constant *GetPointerConstant(llvm::LLVMContext &Context,
const void *Ptr) {
@@ -5698,21 +5783,6 @@ void CodeGenModule::EmitCommandLineMetadata() {
CommandLineMetadata->addOperand(llvm::MDNode::get(Ctx, CommandLineNode));
}
-void CodeGenModule::EmitTargetMetadata() {
- // Warning, new MangledDeclNames may be appended within this loop.
- // We rely on MapVector insertions adding new elements to the end
- // of the container.
- // FIXME: Move this loop into the one target that needs it, and only
- // loop over those declarations for which we couldn't emit the target
- // metadata when we emitted the declaration.
- for (unsigned I = 0; I != MangledDeclNames.size(); ++I) {
- auto Val = *(MangledDeclNames.begin() + I);
- const Decl *D = Val.first.getDecl()->getMostRecentDecl();
- llvm::GlobalValue *GV = GetGlobalValue(Val.second);
- getTargetCodeGenInfo().emitTargetMD(D, GV, *this);
- }
-}
-
void CodeGenModule::EmitCoverageFile() {
if (getCodeGenOpts().CoverageDataFile.empty() &&
getCodeGenOpts().CoverageNotesFile.empty())
@@ -5735,39 +5805,14 @@ void CodeGenModule::EmitCoverageFile() {
}
}
-llvm::Constant *CodeGenModule::EmitUuidofInitializer(StringRef Uuid) {
- // Sema has checked that all uuid strings are of the form
- // "12345678-1234-1234-1234-1234567890ab".
- assert(Uuid.size() == 36);
- for (unsigned i = 0; i < 36; ++i) {
- if (i == 8 || i == 13 || i == 18 || i == 23) assert(Uuid[i] == '-');
- else assert(isHexDigit(Uuid[i]));
- }
-
- // The starts of all bytes of Field3 in Uuid. Field 3 is "1234-1234567890ab".
- const unsigned Field3ValueOffsets[8] = { 19, 21, 24, 26, 28, 30, 32, 34 };
-
- llvm::Constant *Field3[8];
- for (unsigned Idx = 0; Idx < 8; ++Idx)
- Field3[Idx] = llvm::ConstantInt::get(
- Int8Ty, Uuid.substr(Field3ValueOffsets[Idx], 2), 16);
-
- llvm::Constant *Fields[4] = {
- llvm::ConstantInt::get(Int32Ty, Uuid.substr(0, 8), 16),
- llvm::ConstantInt::get(Int16Ty, Uuid.substr(9, 4), 16),
- llvm::ConstantInt::get(Int16Ty, Uuid.substr(14, 4), 16),
- llvm::ConstantArray::get(llvm::ArrayType::get(Int8Ty, 8), Field3)
- };
-
- return llvm::ConstantStruct::getAnon(Fields);
-}
-
llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
bool ForEH) {
// Return a bogus pointer if RTTI is disabled, unless it's for EH.
// FIXME: should we even be calling this method if RTTI is disabled
// and it's not for EH?
- if ((!ForEH && !getLangOpts().RTTI) || getLangOpts().CUDAIsDevice)
+ if ((!ForEH && !getLangOpts().RTTI) || getLangOpts().CUDAIsDevice ||
+ (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
+ getTriple().isNVPTX()))
return llvm::Constant::getNullValue(Int8PtrTy);
if (ForEH && Ty->isObjCObjectPointerType() &&
@@ -5911,3 +5956,99 @@ CodeGenModule::createOpenCLIntToSamplerConversion(const Expr *E,
"__translate_sampler_initializer"),
{C});
}
+
+CharUnits CodeGenModule::getNaturalPointeeTypeAlignment(
+ QualType T, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo) {
+ return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
+ /* forPointeeType= */ true);
+}
+
+CharUnits CodeGenModule::getNaturalTypeAlignment(QualType T,
+ LValueBaseInfo *BaseInfo,
+ TBAAAccessInfo *TBAAInfo,
+ bool forPointeeType) {
+ if (TBAAInfo)
+ *TBAAInfo = getTBAAAccessInfo(T);
+
+ // FIXME: This duplicates logic in ASTContext::getTypeAlignIfKnown. But
+ // that doesn't return the information we need to compute BaseInfo.
+
+ // Honor alignment typedef attributes even on incomplete types.
+ // We also honor them straight for C++ class types, even as pointees;
+ // there's an expressivity gap here.
+ if (auto TT = T->getAs<TypedefType>()) {
+ if (auto Align = TT->getDecl()->getMaxAlignment()) {
+ if (BaseInfo)
+ *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
+ return getContext().toCharUnitsFromBits(Align);
+ }
+ }
+
+ bool AlignForArray = T->isArrayType();
+
+ // Analyze the base element type, so we don't get confused by incomplete
+ // array types.
+ T = getContext().getBaseElementType(T);
+
+ if (T->isIncompleteType()) {
+ // We could try to replicate the logic from
+ // ASTContext::getTypeAlignIfKnown, but nothing uses the alignment if the
+ // type is incomplete, so it's impossible to test. We could try to reuse
+ // getTypeAlignIfKnown, but that doesn't return the information we need
+ // to set BaseInfo. So just ignore the possibility that the alignment is
+ // greater than one.
+ if (BaseInfo)
+ *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
+ return CharUnits::One();
+ }
+
+ if (BaseInfo)
+ *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
+
+ CharUnits Alignment;
+ // For C++ class pointees, we don't know whether we're pointing at a
+ // base or a complete object, so we generally need to use the
+ // non-virtual alignment.
+ const CXXRecordDecl *RD;
+ if (forPointeeType && !AlignForArray && (RD = T->getAsCXXRecordDecl())) {
+ Alignment = getClassPointerAlignment(RD);
+ } else {
+ Alignment = getContext().getTypeAlignInChars(T);
+ if (T.getQualifiers().hasUnaligned())
+ Alignment = CharUnits::One();
+ }
+
+ // Cap to the global maximum type alignment unless the alignment
+ // was somehow explicit on the type.
+ if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
+ if (Alignment.getQuantity() > MaxAlign &&
+ !getContext().isAlignmentRequired(T))
+ Alignment = CharUnits::fromQuantity(MaxAlign);
+ }
+ return Alignment;
+}
+
+bool CodeGenModule::stopAutoInit() {
+ unsigned StopAfter = getContext().getLangOpts().TrivialAutoVarInitStopAfter;
+ if (StopAfter) {
+ // This number is positive only when -ftrivial-auto-var-init-stop-after=* is
+ // used
+ if (NumAutoVarInit >= StopAfter) {
+ return true;
+ }
+ if (!NumAutoVarInit) {
+ unsigned DiagID = getDiags().getCustomDiagID(
+ DiagnosticsEngine::Warning,
+ "-ftrivial-auto-var-init-stop-after=%0 has been enabled to limit the "
+ "number of times ftrivial-auto-var-init=%1 gets applied.");
+ getDiags().Report(DiagID)
+ << StopAfter
+ << (getContext().getLangOpts().getTrivialAutoVarInit() ==
+ LangOptions::TrivialAutoVarInitKind::Zero
+ ? "zero"
+ : "pattern");
+ }
+ ++NumAutoVarInit;
+ }
+ return false;
+}
diff --git a/clang/lib/CodeGen/CodeGenModule.h b/clang/lib/CodeGen/CodeGenModule.h
index 115e754bb392..a6c4a1f7b278 100644
--- a/clang/lib/CodeGen/CodeGenModule.h
+++ b/clang/lib/CodeGen/CodeGenModule.h
@@ -26,6 +26,7 @@
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SanitizerBlacklist.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/XRayLists.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
@@ -301,6 +302,7 @@ private:
const HeaderSearchOptions &HeaderSearchOpts; // Only used for debug info.
const PreprocessorOptions &PreprocessorOpts; // Only used for debug info.
const CodeGenOptions &CodeGenOpts;
+ unsigned NumAutoVarInit = 0;
llvm::Module &TheModule;
DiagnosticsEngine &Diags;
const TargetInfo &Target;
@@ -322,7 +324,6 @@ private:
std::unique_ptr<CGObjCRuntime> ObjCRuntime;
std::unique_ptr<CGOpenCLRuntime> OpenCLRuntime;
std::unique_ptr<CGOpenMPRuntime> OpenMPRuntime;
- std::unique_ptr<llvm::OpenMPIRBuilder> OMPBuilder;
std::unique_ptr<CGCUDARuntime> CUDARuntime;
std::unique_ptr<CGDebugInfo> DebugInfo;
std::unique_ptr<ObjCEntrypoints> ObjCData;
@@ -395,6 +396,10 @@ private:
/// emitted when the translation unit is complete.
CtorList GlobalDtors;
+ /// A unique trailing identifier as a part of sinit/sterm function when
+ /// UseSinitAndSterm of CXXABI is set as true.
+ std::string GlobalUniqueModuleId;
+
/// An ordered map of canonical GlobalDecls to their mangled names.
llvm::MapVector<GlobalDecl, StringRef> MangledDeclNames;
llvm::StringMap<GlobalDecl, llvm::BumpPtrAllocator> Manglings;
@@ -463,9 +468,11 @@ private:
SmallVector<GlobalInitData, 8> PrioritizedCXXGlobalInits;
/// Global destructor functions and arguments that need to run on termination.
+ /// When UseSinitAndSterm is set, it instead contains sterm finalizer
+ /// functions, which also run on unloading a shared library.
std::vector<
std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH, llvm::Constant *>>
- CXXGlobalDtors;
+ CXXGlobalDtorsOrStermFinalizers;
/// The complete set of modules that has been imported.
llvm::SetVector<clang::Module *> ImportedModules;
@@ -589,9 +596,6 @@ public:
return *OpenMPRuntime;
}
- /// Return a pointer to the configured OpenMPIRBuilder, if any.
- llvm::OpenMPIRBuilder *getOpenMPIRBuilder() { return OMPBuilder.get(); }
-
/// Return a reference to the configured CUDA runtime.
CGCUDARuntime &getCUDARuntime() {
assert(CUDARuntime != nullptr);
@@ -788,6 +792,9 @@ public:
/// variable declaration D.
void setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const;
+ /// Get LLVM TLS mode from CodeGenOptions.
+ llvm::GlobalVariable::ThreadLocalMode GetDefaultLLVMTLSModel() const;
+
static llvm::GlobalValue::VisibilityTypes GetLLVMVisibility(Visibility V) {
switch (V) {
case DefaultVisibility: return llvm::GlobalValue::DefaultVisibility;
@@ -810,11 +817,10 @@ public:
llvm::GlobalValue::LinkageTypes Linkage,
unsigned Alignment);
- llvm::Function *
- CreateGlobalInitOrDestructFunction(llvm::FunctionType *ty, const Twine &name,
- const CGFunctionInfo &FI,
- SourceLocation Loc = SourceLocation(),
- bool TLS = false);
+ llvm::Function *CreateGlobalInitOrCleanUpFunction(
+ llvm::FunctionType *ty, const Twine &name, const CGFunctionInfo &FI,
+ SourceLocation Loc = SourceLocation(), bool TLS = false,
+ bool IsExternalLinkage = false);
/// Return the AST address space of the underlying global variable for D, as
/// determined by its declaration. Normally this is the same as the address
@@ -855,8 +861,8 @@ public:
/// Get the address of the RTTI descriptor for the given type.
llvm::Constant *GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH = false);
- /// Get the address of a uuid descriptor .
- ConstantAddress GetAddrOfUuidDescriptor(const CXXUuidofExpr* E);
+ /// Get the address of a GUID.
+ ConstantAddress GetAddrOfMSGuidDecl(const MSGuidDecl *GD);
/// Get the address of the thunk for the given global decl.
llvm::Constant *GetAddrOfThunk(StringRef Name, llvm::Type *FnTy,
@@ -868,6 +874,17 @@ public:
/// Returns the assumed alignment of an opaque pointer to the given class.
CharUnits getClassPointerAlignment(const CXXRecordDecl *CD);
+ /// Returns the minimum object size for an object of the given class type
+ /// (or a class derived from it).
+ CharUnits getMinimumClassObjectSize(const CXXRecordDecl *CD);
+
+ /// Returns the minimum object size for an object of the given type.
+ CharUnits getMinimumObjectSize(QualType Ty) {
+ if (CXXRecordDecl *RD = Ty->getAsCXXRecordDecl())
+ return getMinimumClassObjectSize(RD);
+ return getContext().getTypeSizeInChars(Ty);
+ }
+
/// Returns the assumed alignment of a virtual base of a class.
CharUnits getVBaseAlignment(CharUnits DerivedAlign,
const CXXRecordDecl *Derived,
@@ -1012,6 +1029,9 @@ public:
/// for the uninstrumented functions.
void EmitDeferredUnusedCoverageMappings();
+ /// Emit an alias for "main" if it has no arguments (needed for wasm).
+ void EmitMainVoidAlias();
+
/// Tell the consumer that this variable has been instantiated.
void HandleCXXStaticMemberVarInstantiation(VarDecl *VD);
@@ -1029,8 +1049,14 @@ public:
/// Add a destructor and object to add to the C++ global destructor function.
void AddCXXDtorEntry(llvm::FunctionCallee DtorFn, llvm::Constant *Object) {
- CXXGlobalDtors.emplace_back(DtorFn.getFunctionType(), DtorFn.getCallee(),
- Object);
+ CXXGlobalDtorsOrStermFinalizers.emplace_back(DtorFn.getFunctionType(),
+ DtorFn.getCallee(), Object);
+ }
+
+ /// Add an sterm finalizer to the C++ global cleanup function.
+ void AddCXXStermFinalizerEntry(llvm::FunctionCallee DtorFn) {
+ CXXGlobalDtorsOrStermFinalizers.emplace_back(DtorFn.getFunctionType(),
+ DtorFn.getCallee(), nullptr);
}
/// Create or return a runtime function declaration with the specified type
@@ -1155,7 +1181,11 @@ public:
/// on the function more conservative. But it's unsafe to call this on a
/// function which relies on particular fast-math attributes for correctness.
/// It's up to you to ensure that this is safe.
- void AddDefaultFnAttrs(llvm::Function &F);
+ void addDefaultFunctionDefinitionAttributes(llvm::Function &F);
+
+ /// Like the overload taking a `Function &`, but intended specifically
+ /// for frontends that want to build on Clang's target-configuration logic.
+ void addDefaultFunctionDefinitionAttributes(llvm::AttrBuilder &attrs);
StringRef getMangledName(GlobalDecl GD);
StringRef getBlockMangledName(GlobalDecl GD, const BlockDecl *BD);
@@ -1282,16 +1312,16 @@ public:
/// \param D Requires declaration
void EmitOMPRequiresDecl(const OMPRequiresDecl *D);
- /// Emits the definition of \p OldGD function with body from \p NewGD.
- /// Required for proper handling of declare variant directive on the GPU.
- void emitOpenMPDeviceFunctionRedefinition(GlobalDecl OldGD, GlobalDecl NewGD,
- llvm::GlobalValue *GV);
-
/// Returns whether the given record has hidden LTO visibility and therefore
/// may participate in (single-module) CFI and whole-program vtable
/// optimization.
bool HasHiddenLTOVisibility(const CXXRecordDecl *RD);
+ /// Returns whether the given record has public std LTO visibility
+ /// and therefore may not participate in (single-module) CFI and whole-program
+ /// vtable optimization.
+ bool HasLTOVisibilityPublicStd(const CXXRecordDecl *RD);
+
/// Returns the vcall visibility of the given type. This is the scope in which
/// a virtual function call could be made which ends up being dispatched to a
/// member function of this class. This scope can be wider than the visibility
@@ -1367,6 +1397,15 @@ public:
/// \param QT is the clang QualType of the null pointer.
llvm::Constant *getNullPointer(llvm::PointerType *T, QualType QT);
+ CharUnits getNaturalTypeAlignment(QualType T,
+ LValueBaseInfo *BaseInfo = nullptr,
+ TBAAAccessInfo *TBAAInfo = nullptr,
+ bool forPointeeType = false);
+ CharUnits getNaturalPointeeTypeAlignment(QualType T,
+ LValueBaseInfo *BaseInfo = nullptr,
+ TBAAAccessInfo *TBAAInfo = nullptr);
+ bool stopAutoInit();
+
private:
llvm::Constant *GetOrCreateLLVMFunction(
StringRef MangledName, llvm::Type *Ty, GlobalDecl D, bool ForVTable,
@@ -1417,8 +1456,8 @@ private:
/// Emit the function that initializes C++ globals.
void EmitCXXGlobalInitFunc();
- /// Emit the function that destroys C++ globals.
- void EmitCXXGlobalDtorFunc();
+ /// Emit the function that performs cleanup associated with C++ globals.
+ void EmitCXXGlobalCleanUpFunc();
/// Emit the function that initializes the specified global (if PerformInit is
/// true) and registers its destructor.
@@ -1489,8 +1528,9 @@ private:
/// Emit the Clang commandline as llvm.commandline metadata.
void EmitCommandLineMetadata();
- /// Emits target specific Metadata for global declarations.
- void EmitTargetMetadata();
+ /// Emit the module flag metadata used to pass options controlling
+ /// the backend to LLVM.
+ void EmitBackendOptionsMetadata(const CodeGenOptions CodeGenOpts);
/// Emits OpenCL specific Metadata e.g. OpenCL version.
void EmitOpenCLMetadata();
@@ -1499,9 +1539,6 @@ private:
/// .gcda files in a way that persists in .bc files.
void EmitCoverageFile();
- /// Emits the initializer for a uuidof string.
- llvm::Constant *EmitUuidofInitializer(StringRef uuidstr);
-
/// Determine whether the definition must be emitted; if this returns \c
/// false, the definition can be emitted lazily if it's used.
bool MustBeEmitted(const ValueDecl *D);
@@ -1516,11 +1553,12 @@ private:
/// function.
void SimplifyPersonality();
- /// Helper function for ConstructAttributeList and AddDefaultFnAttrs.
- /// Constructs an AttrList for a function with the given properties.
- void ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
- bool AttrOnCallSite,
- llvm::AttrBuilder &FuncAttrs);
+ /// Helper function for ConstructAttributeList and
+ /// addDefaultFunctionDefinitionAttributes. Builds a set of function
+ /// attributes to add to a function with the given properties.
+ void getDefaultFunctionAttributes(StringRef Name, bool HasOptnone,
+ bool AttrOnCallSite,
+ llvm::AttrBuilder &FuncAttrs);
llvm::Metadata *CreateMetadataIdentifierImpl(QualType T, MetadataTypeMap &Map,
StringRef Suffix);
diff --git a/clang/lib/CodeGen/CodeGenPGO.cpp b/clang/lib/CodeGen/CodeGenPGO.cpp
index bad796bf92dc..e810f608ab78 100644
--- a/clang/lib/CodeGen/CodeGenPGO.cpp
+++ b/clang/lib/CodeGen/CodeGenPGO.cpp
@@ -52,9 +52,10 @@ void CodeGenPGO::setFuncName(llvm::Function *Fn) {
enum PGOHashVersion : unsigned {
PGO_HASH_V1,
PGO_HASH_V2,
+ PGO_HASH_V3,
// Keep this set to the latest hash version.
- PGO_HASH_LATEST = PGO_HASH_V2
+ PGO_HASH_LATEST = PGO_HASH_V3
};
namespace {
@@ -122,7 +123,7 @@ public:
BinaryOperatorGE,
BinaryOperatorEQ,
BinaryOperatorNE,
- // The preceding values are available with PGO_HASH_V2.
+ // The preceding values are available since PGO_HASH_V2.
// Keep this last. It's for the static assert that follows.
LastHashType
@@ -144,7 +145,9 @@ static PGOHashVersion getPGOHashVersion(llvm::IndexedInstrProfReader *PGOReader,
CodeGenModule &CGM) {
if (PGOReader->getVersion() <= 4)
return PGO_HASH_V1;
- return PGO_HASH_V2;
+ if (PGOReader->getVersion() <= 5)
+ return PGO_HASH_V2;
+ return PGO_HASH_V3;
}
/// A RecursiveASTVisitor that fills a map of statements to PGO counters.
@@ -288,7 +291,7 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
return PGOHash::BinaryOperatorLAnd;
if (BO->getOpcode() == BO_LOr)
return PGOHash::BinaryOperatorLOr;
- if (HashVersion == PGO_HASH_V2) {
+ if (HashVersion >= PGO_HASH_V2) {
switch (BO->getOpcode()) {
default:
break;
@@ -310,7 +313,7 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
}
}
- if (HashVersion == PGO_HASH_V2) {
+ if (HashVersion >= PGO_HASH_V2) {
switch (S->getStmtClass()) {
default:
break;
@@ -747,13 +750,21 @@ uint64_t PGOHash::finalize() {
return Working;
// Check for remaining work in Working.
- if (Working)
- MD5.update(Working);
+ if (Working) {
+ // Keep the buggy behavior from v1 and v2 for backward-compatibility. This
+ // is buggy because it converts a uint64_t into an array of uint8_t.
+ if (HashVersion < PGO_HASH_V3) {
+ MD5.update({(uint8_t)Working});
+ } else {
+ using namespace llvm::support;
+ uint64_t Swapped = endian::byte_swap<uint64_t, little>(Working);
+ MD5.update(llvm::makeArrayRef((uint8_t *)&Swapped, sizeof(Swapped)));
+ }
+ }
// Finalize the MD5 and return the hash.
llvm::MD5::MD5Result Result;
MD5.final(Result);
- using namespace llvm::support;
return Result.low();
}
@@ -1051,8 +1062,7 @@ llvm::MDNode *CodeGenFunction::createProfileWeightsForLoop(const Stmt *Cond,
if (!PGO.haveRegionCounts())
return nullptr;
Optional<uint64_t> CondCount = PGO.getStmtCount(Cond);
- assert(CondCount.hasValue() && "missing expected loop condition count");
- if (*CondCount == 0)
+ if (!CondCount || *CondCount == 0)
return nullptr;
return createProfileWeights(LoopCount,
std::max(*CondCount, LoopCount) - LoopCount);
diff --git a/clang/lib/CodeGen/CodeGenPGO.h b/clang/lib/CodeGen/CodeGenPGO.h
index a3778b549910..dda8c66b6db2 100644
--- a/clang/lib/CodeGen/CodeGenPGO.h
+++ b/clang/lib/CodeGen/CodeGenPGO.h
@@ -40,8 +40,8 @@ private:
uint64_t CurrentRegionCount;
public:
- CodeGenPGO(CodeGenModule &CGM)
- : CGM(CGM), FuncNameVar(nullptr), NumValueSites({{0}}),
+ CodeGenPGO(CodeGenModule &CGModule)
+ : CGM(CGModule), FuncNameVar(nullptr), NumValueSites({{0}}),
NumRegionCounters(0), FunctionHash(0), CurrentRegionCount(0) {}
/// Whether or not we have PGO region data for the current function. This is
diff --git a/clang/lib/CodeGen/CodeGenTBAA.cpp b/clang/lib/CodeGen/CodeGenTBAA.cpp
index 7d730cb1ed15..f4ebe6885675 100644
--- a/clang/lib/CodeGen/CodeGenTBAA.cpp
+++ b/clang/lib/CodeGen/CodeGenTBAA.cpp
@@ -141,6 +141,34 @@ llvm::MDNode *CodeGenTBAA::getTypeInfoHelper(const Type *Ty) {
case BuiltinType::UInt128:
return getTypeInfo(Context.Int128Ty);
+ case BuiltinType::UShortFract:
+ return getTypeInfo(Context.ShortFractTy);
+ case BuiltinType::UFract:
+ return getTypeInfo(Context.FractTy);
+ case BuiltinType::ULongFract:
+ return getTypeInfo(Context.LongFractTy);
+
+ case BuiltinType::SatUShortFract:
+ return getTypeInfo(Context.SatShortFractTy);
+ case BuiltinType::SatUFract:
+ return getTypeInfo(Context.SatFractTy);
+ case BuiltinType::SatULongFract:
+ return getTypeInfo(Context.SatLongFractTy);
+
+ case BuiltinType::UShortAccum:
+ return getTypeInfo(Context.ShortAccumTy);
+ case BuiltinType::UAccum:
+ return getTypeInfo(Context.AccumTy);
+ case BuiltinType::ULongAccum:
+ return getTypeInfo(Context.LongAccumTy);
+
+ case BuiltinType::SatUShortAccum:
+ return getTypeInfo(Context.SatShortAccumTy);
+ case BuiltinType::SatUAccum:
+ return getTypeInfo(Context.SatAccumTy);
+ case BuiltinType::SatULongAccum:
+ return getTypeInfo(Context.SatLongAccumTy);
+
// Treat all other builtin types as distinct types. This includes
// treating wchar_t, char16_t, and char32_t as distinct from their
// "underlying types".
@@ -181,6 +209,15 @@ llvm::MDNode *CodeGenTBAA::getTypeInfoHelper(const Type *Ty) {
return createScalarTypeNode(OutName, getChar(), Size);
}
+ if (const auto *EIT = dyn_cast<ExtIntType>(Ty)) {
+ SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ // Don't specify signed/unsigned since integer types can alias despite sign
+ // differences.
+ Out << "_ExtInt(" << EIT->getNumBits() << ')';
+ return createScalarTypeNode(OutName, getChar(), Size);
+ }
+
// For now, handle any other kind of type conservatively.
return getChar();
}
diff --git a/clang/lib/CodeGen/CodeGenTypeCache.h b/clang/lib/CodeGen/CodeGenTypeCache.h
index ed4b773afd13..20a3263c0b1a 100644
--- a/clang/lib/CodeGen/CodeGenTypeCache.h
+++ b/clang/lib/CodeGen/CodeGenTypeCache.h
@@ -35,8 +35,8 @@ struct CodeGenTypeCache {
/// i8, i16, i32, and i64
llvm::IntegerType *Int8Ty, *Int16Ty, *Int32Ty, *Int64Ty;
- /// float, double
- llvm::Type *HalfTy, *FloatTy, *DoubleTy;
+ /// half, bfloat, float, double
+ llvm::Type *HalfTy, *BFloatTy, *FloatTy, *DoubleTy;
/// int
llvm::IntegerType *IntTy;
diff --git a/clang/lib/CodeGen/CodeGenTypes.cpp b/clang/lib/CodeGen/CodeGenTypes.cpp
index a458811d7a30..d431c0263666 100644
--- a/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -36,8 +36,6 @@ CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
}
CodeGenTypes::~CodeGenTypes() {
- llvm::DeleteContainerSeconds(CGRecordLayouts);
-
for (llvm::FoldingSet<CGFunctionInfo>::iterator
I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
delete &*I++;
@@ -83,19 +81,26 @@ void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
-llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
+llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool ForBitField) {
+ if (T->isConstantMatrixType()) {
+ const Type *Ty = Context.getCanonicalType(T).getTypePtr();
+ const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
+ return llvm::ArrayType::get(ConvertType(MT->getElementType()),
+ MT->getNumRows() * MT->getNumColumns());
+ }
+
llvm::Type *R = ConvertType(T);
- // If this is a non-bool type, don't map it.
- if (!R->isIntegerTy(1))
- return R;
+ // If this is a bool type, or an ExtIntType in a bitfield representation,
+ // map this integer to the target-specified size.
+ if ((ForBitField && T->isExtIntType()) || R->isIntegerTy(1))
+ return llvm::IntegerType::get(getLLVMContext(),
+ (unsigned)Context.getTypeSize(T));
- // Otherwise, return an integer of the target-specified size.
- return llvm::IntegerType::get(getLLVMContext(),
- (unsigned)Context.getTypeSize(T));
+ // Else, don't map it.
+ return R;
}
-
/// isRecordLayoutComplete - Return true if the specified type is already
/// completely laid out.
bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
@@ -295,6 +300,8 @@ static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
else
return llvm::Type::getInt16Ty(VMContext);
}
+ if (&format == &llvm::APFloat::BFloat())
+ return llvm::Type::getBFloatTy(VMContext);
if (&format == &llvm::APFloat::IEEEsingle())
return llvm::Type::getFloatTy(VMContext);
if (&format == &llvm::APFloat::IEEEdouble())
@@ -383,6 +390,20 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
const Type *Ty = T.getTypePtr();
+ // For the device-side compilation, CUDA device builtin surface/texture types
+ // may be represented in different types.
+ if (Context.getLangOpts().CUDAIsDevice) {
+ if (T->isCUDADeviceBuiltinSurfaceType()) {
+ if (auto *Ty = CGM.getTargetCodeGenInfo()
+ .getCUDADeviceBuiltinSurfaceDeviceType())
+ return Ty;
+ } else if (T->isCUDADeviceBuiltinTextureType()) {
+ if (auto *Ty = CGM.getTargetCodeGenInfo()
+ .getCUDADeviceBuiltinTextureDeviceType())
+ return Ty;
+ }
+ }
+
// RecordTypes are cached and processed specially.
if (const RecordType *RT = dyn_cast<RecordType>(Ty))
return ConvertRecordDeclType(RT->getDecl());
@@ -479,6 +500,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
Context.getLangOpts().NativeHalfType ||
!Context.getTargetInfo().useFP16ConversionIntrinsics());
break;
+ case BuiltinType::BFloat16:
case BuiltinType::Float:
case BuiltinType::Double:
case BuiltinType::LongDouble:
@@ -511,23 +533,99 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
case BuiltinType::OCLReserveID:
ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty);
break;
-
- // TODO: real CodeGen support for SVE types requires more infrastructure
- // to be added first. Report an error until then.
-#define SVE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
-#include "clang/Basic/AArch64SVEACLETypes.def"
- {
- unsigned DiagID = CGM.getDiags().getCustomDiagID(
- DiagnosticsEngine::Error,
- "cannot yet generate code for SVE type '%0'");
- auto *BT = cast<BuiltinType>(Ty);
- auto Name = BT->getName(CGM.getContext().getPrintingPolicy());
- CGM.getDiags().Report(DiagID) << Name;
- // Return something safe.
- ResultType = llvm::IntegerType::get(getLLVMContext(), 32);
- break;
- }
-
+#define GET_SVE_INT_VEC(BITS, ELTS) \
+ llvm::ScalableVectorType::get( \
+ llvm::IntegerType::get(getLLVMContext(), BITS), ELTS);
+ case BuiltinType::SveInt8:
+ case BuiltinType::SveUint8:
+ return GET_SVE_INT_VEC(8, 16);
+ case BuiltinType::SveInt8x2:
+ case BuiltinType::SveUint8x2:
+ return GET_SVE_INT_VEC(8, 32);
+ case BuiltinType::SveInt8x3:
+ case BuiltinType::SveUint8x3:
+ return GET_SVE_INT_VEC(8, 48);
+ case BuiltinType::SveInt8x4:
+ case BuiltinType::SveUint8x4:
+ return GET_SVE_INT_VEC(8, 64);
+ case BuiltinType::SveInt16:
+ case BuiltinType::SveUint16:
+ return GET_SVE_INT_VEC(16, 8);
+ case BuiltinType::SveInt16x2:
+ case BuiltinType::SveUint16x2:
+ return GET_SVE_INT_VEC(16, 16);
+ case BuiltinType::SveInt16x3:
+ case BuiltinType::SveUint16x3:
+ return GET_SVE_INT_VEC(16, 24);
+ case BuiltinType::SveInt16x4:
+ case BuiltinType::SveUint16x4:
+ return GET_SVE_INT_VEC(16, 32);
+ case BuiltinType::SveInt32:
+ case BuiltinType::SveUint32:
+ return GET_SVE_INT_VEC(32, 4);
+ case BuiltinType::SveInt32x2:
+ case BuiltinType::SveUint32x2:
+ return GET_SVE_INT_VEC(32, 8);
+ case BuiltinType::SveInt32x3:
+ case BuiltinType::SveUint32x3:
+ return GET_SVE_INT_VEC(32, 12);
+ case BuiltinType::SveInt32x4:
+ case BuiltinType::SveUint32x4:
+ return GET_SVE_INT_VEC(32, 16);
+ case BuiltinType::SveInt64:
+ case BuiltinType::SveUint64:
+ return GET_SVE_INT_VEC(64, 2);
+ case BuiltinType::SveInt64x2:
+ case BuiltinType::SveUint64x2:
+ return GET_SVE_INT_VEC(64, 4);
+ case BuiltinType::SveInt64x3:
+ case BuiltinType::SveUint64x3:
+ return GET_SVE_INT_VEC(64, 6);
+ case BuiltinType::SveInt64x4:
+ case BuiltinType::SveUint64x4:
+ return GET_SVE_INT_VEC(64, 8);
+ case BuiltinType::SveBool:
+ return GET_SVE_INT_VEC(1, 16);
+#undef GET_SVE_INT_VEC
+#define GET_SVE_FP_VEC(TY, ISFP16, ELTS) \
+ llvm::ScalableVectorType::get( \
+ getTypeForFormat(getLLVMContext(), \
+ Context.getFloatTypeSemantics(Context.TY), \
+ /* UseNativeHalf = */ ISFP16), \
+ ELTS);
+ case BuiltinType::SveFloat16:
+ return GET_SVE_FP_VEC(HalfTy, true, 8);
+ case BuiltinType::SveFloat16x2:
+ return GET_SVE_FP_VEC(HalfTy, true, 16);
+ case BuiltinType::SveFloat16x3:
+ return GET_SVE_FP_VEC(HalfTy, true, 24);
+ case BuiltinType::SveFloat16x4:
+ return GET_SVE_FP_VEC(HalfTy, true, 32);
+ case BuiltinType::SveFloat32:
+ return GET_SVE_FP_VEC(FloatTy, false, 4);
+ case BuiltinType::SveFloat32x2:
+ return GET_SVE_FP_VEC(FloatTy, false, 8);
+ case BuiltinType::SveFloat32x3:
+ return GET_SVE_FP_VEC(FloatTy, false, 12);
+ case BuiltinType::SveFloat32x4:
+ return GET_SVE_FP_VEC(FloatTy, false, 16);
+ case BuiltinType::SveFloat64:
+ return GET_SVE_FP_VEC(DoubleTy, false, 2);
+ case BuiltinType::SveFloat64x2:
+ return GET_SVE_FP_VEC(DoubleTy, false, 4);
+ case BuiltinType::SveFloat64x3:
+ return GET_SVE_FP_VEC(DoubleTy, false, 6);
+ case BuiltinType::SveFloat64x4:
+ return GET_SVE_FP_VEC(DoubleTy, false, 8);
+ case BuiltinType::SveBFloat16:
+ return GET_SVE_FP_VEC(BFloat16Ty, false, 8);
+ case BuiltinType::SveBFloat16x2:
+ return GET_SVE_FP_VEC(BFloat16Ty, false, 16);
+ case BuiltinType::SveBFloat16x3:
+ return GET_SVE_FP_VEC(BFloat16Ty, false, 24);
+ case BuiltinType::SveBFloat16x4:
+ return GET_SVE_FP_VEC(BFloat16Ty, false, 32);
+#undef GET_SVE_FP_VEC
case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
@@ -560,7 +658,11 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
llvm::Type *PointeeType = ConvertTypeForMem(ETy);
if (PointeeType->isVoidTy())
PointeeType = llvm::Type::getInt8Ty(getLLVMContext());
- unsigned AS = Context.getTargetAddressSpace(ETy);
+
+ unsigned AS = PointeeType->isFunctionTy()
+ ? getDataLayout().getProgramAddressSpace()
+ : Context.getTargetAddressSpace(ETy);
+
ResultType = llvm::PointerType::get(PointeeType, AS);
break;
}
@@ -605,8 +707,15 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
case Type::ExtVector:
case Type::Vector: {
const VectorType *VT = cast<VectorType>(Ty);
- ResultType = llvm::VectorType::get(ConvertType(VT->getElementType()),
- VT->getNumElements());
+ ResultType = llvm::FixedVectorType::get(ConvertType(VT->getElementType()),
+ VT->getNumElements());
+ break;
+ }
+ case Type::ConstantMatrix: {
+ const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
+ ResultType =
+ llvm::FixedVectorType::get(ConvertType(MT->getElementType()),
+ MT->getNumRows() * MT->getNumColumns());
break;
}
case Type::FunctionNoProto:
@@ -692,6 +801,11 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
ResultType = CGM.getOpenCLRuntime().getPipeType(cast<PipeType>(Ty));
break;
}
+ case Type::ExtInt: {
+ const auto &EIT = cast<ExtIntType>(Ty);
+ ResultType = llvm::Type::getIntNTy(getLLVMContext(), EIT->getNumBits());
+ break;
+ }
}
assert(ResultType && "Didn't convert a type?");
@@ -749,8 +863,8 @@ llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
}
// Layout fields.
- CGRecordLayout *Layout = ComputeRecordLayout(RD, Ty);
- CGRecordLayouts[Key] = Layout;
+ std::unique_ptr<CGRecordLayout> Layout = ComputeRecordLayout(RD, Ty);
+ CGRecordLayouts[Key] = std::move(Layout);
// We're done laying out this struct.
bool EraseResult = RecordsBeingLaidOut.erase(Key); (void)EraseResult;
@@ -776,17 +890,18 @@ const CGRecordLayout &
CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
const Type *Key = Context.getTagDeclType(RD).getTypePtr();
- const CGRecordLayout *Layout = CGRecordLayouts.lookup(Key);
- if (!Layout) {
- // Compute the type information.
- ConvertRecordDeclType(RD);
+ auto I = CGRecordLayouts.find(Key);
+ if (I != CGRecordLayouts.end())
+ return *I->second;
+ // Compute the type information.
+ ConvertRecordDeclType(RD);
- // Now try again.
- Layout = CGRecordLayouts.lookup(Key);
- }
+ // Now try again.
+ I = CGRecordLayouts.find(Key);
- assert(Layout && "Unable to find record layout information for type");
- return *Layout;
+ assert(I != CGRecordLayouts.end() &&
+ "Unable to find record layout information for type");
+ return *I->second;
}
bool CodeGenTypes::isPointerZeroInitializable(QualType T) {
diff --git a/clang/lib/CodeGen/CodeGenTypes.h b/clang/lib/CodeGen/CodeGenTypes.h
index 03102329507e..f8f7542e4c83 100644
--- a/clang/lib/CodeGen/CodeGenTypes.h
+++ b/clang/lib/CodeGen/CodeGenTypes.h
@@ -75,7 +75,7 @@ class CodeGenTypes {
llvm::DenseMap<const ObjCInterfaceType*, llvm::Type *> InterfaceTypes;
/// Maps clang struct type with corresponding record layout info.
- llvm::DenseMap<const Type*, CGRecordLayout *> CGRecordLayouts;
+ llvm::DenseMap<const Type*, std::unique_ptr<CGRecordLayout>> CGRecordLayouts;
/// Contains the LLVM IR type for any converted RecordDecl.
llvm::DenseMap<const Type*, llvm::StructType *> RecordDeclTypes;
@@ -134,7 +134,7 @@ public:
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
- llvm::Type *ConvertTypeForMem(QualType T);
+ llvm::Type *ConvertTypeForMem(QualType T, bool ForBitField = false);
/// GetFunctionType - Get the LLVM function type for \arg Info.
llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info);
@@ -272,8 +272,8 @@ public:
RequiredArgs args);
/// Compute a new LLVM record layout object for the given record.
- CGRecordLayout *ComputeRecordLayout(const RecordDecl *D,
- llvm::StructType *Ty);
+ std::unique_ptr<CGRecordLayout> ComputeRecordLayout(const RecordDecl *D,
+ llvm::StructType *Ty);
/// addRecordTypeName - Compute a name from the given record decl with an
/// optional suffix and name the given LLVM type using it.
diff --git a/clang/lib/CodeGen/ConstantEmitter.h b/clang/lib/CodeGen/ConstantEmitter.h
index 121acbac4fa9..188b82e56f53 100644
--- a/clang/lib/CodeGen/ConstantEmitter.h
+++ b/clang/lib/CodeGen/ConstantEmitter.h
@@ -110,6 +110,8 @@ public:
llvm::Constant *tryEmitAbstract(const APValue &value, QualType T);
llvm::Constant *tryEmitAbstractForMemory(const APValue &value, QualType T);
+ llvm::Constant *tryEmitConstantExpr(const ConstantExpr *CE);
+
llvm::Constant *emitNullForMemory(QualType T) {
return emitNullForMemory(CGM, T);
}
diff --git a/clang/lib/CodeGen/ConstantInitBuilder.cpp b/clang/lib/CodeGen/ConstantInitBuilder.cpp
index 2d63d88020be..24e3ca19709c 100644
--- a/clang/lib/CodeGen/ConstantInitBuilder.cpp
+++ b/clang/lib/CodeGen/ConstantInitBuilder.cpp
@@ -128,8 +128,14 @@ void ConstantAggregateBuilderBase::addSize(CharUnits size) {
llvm::Constant *
ConstantAggregateBuilderBase::getRelativeOffset(llvm::IntegerType *offsetType,
llvm::Constant *target) {
+ return getRelativeOffsetToPosition(offsetType, target,
+ Builder.Buffer.size() - Begin);
+}
+
+llvm::Constant *ConstantAggregateBuilderBase::getRelativeOffsetToPosition(
+ llvm::IntegerType *offsetType, llvm::Constant *target, size_t position) {
// Compute the address of the relative-address slot.
- auto base = getAddrOfCurrentPosition(offsetType);
+ auto base = getAddrOfPosition(offsetType, position);
// Subtract.
base = llvm::ConstantExpr::getPtrToInt(base, Builder.CGM.IntPtrTy);
@@ -145,6 +151,20 @@ ConstantAggregateBuilderBase::getRelativeOffset(llvm::IntegerType *offsetType,
}
llvm::Constant *
+ConstantAggregateBuilderBase::getAddrOfPosition(llvm::Type *type,
+ size_t position) {
+ // Make a global variable. We will replace this with a GEP to this
+ // position after installing the initializer.
+ auto dummy = new llvm::GlobalVariable(Builder.CGM.getModule(), type, true,
+ llvm::GlobalVariable::PrivateLinkage,
+ nullptr, "");
+ Builder.SelfReferences.emplace_back(dummy);
+ auto &entry = Builder.SelfReferences.back();
+ (void)getGEPIndicesTo(entry.Indices, position + Begin);
+ return dummy;
+}
+
+llvm::Constant *
ConstantAggregateBuilderBase::getAddrOfCurrentPosition(llvm::Type *type) {
// Make a global variable. We will replace this with a GEP to this
// position after installing the initializer.
diff --git a/clang/lib/CodeGen/CoverageMappingGen.cpp b/clang/lib/CodeGen/CoverageMappingGen.cpp
index bdecff39c88f..78b268f423cb 100644
--- a/clang/lib/CodeGen/CoverageMappingGen.cpp
+++ b/clang/lib/CodeGen/CoverageMappingGen.cpp
@@ -13,10 +13,13 @@
#include "CoverageMappingGen.h"
#include "CodeGenFunction.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ProfileData/Coverage/CoverageMapping.h"
#include "llvm/ProfileData/Coverage/CoverageMappingReader.h"
#include "llvm/ProfileData/Coverage/CoverageMappingWriter.h"
@@ -24,6 +27,10 @@
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
+// This selects the coverage mapping format defined when `InstrProfData.inc`
+// is textually included.
+#define COVMAP_V3
+
using namespace clang;
using namespace CodeGen;
using namespace llvm::coverage;
@@ -901,6 +908,18 @@ struct CounterCoverageMappingBuilder
terminateRegion(S);
}
+ void VisitCoroutineBodyStmt(const CoroutineBodyStmt *S) {
+ extendRegion(S);
+ Visit(S->getBody());
+ }
+
+ void VisitCoreturnStmt(const CoreturnStmt *S) {
+ extendRegion(S);
+ if (S->getOperand())
+ Visit(S->getOperand());
+ terminateRegion(S);
+ }
+
void VisitCXXThrowExpr(const CXXThrowExpr *E) {
extendRegion(E);
if (E->getSubExpr())
@@ -1272,17 +1291,11 @@ struct CounterCoverageMappingBuilder
}
};
-std::string getCoverageSection(const CodeGenModule &CGM) {
- return llvm::getInstrProfSectionName(
- llvm::IPSK_covmap,
- CGM.getContext().getTargetInfo().getTriple().getObjectFormat());
-}
-
std::string normalizeFilename(StringRef Filename) {
llvm::SmallString<256> Path(Filename);
llvm::sys::fs::make_absolute(Path);
llvm::sys::path::remove_dots(Path, /*remove_dot_dot=*/true);
- return Path.str().str();
+ return std::string(Path);
}
} // end anonymous namespace
@@ -1317,30 +1330,71 @@ static void dump(llvm::raw_ostream &OS, StringRef FunctionName,
}
}
-void CoverageMappingModuleGen::addFunctionMappingRecord(
- llvm::GlobalVariable *NamePtr, StringRef NameValue, uint64_t FuncHash,
- const std::string &CoverageMapping, bool IsUsed) {
+static std::string getInstrProfSection(const CodeGenModule &CGM,
+ llvm::InstrProfSectKind SK) {
+ return llvm::getInstrProfSectionName(
+ SK, CGM.getContext().getTargetInfo().getTriple().getObjectFormat());
+}
+
+void CoverageMappingModuleGen::emitFunctionMappingRecord(
+ const FunctionInfo &Info, uint64_t FilenamesRef) {
llvm::LLVMContext &Ctx = CGM.getLLVMContext();
- if (!FunctionRecordTy) {
+
+ // Assign a name to the function record. This is used to merge duplicates.
+ std::string FuncRecordName = "__covrec_" + llvm::utohexstr(Info.NameHash);
+
+ // A dummy description for a function included-but-not-used in a TU can be
+ // replaced by full description provided by a different TU. The two kinds of
+ // descriptions play distinct roles: therefore, assign them different names
+ // to prevent `linkonce_odr` merging.
+ if (Info.IsUsed)
+ FuncRecordName += "u";
+
+ // Create the function record type.
+ const uint64_t NameHash = Info.NameHash;
+ const uint64_t FuncHash = Info.FuncHash;
+ const std::string &CoverageMapping = Info.CoverageMapping;
#define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Init) LLVMType,
- llvm::Type *FunctionRecordTypes[] = {
- #include "llvm/ProfileData/InstrProfData.inc"
- };
- FunctionRecordTy =
- llvm::StructType::get(Ctx, makeArrayRef(FunctionRecordTypes),
- /*isPacked=*/true);
- }
+ llvm::Type *FunctionRecordTypes[] = {
+#include "llvm/ProfileData/InstrProfData.inc"
+ };
+ auto *FunctionRecordTy =
+ llvm::StructType::get(Ctx, makeArrayRef(FunctionRecordTypes),
+ /*isPacked=*/true);
- #define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Init) Init,
+ // Create the function record constant.
+#define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Init) Init,
llvm::Constant *FunctionRecordVals[] = {
#include "llvm/ProfileData/InstrProfData.inc"
};
- FunctionRecords.push_back(llvm::ConstantStruct::get(
- FunctionRecordTy, makeArrayRef(FunctionRecordVals)));
+ auto *FuncRecordConstant = llvm::ConstantStruct::get(
+ FunctionRecordTy, makeArrayRef(FunctionRecordVals));
+
+ // Create the function record global.
+ auto *FuncRecord = new llvm::GlobalVariable(
+ CGM.getModule(), FunctionRecordTy, /*isConstant=*/true,
+ llvm::GlobalValue::LinkOnceODRLinkage, FuncRecordConstant,
+ FuncRecordName);
+ FuncRecord->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ FuncRecord->setSection(getInstrProfSection(CGM, llvm::IPSK_covfun));
+ FuncRecord->setAlignment(llvm::Align(8));
+ if (CGM.supportsCOMDAT())
+ FuncRecord->setComdat(CGM.getModule().getOrInsertComdat(FuncRecordName));
+
+ // Make sure the data doesn't get deleted.
+ CGM.addUsedGlobal(FuncRecord);
+}
+
+void CoverageMappingModuleGen::addFunctionMappingRecord(
+ llvm::GlobalVariable *NamePtr, StringRef NameValue, uint64_t FuncHash,
+ const std::string &CoverageMapping, bool IsUsed) {
+ llvm::LLVMContext &Ctx = CGM.getLLVMContext();
+ const uint64_t NameHash = llvm::IndexedInstrProf::ComputeHash(NameValue);
+ FunctionRecords.push_back({NameHash, FuncHash, CoverageMapping, IsUsed});
+
if (!IsUsed)
FunctionNames.push_back(
llvm::ConstantExpr::getBitCast(NamePtr, llvm::Type::getInt8PtrTy(Ctx)));
- CoverageMappings.push_back(CoverageMapping);
if (CGM.getCodeGenOpts().DumpCoverageMapping) {
// Dump the coverage mapping data for this function by decoding the
@@ -1385,37 +1439,22 @@ void CoverageMappingModuleGen::emit() {
FilenameRefs[I] = FilenameStrs[I];
}
- std::string FilenamesAndCoverageMappings;
- llvm::raw_string_ostream OS(FilenamesAndCoverageMappings);
- CoverageFilenamesSectionWriter(FilenameRefs).write(OS);
-
- // Stream the content of CoverageMappings to OS while keeping
- // memory consumption under control.
- size_t CoverageMappingSize = 0;
- for (auto &S : CoverageMappings) {
- CoverageMappingSize += S.size();
- OS << S;
- S.clear();
- S.shrink_to_fit();
- }
- CoverageMappings.clear();
- CoverageMappings.shrink_to_fit();
-
- size_t FilenamesSize = OS.str().size() - CoverageMappingSize;
- // Append extra zeroes if necessary to ensure that the size of the filenames
- // and coverage mappings is a multiple of 8.
- if (size_t Rem = OS.str().size() % 8) {
- CoverageMappingSize += 8 - Rem;
- OS.write_zeros(8 - Rem);
+ std::string Filenames;
+ {
+ llvm::raw_string_ostream OS(Filenames);
+ CoverageFilenamesSectionWriter(FilenameRefs).write(OS);
}
- auto *FilenamesAndMappingsVal =
- llvm::ConstantDataArray::getString(Ctx, OS.str(), false);
+ auto *FilenamesVal =
+ llvm::ConstantDataArray::getString(Ctx, Filenames, false);
+ const int64_t FilenamesRef = llvm::IndexedInstrProf::ComputeHash(Filenames);
- // Create the deferred function records array
- auto RecordsTy =
- llvm::ArrayType::get(FunctionRecordTy, FunctionRecords.size());
- auto RecordsVal = llvm::ConstantArray::get(RecordsTy, FunctionRecords);
+ // Emit the function records.
+ for (const FunctionInfo &Info : FunctionRecords)
+ emitFunctionMappingRecord(Info, FilenamesRef);
+ const unsigned NRecords = 0;
+ const size_t FilenamesSize = Filenames.size();
+ const unsigned CoverageMappingSize = 0;
llvm::Type *CovDataHeaderTypes[] = {
#define COVMAP_HEADER(Type, LLVMType, Name, Init) LLVMType,
#include "llvm/ProfileData/InstrProfData.inc"
@@ -1430,18 +1469,16 @@ void CoverageMappingModuleGen::emit() {
CovDataHeaderTy, makeArrayRef(CovDataHeaderVals));
// Create the coverage data record
- llvm::Type *CovDataTypes[] = {CovDataHeaderTy, RecordsTy,
- FilenamesAndMappingsVal->getType()};
+ llvm::Type *CovDataTypes[] = {CovDataHeaderTy, FilenamesVal->getType()};
auto CovDataTy = llvm::StructType::get(Ctx, makeArrayRef(CovDataTypes));
- llvm::Constant *TUDataVals[] = {CovDataHeaderVal, RecordsVal,
- FilenamesAndMappingsVal};
+ llvm::Constant *TUDataVals[] = {CovDataHeaderVal, FilenamesVal};
auto CovDataVal =
llvm::ConstantStruct::get(CovDataTy, makeArrayRef(TUDataVals));
auto CovData = new llvm::GlobalVariable(
- CGM.getModule(), CovDataTy, true, llvm::GlobalValue::InternalLinkage,
+ CGM.getModule(), CovDataTy, true, llvm::GlobalValue::PrivateLinkage,
CovDataVal, llvm::getCoverageMappingVarName());
- CovData->setSection(getCoverageSection(CGM));
+ CovData->setSection(getInstrProfSection(CGM, llvm::IPSK_covmap));
CovData->setAlignment(llvm::Align(8));
// Make sure the data doesn't get deleted.
diff --git a/clang/lib/CodeGen/CoverageMappingGen.h b/clang/lib/CodeGen/CoverageMappingGen.h
index 3bf51f590479..5d79d1e65670 100644
--- a/clang/lib/CodeGen/CoverageMappingGen.h
+++ b/clang/lib/CodeGen/CoverageMappingGen.h
@@ -47,17 +47,27 @@ class CodeGenModule;
/// Organizes the cross-function state that is used while generating
/// code coverage mapping data.
class CoverageMappingModuleGen {
+ /// Information needed to emit a coverage record for a function.
+ struct FunctionInfo {
+ uint64_t NameHash;
+ uint64_t FuncHash;
+ std::string CoverageMapping;
+ bool IsUsed;
+ };
+
CodeGenModule &CGM;
CoverageSourceInfo &SourceInfo;
llvm::SmallDenseMap<const FileEntry *, unsigned, 8> FileEntries;
- std::vector<llvm::Constant *> FunctionRecords;
std::vector<llvm::Constant *> FunctionNames;
- llvm::StructType *FunctionRecordTy;
- std::vector<std::string> CoverageMappings;
+ std::vector<FunctionInfo> FunctionRecords;
+
+ /// Emit a function record.
+ void emitFunctionMappingRecord(const FunctionInfo &Info,
+ uint64_t FilenamesRef);
public:
CoverageMappingModuleGen(CodeGenModule &CGM, CoverageSourceInfo &SourceInfo)
- : CGM(CGM), SourceInfo(SourceInfo), FunctionRecordTy(nullptr) {}
+ : CGM(CGM), SourceInfo(SourceInfo) {}
CoverageSourceInfo &getSourceInfo() const {
return SourceInfo;
diff --git a/clang/lib/CodeGen/EHScopeStack.h b/clang/lib/CodeGen/EHScopeStack.h
index 0ed67aabcd62..3a640d6117d6 100644
--- a/clang/lib/CodeGen/EHScopeStack.h
+++ b/clang/lib/CodeGen/EHScopeStack.h
@@ -85,11 +85,6 @@ enum CleanupKind : unsigned {
NormalAndEHCleanup = EHCleanup | NormalCleanup,
- InactiveCleanup = 0x4,
- InactiveEHCleanup = EHCleanup | InactiveCleanup,
- InactiveNormalCleanup = NormalCleanup | InactiveCleanup,
- InactiveNormalAndEHCleanup = NormalAndEHCleanup | InactiveCleanup,
-
LifetimeMarker = 0x8,
NormalEHLifetimeMarker = LifetimeMarker | NormalAndEHCleanup,
};
@@ -158,9 +153,10 @@ public:
/// Generation flags.
class Flags {
enum {
- F_IsForEH = 0x1,
+ F_IsForEH = 0x1,
F_IsNormalCleanupKind = 0x2,
- F_IsEHCleanupKind = 0x4
+ F_IsEHCleanupKind = 0x4,
+ F_HasExitSwitch = 0x8,
};
unsigned flags;
@@ -179,8 +175,10 @@ public:
/// cleanup.
bool isEHCleanupKind() const { return flags & F_IsEHCleanupKind; }
void setIsEHCleanupKind() { flags |= F_IsEHCleanupKind; }
- };
+ bool hasExitSwitch() const { return flags & F_HasExitSwitch; }
+ void setHasExitSwitch() { flags |= F_HasExitSwitch; }
+ };
/// Emit the cleanup. For normal cleanups, this is run in the
/// same EH context as when the cleanup was pushed, i.e. the
diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp
index b5b8702c551e..80de2a6e3950 100644
--- a/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -203,7 +203,7 @@ public:
void EmitCXXConstructors(const CXXConstructorDecl *D) override;
- AddedStructorArgs
+ AddedStructorArgCounts
buildStructorSignature(GlobalDecl GD,
SmallVectorImpl<CanQualType> &ArgTys) override;
@@ -222,10 +222,17 @@ public:
void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
- AddedStructorArgs
- addImplicitConstructorArgs(CodeGenFunction &CGF, const CXXConstructorDecl *D,
- CXXCtorType Type, bool ForVirtualBase,
- bool Delegating, CallArgList &Args) override;
+ AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
+ const CXXConstructorDecl *D,
+ CXXCtorType Type,
+ bool ForVirtualBase,
+ bool Delegating) override;
+
+ llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
+ const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ bool ForVirtualBase,
+ bool Delegating) override;
void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
CXXDtorType Type, bool ForVirtualBase,
@@ -516,6 +523,22 @@ private:
}
bool canCallMismatchedFunctionType() const override { return false; }
};
+
+class XLCXXABI final : public ItaniumCXXABI {
+public:
+ explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
+ : ItaniumCXXABI(CGM) {}
+
+ void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::FunctionCallee dtor,
+ llvm::Constant *addr) override;
+
+ bool useSinitAndSterm() const override { return true; }
+
+private:
+ void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
+ llvm::Constant *addr);
+};
}
CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
@@ -546,6 +569,9 @@ CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
case TargetCXXABI::WebAssembly:
return new WebAssemblyCXXABI(CGM);
+ case TargetCXXABI::XL:
+ return new XLCXXABI(CGM);
+
case TargetCXXABI::GenericItanium:
if (CGM.getContext().getTargetInfo().getTriple().getArch()
== llvm::Triple::le32) {
@@ -670,6 +696,10 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
CGM.HasHiddenLTOVisibility(RD);
bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
CGM.HasHiddenLTOVisibility(RD);
+ bool ShouldEmitWPDInfo =
+ CGM.getCodeGenOpts().WholeProgramVTables &&
+ // Don't insert type tests if we are forcing public std visibility.
+ !CGM.HasLTOVisibilityPublicStd(RD);
llvm::Value *VirtualFn = nullptr;
{
@@ -677,16 +707,17 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
llvm::Value *TypeId = nullptr;
llvm::Value *CheckResult = nullptr;
- if (ShouldEmitCFICheck || ShouldEmitVFEInfo) {
- // If doing CFI or VFE, we will need the metadata node to check against.
+ if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
+ // If doing CFI, VFE or WPD, we will need the metadata node to check
+ // against.
llvm::Metadata *MD =
CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
}
- llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset);
-
if (ShouldEmitVFEInfo) {
+ llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset);
+
// If doing VFE, load from the vtable with a type.checked.load intrinsic
// call. Note that we use the GEP to calculate the address to load from
// and pass 0 as the offset to the intrinsic. This is because every
@@ -702,18 +733,30 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
} else {
// When not doing VFE, emit a normal load, as it allows more
// optimisations than type.checked.load.
- if (ShouldEmitCFICheck) {
+ if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
+ llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset);
CheckResult = Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::type_test),
{Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId});
}
- VFPAddr =
- Builder.CreateBitCast(VFPAddr, FTy->getPointerTo()->getPointerTo());
- VirtualFn = Builder.CreateAlignedLoad(VFPAddr, CGF.getPointerAlign(),
- "memptr.virtualfn");
+
+ if (CGM.getItaniumVTableContext().isRelativeLayout()) {
+ VirtualFn = CGF.Builder.CreateCall(
+ CGM.getIntrinsic(llvm::Intrinsic::load_relative,
+ {VTableOffset->getType()}),
+ {VTable, VTableOffset});
+ VirtualFn = CGF.Builder.CreateBitCast(VirtualFn, FTy->getPointerTo());
+ } else {
+ llvm::Value *VFPAddr = CGF.Builder.CreateGEP(VTable, VTableOffset);
+ VFPAddr = CGF.Builder.CreateBitCast(
+ VFPAddr, FTy->getPointerTo()->getPointerTo());
+ VirtualFn = CGF.Builder.CreateAlignedLoad(
+ VFPAddr, CGF.getPointerAlign(), "memptr.virtualfn");
+ }
}
assert(VirtualFn && "Virtual fuction pointer not created!");
- assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || CheckResult) &&
+ assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
+ CheckResult) &&
"Check result required but not created!");
if (ShouldEmitCFICheck) {
@@ -984,11 +1027,16 @@ llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
llvm::Constant *MemPtr[2];
if (MD->isVirtual()) {
uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
-
- const ASTContext &Context = getContext();
- CharUnits PointerWidth =
- Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
- uint64_t VTableOffset = (Index * PointerWidth.getQuantity());
+ uint64_t VTableOffset;
+ if (CGM.getItaniumVTableContext().isRelativeLayout()) {
+ // Multiply by 4-byte relative offsets.
+ VTableOffset = Index * 4;
+ } else {
+ const ASTContext &Context = getContext();
+ CharUnits PointerWidth = Context.toCharUnitsFromBits(
+ Context.getTargetInfo().getPointerWidth(0));
+ VTableOffset = Index * PointerWidth.getQuantity();
+ }
if (UseARMMethodPtrABI) {
// ARM C++ ABI 3.2.1:
@@ -1402,8 +1450,19 @@ llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
llvm::Value *Value =
CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);
- // Load the type info.
- Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
+ if (CGM.getItaniumVTableContext().isRelativeLayout()) {
+ // Load the type info.
+ Value = CGF.Builder.CreateBitCast(Value, CGM.Int8PtrTy);
+ Value = CGF.Builder.CreateCall(
+ CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
+ {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});
+
+ // Setup to dereference again since this is a proxy we accessed.
+ Value = CGF.Builder.CreateBitCast(Value, StdTypeInfoPtrTy->getPointerTo());
+ } else {
+ // Load the type info.
+ Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
+ }
return CGF.Builder.CreateAlignedLoad(Value, CGF.getPointerAlign());
}
@@ -1459,28 +1518,37 @@ llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
Address ThisAddr,
QualType SrcRecordTy,
QualType DestTy) {
- llvm::Type *PtrDiffLTy =
- CGF.ConvertType(CGF.getContext().getPointerDiffType());
llvm::Type *DestLTy = CGF.ConvertType(DestTy);
-
auto *ClassDecl =
cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
- // Get the vtable pointer.
- llvm::Value *VTable = CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(),
- ClassDecl);
+ llvm::Value *OffsetToTop;
+ if (CGM.getItaniumVTableContext().isRelativeLayout()) {
+ // Get the vtable pointer.
+ llvm::Value *VTable =
+ CGF.GetVTablePtr(ThisAddr, CGM.Int32Ty->getPointerTo(), ClassDecl);
- // Get the offset-to-top from the vtable.
- llvm::Value *OffsetToTop =
- CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
- OffsetToTop =
- CGF.Builder.CreateAlignedLoad(OffsetToTop, CGF.getPointerAlign(),
- "offset.to.top");
+ // Get the offset-to-top from the vtable.
+ OffsetToTop =
+ CGF.Builder.CreateConstInBoundsGEP1_32(/*Type=*/nullptr, VTable, -2U);
+ OffsetToTop = CGF.Builder.CreateAlignedLoad(
+ OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
+ } else {
+ llvm::Type *PtrDiffLTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+ // Get the vtable pointer.
+ llvm::Value *VTable =
+ CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(), ClassDecl);
+
+ // Get the offset-to-top from the vtable.
+ OffsetToTop = CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
+ OffsetToTop = CGF.Builder.CreateAlignedLoad(
+ OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
+ }
// Finally, add the offset to the pointer.
llvm::Value *Value = ThisAddr.getPointer();
Value = CGF.EmitCastToVoidPtr(Value);
Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
-
return CGF.Builder.CreateBitCast(Value, DestLTy);
}
@@ -1501,17 +1569,22 @@ ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
CharUnits VBaseOffsetOffset =
CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
BaseClassDecl);
-
llvm::Value *VBaseOffsetPtr =
CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
"vbase.offset.ptr");
- VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
- CGM.PtrDiffTy->getPointerTo());
-
- llvm::Value *VBaseOffset =
- CGF.Builder.CreateAlignedLoad(VBaseOffsetPtr, CGF.getPointerAlign(),
- "vbase.offset");
+ llvm::Value *VBaseOffset;
+ if (CGM.getItaniumVTableContext().isRelativeLayout()) {
+ VBaseOffsetPtr =
+ CGF.Builder.CreateBitCast(VBaseOffsetPtr, CGF.Int32Ty->getPointerTo());
+ VBaseOffset = CGF.Builder.CreateAlignedLoad(
+ VBaseOffsetPtr, CharUnits::fromQuantity(4), "vbase.offset");
+ } else {
+ VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
+ CGM.PtrDiffTy->getPointerTo());
+ VBaseOffset = CGF.Builder.CreateAlignedLoad(
+ VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
+ }
return VBaseOffset;
}
@@ -1531,7 +1604,7 @@ void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
}
}
-CGCXXABI::AddedStructorArgs
+CGCXXABI::AddedStructorArgCounts
ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
SmallVectorImpl<CanQualType> &ArgTys) {
ASTContext &Context = getContext();
@@ -1545,9 +1618,9 @@ ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
ArgTys.insert(ArgTys.begin() + 1,
Context.getPointerType(Context.VoidPtrTy));
- return AddedStructorArgs::prefix(1);
+ return AddedStructorArgCounts::prefix(1);
}
- return AddedStructorArgs{};
+ return AddedStructorArgCounts{};
}
void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
@@ -1613,9 +1686,9 @@ void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
}
-CGCXXABI::AddedStructorArgs ItaniumCXXABI::addImplicitConstructorArgs(
+CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
- bool ForVirtualBase, bool Delegating, CallArgList &Args) {
+ bool ForVirtualBase, bool Delegating) {
if (!NeedsVTTParameter(GlobalDecl(D, Type)))
return AddedStructorArgs{};
@@ -1623,8 +1696,14 @@ CGCXXABI::AddedStructorArgs ItaniumCXXABI::addImplicitConstructorArgs(
llvm::Value *VTT =
CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
- Args.insert(Args.begin() + 1, CallArg(RValue::get(VTT), VTTTy));
- return AddedStructorArgs::prefix(1); // Added one arg.
+ return AddedStructorArgs::prefix({{VTT, VTTTy}});
+}
+
+llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
+ CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
+ bool ForVirtualBase, bool Delegating) {
+ GlobalDecl GD(DD, Type);
+ return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
}
void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
@@ -1633,7 +1712,8 @@ void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
bool Delegating, Address This,
QualType ThisTy) {
GlobalDecl GD(DD, Type);
- llvm::Value *VTT = CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
+ llvm::Value *VTT =
+ getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
CGCallee Callee;
@@ -1660,10 +1740,11 @@ void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
// Create and set the initializer.
- ConstantInitBuilder Builder(CGM);
- auto Components = Builder.beginStruct();
- CGVT.createVTableInitializer(Components, VTLayout, RTTI);
- Components.finishAndSetAsInitializer(VTable);
+ ConstantInitBuilder builder(CGM);
+ auto components = builder.beginStruct();
+ CGVT.createVTableInitializer(components, VTLayout, RTTI,
+ llvm::GlobalValue::isLocalLinkage(Linkage));
+ components.finishAndSetAsInitializer(VTable);
// Set the correct linkage.
VTable->setLinkage(Linkage);
@@ -1687,6 +1768,9 @@ void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
if (!VTable->isDeclarationForLinker())
CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
+
+ if (VTContext.isRelativeLayout() && !VTable->isDSOLocal())
+ CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
}
bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
@@ -1776,7 +1860,9 @@ llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
// Use pointer alignment for the vtable. Otherwise we would align them based
// on the size of the initializer which doesn't make sense as only single
// values are read.
- unsigned PAlign = CGM.getTarget().getPointerAlign(0);
+ unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
+ ? 32
+ : CGM.getTarget().getPointerAlign(0);
VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
Name, VTableType, llvm::GlobalValue::ExternalLinkage,
@@ -1793,9 +1879,9 @@ CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
Address This,
llvm::Type *Ty,
SourceLocation Loc) {
- Ty = Ty->getPointerTo()->getPointerTo();
auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
- llvm::Value *VTable = CGF.GetVTablePtr(This, Ty, MethodDecl->getParent());
+ llvm::Value *VTable = CGF.GetVTablePtr(
+ This, Ty->getPointerTo()->getPointerTo(), MethodDecl->getParent());
uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
llvm::Value *VFunc;
@@ -1806,10 +1892,21 @@ CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
} else {
CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);
- llvm::Value *VFuncPtr =
- CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
- auto *VFuncLoad =
- CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
+ llvm::Value *VFuncLoad;
+ if (CGM.getItaniumVTableContext().isRelativeLayout()) {
+ VTable = CGF.Builder.CreateBitCast(VTable, CGM.Int8PtrTy);
+ llvm::Value *Load = CGF.Builder.CreateCall(
+ CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
+ {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
+ VFuncLoad = CGF.Builder.CreateBitCast(Load, Ty->getPointerTo());
+ } else {
+ VTable =
+ CGF.Builder.CreateBitCast(VTable, Ty->getPointerTo()->getPointerTo());
+ llvm::Value *VTableSlotPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
+ VFuncLoad =
+ CGF.Builder.CreateAlignedLoad(VTableSlotPtr, CGF.getPointerAlign());
+ }
// Add !invariant.load md to virtual function load to indicate that
// function didn't change inside vtable.
@@ -1818,11 +1915,14 @@ CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
// the same virtual function loads from the same vtable load, which won't
// happen without enabled devirtualization with -fstrict-vtable-pointers.
if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
- CGM.getCodeGenOpts().StrictVTablePointers)
- VFuncLoad->setMetadata(
- llvm::LLVMContext::MD_invariant_load,
- llvm::MDNode::get(CGM.getLLVMContext(),
- llvm::ArrayRef<llvm::Metadata *>()));
+ CGM.getCodeGenOpts().StrictVTablePointers) {
+ if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
+ VFuncLoadInstr->setMetadata(
+ llvm::LLVMContext::MD_invariant_load,
+ llvm::MDNode::get(CGM.getLLVMContext(),
+ llvm::ArrayRef<llvm::Metadata *>()));
+ }
+ }
VFunc = VFuncLoad;
}
@@ -1939,21 +2039,28 @@ static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
// Perform the virtual adjustment if we have one.
llvm::Value *ResultPtr;
if (VirtualAdjustment) {
- llvm::Type *PtrDiffTy =
- CGF.ConvertType(CGF.getContext().getPointerDiffType());
-
Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
+ llvm::Value *Offset;
llvm::Value *OffsetPtr =
CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
+ if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
+ // Load the adjustment offset from the vtable as a 32-bit int.
+ OffsetPtr =
+ CGF.Builder.CreateBitCast(OffsetPtr, CGF.Int32Ty->getPointerTo());
+ Offset =
+ CGF.Builder.CreateAlignedLoad(OffsetPtr, CharUnits::fromQuantity(4));
+ } else {
+ llvm::Type *PtrDiffTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
- OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
-
- // Load the adjustment offset from the vtable.
- llvm::Value *Offset =
- CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
+ OffsetPtr =
+ CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
+ // Load the adjustment offset from the vtable.
+ Offset = CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
+ }
// Adjust our pointer.
ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getPointer(), Offset);
} else {
@@ -2438,7 +2545,7 @@ void CodeGenModule::registerGlobalDtorsWithAtExit() {
std::string GlobalInitFnName =
std::string("__GLOBAL_init_") + llvm::to_string(Priority);
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
- llvm::Function *GlobalInitFn = CreateGlobalInitOrDestructFunction(
+ llvm::Function *GlobalInitFn = CreateGlobalInitOrCleanUpFunction(
FTy, GlobalInitFnName, getTypes().arrangeNullaryFunction(),
SourceLocation());
ASTContext &Ctx = getContext();
@@ -2592,14 +2699,15 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
- InitFunc = CGM.CreateGlobalInitOrDestructFunction(FTy, "__tls_init", FI,
- SourceLocation(),
- /*TLS=*/true);
+ InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
+ SourceLocation(),
+ /*TLS=*/true);
llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
llvm::GlobalVariable::InternalLinkage,
llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
Guard->setThreadLocal(true);
+ Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());
CharUnits GuardAlign = CharUnits::One();
Guard->setAlignment(GuardAlign.getAsAlign());
@@ -3008,6 +3116,7 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
case BuiltinType::SatUShortFract:
case BuiltinType::SatUFract:
case BuiltinType::SatULongFract:
+ case BuiltinType::BFloat16:
return false;
case BuiltinType::Dependent:
@@ -3200,9 +3309,11 @@ void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
llvm_unreachable("Pipe types shouldn't get here");
case Type::Builtin:
+ case Type::ExtInt:
// GCC treats vector and complex types as fundamental types.
case Type::Vector:
case Type::ExtVector:
+ case Type::ConstantMatrix:
case Type::Complex:
case Type::Atomic:
// FIXME: GCC treats block pointers as fundamental types?!
@@ -3277,17 +3388,32 @@ void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
break;
}
- llvm::Constant *VTable =
- CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
+ llvm::Constant *VTable = nullptr;
+
+ // Check if the alias exists. If it doesn't, then get or create the global.
+ if (CGM.getItaniumVTableContext().isRelativeLayout())
+ VTable = CGM.getModule().getNamedAlias(VTableName);
+ if (!VTable)
+ VTable = CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
+
CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));
llvm::Type *PtrDiffTy =
- CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+ CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
// The vtable address point is 2.
- llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
- VTable =
- llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable, Two);
+ if (CGM.getItaniumVTableContext().isRelativeLayout()) {
+ // The vtable address point is 8 bytes after its start:
+ // 4 for the offset to top + 4 for the relative offset to rtti.
+ llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
+ VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
+ VTable =
+ llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
+ } else {
+ llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
+ VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable,
+ Two);
+ }
VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
Fields.push_back(VTable);
@@ -3438,6 +3564,7 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
case Type::Builtin:
case Type::Vector:
case Type::ExtVector:
+ case Type::ConstantMatrix:
case Type::Complex:
case Type::BlockPointer:
// Itanium C++ ABI 2.9.5p4:
@@ -3453,7 +3580,10 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
llvm_unreachable("Undeduced type shouldn't get here");
case Type::Pipe:
- llvm_unreachable("Pipe type shouldn't get here");
+ break;
+
+ case Type::ExtInt:
+ break;
case Type::ConstantArray:
case Type::IncompleteArray:
@@ -4401,3 +4531,70 @@ void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
ItaniumCXXABI::emitBeginCatch(CGF, C);
}
+
+/// Register a global destructor as best as we know how.
+void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::FunctionCallee dtor,
+ llvm::Constant *addr) {
+ if (D.getTLSKind() != VarDecl::TLS_None)
+ llvm::report_fatal_error("thread local storage not yet implemented on AIX");
+
+ // Create __dtor function for the var decl.
+ llvm::Function *dtorStub = CGF.createAtExitStub(D, dtor, addr);
+
+ // Register above __dtor with atexit().
+ CGF.registerGlobalDtorWithAtExit(dtorStub);
+
+ // Emit __finalize function to unregister __dtor and (as appropriate) call
+ // __dtor.
+ emitCXXStermFinalizer(D, dtorStub, addr);
+}
+
+void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
+ llvm::Constant *addr) {
+ llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
+ SmallString<256> FnName;
+ {
+ llvm::raw_svector_ostream Out(FnName);
+ getMangleContext().mangleDynamicStermFinalizer(&D, Out);
+ }
+
+ // Create the finalization action associated with a variable.
+ const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
+ llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
+ FTy, FnName.str(), FI, D.getLocation());
+
+ CodeGenFunction CGF(CGM);
+
+ CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
+ FunctionArgList());
+
+ // The unatexit subroutine unregisters __dtor functions that were previously
+ // registered by the atexit subroutine. If the referenced function is found,
+ // the unatexit returns a value of 0, meaning that the cleanup is still
+ // pending (and we should call the __dtor function).
+ llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);
+
+ llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");
+
+ llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
+ llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");
+
+ // Check if unatexit returns a value of 0. If it does, jump to
+ // DestructCallBlock, otherwise jump to EndBlock directly.
+ CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
+
+ CGF.EmitBlock(DestructCallBlock);
+
+ // Emit the call to dtorStub.
+ llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);
+
+ // Make sure the call and the callee agree on calling convention.
+ CI->setCallingConv(dtorStub->getCallingConv());
+
+ CGF.EmitBlock(EndBlock);
+
+ CGF.FinishFunction();
+
+ CGM.AddCXXStermFinalizerEntry(StermFinalizer);
+}
diff --git a/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index aff46135705a..45c6cb6b2e0d 100644
--- a/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -206,7 +206,7 @@ public:
// lacks a definition for the destructor, non-base destructors must always
// delegate to or alias the base destructor.
- AddedStructorArgs
+ AddedStructorArgCounts
buildStructorSignature(GlobalDecl GD,
SmallVectorImpl<CanQualType> &ArgTys) override;
@@ -253,10 +253,17 @@ public:
void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
- AddedStructorArgs
- addImplicitConstructorArgs(CodeGenFunction &CGF, const CXXConstructorDecl *D,
- CXXCtorType Type, bool ForVirtualBase,
- bool Delegating, CallArgList &Args) override;
+ AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
+ const CXXConstructorDecl *D,
+ CXXCtorType Type,
+ bool ForVirtualBase,
+ bool Delegating) override;
+
+ llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
+ const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ bool ForVirtualBase,
+ bool Delegating) override;
void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
CXXDtorType Type, bool ForVirtualBase,
@@ -1261,10 +1268,10 @@ void MicrosoftCXXABI::EmitVBPtrStores(CodeGenFunction &CGF,
}
}
-CGCXXABI::AddedStructorArgs
+CGCXXABI::AddedStructorArgCounts
MicrosoftCXXABI::buildStructorSignature(GlobalDecl GD,
SmallVectorImpl<CanQualType> &ArgTys) {
- AddedStructorArgs Added;
+ AddedStructorArgCounts Added;
// TODO: 'for base' flag
if (isa<CXXDestructorDecl>(GD.getDecl()) &&
GD.getDtorType() == Dtor_Deleting) {
@@ -1553,9 +1560,9 @@ void MicrosoftCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
}
}
-CGCXXABI::AddedStructorArgs MicrosoftCXXABI::addImplicitConstructorArgs(
+CGCXXABI::AddedStructorArgs MicrosoftCXXABI::getImplicitConstructorArgs(
CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
- bool ForVirtualBase, bool Delegating, CallArgList &Args) {
+ bool ForVirtualBase, bool Delegating) {
assert(Type == Ctor_Complete || Type == Ctor_Base);
// Check if we need a 'most_derived' parameter.
@@ -1570,13 +1577,16 @@ CGCXXABI::AddedStructorArgs MicrosoftCXXABI::addImplicitConstructorArgs(
} else {
MostDerivedArg = llvm::ConstantInt::get(CGM.Int32Ty, Type == Ctor_Complete);
}
- RValue RV = RValue::get(MostDerivedArg);
if (FPT->isVariadic()) {
- Args.insert(Args.begin() + 1, CallArg(RV, getContext().IntTy));
- return AddedStructorArgs::prefix(1);
+ return AddedStructorArgs::prefix({{MostDerivedArg, getContext().IntTy}});
}
- Args.add(RV, getContext().IntTy);
- return AddedStructorArgs::suffix(1);
+ return AddedStructorArgs::suffix({{MostDerivedArg, getContext().IntTy}});
+}
+
+llvm::Value *MicrosoftCXXABI::getCXXDestructorImplicitParam(
+ CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
+ bool ForVirtualBase, bool Delegating) {
+ return nullptr;
}
void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
@@ -1605,8 +1615,11 @@ void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
BaseDtorEndBB = EmitDtorCompleteObjectHandler(CGF);
}
+ llvm::Value *Implicit =
+ getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase,
+ Delegating); // = nullptr
CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy,
- /*ImplicitParam=*/nullptr,
+ /*ImplicitParam=*/Implicit,
/*ImplicitParamTy=*/QualType(), nullptr);
if (BaseDtorEndBB) {
// Complete object handler should continue to be the remaining
@@ -1621,6 +1634,15 @@ void MicrosoftCXXABI::emitVTableTypeMetadata(const VPtrInfo &Info,
if (!CGM.getCodeGenOpts().LTOUnit)
return;
+ // TODO: Should VirtualFunctionElimination also be supported here?
+ // See similar handling in CodeGenModule::EmitVTableTypeMetadata.
+ if (CGM.getCodeGenOpts().WholeProgramVTables) {
+ llvm::GlobalObject::VCallVisibility TypeVis =
+ CGM.GetVCallVisibilityLevel(RD);
+ if (TypeVis != llvm::GlobalObject::VCallVisibilityPublic)
+ VTable->setVCallVisibilityMetadata(TypeVis);
+ }
+
// The location of the first virtual function pointer in the virtual table,
// aka the "address point" on Itanium. This is at offset 0 if RTTI is
// disabled, or sizeof(void*) if RTTI is enabled.
@@ -1681,10 +1703,11 @@ void MicrosoftCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
[](const VTableComponent &VTC) { return VTC.isRTTIKind(); }))
RTTI = getMSCompleteObjectLocator(RD, *Info);
- ConstantInitBuilder Builder(CGM);
- auto Components = Builder.beginStruct();
- CGVT.createVTableInitializer(Components, VTLayout, RTTI);
- Components.finishAndSetAsInitializer(VTable);
+ ConstantInitBuilder builder(CGM);
+ auto components = builder.beginStruct();
+ CGVT.createVTableInitializer(components, VTLayout, RTTI,
+ VTable->hasLocalLinkage());
+ components.finishAndSetAsInitializer(VTable);
emitVTableTypeMetadata(*Info, RD, VTable);
}
@@ -2341,7 +2364,7 @@ void MicrosoftCXXABI::EmitThreadLocalInitFuncs(
if (!NonComdatInits.empty()) {
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
- llvm::Function *InitFunc = CGM.CreateGlobalInitOrDestructFunction(
+ llvm::Function *InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(
FTy, "__tls_init", CGM.getTypes().arrangeNullaryFunction(),
SourceLocation(), /*TLS=*/true);
CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(InitFunc, NonComdatInits);
@@ -2515,7 +2538,7 @@ void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
GuardVar->setComdat(
CGM.getModule().getOrInsertComdat(GuardVar->getName()));
if (D.getTLSKind())
- GuardVar->setThreadLocal(true);
+ CGM.setTLSMode(GuardVar, D);
if (GI && !HasPerVariableGuard)
GI->Guard = GuardVar;
}
@@ -3913,7 +3936,7 @@ MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
// Calculate the mangled name.
SmallString<256> ThunkName;
llvm::raw_svector_ostream Out(ThunkName);
- getMangleContext().mangleCXXCtor(CD, CT, Out);
+ getMangleContext().mangleName(GlobalDecl(CD, CT), Out);
// If the thunk has been generated previously, just return it.
if (llvm::GlobalValue *GV = CGM.getModule().getNamedValue(ThunkName))
@@ -4000,7 +4023,7 @@ MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
CGF.EmitCallArgs(Args, FPT, llvm::makeArrayRef(ArgVec), CD, IsCopy ? 1 : 0);
// Insert any ABI-specific implicit constructor arguments.
- AddedStructorArgs ExtraArgs =
+ AddedStructorArgCounts ExtraArgs =
addImplicitConstructorArgs(CGF, CD, Ctor_Complete,
/*ForVirtualBase=*/false,
/*Delegating=*/false, Args);
diff --git a/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp b/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
index 284e8022a3c4..0c7e5f4598f8 100644
--- a/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
+++ b/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
@@ -173,8 +173,8 @@ public:
// Prepare CGDebugInfo to emit debug info for a clang module.
auto *DI = Builder->getModuleDebugInfo();
StringRef ModuleName = llvm::sys::path::filename(MainFileName);
- DI->setPCHDescriptor({ModuleName, "", OutputFileName,
- ASTFileSignature{{{~0U, ~0U, ~0U, ~0U, ~1U}}}});
+ DI->setPCHDescriptor(
+ {ModuleName, "", OutputFileName, ASTFileSignature::createDISentinel()});
DI->setModuleMap(MMap);
}
diff --git a/clang/lib/CodeGen/PatternInit.cpp b/clang/lib/CodeGen/PatternInit.cpp
index 3410c7f21533..26ac8b63a9ba 100644
--- a/clang/lib/CodeGen/PatternInit.cpp
+++ b/clang/lib/CodeGen/PatternInit.cpp
@@ -8,6 +8,7 @@
#include "PatternInit.h"
#include "CodeGenModule.h"
+#include "clang/Basic/TargetInfo.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Type.h"
@@ -33,17 +34,15 @@ llvm::Constant *clang::CodeGen::initializationPatternFor(CodeGenModule &CGM,
constexpr bool NegativeNaN = true;
constexpr uint64_t NaNPayload = 0xFFFFFFFFFFFFFFFFull;
if (Ty->isIntOrIntVectorTy()) {
- unsigned BitWidth = cast<llvm::IntegerType>(
- Ty->isVectorTy() ? Ty->getVectorElementType() : Ty)
- ->getBitWidth();
+ unsigned BitWidth =
+ cast<llvm::IntegerType>(Ty->getScalarType())->getBitWidth();
if (BitWidth <= 64)
return llvm::ConstantInt::get(Ty, IntValue);
return llvm::ConstantInt::get(
Ty, llvm::APInt::getSplat(BitWidth, llvm::APInt(64, IntValue)));
}
if (Ty->isPtrOrPtrVectorTy()) {
- auto *PtrTy = cast<llvm::PointerType>(
- Ty->isVectorTy() ? Ty->getVectorElementType() : Ty);
+ auto *PtrTy = cast<llvm::PointerType>(Ty->getScalarType());
unsigned PtrWidth = CGM.getContext().getTargetInfo().getPointerWidth(
PtrTy->getAddressSpace());
if (PtrWidth > 64)
@@ -54,8 +53,7 @@ llvm::Constant *clang::CodeGen::initializationPatternFor(CodeGenModule &CGM,
}
if (Ty->isFPOrFPVectorTy()) {
unsigned BitWidth = llvm::APFloat::semanticsSizeInBits(
- (Ty->isVectorTy() ? Ty->getVectorElementType() : Ty)
- ->getFltSemantics());
+ Ty->getScalarType()->getFltSemantics());
llvm::APInt Payload(64, NaNPayload);
if (BitWidth >= 64)
Payload = llvm::APInt::getSplat(BitWidth, Payload);
diff --git a/clang/lib/CodeGen/SanitizerMetadata.cpp b/clang/lib/CodeGen/SanitizerMetadata.cpp
index 24ae6c6e362f..cdf83370c41f 100644
--- a/clang/lib/CodeGen/SanitizerMetadata.cpp
+++ b/clang/lib/CodeGen/SanitizerMetadata.cpp
@@ -13,6 +13,7 @@
#include "CodeGenModule.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Type.h"
+#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Constants.h"
@@ -30,16 +31,16 @@ static bool isAsanHwasanOrMemTag(const SanitizerSet& SS) {
void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV,
SourceLocation Loc, StringRef Name,
QualType Ty, bool IsDynInit,
- bool IsBlacklisted) {
+ bool IsExcluded) {
if (!isAsanHwasanOrMemTag(CGM.getLangOpts().Sanitize))
return;
IsDynInit &= !CGM.isInSanitizerBlacklist(GV, Loc, Ty, "init");
- IsBlacklisted |= CGM.isInSanitizerBlacklist(GV, Loc, Ty);
+ IsExcluded |= CGM.isInSanitizerBlacklist(GV, Loc, Ty);
llvm::Metadata *LocDescr = nullptr;
llvm::Metadata *GlobalName = nullptr;
llvm::LLVMContext &VMContext = CGM.getLLVMContext();
- if (!IsBlacklisted) {
+ if (!IsExcluded) {
// Don't generate source location and global name if it is blacklisted -
// it won't be instrumented anyway.
LocDescr = getLocationMetadata(Loc);
@@ -52,7 +53,7 @@ void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV,
llvm::ConstantAsMetadata::get(
llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), IsDynInit)),
llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
- llvm::Type::getInt1Ty(VMContext), IsBlacklisted))};
+ llvm::Type::getInt1Ty(VMContext), IsExcluded))};
llvm::MDNode *ThisGlobal = llvm::MDNode::get(VMContext, GlobalMetadata);
llvm::NamedMDNode *AsanGlobals =
@@ -68,12 +69,12 @@ void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV,
llvm::raw_string_ostream OS(QualName);
D.printQualifiedName(OS);
- bool IsBlacklisted = false;
+ bool IsExcluded = false;
for (auto Attr : D.specific_attrs<NoSanitizeAttr>())
if (Attr->getMask() & SanitizerKind::Address)
- IsBlacklisted = true;
+ IsExcluded = true;
reportGlobalToASan(GV, D.getLocation(), OS.str(), D.getType(), IsDynInit,
- IsBlacklisted);
+ IsExcluded);
}
void SanitizerMetadata::disableSanitizerForGlobal(llvm::GlobalVariable *GV) {
diff --git a/clang/lib/CodeGen/SanitizerMetadata.h b/clang/lib/CodeGen/SanitizerMetadata.h
index 7ffac4360d9c..440a54590acc 100644
--- a/clang/lib/CodeGen/SanitizerMetadata.h
+++ b/clang/lib/CodeGen/SanitizerMetadata.h
@@ -40,7 +40,7 @@ public:
bool IsDynInit = false);
void reportGlobalToASan(llvm::GlobalVariable *GV, SourceLocation Loc,
StringRef Name, QualType Ty, bool IsDynInit = false,
- bool IsBlacklisted = false);
+ bool IsExcluded = false);
void disableSanitizerForGlobal(llvm::GlobalVariable *GV);
void disableSanitizerForInstruction(llvm::Instruction *I);
private:
diff --git a/clang/lib/CodeGen/SwiftCallingConv.cpp b/clang/lib/CodeGen/SwiftCallingConv.cpp
index 8bce93b71c0c..3d7421ac2e16 100644
--- a/clang/lib/CodeGen/SwiftCallingConv.cpp
+++ b/clang/lib/CodeGen/SwiftCallingConv.cpp
@@ -694,7 +694,7 @@ swiftcall::splitLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
// Try to split the vector type in half.
if (numElts >= 4 && isPowerOf2(numElts)) {
if (isLegalVectorType(CGM, vectorSize / 2, eltTy, numElts / 2))
- return {llvm::VectorType::get(eltTy, numElts / 2), 2};
+ return {llvm::FixedVectorType::get(eltTy, numElts / 2), 2};
}
return {eltTy, numElts};
@@ -747,7 +747,8 @@ void swiftcall::legalizeVectorType(CodeGenModule &CGM, CharUnits origVectorSize,
// Add the right number of vectors of this size.
auto numVecs = numElts >> logCandidateNumElts;
- components.append(numVecs, llvm::VectorType::get(eltTy, candidateNumElts));
+ components.append(numVecs,
+ llvm::FixedVectorType::get(eltTy, candidateNumElts));
numElts -= (numVecs << logCandidateNumElts);
if (numElts == 0) return;
@@ -757,7 +758,7 @@ void swiftcall::legalizeVectorType(CodeGenModule &CGM, CharUnits origVectorSize,
// This only needs to be separately checked if it's not a power of 2.
if (numElts > 2 && !isPowerOf2(numElts) &&
isLegalVectorType(CGM, eltSize * numElts, eltTy, numElts)) {
- components.push_back(llvm::VectorType::get(eltTy, numElts));
+ components.push_back(llvm::FixedVectorType::get(eltTy, numElts));
return;
}
diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
index 682ef18da73b..9cd63ebe29ee 100644
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -20,6 +20,7 @@
#include "clang/AST/Attr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/CodeGenOptions.h"
+#include "clang/Basic/DiagnosticFrontend.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/SmallBitVector.h"
@@ -28,6 +29,7 @@
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm> // std::sort
@@ -96,6 +98,17 @@ Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
return Address::invalid();
}
+bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
+ if (Ty->isPromotableIntegerType())
+ return true;
+
+ if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (EIT->getNumBits() < getContext().getTypeSize(getContext().IntTy))
+ return true;
+
+ return false;
+}
+
ABIInfo::~ABIInfo() {}
/// Does the given lowering require more than the given number of
@@ -384,7 +397,7 @@ static Address emitMergePHI(CodeGenFunction &CGF,
return Address(PHI, Align);
}
-TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
+TargetCodeGenInfo::~TargetCodeGenInfo() = default;
// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
@@ -486,11 +499,15 @@ static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
// Constant arrays of empty records count as empty, strip them off.
// Constant arrays of zero length always count as empty.
+ bool WasArray = false;
if (AllowArrays)
while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
if (AT->getSize() == 0)
return true;
FT = AT->getElementType();
+ // The [[no_unique_address]] special case below does not apply to
+ // arrays of C++ empty records, so we need to remember this fact.
+ WasArray = true;
}
const RecordType *RT = FT->getAs<RecordType>();
@@ -501,7 +518,14 @@ static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
//
// FIXME: We should use a predicate for whether this behavior is true in the
// current ABI.
- if (isa<CXXRecordDecl>(RT->getDecl()))
+ //
+ // The exception to the above rule are fields marked with the
+ // [[no_unique_address]] attribute (since C++20). Those do count as empty
+ // according to the Itanium ABI. The exception applies only to records,
+ // not arrays of records, so we must also check whether we stripped off an
+ // array type above.
+ if (isa<CXXRecordDecl>(RT->getDecl()) &&
+ (WasArray || !FD->hasAttr<NoUniqueAddressAttr>()))
return false;
return isEmptyRecord(Context, FT, AllowArrays);
@@ -681,7 +705,7 @@ public:
class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
- : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
+ : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
};
ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
@@ -700,8 +724,16 @@ ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
+ ASTContext &Context = getContext();
+ if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (EIT->getNumBits() >
+ Context.getTypeSize(Context.getTargetInfo().hasInt128Type()
+ ? Context.Int128Ty
+ : Context.LongLongTy))
+ return getNaturalAlignIndirect(Ty);
+
+ return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
}
ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
@@ -715,8 +747,15 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
+ if (const auto *EIT = RetTy->getAs<ExtIntType>())
+ if (EIT->getNumBits() >
+ getContext().getTypeSize(getContext().getTargetInfo().hasInt128Type()
+ ? getContext().Int128Ty
+ : getContext().LongLongTy))
+ return getNaturalAlignIndirect(RetTy);
+
+ return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
}
//===----------------------------------------------------------------------===//
@@ -726,11 +765,19 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
//===----------------------------------------------------------------------===//
class WebAssemblyABIInfo final : public SwiftABIInfo {
+public:
+ enum ABIKind {
+ MVP = 0,
+ ExperimentalMV = 1,
+ };
+
+private:
DefaultABIInfo defaultInfo;
+ ABIKind Kind;
public:
- explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT)
- : SwiftABIInfo(CGT), defaultInfo(CGT) {}
+ explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind)
+ : SwiftABIInfo(CGT), defaultInfo(CGT), Kind(Kind) {}
private:
ABIArgInfo classifyReturnType(QualType RetTy) const;
@@ -761,8 +808,9 @@ private:
class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
public:
- explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
- : TargetCodeGenInfo(new WebAssemblyABIInfo(CGT)) {}
+ explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
+ WebAssemblyABIInfo::ABIKind K)
+ : TargetCodeGenInfo(std::make_unique<WebAssemblyABIInfo>(CGT, K)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const override {
@@ -813,6 +861,20 @@ ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
// though watch out for things like bitfields.
if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+ // For the experimental multivalue ABI, fully expand all other aggregates
+ if (Kind == ABIKind::ExperimentalMV) {
+ const RecordType *RT = Ty->getAs<RecordType>();
+ assert(RT);
+ bool HasBitField = false;
+ for (auto *Field : RT->getDecl()->fields()) {
+ if (Field->isBitField()) {
+ HasBitField = true;
+ break;
+ }
+ }
+ if (!HasBitField)
+ return ABIArgInfo::getExpand();
+ }
}
// Otherwise just do the default thing.
@@ -832,6 +894,9 @@ ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
// ABIArgInfo::getDirect().
if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+ // For the experimental multivalue ABI, return all other aggregates
+ if (Kind == ABIKind::ExperimentalMV)
+ return ABIArgInfo::getDirect();
}
}
@@ -871,8 +936,8 @@ class PNaClABIInfo : public ABIInfo {
class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
- PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
- : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
+ PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<PNaClABIInfo>(CGT)) {}
};
void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
@@ -906,10 +971,15 @@ ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
} else if (Ty->isFloatingType()) {
// Floating-point types don't go inreg.
return ABIArgInfo::getDirect();
+ } else if (const auto *EIT = Ty->getAs<ExtIntType>()) {
+ // Treat extended integers as integers if <=64, otherwise pass indirectly.
+ if (EIT->getNumBits() > 64)
+ return getNaturalAlignIndirect(Ty);
+ return ABIArgInfo::getDirect();
}
- return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
+ return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
}
ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
@@ -920,12 +990,19 @@ ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
if (isAggregateTypeForABI(RetTy))
return getNaturalAlignIndirect(RetTy);
+ // Treat extended integers as integers if <=64, otherwise pass indirectly.
+ if (const auto *EIT = RetTy->getAs<ExtIntType>()) {
+ if (EIT->getNumBits() > 64)
+ return getNaturalAlignIndirect(RetTy);
+ return ABIArgInfo::getDirect();
+ }
+
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
+ return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
}
/// IsX86_MMXType - Return true if this is an MMX type.
@@ -943,7 +1020,8 @@ static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
.Cases("y", "&y", "^Ym", true)
.Default(false);
if (IsMMXCons && Ty->isVectorTy()) {
- if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
+ if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedSize() !=
+ 64) {
// Invalid MMX constraint
return nullptr;
}
@@ -1112,7 +1190,7 @@ public:
X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
bool RetSmallStructInRegABI, bool Win32StructABI,
unsigned NumRegisterParameters, bool SoftFloatABI)
- : TargetCodeGenInfo(new X86_32ABIInfo(
+ : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>(
CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
NumRegisterParameters, SoftFloatABI)) {}
@@ -1412,8 +1490,8 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
// registers and we need to make sure to pick a type the LLVM
// backend will like.
if (Size == 128)
- return ABIArgInfo::getDirect(llvm::VectorType::get(
- llvm::Type::getInt64Ty(getVMContext()), 2));
+ return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
+ llvm::Type::getInt64Ty(getVMContext()), 2));
// Always return in register if it fits in a general purpose
// register, or if it is 64 bits and has a single element.
@@ -1470,15 +1548,19 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
+ if (const auto *EIT = RetTy->getAs<ExtIntType>())
+ if (EIT->getNumBits() > 64)
+ return getIndirectReturnResult(RetTy, State);
+
+ return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
}
-static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
+static bool isSIMDVectorType(ASTContext &Context, QualType Ty) {
return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}
-static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
+static bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) {
const RecordType *RT = Ty->getAs<RecordType>();
if (!RT)
return 0;
@@ -1487,16 +1569,16 @@ static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
for (const auto &I : CXXRD->bases())
- if (!isRecordWithSSEVectorType(Context, I.getType()))
+ if (!isRecordWithSIMDVectorType(Context, I.getType()))
return false;
for (const auto *i : RD->fields()) {
QualType FT = i->getType();
- if (isSSEVectorType(Context, FT))
+ if (isSIMDVectorType(Context, FT))
return true;
- if (isRecordWithSSEVectorType(Context, FT))
+ if (isRecordWithSIMDVectorType(Context, FT))
return true;
}
@@ -1517,8 +1599,8 @@ unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
}
// Otherwise, if the type contains an SSE vector type, the alignment is 16.
- if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
- isRecordWithSSEVectorType(getContext(), Ty)))
+ if (Align >= 16 && (isSIMDVectorType(getContext(), Ty) ||
+ isRecordWithSIMDVectorType(getContext(), Ty)))
return 16;
return MinABIStackAlignInBytes;
@@ -1661,7 +1743,7 @@ void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) c
isHomogeneousAggregate(Ty, Base, NumElts)) {
if (State.FreeSSERegs >= NumElts) {
State.FreeSSERegs -= NumElts;
- Args[I].info = ABIArgInfo::getDirect();
+ Args[I].info = ABIArgInfo::getDirectInReg();
State.IsPreassigned.set(I);
}
}
@@ -1676,6 +1758,7 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;
Ty = useFirstFieldIfTransparentUnion(Ty);
+ TypeInfo TI = getContext().getTypeInfo(Ty);
// Check with the C++ ABI first.
const RecordType *RT = Ty->getAs<RecordType>();
@@ -1725,7 +1808,7 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
bool NeedsPadding = false;
bool InReg;
if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
- unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+ unsigned SizeInRegs = (TI.Width + 31) / 32;
SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
if (InReg)
@@ -1735,14 +1818,19 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
}
llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;
+ // Pass over-aligned aggregates on Windows indirectly. This behavior was
+ // added in MSVC 2015.
+ if (IsWin32StructABI && TI.AlignIsRequired && TI.Align > 32)
+ return getIndirectResult(Ty, /*ByVal=*/false, State);
+
// Expand small (<= 128-bit) record types when we know that the stack layout
// of those arguments will match the struct. This is important because the
// LLVM backend isn't smart enough to remove byval, which inhibits many
// optimizations.
// Don't do this for the MCU if there are still free integer registers
// (see X86_64 ABI for full explanation).
- if (getContext().getTypeSize(Ty) <= 4 * 32 &&
- (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty))
+ if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
+ canExpandIndirectArgument(Ty))
return ABIArgInfo::getExpandWithPadding(
IsFastCall || IsVectorCall || IsRegCall, PaddingType);
@@ -1750,14 +1838,24 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
}
if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ // On Windows, vectors are passed directly if registers are available, or
+ // indirectly if not. This avoids the need to align argument memory. Pass
+ // user-defined vector types larger than 512 bits indirectly for simplicity.
+ if (IsWin32StructABI) {
+ if (TI.Width <= 512 && State.FreeSSERegs > 0) {
+ --State.FreeSSERegs;
+ return ABIArgInfo::getDirectInReg();
+ }
+ return getIndirectResult(Ty, /*ByVal=*/false, State);
+ }
+
// On Darwin, some vectors are passed in memory, we handle this by passing
// it as an i8/i16/i32/i64.
if (IsDarwinVectorABI) {
- uint64_t Size = getContext().getTypeSize(Ty);
- if ((Size == 8 || Size == 16 || Size == 32) ||
- (Size == 64 && VT->getNumElements() == 1))
- return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
- Size));
+ if ((TI.Width == 8 || TI.Width == 16 || TI.Width == 32) ||
+ (TI.Width == 64 && VT->getNumElements() == 1))
+ return ABIArgInfo::getDirect(
+ llvm::IntegerType::get(getVMContext(), TI.Width));
}
if (IsX86_MMXType(CGT.ConvertType(Ty)))
@@ -1772,12 +1870,21 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
bool InReg = shouldPrimitiveUseInReg(Ty, State);
- if (Ty->isPromotableIntegerType()) {
+ if (isPromotableIntegerTypeForABI(Ty)) {
if (InReg)
return ABIArgInfo::getExtendInReg(Ty);
return ABIArgInfo::getExtend(Ty);
}
+ if (const auto * EIT = Ty->getAs<ExtIntType>()) {
+ if (EIT->getNumBits() <= 64) {
+ if (InReg)
+ return ABIArgInfo::getDirectInReg();
+ return ABIArgInfo::getDirect();
+ }
+ return getIndirectResult(Ty, /*ByVal=*/false, State);
+ }
+
if (InReg)
return ABIArgInfo::getDirectInReg();
return ABIArgInfo::getDirect();
@@ -1787,9 +1894,10 @@ void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
CCState State(FI);
if (IsMCUABI)
State.FreeRegs = 3;
- else if (State.CC == llvm::CallingConv::X86_FastCall)
+ else if (State.CC == llvm::CallingConv::X86_FastCall) {
State.FreeRegs = 2;
- else if (State.CC == llvm::CallingConv::X86_VectorCall) {
+ State.FreeSSERegs = 3;
+ } else if (State.CC == llvm::CallingConv::X86_VectorCall) {
State.FreeRegs = 2;
State.FreeSSERegs = 6;
} else if (FI.getHasRegParm())
@@ -1797,6 +1905,11 @@ void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
else if (State.CC == llvm::CallingConv::X86_RegCall) {
State.FreeRegs = 5;
State.FreeSSERegs = 8;
+ } else if (IsWin32StructABI) {
+ // Since MSVC 2015, the first three SSE vectors have been passed in
+ // registers. The rest are passed indirectly.
+ State.FreeRegs = DefaultNumRegisterParameters;
+ State.FreeSSERegs = 3;
} else
State.FreeRegs = DefaultNumRegisterParameters;
@@ -1843,16 +1956,25 @@ X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
CharUnits &StackOffset, ABIArgInfo &Info,
QualType Type) const {
// Arguments are always 4-byte-aligned.
- CharUnits FieldAlign = CharUnits::fromQuantity(4);
+ CharUnits WordSize = CharUnits::fromQuantity(4);
+ assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct");
- assert(StackOffset.isMultipleOf(FieldAlign) && "unaligned inalloca struct");
- Info = ABIArgInfo::getInAlloca(FrameFields.size());
- FrameFields.push_back(CGT.ConvertTypeForMem(Type));
- StackOffset += getContext().getTypeSizeInChars(Type);
+ // sret pointers and indirect things will require an extra pointer
+ // indirection, unless they are byval. Most things are byval, and will not
+ // require this indirection.
+ bool IsIndirect = false;
+ if (Info.isIndirect() && !Info.getIndirectByVal())
+ IsIndirect = true;
+ Info = ABIArgInfo::getInAlloca(FrameFields.size(), IsIndirect);
+ llvm::Type *LLTy = CGT.ConvertTypeForMem(Type);
+ if (IsIndirect)
+ LLTy = LLTy->getPointerTo(0);
+ FrameFields.push_back(LLTy);
+ StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type);
// Insert padding bytes to respect alignment.
CharUnits FieldEnd = StackOffset;
- StackOffset = FieldEnd.alignTo(FieldAlign);
+ StackOffset = FieldEnd.alignTo(WordSize);
if (StackOffset != FieldEnd) {
CharUnits NumBytes = StackOffset - FieldEnd;
llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
@@ -1866,16 +1988,12 @@ static bool isArgInAlloca(const ABIArgInfo &Info) {
switch (Info.getKind()) {
case ABIArgInfo::InAlloca:
return true;
- case ABIArgInfo::Indirect:
- assert(Info.getIndirectByVal());
- return true;
case ABIArgInfo::Ignore:
return false;
+ case ABIArgInfo::Indirect:
case ABIArgInfo::Direct:
case ABIArgInfo::Extend:
- if (Info.getInReg())
- return false;
- return true;
+ return !Info.getInReg();
case ABIArgInfo::Expand:
case ABIArgInfo::CoerceAndExpand:
// These are aggregate types which are never passed in registers when
@@ -1909,8 +2027,7 @@ void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
// Put the sret parameter into the inalloca struct if it's in memory.
if (Ret.isIndirect() && !Ret.getInReg()) {
- CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
- addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
+ addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType());
// On Windows, the hidden sret parameter is always returned in eax.
Ret.setInAllocaSRet(IsWin32StructABI);
}
@@ -2207,7 +2324,7 @@ public:
if (info.isDirect()) {
llvm::Type *ty = info.getCoerceToType();
if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
- return (vectorTy->getBitWidth() > 128);
+ return vectorTy->getPrimitiveSizeInBits().getFixedSize() > 128;
}
return false;
}
@@ -2280,7 +2397,7 @@ private:
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
- : TargetCodeGenInfo(new X86_64ABIInfo(CGT, AVXLevel)) {}
+ : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {}
const X86_64ABIInfo &getABIInfo() const {
return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
@@ -2361,8 +2478,110 @@ public:
}
}
}
+
+ void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
+ const FunctionDecl *Caller,
+ const FunctionDecl *Callee,
+ const CallArgList &Args) const override;
};
+static void initFeatureMaps(const ASTContext &Ctx,
+ llvm::StringMap<bool> &CallerMap,
+ const FunctionDecl *Caller,
+ llvm::StringMap<bool> &CalleeMap,
+ const FunctionDecl *Callee) {
+ if (CalleeMap.empty() && CallerMap.empty()) {
+ // The caller is potentially nullptr in the case where the call isn't in a
+ // function. In this case, the getFunctionFeatureMap ensures we just get
+ // the TU level setting (since it cannot be modified by 'target'..
+ Ctx.getFunctionFeatureMap(CallerMap, Caller);
+ Ctx.getFunctionFeatureMap(CalleeMap, Callee);
+ }
+}
+
+static bool checkAVXParamFeature(DiagnosticsEngine &Diag,
+ SourceLocation CallLoc,
+ const llvm::StringMap<bool> &CallerMap,
+ const llvm::StringMap<bool> &CalleeMap,
+ QualType Ty, StringRef Feature,
+ bool IsArgument) {
+ bool CallerHasFeat = CallerMap.lookup(Feature);
+ bool CalleeHasFeat = CalleeMap.lookup(Feature);
+ if (!CallerHasFeat && !CalleeHasFeat)
+ return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
+ << IsArgument << Ty << Feature;
+
+ // Mixing calling conventions here is very clearly an error.
+ if (!CallerHasFeat || !CalleeHasFeat)
+ return Diag.Report(CallLoc, diag::err_avx_calling_convention)
+ << IsArgument << Ty << Feature;
+
+ // Else, both caller and callee have the required feature, so there is no need
+ // to diagnose.
+ return false;
+}
+
+static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx,
+ SourceLocation CallLoc,
+ const llvm::StringMap<bool> &CallerMap,
+ const llvm::StringMap<bool> &CalleeMap, QualType Ty,
+ bool IsArgument) {
+ uint64_t Size = Ctx.getTypeSize(Ty);
+ if (Size > 256)
+ return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty,
+ "avx512f", IsArgument);
+
+ if (Size > 128)
+ return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, "avx",
+ IsArgument);
+
+ return false;
+}
+
+void X86_64TargetCodeGenInfo::checkFunctionCallABI(
+ CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
+ const FunctionDecl *Callee, const CallArgList &Args) const {
+ llvm::StringMap<bool> CallerMap;
+ llvm::StringMap<bool> CalleeMap;
+ unsigned ArgIndex = 0;
+
+ // We need to loop through the actual call arguments rather than the the
+ // function's parameters, in case this variadic.
+ for (const CallArg &Arg : Args) {
+ // The "avx" feature changes how vectors >128 in size are passed. "avx512f"
+ // additionally changes how vectors >256 in size are passed. Like GCC, we
+ // warn when a function is called with an argument where this will change.
+ // Unlike GCC, we also error when it is an obvious ABI mismatch, that is,
+ // the caller and callee features are mismatched.
+ // Unfortunately, we cannot do this diagnostic in SEMA, since the callee can
+ // change its ABI with attribute-target after this call.
+ if (Arg.getType()->isVectorType() &&
+ CGM.getContext().getTypeSize(Arg.getType()) > 128) {
+ initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
+ QualType Ty = Arg.getType();
+ // The CallArg seems to have desugared the type already, so for clearer
+ // diagnostics, replace it with the type in the FunctionDecl if possible.
+ if (ArgIndex < Callee->getNumParams())
+ Ty = Callee->getParamDecl(ArgIndex)->getType();
+
+ if (checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
+ CalleeMap, Ty, /*IsArgument*/ true))
+ return;
+ }
+ ++ArgIndex;
+ }
+
+ // Check return always, as we don't have a good way of knowing in codegen
+ // whether this value is used, tail-called, etc.
+ if (Callee->getReturnType()->isVectorType() &&
+ CGM.getContext().getTypeSize(Callee->getReturnType()) > 128) {
+ initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
+ checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
+ CalleeMap, Callee->getReturnType(),
+ /*IsArgument*/ false);
+ }
+}
+
static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
// If the argument does not end in .lib, automatically add the suffix.
// If the argument contains a space, enclose it in quotes.
@@ -2424,7 +2643,7 @@ class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
X86AVXABILevel AVXLevel)
- : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT, AVXLevel)) {}
+ : TargetCodeGenInfo(std::make_unique<WinX86_64ABIInfo>(CGT, AVXLevel)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const override;
@@ -2731,6 +2950,15 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
return;
}
+ if (const auto *EITy = Ty->getAs<ExtIntType>()) {
+ if (EITy->getNumBits() <= 64)
+ Current = Integer;
+ else if (EITy->getNumBits() <= 128)
+ Lo = Hi = Integer;
+ // Larger values need to get passed in memory.
+ return;
+ }
+
if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
// Arrays are treated like structures.
@@ -2905,8 +3133,11 @@ ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
+ if (Ty->isExtIntType())
+ return getNaturalAlignIndirect(Ty);
+
+ return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
}
return getNaturalAlignIndirect(Ty);
@@ -2938,13 +3169,14 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
// the argument in the free register. This does not seem to happen currently,
// but this code would be much safer if we could mark the argument with
// 'onstack'. See PR12193.
- if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
+ if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) &&
+ !Ty->isExtIntType()) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
+ return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
}
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
@@ -3001,11 +3233,11 @@ llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
// Don't pass vXi128 vectors in their native type, the backend can't
// legalize them.
if (passInt128VectorsInMem() &&
- IRType->getVectorElementType()->isIntegerTy(128)) {
+ cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {
// Use a vXi64 vector.
uint64_t Size = getContext().getTypeSize(Ty);
- return llvm::VectorType::get(llvm::Type::getInt64Ty(getVMContext()),
- Size / 64);
+ return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
+ Size / 64);
}
return IRType;
@@ -3020,8 +3252,8 @@ llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
// Return a LLVM IR vector type based on the size of 'Ty'.
- return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
- Size / 64);
+ return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
+ Size / 64);
}
/// BitsContainNoUserData - Return true if the specified [start,end) bit range
@@ -3155,7 +3387,8 @@ GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
// case.
if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
- return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
+ return llvm::FixedVectorType::get(llvm::Type::getFloatTy(getVMContext()),
+ 2);
return llvm::Type::getDoubleTy(getVMContext());
}
@@ -3326,7 +3559,7 @@ classifyReturnType(QualType RetTy) const {
RetTy = EnumTy->getDecl()->getIntegerType();
if (RetTy->isIntegralOrEnumerationType() &&
- RetTy->isPromotableIntegerType())
+ isPromotableIntegerTypeForABI(RetTy))
return ABIArgInfo::getExtend(RetTy);
}
break;
@@ -3471,7 +3704,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(
Ty = EnumTy->getDecl()->getIntegerType();
if (Ty->isIntegralOrEnumerationType() &&
- Ty->isPromotableIntegerType())
+ isPromotableIntegerTypeForABI(Ty))
return ABIArgInfo::getExtend(Ty);
}
@@ -3627,14 +3860,15 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
} else {
FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
}
- } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>()) {
+ } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>() &&
+ getContext().getCanonicalType(FI.getReturnType()
+ ->getAs<ComplexType>()
+ ->getElementType()) ==
+ getContext().LongDoubleTy)
// Complex Long Double Type is passed in Memory when Regcall
// calling convention is used.
- const ComplexType *CT = FI.getReturnType()->getAs<ComplexType>();
- if (getContext().getCanonicalType(CT->getElementType()) ==
- getContext().LongDoubleTy)
- FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
- } else
+ FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
+ else
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
}
@@ -4021,14 +4255,25 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
// Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that.
// Clang matches them for compatibility.
- return ABIArgInfo::getDirect(
- llvm::VectorType::get(llvm::Type::getInt64Ty(getVMContext()), 2));
+ return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
+ llvm::Type::getInt64Ty(getVMContext()), 2));
default:
break;
}
}
+ if (Ty->isExtIntType()) {
+ // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
+ // not 1, 2, 4, or 8 bytes, must be passed by reference."
+ // However, non-power-of-two _ExtInts will be passed as 1,2,4 or 8 bytes
+ // anyway as long as it fits in them, so we don't have to check the power of
+ // 2.
+ if (Width <= 64)
+ return ABIArgInfo::getDirect();
+ return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+ }
+
return ABIArgInfo::getDirect();
}
@@ -4118,17 +4363,247 @@ Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
/*allowHigherAlign*/ false);
}
+// Shared DWARF EH register-size table initializer for all PPC flavors
+// (32-bit, 64-bit, and AIX). Fills the byte-size entry for each DWARF
+// register number in the table at `Address`. Is64Bit selects 8-byte GPRs
+// and special registers; IsAIX stops after the registers AIX defines.
+static bool PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address, bool Is64Bit,
+ bool IsAIX) {
+ // This is calculated from the LLVM and GCC tables and verified
+ // against gcc output. AFAIK all PPC ABIs use the same encoding.
+
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::IntegerType *i8 = CGF.Int8Ty;
+ llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+ llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
+
+ // 0-31: r0-31, the 4-byte or 8-byte general-purpose registers
+ AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 0, 31);
+
+ // 32-63: fp0-31, the 8-byte floating-point registers
+ AssignToArrayRange(Builder, Address, Eight8, 32, 63);
+
+ // 64-67 are various 4-byte or 8-byte special-purpose registers:
+ // 64: mq
+ // 65: lr
+ // 66: ctr
+ // 67: ap
+ AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 64, 67);
+
+ // 68-76 are various 4-byte special-purpose registers:
+ // 68-75 cr0-7
+ // 76: xer
+ AssignToArrayRange(Builder, Address, Four8, 68, 76);
+
+ // 77-108: v0-31, the 16-byte vector registers
+ AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
+
+ // 109: vrsave
+ // 110: vscr
+ AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 109, 110);
+
+ // AIX does not utilize the rest of the registers.
+ if (IsAIX)
+ return false;
+
+ // 111: spe_acc
+ // 112: spefscr
+ // 113: sfp
+ AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 111, 113);
+
+ if (!Is64Bit)
+ return false;
+
+ // TODO: Need to verify if these registers are used on 64 bit AIX with Power8
+ // or above CPU.
+ // 64-bit only registers:
+ // 114: tfhar
+ // 115: tfiar
+ // 116: texasr
+ AssignToArrayRange(Builder, Address, Eight8, 114, 116);
+
+ // NOTE(review): all callers in this file forward this return value from
+ // initDwarfEHRegSizeTable overrides; false appears to mean "table
+ // initialized" — confirm against TargetCodeGenInfo's contract.
+ return false;
+}
+
+// AIX
+namespace {
+/// AIXABIInfo - The AIX XCOFF ABI information.
+class AIXABIInfo : public ABIInfo {
+ // True when targeting 64-bit AIX; also drives PtrByteSize below.
+ const bool Is64Bit;
+ // Pointer/register slot size in bytes: 8 on 64-bit, 4 on 32-bit.
+ const unsigned PtrByteSize;
+ CharUnits getParamTypeAlignment(QualType Ty) const;
+
+public:
+ AIXABIInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
+ : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 8 : 4) {}
+
+ bool isPromotableTypeForABI(QualType Ty) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType Ty) const;
+
+ // Classify the return type and every argument of the function, letting
+ // the C++ ABI take precedence for the return value (e.g. sret).
+ void computeInfo(CGFunctionInfo &FI) const override {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
+ }
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+class AIXTargetCodeGenInfo : public TargetCodeGenInfo {
+ const bool Is64Bit;
+
+public:
+ AIXTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
+ : TargetCodeGenInfo(std::make_unique<AIXABIInfo>(CGT, Is64Bit)),
+ Is64Bit(Is64Bit) {}
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ return 1; // r1 is the dedicated stack pointer
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override;
+};
+} // namespace
+
+// Return true if the ABI requires Ty to be passed sign- or zero-
+// extended to 32/64 bits.
+bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // Promotable integer types are required to be promoted by the ABI.
+ if (Ty->isPromotableIntegerType())
+ return true;
+
+ // 32-bit mode: nothing beyond the standard C promotions is required.
+ if (!Is64Bit)
+ return false;
+
+ // For 64 bit mode, in addition to the usual promotable integer types, we also
+ // need to extend all 32-bit types, since the ABI requires promotion to 64
+ // bits.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+// Classify how a function result is returned under the AIX XCOFF ABI.
+// Complex and vector returns are not implemented yet and abort compilation;
+// aggregates go indirectly (sret); scalars are extended or passed direct.
+ABIArgInfo AIXABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isAnyComplexType())
+ llvm::report_fatal_error("complex type is not supported on AIX yet");
+
+ if (RetTy->isVectorType())
+ llvm::report_fatal_error("vector type is not supported on AIX yet");
+
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ // TODO: Evaluate if AIX power alignment rule would have an impact on the
+ // alignment here.
+ if (isAggregateTypeForABI(RetTy))
+ return getNaturalAlignIndirect(RetTy);
+
+ return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
+}
+
+// Classify how a single argument is passed under the AIX XCOFF ABI.
+// Aggregates are passed byval with the calling-convention alignment;
+// scalars are extended or passed direct per isPromotableTypeForABI.
+ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ if (Ty->isAnyComplexType())
+ llvm::report_fatal_error("complex type is not supported on AIX yet");
+
+ if (Ty->isVectorType())
+ llvm::report_fatal_error("vector type is not supported on AIX yet");
+
+ // TODO: Evaluate if AIX power alignment rule would have an impact on the
+ // alignment here.
+ if (isAggregateTypeForABI(Ty)) {
+ // Records with non-trivial destructors/copy-constructors should not be
+ // passed by value.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+
+ CharUnits CCAlign = getParamTypeAlignment(Ty);
+ CharUnits TyAlign = getContext().getTypeAlignInChars(Ty);
+
+ // Re-align the temporary only when the type demands more alignment
+ // than the calling convention provides.
+ return ABIArgInfo::getIndirect(CCAlign, /*ByVal*/ true,
+ /*Realign*/ TyAlign > CCAlign);
+ }
+
+ return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
+}
+
+// Alignment of a parameter slot in the AIX calling convention: 16 bytes
+// when the record contains a SIMD vector, otherwise the pointer size.
+CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const {
+ if (Ty->isAnyComplexType())
+ llvm::report_fatal_error("complex type is not supported on AIX yet");
+
+ if (Ty->isVectorType())
+ llvm::report_fatal_error("vector type is not supported on AIX yet");
+
+ // If the structure contains a vector type, the alignment is 16.
+ if (isRecordWithSIMDVectorType(getContext(), Ty))
+ return CharUnits::fromQuantity(16);
+
+ return CharUnits::fromQuantity(PtrByteSize);
+}
+
+// Emit a va_arg read of type Ty from the AIX va_list, using the generic
+// void*-based lowering with register-size argument slots.
+Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ if (Ty->isAnyComplexType())
+ llvm::report_fatal_error("complex type is not supported on AIX yet");
+
+ if (Ty->isVectorType())
+ llvm::report_fatal_error("vector type is not supported on AIX yet");
+
+ // Override the natural alignment with the calling convention's parameter
+ // alignment (TypeInfo is a (size, align) pair here).
+ auto TypeInfo = getContext().getTypeInfoInChars(Ty);
+ TypeInfo.second = getParamTypeAlignment(Ty);
+
+ CharUnits SlotSize = CharUnits::fromQuantity(PtrByteSize);
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
+ SlotSize, /*AllowHigher*/ true);
+}
+
+// Delegate to the shared PPC implementation with the AIX register subset.
+bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable(
+ CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
+ return PPC_initDwarfEHRegSizeTable(CGF, Address, Is64Bit, /*IsAIX*/ true);
+}
+
// PowerPC-32
namespace {
/// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
bool IsSoftFloatABI;
+ bool IsRetSmallStructInRegABI;
CharUnits getParamTypeAlignment(QualType Ty) const;
public:
- PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI)
- : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {}
+ PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI,
+ bool RetSmallStructInRegABI)
+ : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI),
+ IsRetSmallStructInRegABI(RetSmallStructInRegABI) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
+ }
Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const override;
@@ -4136,8 +4611,13 @@ public:
class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
- PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI)
- : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT, SoftFloatABI)) {}
+ PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI,
+ bool RetSmallStructInRegABI)
+ : TargetCodeGenInfo(std::make_unique<PPC32_SVR4_ABIInfo>(
+ CGT, SoftFloatABI, RetSmallStructInRegABI)) {}
+
+ static bool isStructReturnInRegABI(const llvm::Triple &Triple,
+ const CodeGenOptions &Opts);
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
// This is recovered from gcc output.
@@ -4150,7 +4630,7 @@ public:
}
CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
- // Complex types are passed just like their elements
+ // Complex types are passed just like their elements.
if (const ComplexType *CTy = Ty->getAs<ComplexType>())
Ty = CTy->getElementType();
@@ -4173,6 +4653,34 @@ CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
return CharUnits::fromQuantity(4);
}
+// Classify a PPC32 SVR4 return value. When -msvr4-struct-return is in
+// effect (IsRetSmallStructInRegABI), aggregates of <= 8 bytes are coerced
+// to an iN of the same size so LLVM returns them in r3 (and r4); everything
+// else falls back to the default classification.
+ABIArgInfo PPC32_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
+ uint64_t Size;
+
+ // -msvr4-struct-return puts small aggregates in GPR3 and GPR4.
+ if (isAggregateTypeForABI(RetTy) && IsRetSmallStructInRegABI &&
+ (Size = getContext().getTypeSize(RetTy)) <= 64) {
+ // System V ABI (1995), page 3-22, specified:
+ // > A structure or union whose size is less than or equal to 8 bytes
+ // > shall be returned in r3 and r4, as if it were first stored in the
+ // > 8-byte aligned memory area and then the low addressed word were
+ // > loaded into r3 and the high-addressed word into r4. Bits beyond
+ // > the last member of the structure or union are not defined.
+ //
+ // GCC for big-endian PPC32 inserts the pad before the first member,
+ // not "beyond the last member" of the struct. To stay compatible
+ // with GCC, we coerce the struct to an integer of the same size.
+ // LLVM will extend it and return i32 in r3, or i64 in r3:r4.
+ if (Size == 0)
+ return ABIArgInfo::getIgnore();
+ else {
+ llvm::Type *CoerceTy = llvm::Type::getIntNTy(getVMContext(), Size);
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+ }
+
+ return DefaultABIInfo::classifyReturnType(RetTy);
+}
+
// TODO: this implementation is now likely redundant with
// DefaultABIInfo::EmitVAArg.
Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
@@ -4328,47 +4836,32 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
return Result;
}
-bool
-PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const {
- // This is calculated from the LLVM and GCC tables and verified
- // against gcc output. AFAIK all ABIs use the same encoding.
-
- CodeGen::CGBuilderTy &Builder = CGF.Builder;
-
- llvm::IntegerType *i8 = CGF.Int8Ty;
- llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
- llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
- llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
-
- // 0-31: r0-31, the 4-byte general-purpose registers
- AssignToArrayRange(Builder, Address, Four8, 0, 31);
-
- // 32-63: fp0-31, the 8-byte floating-point registers
- AssignToArrayRange(Builder, Address, Eight8, 32, 63);
-
- // 64-76 are various 4-byte special-purpose registers:
- // 64: mq
- // 65: lr
- // 66: ctr
- // 67: ap
- // 68-75 cr0-7
- // 76: xer
- AssignToArrayRange(Builder, Address, Four8, 64, 76);
+bool PPC32TargetCodeGenInfo::isStructReturnInRegABI(
+ const llvm::Triple &Triple, const CodeGenOptions &Opts) {
+ assert(Triple.getArch() == llvm::Triple::ppc);
- // 77-108: v0-31, the 16-byte vector registers
- AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
+ switch (Opts.getStructReturnConvention()) {
+ case CodeGenOptions::SRCK_Default:
+ break;
+ case CodeGenOptions::SRCK_OnStack: // -maix-struct-return
+ return false;
+ case CodeGenOptions::SRCK_InRegs: // -msvr4-struct-return
+ return true;
+ }
- // 109: vrsave
- // 110: vscr
- // 111: spe_acc
- // 112: spefscr
- // 113: sfp
- AssignToArrayRange(Builder, Address, Four8, 109, 113);
+ if (Triple.isOSBinFormatELF() && !Triple.isOSLinux())
+ return true;
return false;
}
+bool
+PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ false,
+ /*IsAIX*/ false);
+}
+
// PowerPC-64
namespace {
@@ -4477,8 +4970,8 @@ public:
PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX,
bool SoftFloatABI)
- : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX,
- SoftFloatABI)) {}
+ : TargetCodeGenInfo(std::make_unique<PPC64_SVR4_ABIInfo>(
+ CGT, Kind, HasQPX, SoftFloatABI)) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
// This is recovered from gcc output.
@@ -4513,7 +5006,7 @@ PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
Ty = EnumTy->getDecl()->getIntegerType();
// Promotable integer types are required to be promoted by the ABI.
- if (Ty->isPromotableIntegerType())
+ if (isPromotableIntegerTypeForABI(Ty))
return true;
// In addition to the usual promotable integer types, we also need to
@@ -4527,6 +5020,10 @@ PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
break;
}
+ if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (EIT->getNumBits() < 64)
+ return true;
+
return false;
}
@@ -4744,6 +5241,10 @@ PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
}
}
+ if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (EIT->getNumBits() > 128)
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
+
if (isAggregateTypeForABI(Ty)) {
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
@@ -4816,6 +5317,10 @@ PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
}
}
+ if (const auto *EIT = RetTy->getAs<ExtIntType>())
+ if (EIT->getNumBits() > 128)
+ return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
+
if (isAggregateTypeForABI(RetTy)) {
// ELFv2 homogeneous aggregates are returned as array types.
const Type *Base = nullptr;
@@ -4901,66 +5406,19 @@ Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
TypeInfo, SlotSize, /*AllowHigher*/ true);
}
-static bool
-PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) {
- // This is calculated from the LLVM and GCC tables and verified
- // against gcc output. AFAIK all ABIs use the same encoding.
-
- CodeGen::CGBuilderTy &Builder = CGF.Builder;
-
- llvm::IntegerType *i8 = CGF.Int8Ty;
- llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
- llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
- llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
-
- // 0-31: r0-31, the 8-byte general-purpose registers
- AssignToArrayRange(Builder, Address, Eight8, 0, 31);
-
- // 32-63: fp0-31, the 8-byte floating-point registers
- AssignToArrayRange(Builder, Address, Eight8, 32, 63);
-
- // 64-67 are various 8-byte special-purpose registers:
- // 64: mq
- // 65: lr
- // 66: ctr
- // 67: ap
- AssignToArrayRange(Builder, Address, Eight8, 64, 67);
-
- // 68-76 are various 4-byte special-purpose registers:
- // 68-75 cr0-7
- // 76: xer
- AssignToArrayRange(Builder, Address, Four8, 68, 76);
-
- // 77-108: v0-31, the 16-byte vector registers
- AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
-
- // 109: vrsave
- // 110: vscr
- // 111: spe_acc
- // 112: spefscr
- // 113: sfp
- // 114: tfhar
- // 115: tfiar
- // 116: texasr
- AssignToArrayRange(Builder, Address, Eight8, 109, 116);
-
- return false;
-}
-
bool
PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
-
- return PPC64_initDwarfEHRegSizeTable(CGF, Address);
+ return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
+ /*IsAIX*/ false);
}
bool
PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
-
- return PPC64_initDwarfEHRegSizeTable(CGF, Address);
+ return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
+ /*IsAIX*/ false);
}
//===----------------------------------------------------------------------===//
@@ -5031,12 +5489,16 @@ private:
bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
unsigned elts) const override;
+
+ bool allowBFloatArgsAndRet() const override {
+ return getTarget().hasBFloat16Type();
+ }
};
class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
- : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {}
+ : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {}
StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
@@ -5054,9 +5516,11 @@ public:
if (!FD)
return;
- CodeGenOptions::SignReturnAddressScope Scope = CGM.getCodeGenOpts().getSignReturnAddress();
- CodeGenOptions::SignReturnAddressKeyValue Key = CGM.getCodeGenOpts().getSignReturnAddressKey();
- bool BranchTargetEnforcement = CGM.getCodeGenOpts().BranchTargetEnforcement;
+ LangOptions::SignReturnAddressScopeKind Scope =
+ CGM.getLangOpts().getSignReturnAddressScope();
+ LangOptions::SignReturnAddressKeyKind Key =
+ CGM.getLangOpts().getSignReturnAddressKey();
+ bool BranchTargetEnforcement = CGM.getLangOpts().BranchTargetEnforcement;
if (const auto *TA = FD->getAttr<TargetAttr>()) {
ParsedTargetAttr Attr = TA->parse();
if (!Attr.BranchProtection.empty()) {
@@ -5072,14 +5536,14 @@ public:
}
auto *Fn = cast<llvm::Function>(GV);
- if (Scope != CodeGenOptions::SignReturnAddressScope::None) {
+ if (Scope != LangOptions::SignReturnAddressScopeKind::None) {
Fn->addFnAttr("sign-return-address",
- Scope == CodeGenOptions::SignReturnAddressScope::All
+ Scope == LangOptions::SignReturnAddressScopeKind::All
? "all"
: "non-leaf");
Fn->addFnAttr("sign-return-address-key",
- Key == CodeGenOptions::SignReturnAddressKeyValue::AKey
+ Key == LangOptions::SignReturnAddressKeyKind::AKey
? "a_key"
: "b_key");
}
@@ -5133,13 +5597,13 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
return ABIArgInfo::getDirect(ResType);
}
if (Size == 64) {
- llvm::Type *ResType =
- llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
+ auto *ResType =
+ llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
return ABIArgInfo::getDirect(ResType);
}
if (Size == 128) {
- llvm::Type *ResType =
- llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
+ auto *ResType =
+ llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
return ABIArgInfo::getDirect(ResType);
}
return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
@@ -5150,7 +5614,11 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- return (Ty->isPromotableIntegerType() && isDarwinPCS()
+ if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (EIT->getNumBits() > 128)
+ return getNaturalAlignIndirect(Ty);
+
+ return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
? ABIArgInfo::getExtend(Ty)
: ABIArgInfo::getDirect());
}
@@ -5227,7 +5695,11 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- return (RetTy->isPromotableIntegerType() && isDarwinPCS()
+ if (const auto *EIT = RetTy->getAs<ExtIntType>())
+ if (EIT->getNumBits() > 128)
+ return getNaturalAlignIndirect(RetTy);
+
+ return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
? ABIArgInfo::getExtend(RetTy)
: ABIArgInfo::getDirect());
}
@@ -5626,11 +6098,14 @@ public:
private:
ABIKind Kind;
+ bool IsFloatABISoftFP;
public:
ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind)
: SwiftABIInfo(CGT), Kind(_Kind) {
setCCs();
+ IsFloatABISoftFP = CGT.getCodeGenOpts().FloatABI == "softfp" ||
+ CGT.getCodeGenOpts().FloatABI == ""; // default
}
bool isEABI() const {
@@ -5661,6 +6136,10 @@ public:
ABIKind getABIKind() const { return Kind; }
+ bool allowBFloatArgsAndRet() const override {
+ return !IsFloatABISoftFP && getTarget().hasBFloat16Type();
+ }
+
private:
ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic,
unsigned functionCallConv) const;
@@ -5701,7 +6180,7 @@ private:
class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
- :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
+ : TargetCodeGenInfo(std::make_unique<ARMABIInfo>(CGT, K)) {}
const ARMABIInfo &getABIInfo() const {
return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
@@ -5856,7 +6335,7 @@ ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const {
return ABIArgInfo::getDirect(ResType);
}
if (Size == 64 || Size == 128) {
- llvm::Type *ResType = llvm::VectorType::get(
+ auto *ResType = llvm::FixedVectorType::get(
llvm::Type::getInt32Ty(getVMContext()), Size / 32);
return ABIArgInfo::getDirect(ResType);
}
@@ -5872,7 +6351,7 @@ ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
// FP16 vectors should be converted to integer vectors
if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) {
uint64_t Size = getContext().getTypeSize(VT);
- llvm::Type *NewVecTy = llvm::VectorType::get(
+ auto *NewVecTy = llvm::FixedVectorType::get(
llvm::Type::getInt32Ty(getVMContext()), Size / 32);
llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
@@ -5900,25 +6379,18 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
if (isIllegalVectorType(Ty))
return coerceIllegalVector(Ty);
- // _Float16 and __fp16 get passed as if it were an int or float, but with
- // the top 16 bits unspecified. This is not done for OpenCL as it handles the
- // half type natively, and does not need to interwork with AAPCS code.
- if ((Ty->isFloat16Type() || Ty->isHalfType()) &&
- !getContext().getLangOpts().NativeHalfArgsAndReturns) {
- llvm::Type *ResType = IsAAPCS_VFP ?
- llvm::Type::getFloatTy(getVMContext()) :
- llvm::Type::getInt32Ty(getVMContext());
- return ABIArgInfo::getDirect(ResType);
- }
-
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
Ty = EnumTy->getDecl()->getIntegerType();
}
- return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
+ if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (EIT->getNumBits() > 64)
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
+
+ return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
}
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
@@ -6100,31 +6572,27 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
// Large vector types should be returned via memory.
if (getContext().getTypeSize(RetTy) > 128)
return getNaturalAlignIndirect(RetTy);
- // FP16 vectors should be converted to integer vectors
- if (!getTarget().hasLegalHalfType() &&
+ // TODO: FP16/BF16 vectors should be converted to integer vectors
+ // This check is similar to isIllegalVectorType - refactor?
+ if ((!getTarget().hasLegalHalfType() &&
(VT->getElementType()->isFloat16Type() ||
- VT->getElementType()->isHalfType()))
+ VT->getElementType()->isHalfType())) ||
+ (IsFloatABISoftFP &&
+ VT->getElementType()->isBFloat16Type()))
return coerceIllegalVector(RetTy);
}
- // _Float16 and __fp16 get returned as if it were an int or float, but with
- // the top 16 bits unspecified. This is not done for OpenCL as it handles the
- // half type natively, and does not need to interwork with AAPCS code.
- if ((RetTy->isFloat16Type() || RetTy->isHalfType()) &&
- !getContext().getLangOpts().NativeHalfArgsAndReturns) {
- llvm::Type *ResType = IsAAPCS_VFP ?
- llvm::Type::getFloatTy(getVMContext()) :
- llvm::Type::getInt32Ty(getVMContext());
- return ABIArgInfo::getDirect(ResType);
- }
-
if (!isAggregateTypeForABI(RetTy)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect();
+ if (const auto *EIT = RetTy->getAs<ExtIntType>())
+ if (EIT->getNumBits() > 64)
+ return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
+
+ return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect();
}
// Are we following APCS?
@@ -6200,12 +6668,17 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
/// isIllegalVector - check whether Ty is an illegal vector type.
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
if (const VectorType *VT = Ty->getAs<VectorType> ()) {
- // On targets that don't support FP16, FP16 is expanded into float, and we
- // don't want the ABI to depend on whether or not FP16 is supported in
- // hardware. Thus return false to coerce FP16 vectors into integer vectors.
- if (!getTarget().hasLegalHalfType() &&
+ // On targets that don't support half, fp16 or bfloat, they are expanded
+ // into float, and we don't want the ABI to depend on whether or not they
+ // are supported in hardware. Thus return false to coerce vectors of these
+ // types into integer vectors.
+ // We do not depend on hasLegalHalfType for bfloat as it is a
+ // separate IR type.
+ if ((!getTarget().hasLegalHalfType() &&
(VT->getElementType()->isFloat16Type() ||
- VT->getElementType()->isHalfType()))
+ VT->getElementType()->isHalfType())) ||
+ (IsFloatABISoftFP &&
+ VT->getElementType()->isBFloat16Type()))
return true;
if (isAndroid()) {
// Android shipped using Clang 3.1, which supported a slightly different
@@ -6257,6 +6730,7 @@ bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
} else {
if (const VectorType *VT = Ty->getAs<VectorType>())
return (VT->getElementType()->isFloat16Type() ||
+ VT->getElementType()->isBFloat16Type() ||
VT->getElementType()->isHalfType());
return false;
}
@@ -6362,9 +6836,14 @@ Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
namespace {
+class NVPTXTargetCodeGenInfo;
+
class NVPTXABIInfo : public ABIInfo {
+ NVPTXTargetCodeGenInfo &CGInfo;
+
public:
- NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+ NVPTXABIInfo(CodeGenTypes &CGT, NVPTXTargetCodeGenInfo &Info)
+ : ABIInfo(CGT), CGInfo(Info) {}
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType Ty) const;
@@ -6372,36 +6851,87 @@ public:
void computeInfo(CGFunctionInfo &FI) const override;
Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const override;
+ bool isUnsupportedType(QualType T) const;
+ ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, unsigned MaxSize) const;
};
class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
+ : TargetCodeGenInfo(std::make_unique<NVPTXABIInfo>(CGT, *this)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const override;
bool shouldEmitStaticExternCAliases() const override;
+ llvm::Type *getCUDADeviceBuiltinSurfaceDeviceType() const override {
+ // On the device side, surface reference is represented as an object handle
+ // in 64-bit integer.
+ return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
+ }
+
+ llvm::Type *getCUDADeviceBuiltinTextureDeviceType() const override {
+ // On the device side, texture reference is represented as an object handle
+ // in 64-bit integer.
+ return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
+ }
+
+ bool emitCUDADeviceBuiltinSurfaceDeviceCopy(CodeGenFunction &CGF, LValue Dst,
+ LValue Src) const override {
+ emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
+ return true;
+ }
+
+ bool emitCUDADeviceBuiltinTextureDeviceCopy(CodeGenFunction &CGF, LValue Dst,
+ LValue Src) const override {
+ emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
+ return true;
+ }
+
private:
- // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
+ // Adds a NamedMDNode with GV, Name, and Operand as operands, and adds the
// resulting MDNode to the nvvm.annotations MDNode.
- static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
+ static void addNVVMMetadata(llvm::GlobalValue *GV, StringRef Name,
+ int Operand);
+
+ static void emitBuiltinSurfTexDeviceCopy(CodeGenFunction &CGF, LValue Dst,
+ LValue Src) {
+ llvm::Value *Handle = nullptr;
+ llvm::Constant *C =
+ llvm::dyn_cast<llvm::Constant>(Src.getAddress(CGF).getPointer());
+ // Lookup `addrspacecast` through the constant pointer if any.
+ if (auto *ASC = llvm::dyn_cast_or_null<llvm::AddrSpaceCastOperator>(C))
+ C = llvm::cast<llvm::Constant>(ASC->getPointerOperand());
+ if (auto *GV = llvm::dyn_cast_or_null<llvm::GlobalVariable>(C)) {
+ // Load the handle from the specific global variable using
+ // `nvvm.texsurf.handle.internal` intrinsic.
+ Handle = CGF.EmitRuntimeCall(
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::nvvm_texsurf_handle_internal,
+ {GV->getType()}),
+ {GV}, "texsurf_handle");
+ } else
+ Handle = CGF.EmitLoadOfScalar(Src, SourceLocation());
+ CGF.EmitStoreOfScalar(Handle, Dst);
+ }
};
/// Checks if the type is unsupported directly by the current target.
-static bool isUnsupportedType(ASTContext &Context, QualType T) {
+bool NVPTXABIInfo::isUnsupportedType(QualType T) const {
+ ASTContext &Context = getContext();
if (!Context.getTargetInfo().hasFloat16Type() && T->isFloat16Type())
return true;
if (!Context.getTargetInfo().hasFloat128Type() &&
(T->isFloat128Type() ||
(T->isRealFloatingType() && Context.getTypeSize(T) == 128)))
return true;
+ if (const auto *EIT = T->getAs<ExtIntType>())
+ return EIT->getNumBits() >
+ (Context.getTargetInfo().hasInt128Type() ? 128U : 64U);
if (!Context.getTargetInfo().hasInt128Type() && T->isIntegerType() &&
- Context.getTypeSize(T) > 64)
+ Context.getTypeSize(T) > 64U)
return true;
if (const auto *AT = T->getAsArrayTypeUnsafe())
- return isUnsupportedType(Context, AT->getElementType());
+ return isUnsupportedType(AT->getElementType());
const auto *RT = T->getAs<RecordType>();
if (!RT)
return false;
@@ -6410,24 +6940,23 @@ static bool isUnsupportedType(ASTContext &Context, QualType T) {
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
for (const CXXBaseSpecifier &I : CXXRD->bases())
- if (isUnsupportedType(Context, I.getType()))
+ if (isUnsupportedType(I.getType()))
return true;
for (const FieldDecl *I : RD->fields())
- if (isUnsupportedType(Context, I->getType()))
+ if (isUnsupportedType(I->getType()))
return true;
return false;
}
/// Coerce the given type into an array with maximum allowed size of elements.
-static ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, ASTContext &Context,
- llvm::LLVMContext &LLVMContext,
- unsigned MaxSize) {
+ABIArgInfo NVPTXABIInfo::coerceToIntArrayWithLimit(QualType Ty,
+ unsigned MaxSize) const {
// Alignment and Size are measured in bits.
- const uint64_t Size = Context.getTypeSize(Ty);
- const uint64_t Alignment = Context.getTypeAlign(Ty);
+ const uint64_t Size = getContext().getTypeSize(Ty);
+ const uint64_t Alignment = getContext().getTypeAlign(Ty);
const unsigned Div = std::min<unsigned>(MaxSize, Alignment);
- llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Div);
+ llvm::Type *IntType = llvm::Type::getIntNTy(getVMContext(), Div);
const uint64_t NumElements = (Size + Div - 1) / Div;
return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
}
@@ -6437,9 +6966,8 @@ ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getIgnore();
if (getContext().getLangOpts().OpenMP &&
- getContext().getLangOpts().OpenMPIsDevice &&
- isUnsupportedType(getContext(), RetTy))
- return coerceToIntArrayWithLimit(RetTy, getContext(), getVMContext(), 64);
+ getContext().getLangOpts().OpenMPIsDevice && isUnsupportedType(RetTy))
+ return coerceToIntArrayWithLimit(RetTy, 64);
// note: this is different from default ABI
if (!RetTy->isScalarType())
@@ -6449,8 +6977,8 @@ ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
+ return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
}
ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
@@ -6459,11 +6987,29 @@ ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
Ty = EnumTy->getDecl()->getIntegerType();
// Return aggregates type as indirect by value
- if (isAggregateTypeForABI(Ty))
+ if (isAggregateTypeForABI(Ty)) {
+ // Under CUDA device compilation, tex/surf builtin types are replaced with
+ // object types and passed directly.
+ if (getContext().getLangOpts().CUDAIsDevice) {
+ if (Ty->isCUDADeviceBuiltinSurfaceType())
+ return ABIArgInfo::getDirect(
+ CGInfo.getCUDADeviceBuiltinSurfaceDeviceType());
+ if (Ty->isCUDADeviceBuiltinTextureType())
+ return ABIArgInfo::getDirect(
+ CGInfo.getCUDADeviceBuiltinTextureDeviceType());
+ }
return getNaturalAlignIndirect(Ty, /* byval */ true);
+ }
- return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
+ if (const auto *EIT = Ty->getAs<ExtIntType>()) {
+ if ((EIT->getNumBits() > 128) ||
+ (!getContext().getTargetInfo().hasInt128Type() &&
+ EIT->getNumBits() > 64))
+ return getNaturalAlignIndirect(Ty, /* byval */ true);
+ }
+
+ return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
}
void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
@@ -6488,6 +7034,17 @@ void NVPTXTargetCodeGenInfo::setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
if (GV->isDeclaration())
return;
+ const VarDecl *VD = dyn_cast_or_null<VarDecl>(D);
+ if (VD) {
+ if (M.getLangOpts().CUDA) {
+ if (VD->getType()->isCUDADeviceBuiltinSurfaceType())
+ addNVVMMetadata(GV, "surface", 1);
+ else if (VD->getType()->isCUDADeviceBuiltinTextureType())
+ addNVVMMetadata(GV, "texture", 1);
+ return;
+ }
+ }
+
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
if (!FD) return;
@@ -6536,16 +7093,16 @@ void NVPTXTargetCodeGenInfo::setTargetAttributes(
}
}
-void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
- int Operand) {
- llvm::Module *M = F->getParent();
+void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::GlobalValue *GV,
+ StringRef Name, int Operand) {
+ llvm::Module *M = GV->getParent();
llvm::LLVMContext &Ctx = M->getContext();
// Get "nvvm.annotations" metadata node
llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
llvm::Metadata *MDVals[] = {
- llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
+ llvm::ConstantAsMetadata::get(GV), llvm::MDString::get(Ctx, Name),
llvm::ConstantAsMetadata::get(
llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
// Append metadata to nvvm.annotations
@@ -6565,12 +7122,13 @@ namespace {
class SystemZABIInfo : public SwiftABIInfo {
bool HasVector;
+ bool IsSoftFloatABI;
public:
- SystemZABIInfo(CodeGenTypes &CGT, bool HV)
- : SwiftABIInfo(CGT), HasVector(HV) {}
+ SystemZABIInfo(CodeGenTypes &CGT, bool HV, bool SF)
+ : SwiftABIInfo(CGT), HasVector(HV), IsSoftFloatABI(SF) {}
- bool isPromotableIntegerType(QualType Ty) const;
+ bool isPromotableIntegerTypeForABI(QualType Ty) const;
bool isCompoundType(QualType Ty) const;
bool isVectorArgumentType(QualType Ty) const;
bool isFPArgumentType(QualType Ty) const;
@@ -6600,21 +7158,26 @@ public:
class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
public:
- SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector)
- : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {}
+ SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector, bool SoftFloatABI)
+ : TargetCodeGenInfo(
+ std::make_unique<SystemZABIInfo>(CGT, HasVector, SoftFloatABI)) {}
};
}
-bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
+bool SystemZABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
// Promotable integer types are required to be promoted by the ABI.
- if (Ty->isPromotableIntegerType())
+ if (ABIInfo::isPromotableIntegerTypeForABI(Ty))
return true;
+ if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (EIT->getNumBits() < 64)
+ return true;
+
// 32-bit values must also be promoted.
if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
switch (BT->getKind()) {
@@ -6640,6 +7203,9 @@ bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
}
bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
+ if (IsSoftFloatABI)
+ return false;
+
if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
switch (BT->getKind()) {
case BuiltinType::Float:
@@ -6653,7 +7219,9 @@ bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
}
QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
- if (const RecordType *RT = Ty->getAsStructureType()) {
+ const RecordType *RT = Ty->getAs<RecordType>();
+
+ if (RT && RT->isStructureOrClassType()) {
const RecordDecl *RD = RT->getDecl();
QualType Found;
@@ -6679,6 +7247,10 @@ QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
if (getContext().getLangOpts().CPlusPlus &&
FD->isZeroLengthBitField(getContext()))
continue;
+ // Like isSingleElementStruct(), ignore C++20 empty data members.
+ if (FD->hasAttr<NoUniqueAddressAttr>() &&
+ isEmptyRecord(getContext(), FD->getType(), true))
+ continue;
// Unlike isSingleElementStruct(), arrays do not count.
// Nested structures still do though.
@@ -6725,7 +7297,7 @@ Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
} else {
if (AI.getCoerceToType())
ArgTy = AI.getCoerceToType();
- InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
+ InFPRs = (!IsSoftFloatABI && (ArgTy->isFloatTy() || ArgTy->isDoubleTy()));
IsVector = ArgTy->isVectorTy();
UnpaddedSize = TyInfo.first;
DirectAlign = TyInfo.second;
@@ -6858,8 +7430,8 @@ ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getDirect();
if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
return getNaturalAlignIndirect(RetTy);
- return (isPromotableIntegerType(RetTy) ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
+ return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
}
ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
@@ -6868,7 +7440,7 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
// Integers and enums are extended to full register width.
- if (isPromotableIntegerType(Ty))
+ if (isPromotableIntegerTypeForABI(Ty))
return ABIArgInfo::getExtend(Ty);
// Handle vector types and vector-like structure types. Note that
@@ -6918,10 +7490,49 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
namespace {
+class MSP430ABIInfo : public DefaultABIInfo {
+ static ABIArgInfo complexArgInfo() {
+ ABIArgInfo Info = ABIArgInfo::getDirect();
+ Info.setCanBeFlattened(false);
+ return Info;
+ }
+
+public:
+ MSP430ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const {
+ if (RetTy->isAnyComplexType())
+ return complexArgInfo();
+
+ return DefaultABIInfo::classifyReturnType(RetTy);
+ }
+
+ ABIArgInfo classifyArgumentType(QualType RetTy) const {
+ if (RetTy->isAnyComplexType())
+ return complexArgInfo();
+
+ return DefaultABIInfo::classifyArgumentType(RetTy);
+ }
+
+ // Just copy the original implementations because
+ // DefaultABIInfo::classify{Return,Argument}Type() are not virtual
+ void computeInfo(CGFunctionInfo &FI) const override {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
+ }
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override {
+ return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
+ }
+};
+
class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
public:
MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
+ : TargetCodeGenInfo(std::make_unique<MSP430ABIInfo>(CGT)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const override;
};
@@ -6980,8 +7591,8 @@ class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
unsigned SizeOfUnwindException;
public:
MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
- : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
- SizeOfUnwindException(IsO32 ? 24 : 32) {}
+ : TargetCodeGenInfo(std::make_unique<MipsABIInfo>(CGT, IsO32)),
+ SizeOfUnwindException(IsO32 ? 24 : 32) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
return 29;
@@ -7163,6 +7774,13 @@ MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
+ // Make sure we pass indirectly things that are too large.
+ if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (EIT->getNumBits() > 128 ||
+ (EIT->getNumBits() > 64 &&
+ !getContext().getTargetInfo().hasInt128Type()))
+ return getNaturalAlignIndirect(Ty);
+
// All integral types are promoted to the GPR width.
if (Ty->isIntegralOrEnumerationType())
return extendType(Ty);
@@ -7247,7 +7865,14 @@ ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- if (RetTy->isPromotableIntegerType())
+ // Make sure we pass indirectly things that are too large.
+ if (const auto *EIT = RetTy->getAs<ExtIntType>())
+ if (EIT->getNumBits() > 128 ||
+ (EIT->getNumBits() > 64 &&
+ !getContext().getTargetInfo().hasInt128Type()))
+ return getNaturalAlignIndirect(RetTy);
+
+ if (isPromotableIntegerTypeForABI(RetTy))
return ABIArgInfo::getExtend(RetTy);
if ((RetTy->isUnsignedIntegerOrEnumerationType() ||
@@ -7366,7 +7991,7 @@ namespace {
class AVRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
AVRTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new DefaultABIInfo(CGT)) { }
+ : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const override {
@@ -7455,50 +8080,97 @@ void TCETargetCodeGenInfo::setTargetAttributes(
namespace {
-class HexagonABIInfo : public ABIInfo {
-
-
+class HexagonABIInfo : public DefaultABIInfo {
public:
- HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+ HexagonABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
private:
-
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, unsigned *RegsLeft) const;
void computeInfo(CGFunctionInfo &FI) const override;
Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const override;
+ Address EmitVAArgFromMemory(CodeGenFunction &CFG, Address VAListAddr,
+ QualType Ty) const;
+ Address EmitVAArgForHexagon(CodeGenFunction &CFG, Address VAListAddr,
+ QualType Ty) const;
+ Address EmitVAArgForHexagonLinux(CodeGenFunction &CFG, Address VAListAddr,
+ QualType Ty) const;
};
class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
public:
HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
- :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
+ : TargetCodeGenInfo(std::make_unique<HexagonABIInfo>(CGT)) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
return 29;
}
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &GCM) const override {
+ if (GV->isDeclaration())
+ return;
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD)
+ return;
+ }
};
-}
+} // namespace
void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ unsigned RegsLeft = 6;
if (!getCXXABI().classifyReturnType(FI))
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type);
+ I.info = classifyArgumentType(I.type, &RegsLeft);
}
-ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
+static bool HexagonAdjustRegsLeft(uint64_t Size, unsigned *RegsLeft) {
+ assert(Size <= 64 && "Not expecting to pass arguments larger than 64 bits"
+ " through registers");
+
+ if (*RegsLeft == 0)
+ return false;
+
+ if (Size <= 32) {
+ (*RegsLeft)--;
+ return true;
+ }
+
+ if (2 <= (*RegsLeft & (~1U))) {
+ *RegsLeft = (*RegsLeft & (~1U)) - 2;
+ return true;
+ }
+
+ // Next available register was r5 but candidate was greater than 32-bits so it
+ // has to go on the stack. However we still consume r5
+ if (*RegsLeft == 1)
+ *RegsLeft = 0;
+
+ return false;
+}
+
+ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty,
+ unsigned *RegsLeft) const {
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size <= 64)
+ HexagonAdjustRegsLeft(Size, RegsLeft);
+
+ if (Size > 64 && Ty->isExtIntType())
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
+
+ return isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect();
}
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
@@ -7509,63 +8181,304 @@ ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
return ABIArgInfo::getIgnore();
uint64_t Size = getContext().getTypeSize(Ty);
+ unsigned Align = getContext().getTypeAlign(Ty);
+
if (Size > 64)
return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
+
+ if (HexagonAdjustRegsLeft(Size, RegsLeft))
+ Align = Size <= 32 ? 32 : 64;
+ if (Size <= Align) {
// Pass in the smallest viable integer type.
- else if (Size > 32)
- return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
- else if (Size > 16)
- return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
- else if (Size > 8)
- return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
- else
- return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+ if (!llvm::isPowerOf2_64(Size))
+ Size = llvm::NextPowerOf2(Size);
+ return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
+ }
+ return DefaultABIInfo::classifyArgumentType(Ty);
}
ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
- // Large vector types should be returned via memory.
- if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
- return getNaturalAlignIndirect(RetTy);
+ const TargetInfo &T = CGT.getTarget();
+ uint64_t Size = getContext().getTypeSize(RetTy);
+
+ if (RetTy->getAs<VectorType>()) {
+ // HVX vectors are returned in vector registers or register pairs.
+ if (T.hasFeature("hvx")) {
+ assert(T.hasFeature("hvx-length64b") || T.hasFeature("hvx-length128b"));
+ uint64_t VecSize = T.hasFeature("hvx-length64b") ? 64*8 : 128*8;
+ if (Size == VecSize || Size == 2*VecSize)
+ return ABIArgInfo::getDirectInReg();
+ }
+ // Large vector types should be returned via memory.
+ if (Size > 64)
+ return getNaturalAlignIndirect(RetTy);
+ }
if (!isAggregateTypeForABI(RetTy)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
+ if (Size > 64 && RetTy->isExtIntType())
+ return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
+
+ return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect();
}
if (isEmptyRecord(getContext(), RetTy, true))
return ABIArgInfo::getIgnore();
- // Aggregates <= 8 bytes are returned in r0; other aggregates
+ // Aggregates <= 8 bytes are returned in registers, other aggregates
// are returned indirectly.
- uint64_t Size = getContext().getTypeSize(RetTy);
if (Size <= 64) {
// Return in the smallest viable integer type.
- if (Size <= 8)
- return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
- if (Size <= 16)
- return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
- if (Size <= 32)
- return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
- return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
+ if (!llvm::isPowerOf2_64(Size))
+ Size = llvm::NextPowerOf2(Size);
+ return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
}
-
return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
}
+Address HexagonABIInfo::EmitVAArgFromMemory(CodeGenFunction &CGF,
+ Address VAListAddr,
+ QualType Ty) const {
+ // Load the overflow area pointer.
+ Address __overflow_area_pointer_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
+ llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
+ __overflow_area_pointer_p, "__overflow_area_pointer");
+
+ uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
+ if (Align > 4) {
+ // Alignment should be a power of 2.
+ assert((Align & (Align - 1)) == 0 && "Alignment is not power of 2!");
+
+ // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
+ llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
+
+ // Add offset to the current pointer to access the argument.
+ __overflow_area_pointer =
+ CGF.Builder.CreateGEP(__overflow_area_pointer, Offset);
+ llvm::Value *AsInt =
+ CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);
+
+ // Create a mask which should be "AND"ed
+ // with (overflow_arg_area + align - 1)
+ llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -(int)Align);
+ __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
+ CGF.Builder.CreateAnd(AsInt, Mask), __overflow_area_pointer->getType(),
+ "__overflow_area_pointer.align");
+ }
+
+ // Get the type of the argument from memory and bitcast
+ // overflow area pointer to the argument type.
+ llvm::Type *PTy = CGF.ConvertTypeForMem(Ty);
+ Address AddrTyped = CGF.Builder.CreateBitCast(
+ Address(__overflow_area_pointer, CharUnits::fromQuantity(Align)),
+ llvm::PointerType::getUnqual(PTy));
+
+ // Round up to the minimum stack alignment for varargs which is 4 bytes.
+ uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
+
+ __overflow_area_pointer = CGF.Builder.CreateGEP(
+ __overflow_area_pointer, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
+ "__overflow_area_pointer.next");
+ CGF.Builder.CreateStore(__overflow_area_pointer, __overflow_area_pointer_p);
+
+ return AddrTyped;
+}
+
+Address HexagonABIInfo::EmitVAArgForHexagon(CodeGenFunction &CGF,
+ Address VAListAddr,
+ QualType Ty) const {
+ // FIXME: Need to handle alignment
+ llvm::Type *BP = CGF.Int8PtrTy;
+ llvm::Type *BPP = CGF.Int8PtrPtrTy;
+ CGBuilderTy &Builder = CGF.Builder;
+ Address VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ // Handle address alignment for type alignment > 32 bits
+ uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
+ if (TyAlign > 4) {
+ assert((TyAlign & (TyAlign - 1)) == 0 && "Alignment is not power of 2!");
+ llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
+ AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
+ AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
+ Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
+ }
+ llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ Address AddrTyped = Builder.CreateBitCast(
+ Address(Addr, CharUnits::fromQuantity(TyAlign)), PTy);
+
+ uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::Value *NextAddr = Builder.CreateGEP(
+ Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
+
+Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF,
+ Address VAListAddr,
+ QualType Ty) const {
+ int ArgSize = CGF.getContext().getTypeSize(Ty) / 8;
+
+ if (ArgSize > 8)
+ return EmitVAArgFromMemory(CGF, VAListAddr, Ty);
+
+ // Here we have check if the argument is in register area or
+ // in overflow area.
+ // If the saved register area pointer + argsize rounded up to alignment >
+ // saved register area end pointer, argument is in overflow area.
+ unsigned RegsLeft = 6;
+ Ty = CGF.getContext().getCanonicalType(Ty);
+ (void)classifyArgumentType(Ty, &RegsLeft);
+
+ llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
+ llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
+ llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
+
+ // Get rounded size of the argument.GCC does not allow vararg of
+ // size < 4 bytes. We follow the same logic here.
+ ArgSize = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
+ int ArgAlign = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
+
+ // Argument may be in saved register area
+ CGF.EmitBlock(MaybeRegBlock);
+
+ // Load the current saved register area pointer.
+ Address __current_saved_reg_area_pointer_p = CGF.Builder.CreateStructGEP(
+ VAListAddr, 0, "__current_saved_reg_area_pointer_p");
+ llvm::Value *__current_saved_reg_area_pointer = CGF.Builder.CreateLoad(
+ __current_saved_reg_area_pointer_p, "__current_saved_reg_area_pointer");
+
+ // Load the saved register area end pointer.
+ Address __saved_reg_area_end_pointer_p = CGF.Builder.CreateStructGEP(
+ VAListAddr, 1, "__saved_reg_area_end_pointer_p");
+ llvm::Value *__saved_reg_area_end_pointer = CGF.Builder.CreateLoad(
+ __saved_reg_area_end_pointer_p, "__saved_reg_area_end_pointer");
+
+ // If the size of argument is > 4 bytes, check if the stack
+ // location is aligned to 8 bytes
+ if (ArgAlign > 4) {
+
+ llvm::Value *__current_saved_reg_area_pointer_int =
+ CGF.Builder.CreatePtrToInt(__current_saved_reg_area_pointer,
+ CGF.Int32Ty);
+
+ __current_saved_reg_area_pointer_int = CGF.Builder.CreateAdd(
+ __current_saved_reg_area_pointer_int,
+ llvm::ConstantInt::get(CGF.Int32Ty, (ArgAlign - 1)),
+ "align_current_saved_reg_area_pointer");
+
+ __current_saved_reg_area_pointer_int =
+ CGF.Builder.CreateAnd(__current_saved_reg_area_pointer_int,
+ llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
+ "align_current_saved_reg_area_pointer");
+
+ __current_saved_reg_area_pointer =
+ CGF.Builder.CreateIntToPtr(__current_saved_reg_area_pointer_int,
+ __current_saved_reg_area_pointer->getType(),
+ "align_current_saved_reg_area_pointer");
+ }
+
+ llvm::Value *__new_saved_reg_area_pointer =
+ CGF.Builder.CreateGEP(__current_saved_reg_area_pointer,
+ llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
+ "__new_saved_reg_area_pointer");
+
+ llvm::Value *UsingStack = 0;
+ UsingStack = CGF.Builder.CreateICmpSGT(__new_saved_reg_area_pointer,
+ __saved_reg_area_end_pointer);
+
+ CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, InRegBlock);
+
+ // Argument in saved register area
+ // Implement the block where argument is in register saved area
+ CGF.EmitBlock(InRegBlock);
+
+ llvm::Type *PTy = CGF.ConvertType(Ty);
+ llvm::Value *__saved_reg_area_p = CGF.Builder.CreateBitCast(
+ __current_saved_reg_area_pointer, llvm::PointerType::getUnqual(PTy));
+
+ CGF.Builder.CreateStore(__new_saved_reg_area_pointer,
+ __current_saved_reg_area_pointer_p);
+
+ CGF.EmitBranch(ContBlock);
+
+ // Argument in overflow area
+ // Implement the block where the argument is in overflow area.
+ CGF.EmitBlock(OnStackBlock);
+
+ // Load the overflow area pointer
+ Address __overflow_area_pointer_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
+ llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
+ __overflow_area_pointer_p, "__overflow_area_pointer");
+
+ // Align the overflow area pointer according to the alignment of the argument
+ if (ArgAlign > 4) {
+ llvm::Value *__overflow_area_pointer_int =
+ CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);
+
+ __overflow_area_pointer_int =
+ CGF.Builder.CreateAdd(__overflow_area_pointer_int,
+ llvm::ConstantInt::get(CGF.Int32Ty, ArgAlign - 1),
+ "align_overflow_area_pointer");
+
+ __overflow_area_pointer_int =
+ CGF.Builder.CreateAnd(__overflow_area_pointer_int,
+ llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
+ "align_overflow_area_pointer");
+
+ __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
+ __overflow_area_pointer_int, __overflow_area_pointer->getType(),
+ "align_overflow_area_pointer");
+ }
+
+ // Get the pointer for next argument in overflow area and store it
+ // to overflow area pointer.
+ llvm::Value *__new_overflow_area_pointer = CGF.Builder.CreateGEP(
+ __overflow_area_pointer, llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
+ "__overflow_area_pointer.next");
+
+ CGF.Builder.CreateStore(__new_overflow_area_pointer,
+ __overflow_area_pointer_p);
+
+ CGF.Builder.CreateStore(__new_overflow_area_pointer,
+ __current_saved_reg_area_pointer_p);
+
+ // Bitcast the overflow area pointer to the type of argument.
+ llvm::Type *OverflowPTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *__overflow_area_p = CGF.Builder.CreateBitCast(
+ __overflow_area_pointer, llvm::PointerType::getUnqual(OverflowPTy));
+
+ CGF.EmitBranch(ContBlock);
+
+ // Get the correct pointer to load the variable argument
+ // Implement the ContBlock
+ CGF.EmitBlock(ContBlock);
+
+ llvm::Type *MemPTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
+ llvm::PHINode *ArgAddr = CGF.Builder.CreatePHI(MemPTy, 2, "vaarg.addr");
+ ArgAddr->addIncoming(__saved_reg_area_p, InRegBlock);
+ ArgAddr->addIncoming(__overflow_area_p, OnStackBlock);
+
+ return Address(ArgAddr, CharUnits::fromQuantity(ArgAlign));
+}
+
Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const {
- // FIXME: Someone needs to audit that this handle alignment correctly.
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
- getContext().getTypeInfoInChars(Ty),
- CharUnits::fromQuantity(4),
- /*AllowHigherAlign*/ true);
+
+ if (getTarget().getTriple().isMusl())
+ return EmitVAArgForHexagonLinux(CGF, VAListAddr, Ty);
+
+ return EmitVAArgForHexagon(CGF, VAListAddr, Ty);
}
//===----------------------------------------------------------------------===//
@@ -7676,7 +8589,13 @@ ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
Ty = EnumTy->getDecl()->getIntegerType();
bool InReg = shouldUseInReg(Ty, State);
- if (Ty->isPromotableIntegerType()) {
+
+ // Don't pass >64 bit integers in registers.
+ if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (EIT->getNumBits() > 64)
+ return getIndirectResult(Ty, /*ByVal=*/true, State);
+
+ if (isPromotableIntegerTypeForABI(Ty)) {
if (InReg)
return ABIArgInfo::getDirectInReg();
return ABIArgInfo::getExtend(Ty);
@@ -7690,7 +8609,7 @@ namespace {
class LanaiTargetCodeGenInfo : public TargetCodeGenInfo {
public:
LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
- : TargetCodeGenInfo(new LanaiABIInfo(CGT)) {}
+ : TargetCodeGenInfo(std::make_unique<LanaiABIInfo>(CGT)) {}
};
}
@@ -7730,7 +8649,7 @@ private:
EltTys, (STy->getName() + ".coerce").str(), STy->isPacked());
return llvm::StructType::get(getVMContext(), EltTys, STy->isPacked());
}
- // Arrary types.
+ // Array types.
if (auto ATy = dyn_cast<llvm::ArrayType>(Ty)) {
auto T = ATy->getElementType();
auto NT = coerceKernelArgumentType(T, FromAS, ToAS);
@@ -7958,7 +8877,7 @@ ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty,
class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new AMDGPUABIInfo(CGT)) {}
+ : TargetCodeGenInfo(std::make_unique<AMDGPUABIInfo>(CGT)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const override;
unsigned getOpenCLKernelCallingConv() const override;
@@ -7994,23 +8913,13 @@ static bool requiresAMDGPUProtectedVisibility(const Decl *D,
(isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) ||
(isa<VarDecl>(D) &&
(D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
- D->hasAttr<HIPPinnedShadowAttr>()));
-}
-
-static bool requiresAMDGPUDefaultVisibility(const Decl *D,
- llvm::GlobalValue *GV) {
- if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
- return false;
-
- return isa<VarDecl>(D) && D->hasAttr<HIPPinnedShadowAttr>();
+ cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinSurfaceType() ||
+ cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinTextureType()));
}
void AMDGPUTargetCodeGenInfo::setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
- if (requiresAMDGPUDefaultVisibility(D, GV)) {
- GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
- GV->setDSOLocal(false);
- } else if (requiresAMDGPUProtectedVisibility(D, GV)) {
+ if (requiresAMDGPUProtectedVisibility(D, GV)) {
GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
GV->setDSOLocal(true);
}
@@ -8035,6 +8944,10 @@ void AMDGPUTargetCodeGenInfo::setTargetAttributes(
(M.getTriple().getOS() == llvm::Triple::AMDHSA))
F->addFnAttr("amdgpu-implicitarg-num-bytes", "56");
+ if (IsHIPKernel)
+ F->addFnAttr("uniform-work-group-size", "true");
+
+
const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
if (ReqdWGS || FlatWGS) {
unsigned Min = 0;
@@ -8059,9 +8972,13 @@ void AMDGPUTargetCodeGenInfo::setTargetAttributes(
assert(Max == 0 && "Max must be zero");
} else if (IsOpenCLKernel || IsHIPKernel) {
// By default, restrict the maximum size to a value specified by
- // --gpu-max-threads-per-block=n or its default value.
+ // --gpu-max-threads-per-block=n or its default value for HIP.
+ const unsigned OpenCLDefaultMaxWorkGroupSize = 256;
+ const unsigned DefaultMaxWorkGroupSize =
+ IsOpenCLKernel ? OpenCLDefaultMaxWorkGroupSize
+ : M.getLangOpts().GPUMaxThreadsPerBlock;
std::string AttrVal =
- std::string("1,") + llvm::utostr(M.getLangOpts().GPUMaxThreadsPerBlock);
+ std::string("1,") + llvm::utostr(DefaultMaxWorkGroupSize);
F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
}
@@ -8223,7 +9140,7 @@ namespace {
class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
public:
SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new SparcV8ABIInfo(CGT)) {}
+ : TargetCodeGenInfo(std::make_unique<SparcV8ABIInfo>(CGT)) {}
};
} // end anonymous namespace
@@ -8392,6 +9309,10 @@ SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
if (Size < 64 && Ty->isIntegerType())
return ABIArgInfo::getExtend(Ty);
+ if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (EIT->getNumBits() < 64)
+ return ABIArgInfo::getExtend(Ty);
+
// Other non-aggregates go in registers.
if (!isAggregateTypeForABI(Ty))
return ABIArgInfo::getDirect();
@@ -8485,7 +9406,7 @@ namespace {
class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
public:
SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}
+ : TargetCodeGenInfo(std::make_unique<SparcV9ABIInfo>(CGT)) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
return 14;
@@ -8578,7 +9499,7 @@ private:
class ARCTargetCodeGenInfo : public TargetCodeGenInfo {
public:
ARCTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new ARCABIInfo(CGT)) {}
+ : TargetCodeGenInfo(std::make_unique<ARCABIInfo>(CGT)) {}
};
@@ -8641,11 +9562,15 @@ ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty,
ABIArgInfo::getDirect(Result, 0, nullptr, false);
}
- return Ty->isPromotableIntegerType() ?
- (FreeRegs >= SizeInRegs ? ABIArgInfo::getExtendInReg(Ty) :
- ABIArgInfo::getExtend(Ty)) :
- (FreeRegs >= SizeInRegs ? ABIArgInfo::getDirectInReg() :
- ABIArgInfo::getDirect());
+ if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (EIT->getNumBits() > 64)
+ return getIndirectByValue(Ty);
+
+ return isPromotableIntegerTypeForABI(Ty)
+ ? (FreeRegs >= SizeInRegs ? ABIArgInfo::getExtendInReg(Ty)
+ : ABIArgInfo::getExtend(Ty))
+ : (FreeRegs >= SizeInRegs ? ABIArgInfo::getDirectInReg()
+ : ABIArgInfo::getDirect());
}
ABIArgInfo ARCABIInfo::classifyReturnType(QualType RetTy) const {
@@ -8769,11 +9694,15 @@ public:
class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
mutable TypeStringCache TSC;
+ void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
+ const CodeGen::CodeGenModule &M) const;
+
public:
XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
- :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
- void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M) const override;
+ : TargetCodeGenInfo(std::make_unique<XCoreABIInfo>(CGT)) {}
+ void emitTargetMetadata(CodeGen::CodeGenModule &CGM,
+ const llvm::MapVector<GlobalDecl, StringRef>
+ &MangledDeclNames) const override;
};
} // End anonymous namespace.
@@ -8934,11 +9863,13 @@ StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
/// The output is tested by test/CodeGen/xcore-stringtype.c.
///
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
- CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC);
/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
-void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const {
+void XCoreTargetCodeGenInfo::emitTargetMD(
+ const Decl *D, llvm::GlobalValue *GV,
+ const CodeGen::CodeGenModule &CGM) const {
SmallStringEnc Enc;
if (getTypeString(Enc, D, CGM, TSC)) {
llvm::LLVMContext &Ctx = CGM.getModule().getContext();
@@ -8950,6 +9881,21 @@ void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
}
}
+void XCoreTargetCodeGenInfo::emitTargetMetadata(
+ CodeGen::CodeGenModule &CGM,
+ const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const {
+  // Warning: new entries may be appended to MangledDeclNames within this
+  // loop, so iterate by index rather than by iterator. We rely on MapVector
+  // insertions adding new elements at the end of the container.
+ for (unsigned I = 0; I != MangledDeclNames.size(); ++I) {
+ auto Val = *(MangledDeclNames.begin() + I);
+ llvm::GlobalValue *GV = CGM.GetGlobalValue(Val.second);
+ if (GV) {
+ const Decl *D = Val.first.getDecl()->getMostRecentDecl();
+ emitTargetMD(D, GV, CGM);
+ }
+ }
+}
//===----------------------------------------------------------------------===//
// SPIR ABI Implementation
//===----------------------------------------------------------------------===//
@@ -8958,7 +9904,7 @@ namespace {
class SPIRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
- : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
+ : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
unsigned getOpenCLKernelCallingConv() const override;
};
@@ -9283,7 +10229,8 @@ static bool appendType(SmallStringEnc &Enc, QualType QType,
}
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
- CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC) {
if (!D)
return false;
@@ -9613,7 +10560,8 @@ ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
uint64_t Size = getContext().getTypeSize(Ty);
// Pass floating point values via FPRs if possible.
- if (IsFixed && Ty->isFloatingType() && FLen >= Size && ArgFPRsLeft) {
+ if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
+ FLen >= Size && ArgFPRsLeft) {
ArgFPRsLeft--;
return ABIArgInfo::getDirect();
}
@@ -9676,6 +10624,15 @@ ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
return extendType(Ty);
}
+ if (const auto *EIT = Ty->getAs<ExtIntType>()) {
+ if (EIT->getNumBits() < XLen && !MustUseStack)
+ return extendType(Ty);
+ if (EIT->getNumBits() > 128 ||
+ (!getContext().getTargetInfo().hasInt128Type() &&
+ EIT->getNumBits() > 64))
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+ }
+
return ABIArgInfo::getDirect();
}
@@ -9747,7 +10704,7 @@ class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
public:
RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
unsigned FLen)
- : TargetCodeGenInfo(new RISCVABIInfo(CGT, XLen, FLen)) {}
+ : TargetCodeGenInfo(std::make_unique<RISCVABIInfo>(CGT, XLen, FLen)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const override {
@@ -9773,6 +10730,56 @@ public:
} // namespace
//===----------------------------------------------------------------------===//
+// VE ABI Implementation.
+//
+namespace {
+class VEABIInfo : public DefaultABIInfo {
+public:
+ VEABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+
+private:
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+ void computeInfo(CGFunctionInfo &FI) const override;
+};
+} // end anonymous namespace
+
+ABIArgInfo VEABIInfo::classifyReturnType(QualType Ty) const {
+ if (Ty->isAnyComplexType()) {
+ return ABIArgInfo::getDirect();
+ }
+ return DefaultABIInfo::classifyReturnType(Ty);
+}
+
+ABIArgInfo VEABIInfo::classifyArgumentType(QualType Ty) const {
+ if (Ty->isAnyComplexType()) {
+ return ABIArgInfo::getDirect();
+ }
+ return DefaultABIInfo::classifyArgumentType(Ty);
+}
+
+void VEABIInfo::computeInfo(CGFunctionInfo &FI) const {
+
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &Arg : FI.arguments())
+ Arg.info = classifyArgumentType(Arg.type);
+}
+
+namespace {
+class VETargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ VETargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<VEABIInfo>(CGT)) {}
+  // The VE ABI requires that arguments of variadic and prototype-less
+  // functions be passed in both registers and memory.
+ bool isNoProtoCallVariadic(const CallArgList &args,
+ const FunctionNoProtoType *fnType) const override {
+ return true;
+ }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//
@@ -9824,8 +10831,12 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
}
case llvm::Triple::wasm32:
- case llvm::Triple::wasm64:
- return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types));
+ case llvm::Triple::wasm64: {
+ WebAssemblyABIInfo::ABIKind Kind = WebAssemblyABIInfo::MVP;
+ if (getTarget().getABI() == "experimental-mv")
+ Kind = WebAssemblyABIInfo::ExperimentalMV;
+ return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types, Kind));
+ }
case llvm::Triple::arm:
case llvm::Triple::armeb:
@@ -9852,11 +10863,21 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind));
}
- case llvm::Triple::ppc:
+ case llvm::Triple::ppc: {
+ if (Triple.isOSAIX())
+ return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit*/ false));
+
+ bool IsSoftFloat =
+ CodeGenOpts.FloatABI == "soft" || getTarget().hasFeature("spe");
+ bool RetSmallStructInRegABI =
+ PPC32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
return SetCGInfo(
- new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI == "soft" ||
- getTarget().hasFeature("spe")));
+ new PPC32TargetCodeGenInfo(Types, IsSoftFloat, RetSmallStructInRegABI));
+ }
case llvm::Triple::ppc64:
+ if (Triple.isOSAIX())
+ return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit*/ true));
+
if (Triple.isOSBinFormatELF()) {
PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
if (getTarget().getABI() == "elfv2")
@@ -9866,8 +10887,8 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
IsSoftFloat));
- } else
- return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
+ }
+ return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
case llvm::Triple::ppc64le: {
assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
@@ -9900,8 +10921,9 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
}
case llvm::Triple::systemz: {
- bool HasVector = getTarget().getABI() == "vector";
- return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector));
+ bool SoftFloat = CodeGenOpts.FloatABI == "soft";
+ bool HasVector = !SoftFloat && getTarget().getABI() == "vector";
+ return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector, SoftFloat));
}
case llvm::Triple::tce:
@@ -9959,6 +10981,8 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::spir:
case llvm::Triple::spir64:
return SetCGInfo(new SPIRTargetCodeGenInfo(Types));
+ case llvm::Triple::ve:
+ return SetCGInfo(new VETargetCodeGenInfo(Types));
}
}
@@ -10042,9 +11066,9 @@ llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
auto IP = CGF.Builder.saveIP();
auto *BB = llvm::BasicBlock::Create(C, "entry", F);
Builder.SetInsertPoint(BB);
- unsigned BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(BlockTy);
+ const auto BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(BlockTy);
auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
- BlockPtr->setAlignment(llvm::MaybeAlign(BlockAlign));
+ BlockPtr->setAlignment(BlockAlign);
Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
llvm::SmallVector<llvm::Value *, 2> Args;
diff --git a/clang/lib/CodeGen/TargetInfo.h b/clang/lib/CodeGen/TargetInfo.h
index e1e90e73cb58..1152cabce4a0 100644
--- a/clang/lib/CodeGen/TargetInfo.h
+++ b/clang/lib/CodeGen/TargetInfo.h
@@ -43,11 +43,10 @@ class CGFunctionInfo;
/// codegeneration issues, like target-specific attributes, builtins and so
/// on.
class TargetCodeGenInfo {
- ABIInfo *Info;
+ std::unique_ptr<ABIInfo> Info = nullptr;
public:
- // WARNING: Acquires the ownership of ABIInfo.
- TargetCodeGenInfo(ABIInfo *info = nullptr) : Info(info) {}
+ TargetCodeGenInfo(std::unique_ptr<ABIInfo> Info) : Info(std::move(Info)) {}
virtual ~TargetCodeGenInfo();
/// getABIInfo() - Returns ABI info helper for the target.
@@ -58,10 +57,18 @@ public:
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const {}
- /// emitTargetMD - Provides a convenient hook to handle extra
- /// target-specific metadata for the given global.
- virtual void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M) const {}
+ /// emitTargetMetadata - Provides a convenient hook to handle extra
+ /// target-specific metadata for the given globals.
+ virtual void emitTargetMetadata(
+ CodeGen::CodeGenModule &CGM,
+ const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const {}
+
+  /// Performs any further codegen-related checks on a function call that
+  /// must be done in a target-specific manner.
+ virtual void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
+ const FunctionDecl *Caller,
+ const FunctionDecl *Callee,
+ const CallArgList &Args) const {}
/// Determines the size of struct _Unwind_Exception on this platform,
/// in 8-bit units. The Itanium ABI defines this as:
@@ -315,6 +322,32 @@ public:
virtual bool shouldEmitStaticExternCAliases() const { return true; }
virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const {}
+
+ /// Return the device-side type for the CUDA device builtin surface type.
+ virtual llvm::Type *getCUDADeviceBuiltinSurfaceDeviceType() const {
+ // By default, no change from the original one.
+ return nullptr;
+ }
+ /// Return the device-side type for the CUDA device builtin texture type.
+ virtual llvm::Type *getCUDADeviceBuiltinTextureDeviceType() const {
+ // By default, no change from the original one.
+ return nullptr;
+ }
+
+ /// Emit the device-side copy of the builtin surface type.
+ virtual bool emitCUDADeviceBuiltinSurfaceDeviceCopy(CodeGenFunction &CGF,
+ LValue Dst,
+ LValue Src) const {
+ // DO NOTHING by default.
+ return false;
+ }
+ /// Emit the device-side copy of the builtin texture type.
+ virtual bool emitCUDADeviceBuiltinTextureDeviceCopy(CodeGenFunction &CGF,
+ LValue Dst,
+ LValue Src) const {
+ // DO NOTHING by default.
+ return false;
+ }
};
} // namespace CodeGen
diff --git a/clang/lib/CrossTU/CrossTranslationUnit.cpp b/clang/lib/CrossTU/CrossTranslationUnit.cpp
index 7391d7132daf..80465c41d151 100644
--- a/clang/lib/CrossTU/CrossTranslationUnit.cpp
+++ b/clang/lib/CrossTU/CrossTranslationUnit.cpp
@@ -12,20 +12,26 @@
#include "clang/CrossTU/CrossTranslationUnit.h"
#include "clang/AST/ASTImporter.h"
#include "clang/AST/Decl.h"
+#include "clang/AST/ParentMapContext.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CrossTU/CrossTUDiagnostic.h"
#include "clang/Frontend/ASTUnit.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Index/USRGeneration.h"
-#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Option/ArgList.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/YAMLParser.h"
#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
#include <fstream>
#include <sstream>
+#include <tuple>
namespace clang {
namespace cross_tu {
@@ -110,6 +116,17 @@ public:
return "Language dialect mismatch";
case index_error_code::load_threshold_reached:
return "Load threshold reached";
+ case index_error_code::invocation_list_ambiguous:
+ return "Invocation list file contains multiple references to the same "
+ "source file.";
+ case index_error_code::invocation_list_file_not_found:
+ return "Invocation list file is not found.";
+ case index_error_code::invocation_list_empty:
+ return "Invocation list file is empty.";
+ case index_error_code::invocation_list_wrong_format:
+ return "Invocation list file is in wrong format.";
+ case index_error_code::invocation_list_lookup_unsuccessful:
+ return "Invocation list file does not contain the requested source file.";
}
llvm_unreachable("Unrecognized index_error_code.");
}
@@ -129,8 +146,8 @@ std::error_code IndexError::convertToErrorCode() const {
}
llvm::Expected<llvm::StringMap<std::string>>
-parseCrossTUIndex(StringRef IndexPath, StringRef CrossTUDir) {
- std::ifstream ExternalMapFile(IndexPath);
+parseCrossTUIndex(StringRef IndexPath) {
+ std::ifstream ExternalMapFile{std::string(IndexPath)};
if (!ExternalMapFile)
return llvm::make_error<IndexError>(index_error_code::missing_index_file,
IndexPath.str());
@@ -139,21 +156,26 @@ parseCrossTUIndex(StringRef IndexPath, StringRef CrossTUDir) {
std::string Line;
unsigned LineNo = 1;
while (std::getline(ExternalMapFile, Line)) {
- const size_t Pos = Line.find(" ");
- if (Pos > 0 && Pos != std::string::npos) {
- StringRef LineRef{Line};
- StringRef LookupName = LineRef.substr(0, Pos);
- if (Result.count(LookupName))
+ StringRef LineRef{Line};
+ const size_t Delimiter = LineRef.find(" ");
+ if (Delimiter > 0 && Delimiter != std::string::npos) {
+ StringRef LookupName = LineRef.substr(0, Delimiter);
+
+ // Store paths with posix-style directory separator.
+ SmallVector<char, 32> FilePath;
+ llvm::Twine{LineRef.substr(Delimiter + 1)}.toVector(FilePath);
+ llvm::sys::path::native(FilePath, llvm::sys::path::Style::posix);
+
+ bool InsertionOccured;
+ std::tie(std::ignore, InsertionOccured) =
+ Result.try_emplace(LookupName, FilePath.begin(), FilePath.end());
+ if (!InsertionOccured)
return llvm::make_error<IndexError>(
index_error_code::multiple_definitions, IndexPath.str(), LineNo);
- StringRef FileName = LineRef.substr(Pos + 1);
- SmallString<256> FilePath = CrossTUDir;
- llvm::sys::path::append(FilePath, FileName);
- Result[LookupName] = FilePath.str().str();
} else
return llvm::make_error<IndexError>(
index_error_code::invalid_index_format, IndexPath.str(), LineNo);
- LineNo++;
+ ++LineNo;
}
return Result;
}
@@ -258,8 +280,8 @@ llvm::Expected<const T *> CrossTranslationUnitContext::getCrossTUDefinitionImpl(
// diagnostics.
++NumTripleMismatch;
return llvm::make_error<IndexError>(index_error_code::triple_mismatch,
- Unit->getMainFileName(), TripleTo.str(),
- TripleFrom.str());
+ std::string(Unit->getMainFileName()),
+ TripleTo.str(), TripleFrom.str());
}
const auto &LangTo = Context.getLangOpts();
@@ -288,7 +310,7 @@ llvm::Expected<const T *> CrossTranslationUnitContext::getCrossTUDefinitionImpl(
if (LangTo.CPlusPlus11 != LangFrom.CPlusPlus11 ||
LangTo.CPlusPlus14 != LangFrom.CPlusPlus14 ||
LangTo.CPlusPlus17 != LangFrom.CPlusPlus17 ||
- LangTo.CPlusPlus2a != LangFrom.CPlusPlus2a) {
+ LangTo.CPlusPlus20 != LangFrom.CPlusPlus20) {
++NumLangDialectMismatch;
return llvm::make_error<IndexError>(
index_error_code::lang_dialect_mismatch);
@@ -341,30 +363,13 @@ void CrossTranslationUnitContext::emitCrossTUDiagnostics(const IndexError &IE) {
}
}
-CrossTranslationUnitContext::ASTFileLoader::ASTFileLoader(
- const CompilerInstance &CI)
- : CI(CI) {}
-
-std::unique_ptr<ASTUnit>
-CrossTranslationUnitContext::ASTFileLoader::operator()(StringRef ASTFilePath) {
- // Load AST from ast-dump.
- IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
- TextDiagnosticPrinter *DiagClient =
- new TextDiagnosticPrinter(llvm::errs(), &*DiagOpts);
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
- IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
- new DiagnosticsEngine(DiagID, &*DiagOpts, DiagClient));
-
- return ASTUnit::LoadFromASTFile(
- ASTFilePath, CI.getPCHContainerOperations()->getRawReader(),
- ASTUnit::LoadEverything, Diags, CI.getFileSystemOpts());
-}
-
CrossTranslationUnitContext::ASTUnitStorage::ASTUnitStorage(
- const CompilerInstance &CI)
- : FileAccessor(CI), LoadGuard(const_cast<CompilerInstance &>(CI)
- .getAnalyzerOpts()
- ->CTUImportThreshold) {}
+ CompilerInstance &CI)
+ : Loader(CI, CI.getAnalyzerOpts()->CTUDir,
+ CI.getAnalyzerOpts()->CTUInvocationList),
+ LoadGuard(CI.getASTContext().getLangOpts().CPlusPlus
+ ? CI.getAnalyzerOpts()->CTUImportCppThreshold
+ : CI.getAnalyzerOpts()->CTUImportThreshold) {}
llvm::Expected<ASTUnit *>
CrossTranslationUnitContext::ASTUnitStorage::getASTUnitForFile(
@@ -380,8 +385,12 @@ CrossTranslationUnitContext::ASTUnitStorage::getASTUnitForFile(
index_error_code::load_threshold_reached);
}
- // Load the ASTUnit from the pre-dumped AST file specified by ASTFileName.
- std::unique_ptr<ASTUnit> LoadedUnit = FileAccessor(FileName);
+ auto LoadAttempt = Loader.load(FileName);
+
+ if (!LoadAttempt)
+ return LoadAttempt.takeError();
+
+ std::unique_ptr<ASTUnit> LoadedUnit = std::move(LoadAttempt.get());
// Need the raw pointer and the unique_ptr as well.
ASTUnit *Unit = LoadedUnit.get();
@@ -461,7 +470,7 @@ llvm::Error CrossTranslationUnitContext::ASTUnitStorage::ensureCTUIndexLoaded(
else
llvm::sys::path::append(IndexFile, IndexName);
- if (auto IndexMapping = parseCrossTUIndex(IndexFile, CrossTUDir)) {
+ if (auto IndexMapping = parseCrossTUIndex(IndexFile)) {
// Initialize member map.
NameFileMap = *IndexMapping;
return llvm::Error::success();
@@ -494,6 +503,193 @@ llvm::Expected<ASTUnit *> CrossTranslationUnitContext::loadExternalAST(
return Unit;
}
+CrossTranslationUnitContext::ASTLoader::ASTLoader(
+ CompilerInstance &CI, StringRef CTUDir, StringRef InvocationListFilePath)
+ : CI(CI), CTUDir(CTUDir), InvocationListFilePath(InvocationListFilePath) {}
+
+CrossTranslationUnitContext::LoadResultTy
+CrossTranslationUnitContext::ASTLoader::load(StringRef Identifier) {
+ llvm::SmallString<256> Path;
+ if (llvm::sys::path::is_absolute(Identifier, PathStyle)) {
+ Path = Identifier;
+ } else {
+ Path = CTUDir;
+ llvm::sys::path::append(Path, PathStyle, Identifier);
+ }
+
+ // The path is stored in the InvocationList member in posix style. To
+ // successfully lookup an entry based on filepath, it must be converted.
+ llvm::sys::path::native(Path, PathStyle);
+
+ // Normalize by removing relative path components.
+ llvm::sys::path::remove_dots(Path, /*remove_dot_dot*/ true, PathStyle);
+
+ if (Path.endswith(".ast"))
+ return loadFromDump(Path);
+ else
+ return loadFromSource(Path);
+}
+
+CrossTranslationUnitContext::LoadResultTy
+CrossTranslationUnitContext::ASTLoader::loadFromDump(StringRef ASTDumpPath) {
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
+ TextDiagnosticPrinter *DiagClient =
+ new TextDiagnosticPrinter(llvm::errs(), &*DiagOpts);
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ new DiagnosticsEngine(DiagID, &*DiagOpts, DiagClient));
+ return ASTUnit::LoadFromASTFile(
+ std::string(ASTDumpPath.str()),
+ CI.getPCHContainerOperations()->getRawReader(), ASTUnit::LoadEverything,
+ Diags, CI.getFileSystemOpts());
+}
+
+/// Load the AST from a source file, whose compilation command is expected to
+/// be found in the YAML-formatted invocation list file located at the
+/// filesystem path specified by \p InvocationList. The invocation list should
+/// contain absolute paths. \p SourceFilePath is the absolute path of the
+/// source file that contains the function definition the analysis is looking
+/// for. The index is built by the \p clang-extdef-mapping tool, which is also
+/// expected to generate absolute paths.
+///
+/// Proper diagnostic emission requires absolute paths, so even if a future
+/// change introduces the handling of relative paths, this must be taken into
+/// consideration.
+CrossTranslationUnitContext::LoadResultTy
+CrossTranslationUnitContext::ASTLoader::loadFromSource(
+ StringRef SourceFilePath) {
+
+ if (llvm::Error InitError = lazyInitInvocationList())
+ return std::move(InitError);
+ assert(InvocationList);
+
+ auto Invocation = InvocationList->find(SourceFilePath);
+ if (Invocation == InvocationList->end())
+ return llvm::make_error<IndexError>(
+ index_error_code::invocation_list_lookup_unsuccessful);
+
+ const InvocationListTy::mapped_type &InvocationCommand = Invocation->second;
+
+ SmallVector<const char *, 32> CommandLineArgs(InvocationCommand.size());
+ std::transform(InvocationCommand.begin(), InvocationCommand.end(),
+ CommandLineArgs.begin(),
+ [](auto &&CmdPart) { return CmdPart.c_str(); });
+
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts{&CI.getDiagnosticOpts()};
+ auto *DiagClient = new ForwardingDiagnosticConsumer{CI.getDiagnosticClient()};
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID{
+ CI.getDiagnostics().getDiagnosticIDs()};
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ new DiagnosticsEngine{DiagID, &*DiagOpts, DiagClient});
+
+ return std::unique_ptr<ASTUnit>(ASTUnit::LoadFromCommandLine(
+ CommandLineArgs.begin(), (CommandLineArgs.end()),
+ CI.getPCHContainerOperations(), Diags,
+ CI.getHeaderSearchOpts().ResourceDir));
+}
+
+llvm::Expected<InvocationListTy>
+parseInvocationList(StringRef FileContent, llvm::sys::path::Style PathStyle) {
+ InvocationListTy InvocationList;
+
+ /// LLVM YAML parser is used to extract information from invocation list file.
+ llvm::SourceMgr SM;
+ llvm::yaml::Stream InvocationFile(FileContent, SM);
+
+ /// Only the first document is processed.
+ llvm::yaml::document_iterator FirstInvocationFile = InvocationFile.begin();
+
+ /// There has to be at least one document available.
+ if (FirstInvocationFile == InvocationFile.end())
+ return llvm::make_error<IndexError>(
+ index_error_code::invocation_list_empty);
+
+ llvm::yaml::Node *DocumentRoot = FirstInvocationFile->getRoot();
+ if (!DocumentRoot)
+ return llvm::make_error<IndexError>(
+ index_error_code::invocation_list_wrong_format);
+
+ /// According to the format specified the document must be a mapping, where
+ /// the keys are paths to source files, and values are sequences of invocation
+ /// parts.
+ auto *Mappings = dyn_cast<llvm::yaml::MappingNode>(DocumentRoot);
+ if (!Mappings)
+ return llvm::make_error<IndexError>(
+ index_error_code::invocation_list_wrong_format);
+
+ for (auto &NextMapping : *Mappings) {
+ /// The keys should be strings, which represent a source-file path.
+ auto *Key = dyn_cast<llvm::yaml::ScalarNode>(NextMapping.getKey());
+ if (!Key)
+ return llvm::make_error<IndexError>(
+ index_error_code::invocation_list_wrong_format);
+
+ SmallVector<char, 32> ValueStorage;
+ StringRef SourcePath = Key->getValue(ValueStorage);
+
+ // Store paths with PathStyle directory separator.
+ SmallVector<char, 32> NativeSourcePath;
+ llvm::Twine{SourcePath}.toVector(NativeSourcePath);
+ llvm::sys::path::native(NativeSourcePath, PathStyle);
+
+ StringRef InvocationKey{NativeSourcePath.begin(), NativeSourcePath.size()};
+
+ if (InvocationList.find(InvocationKey) != InvocationList.end())
+ return llvm::make_error<IndexError>(
+ index_error_code::invocation_list_ambiguous);
+
+ /// The values should be sequences of strings, each representing a part of
+ /// the invocation.
+ auto *Args = dyn_cast<llvm::yaml::SequenceNode>(NextMapping.getValue());
+ if (!Args)
+ return llvm::make_error<IndexError>(
+ index_error_code::invocation_list_wrong_format);
+
+ for (auto &Arg : *Args) {
+ auto *CmdString = dyn_cast<llvm::yaml::ScalarNode>(&Arg);
+ if (!CmdString)
+ return llvm::make_error<IndexError>(
+ index_error_code::invocation_list_wrong_format);
+      /// Each conversion starts from empty working storage, since it is
+      /// unclear whether the YAML parser requires this.
+ ValueStorage.clear();
+ InvocationList[InvocationKey].emplace_back(
+ CmdString->getValue(ValueStorage));
+ }
+
+ if (InvocationList[InvocationKey].empty())
+ return llvm::make_error<IndexError>(
+ index_error_code::invocation_list_wrong_format);
+ }
+
+ return InvocationList;
+}
+
+llvm::Error CrossTranslationUnitContext::ASTLoader::lazyInitInvocationList() {
+ /// Lazily initialize the invocation list member used for on-demand parsing.
+ if (InvocationList)
+ return llvm::Error::success();
+
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> FileContent =
+ llvm::MemoryBuffer::getFile(InvocationListFilePath);
+ if (!FileContent)
+ return llvm::make_error<IndexError>(
+ index_error_code::invocation_list_file_not_found);
+ std::unique_ptr<llvm::MemoryBuffer> ContentBuffer = std::move(*FileContent);
+ assert(ContentBuffer && "If no error was produced after loading, the pointer "
+ "should not be nullptr.");
+
+ llvm::Expected<InvocationListTy> ExpectedInvocationList =
+ parseInvocationList(ContentBuffer->getBuffer(), PathStyle);
+
+ if (!ExpectedInvocationList)
+ return ExpectedInvocationList.takeError();
+
+ InvocationList = *ExpectedInvocationList;
+
+ return llvm::Error::success();
+}
+
template <typename T>
llvm::Expected<const T *>
CrossTranslationUnitContext::importDefinitionImpl(const T *D, ASTUnit *Unit) {
@@ -525,6 +721,9 @@ CrossTranslationUnitContext::importDefinitionImpl(const T *D, ASTUnit *Unit) {
assert(hasBodyOrInit(ToDecl) && "Imported Decl should have body or init.");
++NumGetCTUSuccess;
+ // Parent map is invalidated after changing the AST.
+ ToDecl->getASTContext().getParentMapContext().clear();
+
return ToDecl;
}
diff --git a/clang/lib/DirectoryWatcher/DirectoryScanner.cpp b/clang/lib/DirectoryWatcher/DirectoryScanner.cpp
index ecfec52f459e..1bc286236a0e 100644
--- a/clang/lib/DirectoryWatcher/DirectoryScanner.cpp
+++ b/clang/lib/DirectoryWatcher/DirectoryScanner.cpp
@@ -51,4 +51,4 @@ getAsFileEvents(const std::vector<std::string> &Scan) {
return Events;
}
-} // namespace clang \ No newline at end of file
+} // namespace clang
diff --git a/clang/lib/DirectoryWatcher/DirectoryScanner.h b/clang/lib/DirectoryWatcher/DirectoryScanner.h
index 55731225e251..feb8b4ea861e 100644
--- a/clang/lib/DirectoryWatcher/DirectoryScanner.h
+++ b/clang/lib/DirectoryWatcher/DirectoryScanner.h
@@ -26,4 +26,4 @@ getAsFileEvents(const std::vector<std::string> &Scan);
/// \returns llvm::None if \p Path doesn't exist or can't get the status.
llvm::Optional<llvm::sys::fs::file_status> getFileStatus(llvm::StringRef Path);
-} // namespace clang \ No newline at end of file
+} // namespace clang
diff --git a/clang/lib/DirectoryWatcher/default/DirectoryWatcher-not-implemented.cpp b/clang/lib/DirectoryWatcher/default/DirectoryWatcher-not-implemented.cpp
index 200e540624a6..bc410822d7ae 100644
--- a/clang/lib/DirectoryWatcher/default/DirectoryWatcher-not-implemented.cpp
+++ b/clang/lib/DirectoryWatcher/default/DirectoryWatcher-not-implemented.cpp
@@ -18,4 +18,4 @@ llvm::Expected<std::unique_ptr<DirectoryWatcher>> clang::DirectoryWatcher::creat
return llvm::make_error<llvm::StringError>(
"DirectoryWatcher is not implemented for this platform!",
llvm::inconvertibleErrorCode());
-} \ No newline at end of file
+}
diff --git a/clang/lib/DirectoryWatcher/mac/DirectoryWatcher-mac.cpp b/clang/lib/DirectoryWatcher/mac/DirectoryWatcher-mac.cpp
index 7a60369a4da0..bdc389516289 100644
--- a/clang/lib/DirectoryWatcher/mac/DirectoryWatcher-mac.cpp
+++ b/clang/lib/DirectoryWatcher/mac/DirectoryWatcher-mac.cpp
@@ -14,10 +14,13 @@
#include "llvm/Support/Error.h"
#include "llvm/Support/Path.h"
#include <CoreServices/CoreServices.h>
+#include <TargetConditionals.h>
using namespace llvm;
using namespace clang;
+#if TARGET_OS_OSX
+
static void stopFSEventStream(FSEventStreamRef);
namespace {
@@ -43,24 +46,32 @@ namespace {
class DirectoryWatcherMac : public clang::DirectoryWatcher {
public:
DirectoryWatcherMac(
- FSEventStreamRef EventStream,
+ dispatch_queue_t Queue, FSEventStreamRef EventStream,
std::function<void(llvm::ArrayRef<DirectoryWatcher::Event>, bool)>
Receiver,
llvm::StringRef WatchedDirPath)
- : EventStream(EventStream), Receiver(Receiver),
+ : Queue(Queue), EventStream(EventStream), Receiver(Receiver),
WatchedDirPath(WatchedDirPath) {}
~DirectoryWatcherMac() override {
- stopFSEventStream(EventStream);
- EventStream = nullptr;
- // Now it's safe to use Receiver as the only other concurrent use would have
- // been in EventStream processing.
- Receiver(DirectoryWatcher::Event(
- DirectoryWatcher::Event::EventKind::WatcherGotInvalidated, ""),
- false);
+ // FSEventStreamStop and Invalidate must be called after Start and
+ // SetDispatchQueue to follow FSEvents API contract. The call to Receiver
+ // also uses Queue to not race with the initial scan.
+ dispatch_sync(Queue, ^{
+ stopFSEventStream(EventStream);
+ EventStream = nullptr;
+ Receiver(
+ DirectoryWatcher::Event(
+ DirectoryWatcher::Event::EventKind::WatcherGotInvalidated, ""),
+ false);
+ });
+
+ // Balance initial creation.
+ dispatch_release(Queue);
}
private:
+ dispatch_queue_t Queue;
FSEventStreamRef EventStream;
std::function<void(llvm::ArrayRef<Event>, bool)> Receiver;
const std::string WatchedDirPath;
@@ -173,7 +184,7 @@ FSEventStreamRef createFSEventStream(
if (::realpath(P.begin(), Buffer) != nullptr)
RealPath = Buffer;
else
- RealPath = Path;
+ RealPath = Path.str();
}
FSEventStreamContext Context;
@@ -217,11 +228,11 @@ llvm::Expected<std::unique_ptr<DirectoryWatcher>> clang::DirectoryWatcher::creat
assert(EventStream && "EventStream expected to be non-null");
std::unique_ptr<DirectoryWatcher> Result =
- std::make_unique<DirectoryWatcherMac>(EventStream, Receiver, Path);
+ std::make_unique<DirectoryWatcherMac>(Queue, EventStream, Receiver, Path);
// We need to copy the data so the lifetime is ok after a const copy is made
// for the block.
- const std::string CopiedPath = Path;
+ const std::string CopiedPath = Path.str();
auto InitWork = ^{
// We need to start watching the directory before we start scanning in order
@@ -230,10 +241,6 @@ llvm::Expected<std::unique_ptr<DirectoryWatcher>> clang::DirectoryWatcher::creat
  // initial scan and handling events ONLY AFTER the scan finishes.
FSEventStreamSetDispatchQueue(EventStream, Queue);
FSEventStreamStart(EventStream);
- // We need to decrement the ref count for Queue as initialize() will return
- // and FSEvents has incremented it. Since we have to wait for FSEvents to
- // take ownership it's the easiest to do it here rather than main thread.
- dispatch_release(Queue);
Receiver(getAsFileEvents(scanDirectory(CopiedPath)), /*IsInitial=*/true);
};
@@ -245,3 +252,17 @@ llvm::Expected<std::unique_ptr<DirectoryWatcher>> clang::DirectoryWatcher::creat
return Result;
}
+
+#else // TARGET_OS_OSX
+
+llvm::Expected<std::unique_ptr<DirectoryWatcher>>
+clang::DirectoryWatcher::create(
+ StringRef Path,
+ std::function<void(llvm::ArrayRef<DirectoryWatcher::Event>, bool)> Receiver,
+ bool WaitForInitialSync) {
+ return llvm::make_error<llvm::StringError>(
+ "DirectoryWatcher is not implemented for this platform!",
+ llvm::inconvertibleErrorCode());
+}
+
+#endif // TARGET_OS_OSX
diff --git a/clang/lib/Driver/Action.cpp b/clang/lib/Driver/Action.cpp
index 0eb4c7257e7a..2ec063d873be 100644
--- a/clang/lib/Driver/Action.cpp
+++ b/clang/lib/Driver/Action.cpp
@@ -43,6 +43,8 @@ const char *Action::getClassName(ActionClass AC) {
return "clang-offload-unbundler";
case OffloadWrapperJobClass:
return "clang-offload-wrapper";
+ case StaticLibJobClass:
+ return "static-lib-linker";
}
llvm_unreachable("invalid class");
@@ -415,3 +417,8 @@ void OffloadWrapperJobAction::anchor() {}
OffloadWrapperJobAction::OffloadWrapperJobAction(ActionList &Inputs,
types::ID Type)
: JobAction(OffloadWrapperJobClass, Inputs, Type) {}
+
+void StaticLibJobAction::anchor() {}
+
+StaticLibJobAction::StaticLibJobAction(ActionList &Inputs, types::ID Type)
+ : JobAction(StaticLibJobClass, Inputs, Type) {}
diff --git a/clang/lib/Driver/Compilation.cpp b/clang/lib/Driver/Compilation.cpp
index ba188f5c4083..05ee5091396b 100644
--- a/clang/lib/Driver/Compilation.cpp
+++ b/clang/lib/Driver/Compilation.cpp
@@ -76,16 +76,29 @@ Compilation::getArgsForToolChain(const ToolChain *TC, StringRef BoundArch,
*TranslatedArgs, SameTripleAsHost, AllocatedArgs);
}
+ DerivedArgList *NewDAL = nullptr;
if (!OpenMPArgs) {
+ NewDAL = TC->TranslateXarchArgs(*TranslatedArgs, BoundArch,
+ DeviceOffloadKind, &AllocatedArgs);
+ } else {
+ NewDAL = TC->TranslateXarchArgs(*OpenMPArgs, BoundArch, DeviceOffloadKind,
+ &AllocatedArgs);
+ if (!NewDAL)
+ NewDAL = OpenMPArgs;
+ else
+ delete OpenMPArgs;
+ }
+
+ if (!NewDAL) {
Entry = TC->TranslateArgs(*TranslatedArgs, BoundArch, DeviceOffloadKind);
if (!Entry)
Entry = TranslatedArgs;
} else {
- Entry = TC->TranslateArgs(*OpenMPArgs, BoundArch, DeviceOffloadKind);
+ Entry = TC->TranslateArgs(*NewDAL, BoundArch, DeviceOffloadKind);
if (!Entry)
- Entry = OpenMPArgs;
+ Entry = NewDAL;
else
- delete OpenMPArgs;
+ delete NewDAL;
}
// Add allocated arguments to the final DAL.
@@ -172,7 +185,7 @@ int Compilation::ExecuteCommand(const Command &C,
}
if (getDriver().CCPrintOptions)
- *OS << "[Logging clang options]";
+ *OS << "[Logging clang options]\n";
C.Print(*OS, "\n", /*Quote=*/getDriver().CCPrintOptions);
}
@@ -258,14 +271,23 @@ void Compilation::initCompilationForDiagnostics() {
// Remove any user specified output. Claim any unclaimed arguments, so as
// to avoid emitting warnings about unused args.
- OptSpecifier OutputOpts[] = { options::OPT_o, options::OPT_MD,
- options::OPT_MMD };
+ OptSpecifier OutputOpts[] = {
+ options::OPT_o, options::OPT_MD, options::OPT_MMD, options::OPT_M,
+ options::OPT_MM, options::OPT_MF, options::OPT_MG, options::OPT_MJ,
+ options::OPT_MQ, options::OPT_MT, options::OPT_MV};
for (unsigned i = 0, e = llvm::array_lengthof(OutputOpts); i != e; ++i) {
if (TranslatedArgs->hasArg(OutputOpts[i]))
TranslatedArgs->eraseArg(OutputOpts[i]);
}
TranslatedArgs->ClaimAllArgs();
+ // Force re-creation of the toolchain Args, otherwise our modifications just
+ // above will have no effect.
+ for (auto Arg : TCArgs)
+ if (Arg.second != TranslatedArgs)
+ delete Arg.second;
+ TCArgs.clear();
+
// Redirect stdout/stderr to /dev/null.
Redirects = {None, {""}, {""}};
diff --git a/clang/lib/Driver/Distro.cpp b/clang/lib/Driver/Distro.cpp
index 06707fefc9d0..4d58ad1ae78c 100644
--- a/clang/lib/Driver/Distro.cpp
+++ b/clang/lib/Driver/Distro.cpp
@@ -11,9 +11,10 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Triple.h"
#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/ADT/Triple.h"
using namespace clang::driver;
using namespace clang;
@@ -70,6 +71,7 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS,
.Case("disco", Distro::UbuntuDisco)
.Case("eoan", Distro::UbuntuEoan)
.Case("focal", Distro::UbuntuFocal)
+ .Case("groovy", Distro::UbuntuGroovy)
.Default(Distro::UnknownDistro);
if (Version != Distro::UnknownDistro)
return Version;
diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp
index e718b8366df0..ece8222dcf24 100644
--- a/clang/lib/Driver/Driver.cpp
+++ b/clang/lib/Driver/Driver.cpp
@@ -38,11 +38,12 @@
#include "ToolChains/NaCl.h"
#include "ToolChains/NetBSD.h"
#include "ToolChains/OpenBSD.h"
-#include "ToolChains/PS4CPU.h"
#include "ToolChains/PPCLinux.h"
+#include "ToolChains/PS4CPU.h"
#include "ToolChains/RISCVToolchain.h"
#include "ToolChains/Solaris.h"
#include "ToolChains/TCE.h"
+#include "ToolChains/VEToolchain.h"
#include "ToolChains/WebAssembly.h"
#include "ToolChains/XCore.h"
#include "clang/Basic/Version.h"
@@ -71,6 +72,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/Host.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/Process.h"
@@ -99,7 +101,7 @@ std::string Driver::GetResourcesPath(StringRef BinaryPath,
// exact same string ("a/../b/" and "b/" get different hashes, for example).
// Dir is bin/ or lib/, depending on where BinaryPath is.
- std::string Dir = llvm::sys::path::parent_path(BinaryPath);
+ std::string Dir = std::string(llvm::sys::path::parent_path(BinaryPath));
SmallString<128> P(Dir);
if (CustomResourceDir != "") {
@@ -115,7 +117,7 @@ std::string Driver::GetResourcesPath(StringRef BinaryPath,
CLANG_VERSION_STRING);
}
- return P.str();
+ return std::string(P.str());
}
Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
@@ -131,15 +133,21 @@ Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
TargetTriple(TargetTriple), CCCGenericGCCName(""), Saver(Alloc),
CheckInputsExist(true), GenReproducer(false),
SuppressMissingInputWarning(false) {
-
// Provide a sane fallback if no VFS is specified.
if (!this->VFS)
this->VFS = llvm::vfs::getRealFileSystem();
- Name = llvm::sys::path::filename(ClangExecutable);
- Dir = llvm::sys::path::parent_path(ClangExecutable);
+ Name = std::string(llvm::sys::path::filename(ClangExecutable));
+ Dir = std::string(llvm::sys::path::parent_path(ClangExecutable));
InstalledDir = Dir; // Provide a sensible default installed dir.
+ if ((!SysRoot.empty()) && llvm::sys::path::is_relative(SysRoot)) {
+ // Prepend InstalledDir if SysRoot is relative
+ SmallString<128> P(InstalledDir);
+ llvm::sys::path::append(P, SysRoot);
+ SysRoot = std::string(P);
+ }
+
#if defined(CLANG_CONFIG_FILE_SYSTEM_DIR)
SystemConfigDir = CLANG_CONFIG_FILE_SYSTEM_DIR;
#endif
@@ -467,6 +475,26 @@ static llvm::Triple computeTargetTriple(const Driver &D,
Target.getOS() == llvm::Triple::Minix)
return Target;
+ // On AIX, the env OBJECT_MODE may affect the resulting arch variant.
+ if (Target.isOSAIX()) {
+ if (Optional<std::string> ObjectModeValue =
+ llvm::sys::Process::GetEnv("OBJECT_MODE")) {
+ StringRef ObjectMode = *ObjectModeValue;
+ llvm::Triple::ArchType AT = llvm::Triple::UnknownArch;
+
+ if (ObjectMode.equals("64")) {
+ AT = Target.get64BitArchVariant().getArch();
+ } else if (ObjectMode.equals("32")) {
+ AT = Target.get32BitArchVariant().getArch();
+ } else {
+ D.Diag(diag::err_drv_invalid_object_mode) << ObjectMode;
+ }
+
+ if (AT != llvm::Triple::UnknownArch && AT != Target.getArch())
+ Target.setArch(AT);
+ }
+ }
+
// Handle pseudo-target flags '-m64', '-mx32', '-m32' and '-m16'.
Arg *A = Args.getLastArg(options::OPT_m64, options::OPT_mx32,
options::OPT_m32, options::OPT_m16);
@@ -769,7 +797,7 @@ bool Driver::readConfigFile(StringRef FileName) {
// Read options from config file.
llvm::SmallString<128> CfgFileName(FileName);
llvm::sys::path::native(CfgFileName);
- ConfigFile = CfgFileName.str();
+ ConfigFile = std::string(CfgFileName.str());
bool ContainErrors;
CfgOptions = std::make_unique<InputArgList>(
ParseArgStrings(NewCfgArgs, IsCLMode(), ContainErrors));
@@ -826,8 +854,12 @@ bool Driver::loadConfigFile() {
std::vector<std::string> ConfigFiles =
CLOptions->getAllArgValues(options::OPT_config);
if (ConfigFiles.size() > 1) {
- Diag(diag::err_drv_duplicate_config);
- return true;
+ if (!std::all_of(
+ ConfigFiles.begin(), ConfigFiles.end(),
+ [ConfigFiles](std::string s) { return s == ConfigFiles[0]; })) {
+ Diag(diag::err_drv_duplicate_config);
+ return true;
+ }
}
if (!ConfigFiles.empty()) {
@@ -952,7 +984,7 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
while (!CompilerPath.empty()) {
std::pair<StringRef, StringRef> Split =
CompilerPath.split(llvm::sys::EnvPathSeparator);
- PrefixDirs.push_back(Split.first);
+ PrefixDirs.push_back(std::string(Split.first));
CompilerPath = Split.second;
}
}
@@ -1035,6 +1067,10 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
// -no-canonical-prefixes is used very early in main.
Args.ClaimAllArgs(options::OPT_no_canonical_prefixes);
+  // -f(no-)integrated-cc1 is also used very early in main.
+ Args.ClaimAllArgs(options::OPT_fintegrated_cc1);
+ Args.ClaimAllArgs(options::OPT_fno_integrated_cc1);
+
// Ignore -pipe.
Args.ClaimAllArgs(options::OPT_pipe);
@@ -1152,7 +1188,7 @@ static void printArgList(raw_ostream &OS, const llvm::opt::ArgList &Args) {
for (auto I = ASL.begin(), E = ASL.end(); I != E; ++I) {
if (I != ASL.begin())
OS << ' ';
- Command::printArg(OS, *I, true);
+ llvm::sys::printArg(OS, *I, true);
}
OS << '\n';
}
@@ -1263,10 +1299,6 @@ void Driver::generateCompilationDiagnostics(
// Print the version of the compiler.
PrintVersion(C, llvm::errs());
- Diag(clang::diag::note_drv_command_failed_diag_msg)
- << "PLEASE submit a bug report to " BUG_REPORT_URL " and include the "
- "crash backtrace, preprocessed source, and associated run script.";
-
// Suppress driver output and emit preprocessor output to temp file.
Mode = CPPMode;
CCGenDiagnostics = true;
@@ -1409,7 +1441,7 @@ void Driver::generateCompilationDiagnostics(
ScriptOS << "\n# Additional information: " << AdditionalInformation
<< "\n";
if (Report)
- Report->TemporaryFiles.push_back(Script.str());
+ Report->TemporaryFiles.push_back(std::string(Script.str()));
Diag(clang::diag::note_drv_command_failed_diag_msg) << Script;
}
@@ -1444,7 +1476,8 @@ void Driver::setUpResponseFiles(Compilation &C, Command &Cmd) {
  // capacity if the tool does not support response files, there is a chance
// that things will just work without a response file, so we silently just
// skip it.
- if (Cmd.getCreator().getResponseFilesSupport() == Tool::RF_None ||
+ if (Cmd.getResponseFileSupport().ResponseKind ==
+ ResponseFileSupport::RF_None ||
llvm::sys::commandLineFitsWithinSystemLimits(Cmd.getExecutable(),
Cmd.getArguments()))
return;
@@ -1638,7 +1671,7 @@ void Driver::HandleAutocompletions(StringRef PassedFlags) const {
// this code.
for (StringRef S : DiagnosticIDs::getDiagnosticFlags())
if (S.startswith(Cur))
- SuggestedCompletions.push_back(S);
+ SuggestedCompletions.push_back(std::string(S));
}
// Sort the autocomplete candidates so that shells print them out in a
@@ -1808,6 +1841,11 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
return false;
}
+ if (C.getArgs().hasArg(options::OPT_print_targets)) {
+ llvm::TargetRegistry::printRegisteredTargetsForVersion(llvm::outs());
+ return false;
+ }
+
return true;
}
@@ -1845,6 +1883,7 @@ static unsigned PrintActions1(const Compilation &C, Action *A,
bool IsFirst = true;
OA->doOnEachDependence(
[&](Action *A, const ToolChain *TC, const char *BoundArch) {
+ assert(TC && "Unknown host toolchain");
// E.g. for two CUDA device dependences whose bound arch is sm_20 and
// sm_35 this will generate:
// "cuda-device" (nvptx64-nvidia-cuda:sm_20) {#ID}, "cuda-device"
@@ -1852,13 +1891,9 @@ static unsigned PrintActions1(const Compilation &C, Action *A,
if (!IsFirst)
os << ", ";
os << '"';
- if (TC)
- os << A->getOffloadingKindPrefix();
- else
- os << "host";
+ os << A->getOffloadingKindPrefix();
os << " (";
os << TC->getTriple().normalize();
-
if (BoundArch)
os << ":" << BoundArch;
os << ")";
@@ -2312,8 +2347,11 @@ class OffloadingActionBuilder final {
/// Append top level actions generated by the builder.
virtual void appendTopLevelActions(ActionList &AL) {}
- /// Append linker actions generated by the builder.
- virtual void appendLinkActions(ActionList &AL) {}
+ /// Append linker device actions generated by the builder.
+ virtual void appendLinkDeviceActions(ActionList &AL) {}
+
+ /// Append linker host action generated by the builder.
+ virtual Action* appendLinkHostActions(ActionList &AL) { return nullptr; }
/// Append linker actions generated by the builder.
virtual void appendLinkDependences(OffloadAction::DeviceDependences &DA) {}
@@ -2522,13 +2560,13 @@ class OffloadingActionBuilder final {
std::set<CudaArch> GpuArchs;
bool Error = false;
for (Arg *A : Args) {
- if (!(A->getOption().matches(options::OPT_cuda_gpu_arch_EQ) ||
- A->getOption().matches(options::OPT_no_cuda_gpu_arch_EQ)))
+ if (!(A->getOption().matches(options::OPT_offload_arch_EQ) ||
+ A->getOption().matches(options::OPT_no_offload_arch_EQ)))
continue;
A->claim();
const StringRef ArchStr = A->getValue();
- if (A->getOption().matches(options::OPT_no_cuda_gpu_arch_EQ) &&
+ if (A->getOption().matches(options::OPT_no_offload_arch_EQ) &&
ArchStr == "all") {
GpuArchs.clear();
continue;
@@ -2537,9 +2575,9 @@ class OffloadingActionBuilder final {
if (Arch == CudaArch::UNKNOWN) {
C.getDriver().Diag(clang::diag::err_drv_cuda_bad_gpu_arch) << ArchStr;
Error = true;
- } else if (A->getOption().matches(options::OPT_cuda_gpu_arch_EQ))
+ } else if (A->getOption().matches(options::OPT_offload_arch_EQ))
GpuArchs.insert(Arch);
- else if (A->getOption().matches(options::OPT_no_cuda_gpu_arch_EQ))
+ else if (A->getOption().matches(options::OPT_no_offload_arch_EQ))
GpuArchs.erase(Arch);
else
llvm_unreachable("Unexpected option.");
@@ -2696,9 +2734,7 @@ class OffloadingActionBuilder final {
// backend and assemble phases to output LLVM IR. Except for generating
  // non-relocatable device code, where we generate a fat binary for device
// code and pass to host in Backend phase.
- if (CudaDeviceActions.empty() ||
- (CurPhase == phases::Backend && Relocatable) ||
- CurPhase == phases::Assemble)
+ if (CudaDeviceActions.empty())
return ABRT_Success;
assert(((CurPhase == phases::Link && Relocatable) ||
@@ -2715,10 +2751,15 @@ class OffloadingActionBuilder final {
// a fat binary containing all the code objects for different GPU's.
// The fat binary is then an input to the host action.
for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I) {
+ auto BackendAction = C.getDriver().ConstructPhaseAction(
+ C, Args, phases::Backend, CudaDeviceActions[I],
+ AssociatedOffloadKind);
+ auto AssembleAction = C.getDriver().ConstructPhaseAction(
+ C, Args, phases::Assemble, BackendAction, AssociatedOffloadKind);
// Create a link action to link device IR with device library
// and generate ISA.
ActionList AL;
- AL.push_back(CudaDeviceActions[I]);
+ AL.push_back(AssembleAction);
CudaDeviceActions[I] =
C.MakeAction<LinkJobAction>(AL, types::TY_Image);
@@ -2780,17 +2821,45 @@ class OffloadingActionBuilder final {
: ABRT_Success;
}
- void appendLinkDependences(OffloadAction::DeviceDependences &DA) override {
+ void appendLinkDeviceActions(ActionList &AL) override {
+ if (DeviceLinkerInputs.size() == 0)
+ return;
+
+ assert(DeviceLinkerInputs.size() == GpuArchList.size() &&
+ "Linker inputs and GPU arch list sizes do not match.");
+
// Append a new link action for each device.
unsigned I = 0;
for (auto &LI : DeviceLinkerInputs) {
+ // Each entry in DeviceLinkerInputs corresponds to a GPU arch.
auto *DeviceLinkAction =
C.MakeAction<LinkJobAction>(LI, types::TY_Image);
- DA.add(*DeviceLinkAction, *ToolChains[0],
- CudaArchToString(GpuArchList[I]), AssociatedOffloadKind);
+ // Linking all inputs for the current GPU arch.
+ // LI contains all the inputs for the linker.
+ OffloadAction::DeviceDependences DeviceLinkDeps;
+ DeviceLinkDeps.add(*DeviceLinkAction, *ToolChains[0],
+ CudaArchToString(GpuArchList[I]), AssociatedOffloadKind);
+ AL.push_back(C.MakeAction<OffloadAction>(DeviceLinkDeps,
+ DeviceLinkAction->getType()));
++I;
}
+ DeviceLinkerInputs.clear();
+
+ // Create a host object from all the device images by embedding them
+ // in a fat binary.
+ OffloadAction::DeviceDependences DDeps;
+ auto *TopDeviceLinkAction =
+ C.MakeAction<LinkJobAction>(AL, types::TY_Object);
+ DDeps.add(*TopDeviceLinkAction, *ToolChains[0],
+ nullptr, AssociatedOffloadKind);
+
+ // Offload the host object to the host linker.
+ AL.push_back(C.MakeAction<OffloadAction>(DDeps, TopDeviceLinkAction->getType()));
}
+
+ Action* appendLinkHostActions(ActionList &AL) override { return AL.back(); }
+
+ void appendLinkDependences(OffloadAction::DeviceDependences &DA) override {}
};
/// OpenMP action builder. The host bitcode is passed to the device frontend
@@ -2918,7 +2987,7 @@ class OffloadingActionBuilder final {
OpenMPDeviceActions.clear();
}
- void appendLinkActions(ActionList &AL) override {
+ void appendLinkDeviceActions(ActionList &AL) override {
assert(ToolChains.size() == DeviceLinkerInputs.size() &&
"Toolchains and linker inputs sizes do not match.");
@@ -2937,6 +3006,14 @@ class OffloadingActionBuilder final {
DeviceLinkerInputs.clear();
}
+ Action* appendLinkHostActions(ActionList &AL) override {
+ // Create wrapper bitcode from the result of device link actions and compile
+ // it to an object which will be added to the host link command.
+ auto *BC = C.MakeAction<OffloadWrapperJobAction>(AL, types::TY_LLVM_BC);
+ auto *ASM = C.MakeAction<BackendJobAction>(BC, types::TY_PP_Asm);
+ return C.MakeAction<AssembleJobAction>(ASM, types::TY_Object);
+ }
+
void appendLinkDependences(OffloadAction::DeviceDependences &DA) override {}
bool initialize() override {
@@ -3169,17 +3246,20 @@ public:
for (DeviceActionBuilder *SB : SpecializedBuilders) {
if (!SB->isValid())
continue;
- SB->appendLinkActions(DeviceAL);
+ SB->appendLinkDeviceActions(DeviceAL);
}
if (DeviceAL.empty())
return nullptr;
- // Create wrapper bitcode from the result of device link actions and compile
- // it to an object which will be added to the host link command.
- auto *BC = C.MakeAction<OffloadWrapperJobAction>(DeviceAL, types::TY_LLVM_BC);
- auto *ASM = C.MakeAction<BackendJobAction>(BC, types::TY_PP_Asm);
- return C.MakeAction<AssembleJobAction>(ASM, types::TY_Object);
+ // Let builders add host linking actions.
+ Action* HA;
+ for (DeviceActionBuilder *SB : SpecializedBuilders) {
+ if (!SB->isValid())
+ continue;
+ HA = SB->appendLinkHostActions(DeviceAL);
+ }
+ return HA;
}
/// Processes the host linker action. This currently consists of replacing it
@@ -3267,8 +3347,7 @@ void Driver::handleArguments(Compilation &C, DerivedArgList &Args,
types::ID InputType = I.first;
const Arg *InputArg = I.second;
- llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> PL;
- types::getCompilationPhases(InputType, PL);
+ auto PL = types::getCompilationPhases(InputType);
LastPLSize = PL.size();
// If the first step comes after the final phase we are doing as part of
@@ -3313,11 +3392,9 @@ void Driver::handleArguments(Compilation &C, DerivedArgList &Args,
// Add a separate precompile phase for the compile phase.
if (FinalPhase >= phases::Compile) {
const types::ID HeaderType = lookupHeaderTypeForSourceType(InputType);
- llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> PCHPL;
- types::getCompilationPhases(HeaderType, PCHPL);
// Build the pipeline for the pch file.
Action *ClangClPch = C.MakeAction<InputAction>(*InputArg, HeaderType);
- for (phases::ID Phase : PCHPL)
+ for (phases::ID Phase : types::getCompilationPhases(HeaderType))
ClangClPch = ConstructPhaseAction(C, Args, Phase, ClangClPch);
assert(ClangClPch);
Actions.push_back(ClangClPch);
@@ -3400,13 +3477,11 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
types::ID InputType = I.first;
const Arg *InputArg = I.second;
- llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> PL;
- types::getCompilationPhases(*this, Args, InputType, PL);
+ auto PL = types::getCompilationPhases(*this, Args, InputType);
if (PL.empty())
continue;
- llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> FullPL;
- types::getCompilationPhases(InputType, FullPL);
+ auto FullPL = types::getCompilationPhases(InputType);
// Build the pipeline for this file.
Action *Current = C.MakeAction<InputAction>(*InputArg, InputType);
@@ -3489,7 +3564,13 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
if (!LinkerInputs.empty()) {
if (Action *Wrapper = OffloadBuilder.makeHostLinkAction())
LinkerInputs.push_back(Wrapper);
- Action *LA = C.MakeAction<LinkJobAction>(LinkerInputs, types::TY_Image);
+ Action *LA;
+ // Check if this Linker Job should emit a static library.
+ if (ShouldEmitStaticLibrary(Args)) {
+ LA = C.MakeAction<StaticLibJobAction>(LinkerInputs, types::TY_Image);
+ } else {
+ LA = C.MakeAction<LinkJobAction>(LinkerInputs, types::TY_Image);
+ }
LA = OffloadBuilder.processHostLinkAction(LA);
Actions.push_back(LA);
}
@@ -3500,15 +3581,9 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
C.MakeAction<IfsMergeJobAction>(MergerInputs, types::TY_Image));
if (Args.hasArg(options::OPT_emit_interface_stubs)) {
- llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> PhaseList;
- if (Args.hasArg(options::OPT_c)) {
- llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> CompilePhaseList;
- types::getCompilationPhases(types::TY_IFS_CPP, CompilePhaseList);
- llvm::copy_if(CompilePhaseList, std::back_inserter(PhaseList),
- [&](phases::ID Phase) { return Phase <= phases::Compile; });
- } else {
- types::getCompilationPhases(types::TY_IFS_CPP, PhaseList);
- }
+ auto PhaseList = types::getCompilationPhases(
+ types::TY_IFS_CPP,
+ Args.hasArg(options::OPT_c) ? phases::Compile : phases::LastPhase);
ActionList MergerInputs;
@@ -3670,7 +3745,10 @@ Action *Driver::ConstructPhaseAction(
Args.hasArg(options::OPT_S) ? types::TY_LTO_IR : types::TY_LTO_BC;
return C.MakeAction<BackendJobAction>(Input, Output);
}
- if (Args.hasArg(options::OPT_emit_llvm)) {
+ if (Args.hasArg(options::OPT_emit_llvm) ||
+ (TargetDeviceOffloadKind == Action::OFK_HIP &&
+ Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
+ false))) {
types::ID Output =
Args.hasArg(options::OPT_S) ? types::TY_LLVM_IR : types::TY_LLVM_BC;
return C.MakeAction<BackendJobAction>(Input, Output);
@@ -3753,6 +3831,11 @@ void Driver::BuildJobs(Compilation &C) const {
/*TargetDeviceOffloadKind*/ Action::OFK_None);
}
+ // If we have more than one job, then disable integrated-cc1 for now.
+ if (C.getJobs().size() > 1)
+ for (auto &J : C.getJobs())
+ J.InProcess = false;
+
// If the user passed -Qunused-arguments or there were errors, don't warn
// about any unused arguments.
if (Diags.hasErrorOccurred() ||
@@ -4585,8 +4668,19 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
// When using both -save-temps and -emit-llvm, use a ".tmp.bc" suffix for
// the unoptimized bitcode so that it does not get overwritten by the ".bc"
// optimized bitcode output.
- if (!AtTopLevel && C.getArgs().hasArg(options::OPT_emit_llvm) &&
- JA.getType() == types::TY_LLVM_BC)
+ auto IsHIPRDCInCompilePhase = [](const JobAction &JA,
+ const llvm::opt::DerivedArgList &Args) {
+ // The relocatable compilation in HIP implies -emit-llvm. Similarly, use a
+ // ".tmp.bc" suffix for the unoptimized bitcode (generated in the compile
+ // phase.)
+ return isa<CompileJobAction>(JA) &&
+ JA.getOffloadingDeviceKind() == Action::OFK_HIP &&
+ Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
+ false);
+ };
+ if (!AtTopLevel && JA.getType() == types::TY_LLVM_BC &&
+ (C.getArgs().hasArg(options::OPT_emit_llvm) ||
+ IsHIPRDCInCompilePhase(JA, C.getArgs())))
Suffixed += ".tmp";
Suffixed += '.';
Suffixed += Suffix;
@@ -4647,7 +4741,7 @@ std::string Driver::GetFilePath(StringRef Name, const ToolChain &TC) const {
SmallString<128> P(Dir[0] == '=' ? SysRoot + Dir.substr(1) : Dir);
llvm::sys::path::append(P, Name);
if (llvm::sys::fs::exists(Twine(P)))
- return P.str().str();
+ return std::string(P);
}
return None;
};
@@ -4658,17 +4752,17 @@ std::string Driver::GetFilePath(StringRef Name, const ToolChain &TC) const {
SmallString<128> R(ResourceDir);
llvm::sys::path::append(R, Name);
if (llvm::sys::fs::exists(Twine(R)))
- return R.str();
+ return std::string(R.str());
SmallString<128> P(TC.getCompilerRTPath());
llvm::sys::path::append(P, Name);
if (llvm::sys::fs::exists(Twine(P)))
- return P.str();
+ return std::string(P.str());
SmallString<128> D(Dir);
llvm::sys::path::append(D, "..", Name);
if (llvm::sys::fs::exists(Twine(D)))
- return D.str();
+ return std::string(D.str());
if (auto P = SearchPaths(TC.getLibraryPaths()))
return *P;
@@ -4676,7 +4770,7 @@ std::string Driver::GetFilePath(StringRef Name, const ToolChain &TC) const {
if (auto P = SearchPaths(TC.getFilePaths()))
return *P;
- return Name;
+ return std::string(Name);
}
void Driver::generatePrefixedToolNames(
@@ -4693,13 +4787,11 @@ void Driver::generatePrefixedToolNames(
}
static bool ScanDirForExecutable(SmallString<128> &Dir,
- ArrayRef<std::string> Names) {
- for (const auto &Name : Names) {
- llvm::sys::path::append(Dir, Name);
- if (llvm::sys::fs::can_execute(Twine(Dir)))
- return true;
- llvm::sys::path::remove_filename(Dir);
- }
+ const std::string &Name) {
+ llvm::sys::path::append(Dir, Name);
+ if (llvm::sys::fs::can_execute(Twine(Dir)))
+ return true;
+ llvm::sys::path::remove_filename(Dir);
return false;
}
@@ -4712,29 +4804,38 @@ std::string Driver::GetProgramPath(StringRef Name, const ToolChain &TC) const {
for (const auto &PrefixDir : PrefixDirs) {
if (llvm::sys::fs::is_directory(PrefixDir)) {
SmallString<128> P(PrefixDir);
- if (ScanDirForExecutable(P, TargetSpecificExecutables))
- return P.str();
+ for (const auto &TargetSpecificExecutable : TargetSpecificExecutables)
+ if (ScanDirForExecutable(P, TargetSpecificExecutable))
+ return std::string(P.str());
} else {
SmallString<128> P((PrefixDir + Name).str());
if (llvm::sys::fs::can_execute(Twine(P)))
- return P.str();
+ return std::string(P.str());
}
}
const ToolChain::path_list &List = TC.getProgramPaths();
- for (const auto &Path : List) {
- SmallString<128> P(Path);
- if (ScanDirForExecutable(P, TargetSpecificExecutables))
- return P.str();
- }
+ for (const auto &TargetSpecificExecutable : TargetSpecificExecutables) {
+ // For each possible name of the tool look for it in
+ // program paths first, then the path.
+ // Higher priority names will be first, meaning that
+ // a higher priority name in the path will be found
+ // instead of a lower priority name in the program path.
+ // E.g. <triple>-gcc on the path will be found instead
+ // of gcc in the program path
+ for (const auto &Path : List) {
+ SmallString<128> P(Path);
+ if (ScanDirForExecutable(P, TargetSpecificExecutable))
+ return std::string(P.str());
+ }
- // If all else failed, search the path.
- for (const auto &TargetSpecificExecutable : TargetSpecificExecutables)
+ // Fall back to the path
if (llvm::ErrorOr<std::string> P =
llvm::sys::findProgramByName(TargetSpecificExecutable))
return *P;
+ }
- return Name;
+ return std::string(Name);
}
std::string Driver::GetTemporaryPath(StringRef Prefix, StringRef Suffix) const {
@@ -4745,7 +4846,7 @@ std::string Driver::GetTemporaryPath(StringRef Prefix, StringRef Suffix) const {
return "";
}
- return Path.str();
+ return std::string(Path.str());
}
std::string Driver::GetTemporaryDirectory(StringRef Prefix) const {
@@ -4756,7 +4857,7 @@ std::string Driver::GetTemporaryDirectory(StringRef Prefix) const {
return "";
}
- return Path.str();
+ return std::string(Path.str());
}
std::string Driver::GetClPchPath(Compilation &C, StringRef BaseName) const {
@@ -4778,7 +4879,7 @@ std::string Driver::GetClPchPath(Compilation &C, StringRef BaseName) const {
Output = BaseName;
llvm::sys::path::replace_extension(Output, ".pch");
}
- return Output.str();
+ return std::string(Output.str());
}
const ToolChain &Driver::getToolChain(const ArgList &Args,
@@ -4835,6 +4936,9 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
Target.getArch() == llvm::Triple::ppc64le)
TC = std::make_unique<toolchains::PPCLinuxToolChain>(*this, Target,
Args);
+ else if (Target.getArch() == llvm::Triple::ve)
+ TC = std::make_unique<toolchains::VEToolChain>(*this, Target, Args);
+
else
TC = std::make_unique<toolchains::Linux>(*this, Target, Args);
break;
@@ -4848,6 +4952,8 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
TC = std::make_unique<toolchains::Solaris>(*this, Target, Args);
break;
case llvm::Triple::AMDHSA:
+ TC = std::make_unique<toolchains::ROCMToolChain>(*this, Target, Args);
+ break;
case llvm::Triple::AMDPAL:
case llvm::Triple::Mesa3D:
TC = std::make_unique<toolchains::AMDGPUToolChain>(*this, Target, Args);
@@ -4925,6 +5031,9 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
case llvm::Triple::riscv64:
TC = std::make_unique<toolchains::RISCVToolChain>(*this, Target, Args);
break;
+ case llvm::Triple::ve:
+ TC = std::make_unique<toolchains::VEToolChain>(*this, Target, Args);
+ break;
default:
if (Target.getVendor() == llvm::Triple::Myriad)
TC = std::make_unique<toolchains::MyriadToolChain>(*this, Target,
@@ -4976,6 +5085,13 @@ bool Driver::ShouldUseFlangCompiler(const JobAction &JA) const {
return true;
}
+bool Driver::ShouldEmitStaticLibrary(const ArgList &Args) const {
+ // Only emit static library if the flag is set explicitly.
+ if (Args.hasArg(options::OPT_emit_static_lib))
+ return true;
+ return false;
+}
+
/// GetReleaseVersion - Parse (([0-9]+)(.([0-9]+)(.([0-9]+)?))?)? and return the
/// grouped values as integers. Numbers which are not provided are set to 0.
///
diff --git a/clang/lib/Driver/Job.cpp b/clang/lib/Driver/Job.cpp
index d57c3a1cdbb8..4808a9f4628d 100644
--- a/clang/lib/Driver/Job.cpp
+++ b/clang/lib/Driver/Job.cpp
@@ -36,11 +36,11 @@ using namespace clang;
using namespace driver;
Command::Command(const Action &Source, const Tool &Creator,
- const char *Executable,
+ ResponseFileSupport ResponseSupport, const char *Executable,
const llvm::opt::ArgStringList &Arguments,
ArrayRef<InputInfo> Inputs)
- : Source(Source), Creator(Creator), Executable(Executable),
- Arguments(Arguments) {
+ : Source(Source), Creator(Creator), ResponseSupport(ResponseSupport),
+ Executable(Executable), Arguments(Arguments) {
for (const auto &II : Inputs)
if (II.isFilename())
InputFilenames.push_back(II.getFilename());
@@ -100,27 +100,9 @@ static bool skipArgs(const char *Flag, bool HaveCrashVFS, int &SkipNum,
return false;
}
-void Command::printArg(raw_ostream &OS, StringRef Arg, bool Quote) {
- const bool Escape = Arg.find_first_of(" \"\\$") != StringRef::npos;
-
- if (!Quote && !Escape) {
- OS << Arg;
- return;
- }
-
- // Quote and escape. This isn't really complete, but good enough.
- OS << '"';
- for (const auto c : Arg) {
- if (c == '"' || c == '\\' || c == '$')
- OS << '\\';
- OS << c;
- }
- OS << '"';
-}
-
void Command::writeResponseFile(raw_ostream &OS) const {
// In a file list, we only write the set of inputs to the response file
- if (Creator.getResponseFilesSupport() == Tool::RF_FileList) {
+ if (ResponseSupport.ResponseKind == ResponseFileSupport::RF_FileList) {
for (const auto *Arg : InputFileList) {
OS << Arg << '\n';
}
@@ -149,7 +131,7 @@ void Command::buildArgvForResponseFile(
// When not a file list, all arguments are sent to the response file.
// This leaves us to set the argv to a single parameter, requesting the tool
// to read the response file.
- if (Creator.getResponseFilesSupport() != Tool::RF_FileList) {
+ if (ResponseSupport.ResponseKind != ResponseFileSupport::RF_FileList) {
Out.push_back(Executable);
Out.push_back(ResponseFileFlag.c_str());
return;
@@ -167,7 +149,7 @@ void Command::buildArgvForResponseFile(
Out.push_back(Arg);
} else if (FirstInput) {
FirstInput = false;
- Out.push_back(Creator.getResponseFileFlag());
+ Out.push_back(ResponseSupport.ResponseFlag);
Out.push_back(ResponseFile);
}
}
@@ -217,7 +199,7 @@ void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
CrashReportInfo *CrashInfo) const {
// Always quote the exe.
OS << ' ';
- printArg(OS, Executable, /*Quote=*/true);
+ llvm::sys::printArg(OS, Executable, /*Quote=*/true);
ArrayRef<const char *> Args = Arguments;
SmallVector<const char *, 128> ArgsRespFile;
@@ -245,7 +227,7 @@ void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
if (!NewIncFlags.empty()) {
for (auto &F : NewIncFlags) {
OS << ' ';
- printArg(OS, F.c_str(), Quote);
+ llvm::sys::printArg(OS, F.c_str(), Quote);
}
i += NumArgs - 1;
continue;
@@ -259,20 +241,20 @@ void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
// Replace the input file name with the crashinfo's file name.
OS << ' ';
StringRef ShortName = llvm::sys::path::filename(CrashInfo->Filename);
- printArg(OS, ShortName.str(), Quote);
+ llvm::sys::printArg(OS, ShortName.str(), Quote);
continue;
}
}
OS << ' ';
- printArg(OS, Arg, Quote);
+ llvm::sys::printArg(OS, Arg, Quote);
}
if (CrashInfo && HaveCrashVFS) {
OS << ' ';
- printArg(OS, "-ivfsoverlay", Quote);
+ llvm::sys::printArg(OS, "-ivfsoverlay", Quote);
OS << ' ';
- printArg(OS, CrashInfo->VFSPath.str(), Quote);
+ llvm::sys::printArg(OS, CrashInfo->VFSPath.str(), Quote);
// The leftover modules from the crash are stored in
// <name>.cache/vfs/modules
@@ -287,7 +269,7 @@ void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
ModCachePath.append(RelModCacheDir.c_str());
OS << ' ';
- printArg(OS, ModCachePath, Quote);
+ llvm::sys::printArg(OS, ModCachePath, Quote);
}
if (ResponseFile != nullptr) {
@@ -295,7 +277,7 @@ void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
writeResponseFile(OS);
// Avoiding duplicated newline terminator, since FileLists are
// newline-separated.
- if (Creator.getResponseFilesSupport() != Tool::RF_FileList)
+ if (ResponseSupport.ResponseKind != ResponseFileSupport::RF_FileList)
OS << "\n";
OS << " (end of response file)";
}
@@ -305,7 +287,7 @@ void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
void Command::setResponseFile(const char *FileName) {
ResponseFile = FileName;
- ResponseFileFlag = Creator.getResponseFileFlag();
+ ResponseFileFlag = ResponseSupport.ResponseFlag;
ResponseFileFlag += FileName;
}
@@ -345,7 +327,7 @@ int Command::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
// Save the response file in the appropriate encoding
if (std::error_code EC = writeFileWithEncoding(
- ResponseFile, RespContents, Creator.getResponseFileEncoding())) {
+ ResponseFile, RespContents, ResponseSupport.ResponseEncoding)) {
if (ErrMsg)
*ErrMsg = EC.message();
if (ExecutionFailed)
@@ -371,14 +353,30 @@ int Command::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
/*memoryLimit*/ 0, ErrMsg, ExecutionFailed);
}
+CC1Command::CC1Command(const Action &Source, const Tool &Creator,
+ ResponseFileSupport ResponseSupport,
+ const char *Executable,
+ const llvm::opt::ArgStringList &Arguments,
+ ArrayRef<InputInfo> Inputs)
+ : Command(Source, Creator, ResponseSupport, Executable, Arguments, Inputs) {
+ InProcess = true;
+}
+
void CC1Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
CrashReportInfo *CrashInfo) const {
- OS << " (in-process)";
+ if (InProcess)
+ OS << " (in-process)\n";
Command::Print(OS, Terminator, Quote, CrashInfo);
}
-int CC1Command::Execute(ArrayRef<llvm::Optional<StringRef>> /*Redirects*/,
+int CC1Command::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
std::string *ErrMsg, bool *ExecutionFailed) const {
+ // FIXME: Currently, if there're more than one job, we disable
+ // -fintegrate-cc1. If we're no longer a integrated-cc1 job, fallback to
+ // out-of-process execution. See discussion in https://reviews.llvm.org/D74447
+ if (!InProcess)
+ return Command::Execute(Redirects, ErrMsg, ExecutionFailed);
+
PrintFileNames();
SmallVector<const char *, 128> Argv;
@@ -413,11 +411,13 @@ void CC1Command::setEnvironment(llvm::ArrayRef<const char *> NewEnvironment) {
}
FallbackCommand::FallbackCommand(const Action &Source_, const Tool &Creator_,
+ ResponseFileSupport ResponseSupport,
const char *Executable_,
const llvm::opt::ArgStringList &Arguments_,
ArrayRef<InputInfo> Inputs,
std::unique_ptr<Command> Fallback_)
- : Command(Source_, Creator_, Executable_, Arguments_, Inputs),
+ : Command(Source_, Creator_, ResponseSupport, Executable_, Arguments_,
+ Inputs),
Fallback(std::move(Fallback_)) {}
void FallbackCommand::Print(raw_ostream &OS, const char *Terminator,
@@ -454,9 +454,11 @@ int FallbackCommand::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
}
ForceSuccessCommand::ForceSuccessCommand(
- const Action &Source_, const Tool &Creator_, const char *Executable_,
+ const Action &Source_, const Tool &Creator_,
+ ResponseFileSupport ResponseSupport, const char *Executable_,
const llvm::opt::ArgStringList &Arguments_, ArrayRef<InputInfo> Inputs)
- : Command(Source_, Creator_, Executable_, Arguments_, Inputs) {}
+ : Command(Source_, Creator_, ResponseSupport, Executable_, Arguments_,
+ Inputs) {}
void ForceSuccessCommand::Print(raw_ostream &OS, const char *Terminator,
bool Quote, CrashReportInfo *CrashInfo) const {
diff --git a/clang/lib/Driver/Multilib.cpp b/clang/lib/Driver/Multilib.cpp
index 303047e05f78..5dd55553bcb5 100644
--- a/clang/lib/Driver/Multilib.cpp
+++ b/clang/lib/Driver/Multilib.cpp
@@ -46,7 +46,7 @@ static void normalizePathSegment(std::string &Segment) {
if (seg.front() != '/') {
Segment = "/" + seg.str();
} else {
- Segment = seg;
+ Segment = std::string(seg);
}
}
@@ -60,19 +60,19 @@ Multilib::Multilib(StringRef GCCSuffix, StringRef OSSuffix,
}
Multilib &Multilib::gccSuffix(StringRef S) {
- GCCSuffix = S;
+ GCCSuffix = std::string(S);
normalizePathSegment(GCCSuffix);
return *this;
}
Multilib &Multilib::osSuffix(StringRef S) {
- OSSuffix = S;
+ OSSuffix = std::string(S);
normalizePathSegment(OSSuffix);
return *this;
}
Multilib &Multilib::includeSuffix(StringRef S) {
- IncludeSuffix = S;
+ IncludeSuffix = std::string(S);
normalizePathSegment(IncludeSuffix);
return *this;
}
diff --git a/clang/lib/Driver/SanitizerArgs.cpp b/clang/lib/Driver/SanitizerArgs.cpp
index ac9a294ee3fa..bcc9ffc7ff8f 100644
--- a/clang/lib/Driver/SanitizerArgs.cpp
+++ b/clang/lib/Driver/SanitizerArgs.cpp
@@ -14,10 +14,10 @@
#include "clang/Driver/ToolChain.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SpecialCaseList.h"
#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <memory>
using namespace clang;
@@ -27,7 +27,8 @@ using namespace llvm::opt;
static const SanitizerMask NeedsUbsanRt =
SanitizerKind::Undefined | SanitizerKind::Integer |
SanitizerKind::ImplicitConversion | SanitizerKind::Nullability |
- SanitizerKind::CFI | SanitizerKind::FloatDivideByZero;
+ SanitizerKind::CFI | SanitizerKind::FloatDivideByZero |
+ SanitizerKind::ObjCCast;
static const SanitizerMask NeedsUbsanCxxRt =
SanitizerKind::Vptr | SanitizerKind::CFI;
static const SanitizerMask NotAllowedWithTrap = SanitizerKind::Vptr;
@@ -43,51 +44,53 @@ static const SanitizerMask SupportsCoverage =
SanitizerKind::KernelAddress | SanitizerKind::KernelHWAddress |
SanitizerKind::MemTag | SanitizerKind::Memory |
SanitizerKind::KernelMemory | SanitizerKind::Leak |
- SanitizerKind::Undefined | SanitizerKind::Integer |
+ SanitizerKind::Undefined | SanitizerKind::Integer | SanitizerKind::Bounds |
SanitizerKind::ImplicitConversion | SanitizerKind::Nullability |
SanitizerKind::DataFlow | SanitizerKind::Fuzzer |
SanitizerKind::FuzzerNoLink | SanitizerKind::FloatDivideByZero |
- SanitizerKind::SafeStack | SanitizerKind::ShadowCallStack;
+ SanitizerKind::SafeStack | SanitizerKind::ShadowCallStack |
+ SanitizerKind::Thread | SanitizerKind::ObjCCast;
static const SanitizerMask RecoverableByDefault =
SanitizerKind::Undefined | SanitizerKind::Integer |
SanitizerKind::ImplicitConversion | SanitizerKind::Nullability |
- SanitizerKind::FloatDivideByZero;
+ SanitizerKind::FloatDivideByZero | SanitizerKind::ObjCCast;
static const SanitizerMask Unrecoverable =
SanitizerKind::Unreachable | SanitizerKind::Return;
static const SanitizerMask AlwaysRecoverable =
SanitizerKind::KernelAddress | SanitizerKind::KernelHWAddress;
-static const SanitizerMask LegacyFsanitizeRecoverMask =
- SanitizerKind::Undefined | SanitizerKind::Integer;
static const SanitizerMask NeedsLTO = SanitizerKind::CFI;
static const SanitizerMask TrappingSupported =
(SanitizerKind::Undefined & ~SanitizerKind::Vptr) |
SanitizerKind::UnsignedIntegerOverflow | SanitizerKind::ImplicitConversion |
SanitizerKind::Nullability | SanitizerKind::LocalBounds |
- SanitizerKind::CFI | SanitizerKind::FloatDivideByZero;
+ SanitizerKind::CFI | SanitizerKind::FloatDivideByZero |
+ SanitizerKind::ObjCCast;
static const SanitizerMask TrappingDefault = SanitizerKind::CFI;
static const SanitizerMask CFIClasses =
SanitizerKind::CFIVCall | SanitizerKind::CFINVCall |
SanitizerKind::CFIMFCall | SanitizerKind::CFIDerivedCast |
SanitizerKind::CFIUnrelatedCast;
static const SanitizerMask CompatibleWithMinimalRuntime =
- TrappingSupported | SanitizerKind::Scudo | SanitizerKind::ShadowCallStack;
+ TrappingSupported | SanitizerKind::Scudo | SanitizerKind::ShadowCallStack |
+ SanitizerKind::MemTag;
enum CoverageFeature {
CoverageFunc = 1 << 0,
CoverageBB = 1 << 1,
CoverageEdge = 1 << 2,
CoverageIndirCall = 1 << 3,
- CoverageTraceBB = 1 << 4, // Deprecated.
+ CoverageTraceBB = 1 << 4, // Deprecated.
CoverageTraceCmp = 1 << 5,
CoverageTraceDiv = 1 << 6,
CoverageTraceGep = 1 << 7,
- Coverage8bitCounters = 1 << 8, // Deprecated.
+ Coverage8bitCounters = 1 << 8, // Deprecated.
CoverageTracePC = 1 << 9,
CoverageTracePCGuard = 1 << 10,
CoverageNoPrune = 1 << 11,
CoverageInline8bitCounters = 1 << 12,
CoveragePCTable = 1 << 13,
CoverageStackDepth = 1 << 14,
+ CoverageInlineBoolFlag = 1 << 15,
};
/// Parse a -fsanitize= or -fno-sanitize= argument's values, diagnosing any
@@ -118,6 +121,19 @@ static std::string describeSanitizeArg(const llvm::opt::Arg *A,
/// Sanitizers set.
static std::string toString(const clang::SanitizerSet &Sanitizers);
+static void validateSpecialCaseListFormat(const Driver &D,
+ std::vector<std::string> &SCLFiles,
+ unsigned MalformedSCLErrorDiagID) {
+ if (SCLFiles.empty())
+ return;
+
+ std::string BLError;
+ std::unique_ptr<llvm::SpecialCaseList> SCL(
+ llvm::SpecialCaseList::create(SCLFiles, D.getVFS(), BLError));
+ if (!SCL.get())
+ D.Diag(MalformedSCLErrorDiagID) << BLError;
+}
+
static void addDefaultBlacklists(const Driver &D, SanitizerMask Kinds,
std::vector<std::string> &BlacklistFiles) {
struct Blacklist {
@@ -142,12 +158,41 @@ static void addDefaultBlacklists(const Driver &D, SanitizerMask Kinds,
clang::SmallString<64> Path(D.ResourceDir);
llvm::sys::path::append(Path, "share", BL.File);
if (D.getVFS().exists(Path))
- BlacklistFiles.push_back(Path.str());
+ BlacklistFiles.push_back(std::string(Path.str()));
else if (BL.Mask == SanitizerKind::CFI)
// If cfi_blacklist.txt cannot be found in the resource dir, driver
// should fail.
D.Diag(clang::diag::err_drv_no_such_file) << Path;
}
+ validateSpecialCaseListFormat(
+ D, BlacklistFiles, clang::diag::err_drv_malformed_sanitizer_blacklist);
+}
+
+/// Parse -f(no-)?sanitize-(coverage-)?(white|black)list argument's values,
+/// diagnosing any invalid file paths and validating special case list format.
+static void parseSpecialCaseListArg(const Driver &D,
+ const llvm::opt::ArgList &Args,
+ std::vector<std::string> &SCLFiles,
+ llvm::opt::OptSpecifier SCLOptionID,
+ llvm::opt::OptSpecifier NoSCLOptionID,
+ unsigned MalformedSCLErrorDiagID) {
+ for (const auto *Arg : Args) {
+ // Match -fsanitize-(coverage-)?(white|black)list.
+ if (Arg->getOption().matches(SCLOptionID)) {
+ Arg->claim();
+ std::string SCLPath = Arg->getValue();
+ if (D.getVFS().exists(SCLPath)) {
+ SCLFiles.push_back(SCLPath);
+ } else {
+ D.Diag(clang::diag::err_drv_no_such_file) << SCLPath;
+ }
+ // Match -fno-sanitize-blacklist.
+ } else if (Arg->getOption().matches(NoSCLOptionID)) {
+ Arg->claim();
+ SCLFiles.clear();
+ }
+ }
+ validateSpecialCaseListFormat(D, SCLFiles, MalformedSCLErrorDiagID);
}
/// Sets group bits for every group that has at least one representative already
@@ -186,16 +231,6 @@ static SanitizerMask parseSanitizeTrapArgs(const Driver &D,
} else if (Arg->getOption().matches(options::OPT_fno_sanitize_trap_EQ)) {
Arg->claim();
TrapRemove |= expandSanitizerGroups(parseArgValues(D, Arg, true));
- } else if (Arg->getOption().matches(
- options::OPT_fsanitize_undefined_trap_on_error)) {
- Arg->claim();
- TrappingKinds |=
- expandSanitizerGroups(SanitizerKind::UndefinedGroup & ~TrapRemove) &
- ~TrapRemove;
- } else if (Arg->getOption().matches(
- options::OPT_fno_sanitize_undefined_trap_on_error)) {
- Arg->claim();
- TrapRemove |= expandSanitizerGroups(SanitizerKind::UndefinedGroup);
}
}
@@ -412,9 +447,11 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
SanitizerKind::Leak | SanitizerKind::Thread |
SanitizerKind::Memory | SanitizerKind::KernelAddress),
std::make_pair(SanitizerKind::SafeStack,
- SanitizerKind::Address | SanitizerKind::HWAddress |
- SanitizerKind::Leak | SanitizerKind::Thread |
- SanitizerKind::Memory | SanitizerKind::KernelAddress),
+ (TC.getTriple().isOSFuchsia() ? SanitizerMask()
+ : SanitizerKind::Leak) |
+ SanitizerKind::Address | SanitizerKind::HWAddress |
+ SanitizerKind::Thread | SanitizerKind::Memory |
+ SanitizerKind::KernelAddress),
std::make_pair(SanitizerKind::KernelHWAddress,
SanitizerKind::Address | SanitizerKind::HWAddress |
SanitizerKind::Leak | SanitizerKind::Thread |
@@ -454,8 +491,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
<< lastArgumentForMask(D, Args, Kinds & NeedsLTO) << "-flto";
}
- if ((Kinds & SanitizerKind::ShadowCallStack) &&
- TC.getTriple().getArch() == llvm::Triple::aarch64 &&
+ if ((Kinds & SanitizerKind::ShadowCallStack) && TC.getTriple().isAArch64() &&
!llvm::AArch64::isX18ReservedByDefault(TC.getTriple()) &&
!Args.hasArg(options::OPT_ffixed_x18)) {
D.Diag(diag::err_drv_argument_only_allowed_with)
@@ -504,18 +540,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
SanitizerMask DiagnosedUnrecoverableKinds;
SanitizerMask DiagnosedAlwaysRecoverableKinds;
for (const auto *Arg : Args) {
- const char *DeprecatedReplacement = nullptr;
- if (Arg->getOption().matches(options::OPT_fsanitize_recover)) {
- DeprecatedReplacement =
- "-fsanitize-recover=undefined,integer' or '-fsanitize-recover=all";
- RecoverableKinds |= expandSanitizerGroups(LegacyFsanitizeRecoverMask);
- Arg->claim();
- } else if (Arg->getOption().matches(options::OPT_fno_sanitize_recover)) {
- DeprecatedReplacement = "-fno-sanitize-recover=undefined,integer' or "
- "'-fno-sanitize-recover=all";
- RecoverableKinds &= ~expandSanitizerGroups(LegacyFsanitizeRecoverMask);
- Arg->claim();
- } else if (Arg->getOption().matches(options::OPT_fsanitize_recover_EQ)) {
+ if (Arg->getOption().matches(options::OPT_fsanitize_recover_EQ)) {
SanitizerMask Add = parseArgValues(D, Arg, true);
// Report error if user explicitly tries to recover from unrecoverable
// sanitizer.
@@ -544,10 +569,6 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
RecoverableKinds &= ~expandSanitizerGroups(Remove);
Arg->claim();
}
- if (DeprecatedReplacement) {
- D.Diag(diag::warn_drv_deprecated_arg) << Arg->getAsString(Args)
- << DeprecatedReplacement;
- }
}
RecoverableKinds &= Kinds;
RecoverableKinds &= ~Unrecoverable;
@@ -556,39 +577,17 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
RecoverableKinds &= ~TrappingKinds;
// Setup blacklist files.
- // Add default blacklist from resource directory.
- addDefaultBlacklists(D, Kinds, SystemBlacklistFiles);
- // Parse -f(no-)sanitize-blacklist options.
- for (const auto *Arg : Args) {
- if (Arg->getOption().matches(options::OPT_fsanitize_blacklist)) {
- Arg->claim();
- std::string BLPath = Arg->getValue();
- if (D.getVFS().exists(BLPath)) {
- UserBlacklistFiles.push_back(BLPath);
- } else {
- D.Diag(clang::diag::err_drv_no_such_file) << BLPath;
- }
- } else if (Arg->getOption().matches(options::OPT_fno_sanitize_blacklist)) {
- Arg->claim();
- UserBlacklistFiles.clear();
- SystemBlacklistFiles.clear();
- }
- }
- // Validate blacklists format.
- {
- std::string BLError;
- std::unique_ptr<llvm::SpecialCaseList> SCL(
- llvm::SpecialCaseList::create(UserBlacklistFiles, D.getVFS(), BLError));
- if (!SCL.get())
- D.Diag(clang::diag::err_drv_malformed_sanitizer_blacklist) << BLError;
- }
- {
- std::string BLError;
- std::unique_ptr<llvm::SpecialCaseList> SCL(llvm::SpecialCaseList::create(
- SystemBlacklistFiles, D.getVFS(), BLError));
- if (!SCL.get())
- D.Diag(clang::diag::err_drv_malformed_sanitizer_blacklist) << BLError;
- }
+ // Add default blacklist from resource directory for activated sanitizers, and
+ // validate special case lists format.
+ if (!Args.hasArgNoClaim(options::OPT_fno_sanitize_blacklist))
+ addDefaultBlacklists(D, Kinds, SystemBlacklistFiles);
+
+ // Parse -f(no-)?sanitize-blacklist options.
+ // This also validates special case lists format.
+ parseSpecialCaseListArg(D, Args, UserBlacklistFiles,
+ options::OPT_fsanitize_blacklist,
+ options::OPT_fno_sanitize_blacklist,
+ clang::diag::err_drv_malformed_sanitizer_blacklist);
// Parse -f[no-]sanitize-memory-track-origins[=level] options.
if (AllAddedKinds & SanitizerKind::Memory) {
@@ -721,8 +720,9 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
<< "-fsanitize-coverage=trace-pc-guard";
int InsertionPointTypes = CoverageFunc | CoverageBB | CoverageEdge;
- int InstrumentationTypes =
- CoverageTracePC | CoverageTracePCGuard | CoverageInline8bitCounters;
+ int InstrumentationTypes = CoverageTracePC | CoverageTracePCGuard |
+ CoverageInline8bitCounters |
+ CoverageInlineBoolFlag;
if ((CoverageFeatures & InsertionPointTypes) &&
!(CoverageFeatures & InstrumentationTypes)) {
D.Diag(clang::diag::warn_drv_deprecated_arg)
@@ -733,13 +733,29 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
// trace-pc w/o func/bb/edge implies edge.
if (!(CoverageFeatures & InsertionPointTypes)) {
if (CoverageFeatures &
- (CoverageTracePC | CoverageTracePCGuard | CoverageInline8bitCounters))
+ (CoverageTracePC | CoverageTracePCGuard | CoverageInline8bitCounters |
+ CoverageInlineBoolFlag))
CoverageFeatures |= CoverageEdge;
if (CoverageFeatures & CoverageStackDepth)
CoverageFeatures |= CoverageFunc;
}
+ // Parse -fsanitize-coverage-(black|white)list options if coverage enabled.
+ // This also validates special case lists format.
+ // Here, OptSpecifier() acts as a never-matching command-line argument.
+ // So, there is no way to clear coverage lists but you can append to them.
+ if (CoverageFeatures) {
+ parseSpecialCaseListArg(
+ D, Args, CoverageAllowlistFiles,
+ options::OPT_fsanitize_coverage_allowlist, OptSpecifier(),
+ clang::diag::err_drv_malformed_sanitizer_coverage_whitelist);
+ parseSpecialCaseListArg(
+ D, Args, CoverageBlocklistFiles,
+ options::OPT_fsanitize_coverage_blocklist, OptSpecifier(),
+ clang::diag::err_drv_malformed_sanitizer_coverage_blacklist);
+ }
+
SharedRuntime =
Args.hasFlag(options::OPT_shared_libsan, options::OPT_static_libsan,
TC.getTriple().isAndroid() || TC.getTriple().isOSFuchsia() ||
@@ -831,8 +847,9 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
}
if (AllAddedKinds & SanitizerKind::SafeStack) {
- // SafeStack runtime is built into the system on Fuchsia.
- SafeStackRuntime = !TC.getTriple().isOSFuchsia();
+ // SafeStack runtime is built into the system on Android and Fuchsia.
+ SafeStackRuntime =
+ !TC.getTriple().isAndroid() && !TC.getTriple().isOSFuchsia();
}
LinkRuntimes =
@@ -865,6 +882,17 @@ static std::string toString(const clang::SanitizerSet &Sanitizers) {
return Res;
}
+static void addSpecialCaseListOpt(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs,
+ const char *SCLOptFlag,
+ const std::vector<std::string> &SCLFiles) {
+ for (const auto &SCLPath : SCLFiles) {
+ SmallString<64> SCLOpt(SCLOptFlag);
+ SCLOpt += SCLPath;
+ CmdArgs.push_back(Args.MakeArgString(SCLOpt));
+ }
+}
+
static void addIncludeLinkerOption(const ToolChain &TC,
const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs,
@@ -904,45 +932,55 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
// Do it even if Sanitizers.empty() since some forms of coverage don't require
// sanitizers.
std::pair<int, const char *> CoverageFlags[] = {
- std::make_pair(CoverageFunc, "-fsanitize-coverage-type=1"),
- std::make_pair(CoverageBB, "-fsanitize-coverage-type=2"),
- std::make_pair(CoverageEdge, "-fsanitize-coverage-type=3"),
- std::make_pair(CoverageIndirCall, "-fsanitize-coverage-indirect-calls"),
- std::make_pair(CoverageTraceBB, "-fsanitize-coverage-trace-bb"),
- std::make_pair(CoverageTraceCmp, "-fsanitize-coverage-trace-cmp"),
- std::make_pair(CoverageTraceDiv, "-fsanitize-coverage-trace-div"),
- std::make_pair(CoverageTraceGep, "-fsanitize-coverage-trace-gep"),
- std::make_pair(Coverage8bitCounters, "-fsanitize-coverage-8bit-counters"),
- std::make_pair(CoverageTracePC, "-fsanitize-coverage-trace-pc"),
- std::make_pair(CoverageTracePCGuard, "-fsanitize-coverage-trace-pc-guard"),
- std::make_pair(CoverageInline8bitCounters, "-fsanitize-coverage-inline-8bit-counters"),
- std::make_pair(CoveragePCTable, "-fsanitize-coverage-pc-table"),
- std::make_pair(CoverageNoPrune, "-fsanitize-coverage-no-prune"),
- std::make_pair(CoverageStackDepth, "-fsanitize-coverage-stack-depth")};
+ std::make_pair(CoverageFunc, "-fsanitize-coverage-type=1"),
+ std::make_pair(CoverageBB, "-fsanitize-coverage-type=2"),
+ std::make_pair(CoverageEdge, "-fsanitize-coverage-type=3"),
+ std::make_pair(CoverageIndirCall, "-fsanitize-coverage-indirect-calls"),
+ std::make_pair(CoverageTraceBB, "-fsanitize-coverage-trace-bb"),
+ std::make_pair(CoverageTraceCmp, "-fsanitize-coverage-trace-cmp"),
+ std::make_pair(CoverageTraceDiv, "-fsanitize-coverage-trace-div"),
+ std::make_pair(CoverageTraceGep, "-fsanitize-coverage-trace-gep"),
+ std::make_pair(Coverage8bitCounters, "-fsanitize-coverage-8bit-counters"),
+ std::make_pair(CoverageTracePC, "-fsanitize-coverage-trace-pc"),
+ std::make_pair(CoverageTracePCGuard,
+ "-fsanitize-coverage-trace-pc-guard"),
+ std::make_pair(CoverageInline8bitCounters,
+ "-fsanitize-coverage-inline-8bit-counters"),
+ std::make_pair(CoverageInlineBoolFlag,
+ "-fsanitize-coverage-inline-bool-flag"),
+ std::make_pair(CoveragePCTable, "-fsanitize-coverage-pc-table"),
+ std::make_pair(CoverageNoPrune, "-fsanitize-coverage-no-prune"),
+ std::make_pair(CoverageStackDepth, "-fsanitize-coverage-stack-depth")};
for (auto F : CoverageFlags) {
if (CoverageFeatures & F.first)
CmdArgs.push_back(F.second);
}
+ addSpecialCaseListOpt(
+ Args, CmdArgs, "-fsanitize-coverage-allowlist=", CoverageAllowlistFiles);
+ addSpecialCaseListOpt(
+ Args, CmdArgs, "-fsanitize-coverage-blocklist=", CoverageBlocklistFiles);
if (TC.getTriple().isOSWindows() && needsUbsanRt()) {
// Instruct the code generator to embed linker directives in the object file
// that cause the required runtime libraries to be linked.
- CmdArgs.push_back(Args.MakeArgString(
- "--dependent-lib=" + TC.getCompilerRT(Args, "ubsan_standalone")));
+ CmdArgs.push_back(
+ Args.MakeArgString("--dependent-lib=" +
+ TC.getCompilerRTBasename(Args, "ubsan_standalone")));
if (types::isCXX(InputType))
CmdArgs.push_back(Args.MakeArgString(
- "--dependent-lib=" + TC.getCompilerRT(Args, "ubsan_standalone_cxx")));
+ "--dependent-lib=" +
+ TC.getCompilerRTBasename(Args, "ubsan_standalone_cxx")));
}
if (TC.getTriple().isOSWindows() && needsStatsRt()) {
- CmdArgs.push_back(Args.MakeArgString("--dependent-lib=" +
- TC.getCompilerRT(Args, "stats_client")));
+ CmdArgs.push_back(Args.MakeArgString(
+ "--dependent-lib=" + TC.getCompilerRTBasename(Args, "stats_client")));
// The main executable must export the stats runtime.
// FIXME: Only exporting from the main executable (e.g. based on whether the
// translation unit defines main()) would save a little space, but having
// multiple copies of the runtime shouldn't hurt.
- CmdArgs.push_back(Args.MakeArgString("--dependent-lib=" +
- TC.getCompilerRT(Args, "stats")));
+ CmdArgs.push_back(Args.MakeArgString(
+ "--dependent-lib=" + TC.getCompilerRTBasename(Args, "stats")));
addIncludeLinkerOption(TC, Args, CmdArgs, "__sanitizer_stats_register");
}
@@ -958,16 +996,10 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
CmdArgs.push_back(
Args.MakeArgString("-fsanitize-trap=" + toString(TrapSanitizers)));
- for (const auto &BLPath : UserBlacklistFiles) {
- SmallString<64> BlacklistOpt("-fsanitize-blacklist=");
- BlacklistOpt += BLPath;
- CmdArgs.push_back(Args.MakeArgString(BlacklistOpt));
- }
- for (const auto &BLPath : SystemBlacklistFiles) {
- SmallString<64> BlacklistOpt("-fsanitize-system-blacklist=");
- BlacklistOpt += BLPath;
- CmdArgs.push_back(Args.MakeArgString(BlacklistOpt));
- }
+ addSpecialCaseListOpt(Args, CmdArgs,
+ "-fsanitize-blacklist=", UserBlacklistFiles);
+ addSpecialCaseListOpt(Args, CmdArgs,
+ "-fsanitize-system-blacklist=", SystemBlacklistFiles);
if (MsanTrackOrigins)
CmdArgs.push_back(Args.MakeArgString("-fsanitize-memory-track-origins=" +
@@ -1038,7 +1070,7 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
CmdArgs.push_back(Args.MakeArgString("hwasan-abi=" + HwasanAbi));
}
- if (Sanitizers.has(SanitizerKind::HWAddress)) {
+ if (Sanitizers.has(SanitizerKind::HWAddress) && TC.getTriple().isAArch64()) {
CmdArgs.push_back("-target-feature");
CmdArgs.push_back("+tagged-globals");
}
@@ -1102,22 +1134,23 @@ int parseCoverageFeatures(const Driver &D, const llvm::opt::Arg *A) {
for (int i = 0, n = A->getNumValues(); i != n; ++i) {
const char *Value = A->getValue(i);
int F = llvm::StringSwitch<int>(Value)
- .Case("func", CoverageFunc)
- .Case("bb", CoverageBB)
- .Case("edge", CoverageEdge)
- .Case("indirect-calls", CoverageIndirCall)
- .Case("trace-bb", CoverageTraceBB)
- .Case("trace-cmp", CoverageTraceCmp)
- .Case("trace-div", CoverageTraceDiv)
- .Case("trace-gep", CoverageTraceGep)
- .Case("8bit-counters", Coverage8bitCounters)
- .Case("trace-pc", CoverageTracePC)
- .Case("trace-pc-guard", CoverageTracePCGuard)
- .Case("no-prune", CoverageNoPrune)
- .Case("inline-8bit-counters", CoverageInline8bitCounters)
- .Case("pc-table", CoveragePCTable)
- .Case("stack-depth", CoverageStackDepth)
- .Default(0);
+ .Case("func", CoverageFunc)
+ .Case("bb", CoverageBB)
+ .Case("edge", CoverageEdge)
+ .Case("indirect-calls", CoverageIndirCall)
+ .Case("trace-bb", CoverageTraceBB)
+ .Case("trace-cmp", CoverageTraceCmp)
+ .Case("trace-div", CoverageTraceDiv)
+ .Case("trace-gep", CoverageTraceGep)
+ .Case("8bit-counters", Coverage8bitCounters)
+ .Case("trace-pc", CoverageTracePC)
+ .Case("trace-pc-guard", CoverageTracePCGuard)
+ .Case("no-prune", CoverageNoPrune)
+ .Case("inline-8bit-counters", CoverageInline8bitCounters)
+ .Case("inline-bool-flag", CoverageInlineBoolFlag)
+ .Case("pc-table", CoveragePCTable)
+ .Case("stack-depth", CoverageStackDepth)
+ .Default(0);
if (F == 0)
D.Diag(clang::diag::err_drv_unsupported_option_argument)
<< A->getOption().getName() << Value;
diff --git a/clang/lib/Driver/Tool.cpp b/clang/lib/Driver/Tool.cpp
index 9ff6e863a124..449f69cfcb35 100644
--- a/clang/lib/Driver/Tool.cpp
+++ b/clang/lib/Driver/Tool.cpp
@@ -11,13 +11,8 @@
using namespace clang::driver;
-Tool::Tool(const char *_Name, const char *_ShortName, const ToolChain &TC,
- ResponseFileSupport _ResponseSupport,
- llvm::sys::WindowsEncodingMethod _ResponseEncoding,
- const char *_ResponseFlag)
- : Name(_Name), ShortName(_ShortName), TheToolChain(TC),
- ResponseSupport(_ResponseSupport), ResponseEncoding(_ResponseEncoding),
- ResponseFlag(_ResponseFlag) {}
+Tool::Tool(const char *_Name, const char *_ShortName, const ToolChain &TC)
+ : Name(_Name), ShortName(_ShortName), TheToolChain(TC) {}
Tool::~Tool() {
}
diff --git a/clang/lib/Driver/ToolChain.cpp b/clang/lib/Driver/ToolChain.cpp
index 3ebbd30195b3..b8c12fc9241a 100644
--- a/clang/lib/Driver/ToolChain.cpp
+++ b/clang/lib/Driver/ToolChain.cpp
@@ -68,8 +68,7 @@ static ToolChain::RTTIMode CalculateRTTIMode(const ArgList &Args,
}
// -frtti is default, except for the PS4 CPU.
- return (Triple.isPS4CPU() || Triple.isNVPTX()) ? ToolChain::RM_Disabled
- : ToolChain::RM_Enabled;
+ return (Triple.isPS4CPU()) ? ToolChain::RM_Disabled : ToolChain::RM_Enabled;
}
ToolChain::ToolChain(const Driver &D, const llvm::Triple &T,
@@ -169,7 +168,7 @@ static const DriverSuffix *FindDriverSuffix(StringRef ProgName, size_t &Pos) {
/// Normalize the program name from argv[0] by stripping the file extension if
/// present and lower-casing the string on Windows.
static std::string normalizeProgramName(llvm::StringRef Argv0) {
- std::string ProgName = llvm::sys::path::stem(Argv0);
+ std::string ProgName = std::string(llvm::sys::path::stem(Argv0));
#ifdef _WIN32
// Transform to lowercase for case insensitive file systems.
std::transform(ProgName.begin(), ProgName.end(), ProgName.begin(), ::tolower);
@@ -222,16 +221,21 @@ ToolChain::getTargetAndModeFromProgramName(StringRef PN) {
StringRef Prefix(ProgName);
Prefix = Prefix.slice(0, LastComponent);
std::string IgnoredError;
- bool IsRegistered = llvm::TargetRegistry::lookupTarget(Prefix, IgnoredError);
- return ParsedClangName{Prefix, ModeSuffix, DS->ModeFlag, IsRegistered};
+ bool IsRegistered =
+ llvm::TargetRegistry::lookupTarget(std::string(Prefix), IgnoredError);
+ return ParsedClangName{std::string(Prefix), ModeSuffix, DS->ModeFlag,
+ IsRegistered};
}
StringRef ToolChain::getDefaultUniversalArchName() const {
// In universal driver terms, the arch name accepted by -arch isn't exactly
// the same as the ones that appear in the triple. Roughly speaking, this is
- // an inverse of the darwin::getArchTypeForDarwinArchName() function, but the
- // only interesting special case is powerpc.
+ // an inverse of the darwin::getArchTypeForDarwinArchName() function.
switch (Triple.getArch()) {
+ case llvm::Triple::aarch64:
+ return "arm64";
+ case llvm::Triple::aarch64_32:
+ return "arm64_32";
case llvm::Triple::ppc:
return "ppc";
case llvm::Triple::ppc64:
@@ -271,6 +275,10 @@ Tool *ToolChain::buildLinker() const {
llvm_unreachable("Linking is not supported by this toolchain");
}
+Tool *ToolChain::buildStaticLibTool() const {
+ llvm_unreachable("Creating static lib is not supported by this toolchain");
+}
+
Tool *ToolChain::getAssemble() const {
if (!Assemble)
Assemble.reset(buildAssembler());
@@ -289,6 +297,12 @@ Tool *ToolChain::getLink() const {
return Link.get();
}
+Tool *ToolChain::getStaticLibTool() const {
+ if (!StaticLibTool)
+ StaticLibTool.reset(buildStaticLibTool());
+ return StaticLibTool.get();
+}
+
Tool *ToolChain::getIfsMerge() const {
if (!IfsMerge)
IfsMerge.reset(new tools::ifstool::Merger(*this));
@@ -318,6 +332,9 @@ Tool *ToolChain::getTool(Action::ActionClass AC) const {
case Action::LinkJobClass:
return getLink();
+ case Action::StaticLibJobClass:
+ return getStaticLibTool();
+
case Action::InputClass:
case Action::BindArchClass:
case Action::OffloadClass:
@@ -386,11 +403,12 @@ std::string ToolChain::getCompilerRTPath() const {
} else {
llvm::sys::path::append(Path, "lib", getOSLibName());
}
- return Path.str();
+ return std::string(Path.str());
}
-std::string ToolChain::getCompilerRT(const ArgList &Args, StringRef Component,
- FileType Type) const {
+std::string ToolChain::getCompilerRTBasename(const ArgList &Args,
+ StringRef Component, FileType Type,
+ bool AddArch) const {
const llvm::Triple &TT = getTriple();
bool IsITANMSVCWindows =
TT.isWindowsMSVCEnvironment() || TT.isWindowsItaniumEnvironment();
@@ -412,19 +430,33 @@ std::string ToolChain::getCompilerRT(const ArgList &Args, StringRef Component,
break;
}
+ std::string ArchAndEnv;
+ if (AddArch) {
+ StringRef Arch = getArchNameForCompilerRTLib(*this, Args);
+ const char *Env = TT.isAndroid() ? "-android" : "";
+ ArchAndEnv = ("-" + Arch + Env).str();
+ }
+ return (Prefix + Twine("clang_rt.") + Component + ArchAndEnv + Suffix).str();
+}
+
+std::string ToolChain::getCompilerRT(const ArgList &Args, StringRef Component,
+ FileType Type) const {
+ // Check for runtime files in the new layout without the architecture first.
+ std::string CRTBasename =
+ getCompilerRTBasename(Args, Component, Type, /*AddArch=*/false);
for (const auto &LibPath : getLibraryPaths()) {
SmallString<128> P(LibPath);
- llvm::sys::path::append(P, Prefix + Twine("clang_rt.") + Component + Suffix);
+ llvm::sys::path::append(P, CRTBasename);
if (getVFS().exists(P))
- return P.str();
+ return std::string(P.str());
}
- StringRef Arch = getArchNameForCompilerRTLib(*this, Args);
- const char *Env = TT.isAndroid() ? "-android" : "";
+ // Fall back to the old expected compiler-rt name if the new one does not
+ // exist.
+ CRTBasename = getCompilerRTBasename(Args, Component, Type, /*AddArch=*/true);
SmallString<128> Path(getCompilerRTPath());
- llvm::sys::path::append(Path, Prefix + Twine("clang_rt.") + Component + "-" +
- Arch + Env + Suffix);
- return Path.str();
+ llvm::sys::path::append(Path, CRTBasename);
+ return std::string(Path.str());
}
const char *ToolChain::getCompilerRTArgString(const llvm::opt::ArgList &Args,
@@ -441,13 +473,13 @@ Optional<std::string> ToolChain::getRuntimePath() const {
P.assign(D.ResourceDir);
llvm::sys::path::append(P, "lib", D.getTargetTriple());
if (getVFS().exists(P))
- return llvm::Optional<std::string>(P.str());
+ return llvm::Optional<std::string>(std::string(P.str()));
// Second try the normalized triple.
P.assign(D.ResourceDir);
llvm::sys::path::append(P, "lib", Triple.str());
if (getVFS().exists(P))
- return llvm::Optional<std::string>(P.str());
+ return llvm::Optional<std::string>(std::string(P.str()));
return None;
}
@@ -459,13 +491,13 @@ Optional<std::string> ToolChain::getCXXStdlibPath() const {
P.assign(D.Dir);
llvm::sys::path::append(P, "..", "lib", D.getTargetTriple(), "c++");
if (getVFS().exists(P))
- return llvm::Optional<std::string>(P.str());
+ return llvm::Optional<std::string>(std::string(P.str()));
// Second try the normalized triple.
P.assign(D.Dir);
llvm::sys::path::append(P, "..", "lib", Triple.str(), "c++");
if (getVFS().exists(P))
- return llvm::Optional<std::string>(P.str());
+ return llvm::Optional<std::string>(std::string(P.str()));
return None;
}
@@ -474,31 +506,27 @@ std::string ToolChain::getArchSpecificLibPath() const {
SmallString<128> Path(getDriver().ResourceDir);
llvm::sys::path::append(Path, "lib", getOSLibName(),
llvm::Triple::getArchTypeName(getArch()));
- return Path.str();
+ return std::string(Path.str());
}
bool ToolChain::needsProfileRT(const ArgList &Args) {
if (Args.hasArg(options::OPT_noprofilelib))
return false;
- if (needsGCovInstrumentation(Args) ||
- Args.hasArg(options::OPT_fprofile_generate) ||
- Args.hasArg(options::OPT_fprofile_generate_EQ) ||
- Args.hasArg(options::OPT_fcs_profile_generate) ||
- Args.hasArg(options::OPT_fcs_profile_generate_EQ) ||
- Args.hasArg(options::OPT_fprofile_instr_generate) ||
- Args.hasArg(options::OPT_fprofile_instr_generate_EQ) ||
- Args.hasArg(options::OPT_fcreate_profile) ||
- Args.hasArg(options::OPT_forder_file_instrumentation))
- return true;
-
- return false;
+ return Args.hasArg(options::OPT_fprofile_generate) ||
+ Args.hasArg(options::OPT_fprofile_generate_EQ) ||
+ Args.hasArg(options::OPT_fcs_profile_generate) ||
+ Args.hasArg(options::OPT_fcs_profile_generate_EQ) ||
+ Args.hasArg(options::OPT_fprofile_instr_generate) ||
+ Args.hasArg(options::OPT_fprofile_instr_generate_EQ) ||
+ Args.hasArg(options::OPT_fcreate_profile) ||
+ Args.hasArg(options::OPT_forder_file_instrumentation);
}
bool ToolChain::needsGCovInstrumentation(const llvm::opt::ArgList &Args) {
- return Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
- false) ||
- Args.hasArg(options::OPT_coverage);
+ return Args.hasArg(options::OPT_coverage) ||
+ Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
+ false);
}
Tool *ToolChain::SelectTool(const JobAction &JA) const {
@@ -526,7 +554,7 @@ std::string ToolChain::GetLinkerPath() const {
// If we're passed what looks like an absolute path, don't attempt to
// second-guess that.
if (llvm::sys::fs::can_execute(UseLinker))
- return UseLinker;
+ return std::string(UseLinker);
} else if (UseLinker.empty() || UseLinker == "ld") {
// If we're passed -fuse-ld= with no argument, or with the argument ld,
// then use whatever the default system linker is.
@@ -550,6 +578,11 @@ std::string ToolChain::GetLinkerPath() const {
return GetProgramPath(getDefaultLinker());
}
+std::string ToolChain::GetStaticLibToolPath() const {
+ // TODO: Add support for static lib archiving on Windows
+ return GetProgramPath("llvm-ar");
+}
+
types::ID ToolChain::LookupTypeForExtension(StringRef Ext) const {
types::ID id = types::lookupTypeForExtension(Ext);
@@ -735,6 +768,10 @@ std::string ToolChain::ComputeEffectiveClangTriple(const ArgList &Args,
return ComputeLLVMTriple(Args, InputType);
}
+std::string ToolChain::computeSysRoot() const {
+ return D.SysRoot;
+}
+
void ToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
// Each toolchain should provide the appropriate include flags.
@@ -748,7 +785,8 @@ void ToolChain::addClangWarningOptions(ArgStringList &CC1Args) const {}
void ToolChain::addProfileRTLibs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const {
- if (!needsProfileRT(Args)) return;
+ if (!needsProfileRT(Args) && !needsGCovInstrumentation(Args))
+ return;
CmdArgs.push_back(getCompilerRTArgString(Args, "profile"));
}
@@ -916,28 +954,35 @@ void ToolChain::AddCCKextLibArgs(const ArgList &Args,
CmdArgs.push_back("-lcc_kext");
}
-bool ToolChain::AddFastMathRuntimeIfAvailable(const ArgList &Args,
- ArgStringList &CmdArgs) const {
+bool ToolChain::isFastMathRuntimeAvailable(const ArgList &Args,
+ std::string &Path) const {
// Do not check for -fno-fast-math or -fno-unsafe-math when -Ofast passed
// (to keep the linker options consistent with gcc and clang itself).
if (!isOptimizationLevelFast(Args)) {
// Check if -ffast-math or -funsafe-math.
Arg *A =
- Args.getLastArg(options::OPT_ffast_math, options::OPT_fno_fast_math,
- options::OPT_funsafe_math_optimizations,
- options::OPT_fno_unsafe_math_optimizations);
+ Args.getLastArg(options::OPT_ffast_math, options::OPT_fno_fast_math,
+ options::OPT_funsafe_math_optimizations,
+ options::OPT_fno_unsafe_math_optimizations);
if (!A || A->getOption().getID() == options::OPT_fno_fast_math ||
A->getOption().getID() == options::OPT_fno_unsafe_math_optimizations)
return false;
}
// If crtfastmath.o exists add it to the arguments.
- std::string Path = GetFilePath("crtfastmath.o");
- if (Path == "crtfastmath.o") // Not found.
- return false;
+ Path = GetFilePath("crtfastmath.o");
+ return (Path != "crtfastmath.o"); // Not found.
+}
- CmdArgs.push_back(Args.MakeArgString(Path));
- return true;
+bool ToolChain::addFastMathRuntimeIfAvailable(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ std::string Path;
+ if (isFastMathRuntimeAvailable(Args, Path)) {
+ CmdArgs.push_back(Args.MakeArgString(Path));
+ return true;
+ }
+
+ return false;
}
SanitizerMask ToolChain::getSupportedSanitizers() const {
@@ -955,15 +1000,12 @@ SanitizerMask ToolChain::getSupportedSanitizers() const {
if (getTriple().getArch() == llvm::Triple::x86 ||
getTriple().getArch() == llvm::Triple::x86_64 ||
getTriple().getArch() == llvm::Triple::arm ||
- getTriple().getArch() == llvm::Triple::aarch64 ||
getTriple().getArch() == llvm::Triple::wasm32 ||
- getTriple().getArch() == llvm::Triple::wasm64)
+ getTriple().getArch() == llvm::Triple::wasm64 || getTriple().isAArch64())
Res |= SanitizerKind::CFIICall;
- if (getTriple().getArch() == llvm::Triple::x86_64 ||
- getTriple().getArch() == llvm::Triple::aarch64)
+ if (getTriple().getArch() == llvm::Triple::x86_64 || getTriple().isAArch64())
Res |= SanitizerKind::ShadowCallStack;
- if (getTriple().getArch() == llvm::Triple::aarch64 ||
- getTriple().getArch() == llvm::Triple::aarch64_be)
+ if (getTriple().isAArch64())
Res |= SanitizerKind::MemTag;
return Res;
}
@@ -971,6 +1013,9 @@ SanitizerMask ToolChain::getSupportedSanitizers() const {
void ToolChain::AddCudaIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {}
+void ToolChain::AddHIPIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {}
+
void ToolChain::AddIAMCUIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {}
@@ -1094,3 +1139,86 @@ llvm::opt::DerivedArgList *ToolChain::TranslateOpenMPTargetArgs(
delete DAL;
return nullptr;
}
+
+// TODO: Currently argument values separated by space e.g.
+// -Xclang -mframe-pointer=no cannot be passed by -Xarch_. This should be
+// fixed.
+void ToolChain::TranslateXarchArgs(
+ const llvm::opt::DerivedArgList &Args, llvm::opt::Arg *&A,
+ llvm::opt::DerivedArgList *DAL,
+ SmallVectorImpl<llvm::opt::Arg *> *AllocatedArgs) const {
+ const OptTable &Opts = getDriver().getOpts();
+ unsigned ValuePos = 1;
+ if (A->getOption().matches(options::OPT_Xarch_device) ||
+ A->getOption().matches(options::OPT_Xarch_host))
+ ValuePos = 0;
+
+ unsigned Index = Args.getBaseArgs().MakeIndex(A->getValue(ValuePos));
+ unsigned Prev = Index;
+ std::unique_ptr<llvm::opt::Arg> XarchArg(Opts.ParseOneArg(Args, Index));
+
+ // If the argument parsing failed or more than one argument was
+ // consumed, the -Xarch_ argument's parameter tried to consume
+ // extra arguments. Emit an error and ignore.
+ //
+ // We also want to disallow any options which would alter the
+ // driver behavior; that isn't going to work in our model. We
+ // use isDriverOption() as an approximation, although things
+ // like -O4 are going to slip through.
+ if (!XarchArg || Index > Prev + 1) {
+ getDriver().Diag(diag::err_drv_invalid_Xarch_argument_with_args)
+ << A->getAsString(Args);
+ return;
+ } else if (XarchArg->getOption().hasFlag(options::DriverOption)) {
+ getDriver().Diag(diag::err_drv_invalid_Xarch_argument_isdriver)
+ << A->getAsString(Args);
+ return;
+ }
+ XarchArg->setBaseArg(A);
+ A = XarchArg.release();
+ if (!AllocatedArgs)
+ DAL->AddSynthesizedArg(A);
+ else
+ AllocatedArgs->push_back(A);
+}
+
+llvm::opt::DerivedArgList *ToolChain::TranslateXarchArgs(
+ const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
+ Action::OffloadKind OFK,
+ SmallVectorImpl<llvm::opt::Arg *> *AllocatedArgs) const {
+ DerivedArgList *DAL = new DerivedArgList(Args.getBaseArgs());
+ bool Modified = false;
+
+ bool IsGPU = OFK == Action::OFK_Cuda || OFK == Action::OFK_HIP;
+ for (Arg *A : Args) {
+ bool NeedTrans = false;
+ bool Skip = false;
+ if (A->getOption().matches(options::OPT_Xarch_device)) {
+ NeedTrans = IsGPU;
+ Skip = !IsGPU;
+ } else if (A->getOption().matches(options::OPT_Xarch_host)) {
+ NeedTrans = !IsGPU;
+ Skip = IsGPU;
+ } else if (A->getOption().matches(options::OPT_Xarch__) && IsGPU) {
+ // Do not translate -Xarch_ options for non CUDA/HIP toolchain since
+ // they may need special translation.
+ // Skip this argument unless the architecture matches BoundArch
+ if (BoundArch.empty() || A->getValue(0) != BoundArch)
+ Skip = true;
+ else
+ NeedTrans = true;
+ }
+ if (NeedTrans || Skip)
+ Modified = true;
+ if (NeedTrans)
+ TranslateXarchArgs(Args, A, DAL, AllocatedArgs);
+ if (!Skip)
+ DAL->append(A);
+ }
+
+ if (Modified)
+ return DAL;
+
+ delete DAL;
+ return nullptr;
+}
diff --git a/clang/lib/Driver/ToolChains/AIX.cpp b/clang/lib/Driver/ToolChains/AIX.cpp
index 6fbff61f7656..ac5544eedb00 100644
--- a/clang/lib/Driver/ToolChains/AIX.cpp
+++ b/clang/lib/Driver/ToolChains/AIX.cpp
@@ -13,12 +13,15 @@
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Support/Path.h"
using AIX = clang::driver::toolchains::AIX;
using namespace clang::driver;
using namespace clang::driver::tools;
+using namespace clang::driver::toolchains;
using namespace llvm::opt;
+using namespace llvm::sys;
void aix::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
@@ -73,7 +76,8 @@ void aix::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
}
void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -81,6 +85,7 @@ void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs, const ArgList &Args,
const char *LinkingOutput) const {
const AIX &ToolChain = static_cast<const AIX &>(getToolChain());
+ const Driver &D = ToolChain.getDriver();
ArgStringList CmdArgs;
const bool IsArch32Bit = ToolChain.getTriple().isArch32Bit();
@@ -129,6 +134,12 @@ void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.MakeArgString(ToolChain.GetFilePath(getCrt0Basename())));
}
+ // Collect all static constructor and destructor functions in CXX mode. This
+ // has to come before AddLinkerInputs as the implied option needs to precede
+ // any other '-bcdtors' settings or '-bnocdtors' that '-Wl' might forward.
+ if (D.CCCIsCXX())
+ CmdArgs.push_back("-bcdtors:all:0:s");
+
// Specify linker input file(s).
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
@@ -145,7 +156,8 @@ void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
}
/// AIX - AIX tool chain which can call as(1) and ld(1) directly.
@@ -154,6 +166,43 @@ AIX::AIX(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
getFilePaths().push_back(getDriver().SysRoot + "/usr/lib");
}
+// Returns the effective header sysroot path to use.
+// This comes from either -isysroot or --sysroot.
+llvm::StringRef
+AIX::GetHeaderSysroot(const llvm::opt::ArgList &DriverArgs) const {
+ if (DriverArgs.hasArg(options::OPT_isysroot))
+ return DriverArgs.getLastArgValue(options::OPT_isysroot);
+ if (!getDriver().SysRoot.empty())
+ return getDriver().SysRoot;
+ return "/";
+}
+
+void AIX::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ // Return if -nostdinc is specified as a driver option.
+ if (DriverArgs.hasArg(options::OPT_nostdinc))
+ return;
+
+ llvm::StringRef Sysroot = GetHeaderSysroot(DriverArgs);
+ const Driver &D = getDriver();
+
+ // Add the Clang builtin headers (<resource>/include).
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> P(D.ResourceDir);
+ path::append(P, "/include");
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+ }
+
+ // Return if -nostdlibinc is specified as a driver option.
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ // Add <sysroot>/usr/include.
+ SmallString<128> UP(Sysroot);
+ path::append(UP, "/usr/include");
+ addSystemInclude(DriverArgs, CC1Args, UP.str());
+}
+
auto AIX::buildAssembler() const -> Tool * { return new aix::Assembler(*this); }
auto AIX::buildLinker() const -> Tool * { return new aix::Linker(*this); }
diff --git a/clang/lib/Driver/ToolChains/AIX.h b/clang/lib/Driver/ToolChains/AIX.h
index 69b948bc0ea8..942bb3cceb8a 100644
--- a/clang/lib/Driver/ToolChains/AIX.h
+++ b/clang/lib/Driver/ToolChains/AIX.h
@@ -63,9 +63,16 @@ public:
bool isPIEDefault() const override { return false; }
bool isPICDefaultForced() const override { return true; }
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
protected:
Tool *buildAssembler() const override;
Tool *buildLinker() const override;
+
+private:
+ llvm::StringRef GetHeaderSysroot(const llvm::opt::ArgList &DriverArgs) const;
};
} // end namespace toolchains
diff --git a/clang/lib/Driver/ToolChains/AMDGPU.cpp b/clang/lib/Driver/ToolChains/AMDGPU.cpp
index 71a2c68b4197..bc6d1fcd4a00 100644
--- a/clang/lib/Driver/ToolChains/AMDGPU.cpp
+++ b/clang/lib/Driver/ToolChains/AMDGPU.cpp
@@ -12,6 +12,8 @@
#include "clang/Driver/Compilation.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -19,6 +21,327 @@ using namespace clang::driver::toolchains;
using namespace clang;
using namespace llvm::opt;
+void RocmInstallationDetector::scanLibDevicePath(llvm::StringRef Path) {
+ assert(!Path.empty());
+
+ const StringRef Suffix(".bc");
+ const StringRef Suffix2(".amdgcn.bc");
+
+ std::error_code EC;
+ for (llvm::vfs::directory_iterator LI = D.getVFS().dir_begin(Path, EC), LE;
+ !EC && LI != LE; LI = LI.increment(EC)) {
+ StringRef FilePath = LI->path();
+ StringRef FileName = llvm::sys::path::filename(FilePath);
+ if (!FileName.endswith(Suffix))
+ continue;
+
+ StringRef BaseName;
+ if (FileName.endswith(Suffix2))
+ BaseName = FileName.drop_back(Suffix2.size());
+ else if (FileName.endswith(Suffix))
+ BaseName = FileName.drop_back(Suffix.size());
+
+ if (BaseName == "ocml") {
+ OCML = FilePath;
+ } else if (BaseName == "ockl") {
+ OCKL = FilePath;
+ } else if (BaseName == "opencl") {
+ OpenCL = FilePath;
+ } else if (BaseName == "hip") {
+ HIP = FilePath;
+ } else if (BaseName == "oclc_finite_only_off") {
+ FiniteOnly.Off = FilePath;
+ } else if (BaseName == "oclc_finite_only_on") {
+ FiniteOnly.On = FilePath;
+ } else if (BaseName == "oclc_daz_opt_on") {
+ DenormalsAreZero.On = FilePath;
+ } else if (BaseName == "oclc_daz_opt_off") {
+ DenormalsAreZero.Off = FilePath;
+ } else if (BaseName == "oclc_correctly_rounded_sqrt_on") {
+ CorrectlyRoundedSqrt.On = FilePath;
+ } else if (BaseName == "oclc_correctly_rounded_sqrt_off") {
+ CorrectlyRoundedSqrt.Off = FilePath;
+ } else if (BaseName == "oclc_unsafe_math_on") {
+ UnsafeMath.On = FilePath;
+ } else if (BaseName == "oclc_unsafe_math_off") {
+ UnsafeMath.Off = FilePath;
+ } else if (BaseName == "oclc_wavefrontsize64_on") {
+ WavefrontSize64.On = FilePath;
+ } else if (BaseName == "oclc_wavefrontsize64_off") {
+ WavefrontSize64.Off = FilePath;
+ } else {
+ // Process all bitcode filenames that look like
+ // ocl_isa_version_XXX.amdgcn.bc
+ const StringRef DeviceLibPrefix = "oclc_isa_version_";
+ if (!BaseName.startswith(DeviceLibPrefix))
+ continue;
+
+ StringRef IsaVersionNumber =
+ BaseName.drop_front(DeviceLibPrefix.size());
+
+ llvm::Twine GfxName = Twine("gfx") + IsaVersionNumber;
+ SmallString<8> Tmp;
+ LibDeviceMap.insert(
+ std::make_pair(GfxName.toStringRef(Tmp), FilePath.str()));
+ }
+ }
+}
+
+void RocmInstallationDetector::ParseHIPVersionFile(llvm::StringRef V) {
+ SmallVector<StringRef, 4> VersionParts;
+ V.split(VersionParts, '\n');
+ unsigned Major;
+ unsigned Minor;
+ for (auto Part : VersionParts) {
+ auto Splits = Part.split('=');
+ if (Splits.first == "HIP_VERSION_MAJOR")
+ Splits.second.getAsInteger(0, Major);
+ else if (Splits.first == "HIP_VERSION_MINOR")
+ Splits.second.getAsInteger(0, Minor);
+ else if (Splits.first == "HIP_VERSION_PATCH")
+ VersionPatch = Splits.second.str();
+ }
+ VersionMajorMinor = llvm::VersionTuple(Major, Minor);
+ DetectedVersion =
+ (Twine(Major) + "." + Twine(Minor) + "." + VersionPatch).str();
+}
+
+// For candidate specified by --rocm-path we do not do strict check.
+SmallVector<RocmInstallationDetector::Candidate, 4>
+RocmInstallationDetector::getInstallationPathCandidates() {
+ SmallVector<Candidate, 4> Candidates;
+ if (!RocmPathArg.empty()) {
+ Candidates.emplace_back(RocmPathArg.str());
+ return Candidates;
+ }
+
+ // Try to find relative to the compiler binary.
+ const char *InstallDir = D.getInstalledDir();
+
+ // Check both a normal Unix prefix position of the clang binary, as well as
+ // the Windows-esque layout the ROCm packages use with the host architecture
+ // subdirectory of bin.
+
+ // Strip off directory (usually bin)
+ StringRef ParentDir = llvm::sys::path::parent_path(InstallDir);
+ StringRef ParentName = llvm::sys::path::filename(ParentDir);
+
+ // Some builds use bin/{host arch}, so go up again.
+ if (ParentName == "bin") {
+ ParentDir = llvm::sys::path::parent_path(ParentDir);
+ ParentName = llvm::sys::path::filename(ParentDir);
+ }
+
+ // Some versions of the rocm llvm package install to /opt/rocm/llvm/bin
+ if (ParentName == "llvm")
+ ParentDir = llvm::sys::path::parent_path(ParentDir);
+
+ Candidates.emplace_back(ParentDir.str(), /*StrictChecking=*/true);
+
+ // Device library may be installed in clang resource directory.
+ Candidates.emplace_back(D.ResourceDir, /*StrictChecking=*/true);
+
+ Candidates.emplace_back(D.SysRoot + "/opt/rocm", /*StrictChecking=*/true);
+ return Candidates;
+}
+
+RocmInstallationDetector::RocmInstallationDetector(
+ const Driver &D, const llvm::Triple &HostTriple,
+ const llvm::opt::ArgList &Args, bool DetectHIPRuntime, bool DetectDeviceLib)
+ : D(D) {
+ RocmPathArg = Args.getLastArgValue(clang::driver::options::OPT_rocm_path_EQ);
+ RocmDeviceLibPathArg =
+ Args.getAllArgValues(clang::driver::options::OPT_rocm_device_lib_path_EQ);
+ if (auto *A = Args.getLastArg(clang::driver::options::OPT_hip_version_EQ)) {
+ HIPVersionArg = A->getValue();
+ unsigned Major = 0;
+ unsigned Minor = 0;
+ SmallVector<StringRef, 3> Parts;
+ HIPVersionArg.split(Parts, '.');
+ if (Parts.size())
+ Parts[0].getAsInteger(0, Major);
+ if (Parts.size() > 1)
+ Parts[1].getAsInteger(0, Minor);
+ if (Parts.size() > 2)
+ VersionPatch = Parts[2].str();
+ if (VersionPatch.empty())
+ VersionPatch = "0";
+ if (Major == 0 || Minor == 0)
+ D.Diag(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << HIPVersionArg;
+
+ VersionMajorMinor = llvm::VersionTuple(Major, Minor);
+ DetectedVersion =
+ (Twine(Major) + "." + Twine(Minor) + "." + VersionPatch).str();
+ } else {
+ VersionPatch = DefaultVersionPatch;
+ VersionMajorMinor =
+ llvm::VersionTuple(DefaultVersionMajor, DefaultVersionMinor);
+ DetectedVersion = (Twine(DefaultVersionMajor) + "." +
+ Twine(DefaultVersionMinor) + "." + VersionPatch)
+ .str();
+ }
+
+ if (DetectHIPRuntime)
+ detectHIPRuntime();
+ if (DetectDeviceLib)
+ detectDeviceLibrary();
+}
+
+void RocmInstallationDetector::detectDeviceLibrary() {
+ assert(LibDevicePath.empty());
+
+ if (!RocmDeviceLibPathArg.empty())
+ LibDevicePath = RocmDeviceLibPathArg[RocmDeviceLibPathArg.size() - 1];
+ else if (const char *LibPathEnv = ::getenv("HIP_DEVICE_LIB_PATH"))
+ LibDevicePath = LibPathEnv;
+
+ auto &FS = D.getVFS();
+ if (!LibDevicePath.empty()) {
+ // Maintain compatability with HIP flag/envvar pointing directly at the
+ // bitcode library directory. This points directly at the library path instead
+ // of the rocm root installation.
+ if (!FS.exists(LibDevicePath))
+ return;
+
+ scanLibDevicePath(LibDevicePath);
+ HasDeviceLibrary = allGenericLibsValid() && !LibDeviceMap.empty();
+ return;
+ }
+
+ // The install path situation in old versions of ROCm is a real mess, and
+ // use a different install layout. Multiple copies of the device libraries
+ // exist for each frontend project, and differ depending on which build
+ // system produced the packages. Standalone OpenCL builds also have a
+ // different directory structure from the ROCm OpenCL package.
+ auto Candidates = getInstallationPathCandidates();
+ for (const auto &Candidate : Candidates) {
+ auto CandidatePath = Candidate.Path;
+
+ // Check device library exists at the given path.
+ auto CheckDeviceLib = [&](StringRef Path) {
+ bool CheckLibDevice = (!NoBuiltinLibs || Candidate.StrictChecking);
+ if (CheckLibDevice && !FS.exists(Path))
+ return false;
+
+ scanLibDevicePath(Path);
+
+ if (!NoBuiltinLibs) {
+ // Check that the required non-target libraries are all available.
+ if (!allGenericLibsValid())
+ return false;
+
+ // Check that we have found at least one libdevice that we can link in
+ // if -nobuiltinlib hasn't been specified.
+ if (LibDeviceMap.empty())
+ return false;
+ }
+ return true;
+ };
+
+ // The possible structures are:
+ // - ${ROCM_ROOT}/amdgcn/bitcode/*
+ // - ${ROCM_ROOT}/lib/*
+ // - ${ROCM_ROOT}/lib/bitcode/*
+ // so try to detect these layouts.
+ static llvm::SmallVector<const char *, 2> SubDirsList[] = {
+ {"amdgcn", "bitcode"},
+ {"lib"},
+ {"lib", "bitcode"},
+ };
+
+ // Make a path by appending sub-directories to InstallPath.
+ auto MakePath = [&](const llvm::ArrayRef<const char *> &SubDirs) {
+ auto Path = CandidatePath;
+ for (auto SubDir : SubDirs)
+ llvm::sys::path::append(Path, SubDir);
+ return Path;
+ };
+
+ for (auto SubDirs : SubDirsList) {
+ LibDevicePath = MakePath(SubDirs);
+ HasDeviceLibrary = CheckDeviceLib(LibDevicePath);
+ if (HasDeviceLibrary)
+ return;
+ }
+ }
+}
+
+void RocmInstallationDetector::detectHIPRuntime() {
+ auto Candidates = getInstallationPathCandidates();
+ auto &FS = D.getVFS();
+
+ for (const auto &Candidate : Candidates) {
+ InstallPath = Candidate.Path;
+ if (InstallPath.empty() || !FS.exists(InstallPath))
+ continue;
+
+ BinPath = InstallPath;
+ llvm::sys::path::append(BinPath, "bin");
+ IncludePath = InstallPath;
+ llvm::sys::path::append(IncludePath, "include");
+ LibPath = InstallPath;
+ llvm::sys::path::append(LibPath, "lib");
+
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> VersionFile =
+ FS.getBufferForFile(BinPath + "/.hipVersion");
+ if (!VersionFile && Candidate.StrictChecking)
+ continue;
+
+ if (HIPVersionArg.empty() && VersionFile)
+ ParseHIPVersionFile((*VersionFile)->getBuffer());
+
+ HasHIPRuntime = true;
+ return;
+ }
+ HasHIPRuntime = false;
+}
+
+void RocmInstallationDetector::print(raw_ostream &OS) const {
+ if (hasHIPRuntime())
+ OS << "Found HIP installation: " << InstallPath << ", version "
+ << DetectedVersion << '\n';
+}
+
+void RocmInstallationDetector::AddHIPIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ bool UsesRuntimeWrapper = VersionMajorMinor > llvm::VersionTuple(3, 5);
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ // HIP header includes standard library wrapper headers under clang
+ // cuda_wrappers directory. Since these wrapper headers include_next
+ // standard C++ headers, whereas libc++ headers include_next other clang
+ // headers. The include paths have to follow this order:
+ // - wrapper include path
+ // - standard C++ include path
+ // - other clang include path
+ // Since standard C++ and other clang include paths are added in other
+ // places after this function, here we only need to make sure wrapper
+ // include path is added.
+ //
+ // ROCm 3.5 does not fully support the wrapper headers. Therefore it needs
+ // a workaround.
+ SmallString<128> P(D.ResourceDir);
+ if (UsesRuntimeWrapper)
+ llvm::sys::path::append(P, "include", "cuda_wrappers");
+ CC1Args.push_back("-internal-isystem");
+ CC1Args.push_back(DriverArgs.MakeArgString(P));
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nogpuinc))
+ return;
+
+ if (!hasHIPRuntime()) {
+ D.Diag(diag::err_drv_no_hip_runtime);
+ return;
+ }
+
+ CC1Args.push_back("-internal-isystem");
+ CC1Args.push_back(DriverArgs.MakeArgString(getIncludePath()));
+ if (UsesRuntimeWrapper)
+ CC1Args.append({"-include", "__clang_hip_runtime_wrapper.h"});
+}
+
void amdgpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -31,8 +354,9 @@ void amdgpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-shared");
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- C.addCommand(std::make_unique<Command>(JA, *this, Args.MakeArgString(Linker),
- CmdArgs, Inputs));
+ C.addCommand(
+ std::make_unique<Command>(JA, *this, ResponseFileSupport::AtFileCurCP(),
+ Args.MakeArgString(Linker), CmdArgs, Inputs));
}
void amdgpu::getAMDGPUTargetFeatures(const Driver &D,
@@ -102,6 +426,73 @@ AMDGPUToolChain::TranslateArgs(const DerivedArgList &Args, StringRef BoundArch,
return DAL;
}
+bool AMDGPUToolChain::getDefaultDenormsAreZeroForTarget(
+ llvm::AMDGPU::GPUKind Kind) {
+
+ // Assume nothing without a specific target.
+ if (Kind == llvm::AMDGPU::GK_NONE)
+ return false;
+
+ const unsigned ArchAttr = llvm::AMDGPU::getArchAttrAMDGCN(Kind);
+
+ // Enable f32 denormals by default on subtargets where fma is fast with
+ // denormals.
+ const bool BothDenormAndFMAFast =
+ (ArchAttr & llvm::AMDGPU::FEATURE_FAST_FMA_F32) &&
+ (ArchAttr & llvm::AMDGPU::FEATURE_FAST_DENORMAL_F32);
+ return !BothDenormAndFMAFast;
+}
+
+llvm::DenormalMode AMDGPUToolChain::getDefaultDenormalModeForType(
+ const llvm::opt::ArgList &DriverArgs, const JobAction &JA,
+ const llvm::fltSemantics *FPType) const {
+ // Denormals should always be enabled for f16 and f64.
+ if (!FPType || FPType != &llvm::APFloat::IEEEsingle())
+ return llvm::DenormalMode::getIEEE();
+
+ if (JA.getOffloadingDeviceKind() == Action::OFK_HIP ||
+ JA.getOffloadingDeviceKind() == Action::OFK_Cuda) {
+ auto Kind = llvm::AMDGPU::parseArchAMDGCN(JA.getOffloadingArch());
+ if (FPType && FPType == &llvm::APFloat::IEEEsingle() &&
+ DriverArgs.hasFlag(options::OPT_fcuda_flush_denormals_to_zero,
+ options::OPT_fno_cuda_flush_denormals_to_zero,
+ getDefaultDenormsAreZeroForTarget(Kind)))
+ return llvm::DenormalMode::getPreserveSign();
+
+ return llvm::DenormalMode::getIEEE();
+ }
+
+ const StringRef GpuArch = DriverArgs.getLastArgValue(options::OPT_mcpu_EQ);
+ auto Kind = llvm::AMDGPU::parseArchAMDGCN(GpuArch);
+
+ // TODO: There are way too many flags that change this. Do we need to check
+ // them all?
+ bool DAZ = DriverArgs.hasArg(options::OPT_cl_denorms_are_zero) ||
+ getDefaultDenormsAreZeroForTarget(Kind);
+
+ // Outputs are flushed to zero (FTZ), preserving sign. Denormal inputs are
+ // also implicitly treated as zero (DAZ).
+ return DAZ ? llvm::DenormalMode::getPreserveSign() :
+ llvm::DenormalMode::getIEEE();
+}
+
+bool AMDGPUToolChain::isWave64(const llvm::opt::ArgList &DriverArgs,
+ llvm::AMDGPU::GPUKind Kind) {
+ const unsigned ArchAttr = llvm::AMDGPU::getArchAttrAMDGCN(Kind);
+ static bool HasWave32 = (ArchAttr & llvm::AMDGPU::FEATURE_WAVE32);
+
+ return !HasWave32 || DriverArgs.hasFlag(
+ options::OPT_mwavefrontsize64, options::OPT_mno_wavefrontsize64, false);
+}
+
+
+/// ROCM Toolchain
+ROCMToolChain::ROCMToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : AMDGPUToolChain(D, Triple, Args) {
+ RocmInstallation.detectDeviceLibrary();
+}
+
void AMDGPUToolChain::addClangTargetOptions(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
@@ -115,3 +506,91 @@ void AMDGPUToolChain::addClangTargetOptions(
CC1Args.push_back("-fapply-global-visibility-to-externs");
}
}
+
+void ROCMToolChain::addClangTargetOptions(
+ const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadingKind) const {
+ AMDGPUToolChain::addClangTargetOptions(DriverArgs, CC1Args,
+ DeviceOffloadingKind);
+
+ // For the OpenCL case where there is no offload target, accept -nostdlib to
+ // disable bitcode linking.
+ if (DeviceOffloadingKind == Action::OFK_None &&
+ DriverArgs.hasArg(options::OPT_nostdlib))
+ return;
+
+ if (DriverArgs.hasArg(options::OPT_nogpulib))
+ return;
+
+ if (!RocmInstallation.hasDeviceLibrary()) {
+ getDriver().Diag(diag::err_drv_no_rocm_device_lib) << 0;
+ return;
+ }
+
+ // Get the device name and canonicalize it
+ const StringRef GpuArch = DriverArgs.getLastArgValue(options::OPT_mcpu_EQ);
+ auto Kind = llvm::AMDGPU::parseArchAMDGCN(GpuArch);
+ const StringRef CanonArch = llvm::AMDGPU::getArchNameAMDGCN(Kind);
+ std::string LibDeviceFile = RocmInstallation.getLibDeviceFile(CanonArch);
+ if (LibDeviceFile.empty()) {
+ getDriver().Diag(diag::err_drv_no_rocm_device_lib) << 1 << GpuArch;
+ return;
+ }
+
+ bool Wave64 = isWave64(DriverArgs, Kind);
+
+ // TODO: There are way too many flags that change this. Do we need to check
+ // them all?
+ bool DAZ = DriverArgs.hasArg(options::OPT_cl_denorms_are_zero) ||
+ getDefaultDenormsAreZeroForTarget(Kind);
+ bool FiniteOnly = DriverArgs.hasArg(options::OPT_cl_finite_math_only);
+
+ bool UnsafeMathOpt =
+ DriverArgs.hasArg(options::OPT_cl_unsafe_math_optimizations);
+ bool FastRelaxedMath = DriverArgs.hasArg(options::OPT_cl_fast_relaxed_math);
+ bool CorrectSqrt =
+ DriverArgs.hasArg(options::OPT_cl_fp32_correctly_rounded_divide_sqrt);
+
+ // Add the OpenCL specific bitcode library.
+ CC1Args.push_back("-mlink-builtin-bitcode");
+ CC1Args.push_back(DriverArgs.MakeArgString(RocmInstallation.getOpenCLPath()));
+
+ // Add the generic set of libraries.
+ RocmInstallation.addCommonBitcodeLibCC1Args(
+ DriverArgs, CC1Args, LibDeviceFile, Wave64, DAZ, FiniteOnly,
+ UnsafeMathOpt, FastRelaxedMath, CorrectSqrt);
+}
+
+void RocmInstallationDetector::addCommonBitcodeLibCC1Args(
+ const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
+ StringRef LibDeviceFile, bool Wave64, bool DAZ, bool FiniteOnly,
+ bool UnsafeMathOpt, bool FastRelaxedMath, bool CorrectSqrt) const {
+ static const char LinkBitcodeFlag[] = "-mlink-builtin-bitcode";
+
+ CC1Args.push_back(LinkBitcodeFlag);
+ CC1Args.push_back(DriverArgs.MakeArgString(getOCMLPath()));
+
+ CC1Args.push_back(LinkBitcodeFlag);
+ CC1Args.push_back(DriverArgs.MakeArgString(getOCKLPath()));
+
+ CC1Args.push_back(LinkBitcodeFlag);
+ CC1Args.push_back(DriverArgs.MakeArgString(getDenormalsAreZeroPath(DAZ)));
+
+ CC1Args.push_back(LinkBitcodeFlag);
+ CC1Args.push_back(DriverArgs.MakeArgString(
+ getUnsafeMathPath(UnsafeMathOpt || FastRelaxedMath)));
+
+ CC1Args.push_back(LinkBitcodeFlag);
+ CC1Args.push_back(DriverArgs.MakeArgString(
+ getFiniteOnlyPath(FiniteOnly || FastRelaxedMath)));
+
+ CC1Args.push_back(LinkBitcodeFlag);
+ CC1Args.push_back(
+ DriverArgs.MakeArgString(getCorrectlyRoundedSqrtPath(CorrectSqrt)));
+
+ CC1Args.push_back(LinkBitcodeFlag);
+ CC1Args.push_back(DriverArgs.MakeArgString(getWavefrontSize64Path(Wave64)));
+
+ CC1Args.push_back(LinkBitcodeFlag);
+ CC1Args.push_back(DriverArgs.MakeArgString(LibDeviceFile));
+}
diff --git a/clang/lib/Driver/ToolChains/AMDGPU.h b/clang/lib/Driver/ToolChains/AMDGPU.h
index f4c78bea5cc9..5d44faf28b05 100644
--- a/clang/lib/Driver/ToolChains/AMDGPU.h
+++ b/clang/lib/Driver/ToolChains/AMDGPU.h
@@ -10,19 +10,24 @@
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_AMDGPU_H
#include "Gnu.h"
+#include "ROCm.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/TargetParser.h"
+
#include <map>
namespace clang {
namespace driver {
+
namespace tools {
namespace amdgpu {
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("amdgpu::Linker", "ld.lld", TC) {}
+ Linker(const ToolChain &TC) : Tool("amdgpu::Linker", "ld.lld", TC) {}
bool isLinkJob() const override { return true; }
bool hasIntegratedCPP() const override { return false; }
void ConstructJob(Compilation &C, const JobAction &JA,
@@ -40,11 +45,9 @@ void getAMDGPUTargetFeatures(const Driver &D, const llvm::opt::ArgList &Args,
namespace toolchains {
class LLVM_LIBRARY_VISIBILITY AMDGPUToolChain : public Generic_ELF {
-
-private:
+protected:
const std::map<options::ID, const StringRef> OptionsDefault;
-protected:
Tool *buildLinker() const override;
const StringRef getOptionDefault(options::ID OptID) const {
auto opt = OptionsDefault.find(OptID);
@@ -66,6 +69,34 @@ public:
void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadKind) const override;
+
+ /// Return whether denormals should be flushed, and treated as 0 by default
+ /// for the subtarget.
+ static bool getDefaultDenormsAreZeroForTarget(llvm::AMDGPU::GPUKind GPUKind);
+
+ llvm::DenormalMode getDefaultDenormalModeForType(
+ const llvm::opt::ArgList &DriverArgs, const JobAction &JA,
+ const llvm::fltSemantics *FPType = nullptr) const override;
+
+ static bool isWave64(const llvm::opt::ArgList &DriverArgs,
+ llvm::AMDGPU::GPUKind Kind);
+ /// Needed for using lto.
+ bool HasNativeLLVMSupport() const override {
+ return true;
+ }
+
+ /// Needed for translating LTO options.
+ const char *getDefaultLinker() const override { return "ld.lld"; }
+};
+
+class LLVM_LIBRARY_VISIBILITY ROCMToolChain : public AMDGPUToolChain {
+public:
+ ROCMToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+ void
+ addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const override;
};
} // end namespace toolchains
diff --git a/clang/lib/Driver/ToolChains/AVR.cpp b/clang/lib/Driver/ToolChains/AVR.cpp
index e8a3a7b38c31..092bade53c63 100644
--- a/clang/lib/Driver/ToolChains/AVR.cpp
+++ b/clang/lib/Driver/ToolChains/AVR.cpp
@@ -74,13 +74,11 @@ AVRToolChain::AVRToolChain(const Driver &D, const llvm::Triple &Triple,
// No avr-libc found and so no runtime linked.
D.Diag(diag::warn_drv_avr_libc_not_found);
} else { // We have enough information to link stdlibs
- std::string GCCRoot = GCCInstallation.getInstallPath();
+ std::string GCCRoot = std::string(GCCInstallation.getInstallPath());
std::string LibcRoot = AVRLibcRoot.getValue();
getFilePaths().push_back(LibcRoot + std::string("/lib/") +
std::string(*FamilyName));
- getFilePaths().push_back(LibcRoot + std::string("/lib/") +
- std::string(*FamilyName));
getFilePaths().push_back(GCCRoot + std::string("/") +
std::string(*FamilyName));
@@ -144,8 +142,9 @@ void AVR::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(std::string("-m") + *FamilyName));
}
- C.addCommand(std::make_unique<Command>(JA, *this, Args.MakeArgString(Linker),
- CmdArgs, Inputs));
+ C.addCommand(
+ std::make_unique<Command>(JA, *this, ResponseFileSupport::AtFileCurCP(),
+ Args.MakeArgString(Linker), CmdArgs, Inputs));
}
llvm::Optional<std::string> AVRToolChain::findAVRLibcInstallation() const {
diff --git a/clang/lib/Driver/ToolChains/AVR.h b/clang/lib/Driver/ToolChains/AVR.h
index d244fc4f90e9..a3198b249580 100644
--- a/clang/lib/Driver/ToolChains/AVR.h
+++ b/clang/lib/Driver/ToolChains/AVR.h
@@ -40,10 +40,10 @@ private:
namespace tools {
namespace AVR {
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
Linker(const llvm::Triple &Triple, const ToolChain &TC, bool LinkStdlib)
- : GnuTool("AVR::Linker", "avr-ld", TC), Triple(Triple),
+ : Tool("AVR::Linker", "avr-ld", TC), Triple(Triple),
LinkStdlib(LinkStdlib) {}
bool hasIntegratedCPP() const override { return false; }
diff --git a/clang/lib/Driver/ToolChains/Ananas.cpp b/clang/lib/Driver/ToolChains/Ananas.cpp
index 2f11c9739a0e..a4141a57accc 100644
--- a/clang/lib/Driver/ToolChains/Ananas.cpp
+++ b/clang/lib/Driver/ToolChains/Ananas.cpp
@@ -39,7 +39,8 @@ void ananas::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
void ananas::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -103,7 +104,7 @@ void ananas::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (D.isUsingLTO()) {
assert(!Inputs.empty() && "Must have at least one input.");
- AddGoldPlugin(ToolChain, Args, CmdArgs, Output, Inputs[0],
+ addLTOOptions(ToolChain, Args, CmdArgs, Output, Inputs[0],
D.getLTOMode() == LTOK_Thin);
}
@@ -123,7 +124,8 @@ void ananas::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
// Ananas - Ananas tool chain which can call as(1) and ld(1) directly.
diff --git a/clang/lib/Driver/ToolChains/Ananas.h b/clang/lib/Driver/ToolChains/Ananas.h
index 5e45b47fc108..72ad3edcf056 100644
--- a/clang/lib/Driver/ToolChains/Ananas.h
+++ b/clang/lib/Driver/ToolChains/Ananas.h
@@ -19,10 +19,9 @@ namespace tools {
/// ananas -- Directly call GNU Binutils assembler and linker
namespace ananas {
-class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
public:
- Assembler(const ToolChain &TC)
- : GnuTool("ananas::Assembler", "assembler", TC) {}
+ Assembler(const ToolChain &TC) : Tool("ananas::Assembler", "assembler", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -32,9 +31,9 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("ananas::Linker", "linker", TC) {}
+ Linker(const ToolChain &TC) : Tool("ananas::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
diff --git a/clang/lib/Driver/ToolChains/Arch/AArch64.cpp b/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
index 9c27504dccf5..487c50dfc466 100644
--- a/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
@@ -39,7 +39,7 @@ std::string aarch64::getAArch64TargetCPU(const ArgList &Args,
// Handle CPU name is 'native'.
if (CPU == "native")
- return llvm::sys::getHostCPUName();
+ return std::string(llvm::sys::getHostCPUName());
else if (CPU.size())
return CPU;
@@ -54,7 +54,8 @@ std::string aarch64::getAArch64TargetCPU(const ArgList &Args,
// Decode AArch64 features from string like +[no]featureA+[no]featureB+...
static bool DecodeAArch64Features(const Driver &D, StringRef text,
- std::vector<StringRef> &Features) {
+ std::vector<StringRef> &Features,
+ llvm::AArch64::ArchKind ArchKind) {
SmallVector<StringRef, 8> Split;
text.split(Split, StringRef("+"), -1, false);
@@ -66,6 +67,11 @@ static bool DecodeAArch64Features(const Driver &D, StringRef text,
D.Diag(clang::diag::err_drv_no_neon_modifier);
else
return false;
+
+ // +sve implies +f32mm if the base architecture is v8.6A.
+ // In general, +sve does not imply both +f64mm and +f32mm.
+ if ((ArchKind == llvm::AArch64::ArchKind::ARMV8_6A) && Feature == "sve")
+ Features.push_back("+f32mm");
}
return true;
}
@@ -76,6 +82,7 @@ static bool DecodeAArch64Mcpu(const Driver &D, StringRef Mcpu, StringRef &CPU,
std::vector<StringRef> &Features) {
std::pair<StringRef, StringRef> Split = Mcpu.split("+");
CPU = Split.first;
+ llvm::AArch64::ArchKind ArchKind = llvm::AArch64::ArchKind::ARMV8A;
if (CPU == "native")
CPU = llvm::sys::getHostCPUName();
@@ -83,7 +90,7 @@ static bool DecodeAArch64Mcpu(const Driver &D, StringRef Mcpu, StringRef &CPU,
if (CPU == "generic") {
Features.push_back("+neon");
} else {
- llvm::AArch64::ArchKind ArchKind = llvm::AArch64::parseCPUArch(CPU);
+ ArchKind = llvm::AArch64::parseCPUArch(CPU);
if (!llvm::AArch64::getArchFeatures(ArchKind, Features))
return false;
@@ -92,10 +99,11 @@ static bool DecodeAArch64Mcpu(const Driver &D, StringRef Mcpu, StringRef &CPU,
return false;
}
- if (Split.second.size() && !DecodeAArch64Features(D, Split.second, Features))
- return false;
+ if (Split.second.size() &&
+ !DecodeAArch64Features(D, Split.second, Features, ArchKind))
+ return false;
- return true;
+ return true;
}
static bool
@@ -108,7 +116,8 @@ getAArch64ArchFeaturesFromMarch(const Driver &D, StringRef March,
llvm::AArch64::ArchKind ArchKind = llvm::AArch64::parseArch(Split.first);
if (ArchKind == llvm::AArch64::ArchKind::INVALID ||
!llvm::AArch64::getArchFeatures(ArchKind, Features) ||
- (Split.second.size() && !DecodeAArch64Features(D, Split.second, Features)))
+ (Split.second.size() &&
+ !DecodeAArch64Features(D, Split.second, Features, ArchKind)))
return false;
return true;
@@ -139,8 +148,9 @@ getAArch64MicroArchFeaturesFromMtune(const Driver &D, StringRef Mtune,
// Handle CPU name is 'native'.
if (MtuneLowerCase == "native")
- MtuneLowerCase = llvm::sys::getHostCPUName();
- if (MtuneLowerCase == "cyclone" || MtuneLowerCase.find("apple") == 0) {
+ MtuneLowerCase = std::string(llvm::sys::getHostCPUName());
+ if (MtuneLowerCase == "cyclone" ||
+ StringRef(MtuneLowerCase).startswith("apple")) {
Features.push_back("+zcm");
Features.push_back("+zcz");
}
@@ -208,6 +218,39 @@ void aarch64::getAArch64TargetFeatures(const Driver &D,
D.Diag(diag::err_drv_invalid_mtp) << A->getAsString(Args);
}
+ // Enable/disable straight line speculation hardening.
+ if (Arg *A = Args.getLastArg(options::OPT_mharden_sls_EQ)) {
+ StringRef Scope = A->getValue();
+ bool EnableRetBr = false;
+ bool EnableBlr = false;
+ if (Scope != "none" && Scope != "all") {
+ SmallVector<StringRef, 4> Opts;
+ Scope.split(Opts, ",");
+ for (auto Opt : Opts) {
+ Opt = Opt.trim();
+ if (Opt == "retbr") {
+ EnableRetBr = true;
+ continue;
+ }
+ if (Opt == "blr") {
+ EnableBlr = true;
+ continue;
+ }
+ D.Diag(diag::err_invalid_sls_hardening)
+ << Scope << A->getAsString(Args);
+ break;
+ }
+ } else if (Scope == "all") {
+ EnableRetBr = true;
+ EnableBlr = true;
+ }
+
+ if (EnableRetBr)
+ Features.push_back("+harden-sls-retbr");
+ if (EnableBlr)
+ Features.push_back("+harden-sls-blr");
+ }
+
// En/disable crc
if (Arg *A = Args.getLastArg(options::OPT_mcrc, options::OPT_mnocrc)) {
if (A->getOption().matches(options::OPT_mcrc))
@@ -322,6 +365,10 @@ fp16_fml_fallthrough:
}
}
+ auto V8_6Pos = llvm::find(Features, "+v8.6a");
+ if (V8_6Pos != std::end(Features))
+ V8_6Pos = Features.insert(std::next(V8_6Pos), {"+i8mm", "+bf16"});
+
if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
options::OPT_munaligned_access))
if (A->getOption().matches(options::OPT_mno_unaligned_access))
@@ -399,6 +446,9 @@ fp16_fml_fallthrough:
if (Args.hasArg(options::OPT_ffixed_x28))
Features.push_back("+reserve-x28");
+ if (Args.hasArg(options::OPT_ffixed_x30))
+ Features.push_back("+reserve-x30");
+
if (Args.hasArg(options::OPT_fcall_saved_x8))
Features.push_back("+call-saved-x8");
diff --git a/clang/lib/Driver/ToolChains/Arch/ARM.cpp b/clang/lib/Driver/ToolChains/Arch/ARM.cpp
index a1923e731489..afe896b4a65b 100644
--- a/clang/lib/Driver/ToolChains/Arch/ARM.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/ARM.cpp
@@ -57,7 +57,7 @@ void arm::getARMArchCPUFromArgs(const ArgList &Args, llvm::StringRef &Arch,
static void getARMHWDivFeatures(const Driver &D, const Arg *A,
const ArgList &Args, StringRef HWDiv,
std::vector<StringRef> &Features) {
- unsigned HWDivID = llvm::ARM::parseHWDiv(HWDiv);
+ uint64_t HWDivID = llvm::ARM::parseHWDiv(HWDiv);
if (!llvm::ARM::getHWDivFeatures(HWDivID, Features))
D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
}
@@ -91,7 +91,7 @@ static void DecodeARMFeaturesFromCPU(const Driver &D, StringRef CPU,
CPU = CPU.split("+").first;
if (CPU != "generic") {
llvm::ARM::ArchKind ArchKind = llvm::ARM::parseCPUArch(CPU);
- unsigned Extension = llvm::ARM::getDefaultExtensions(CPU, ArchKind);
+ uint64_t Extension = llvm::ARM::getDefaultExtensions(CPU, ArchKind);
llvm::ARM::getExtensionFeatures(Extension, Features);
}
}
@@ -137,9 +137,8 @@ bool arm::useAAPCSForMachO(const llvm::Triple &T) {
}
// Select mode for reading thread pointer (-mtp=soft/cp15).
-arm::ReadTPMode arm::getReadTPMode(const ToolChain &TC, const ArgList &Args) {
+arm::ReadTPMode arm::getReadTPMode(const Driver &D, const ArgList &Args) {
if (Arg *A = Args.getLastArg(options::OPT_mtp_mode_EQ)) {
- const Driver &D = TC.getDriver();
arm::ReadTPMode ThreadPointer =
llvm::StringSwitch<arm::ReadTPMode>(A->getValue())
.Case("cp15", ReadTPMode::Cp15)
@@ -156,11 +155,14 @@ arm::ReadTPMode arm::getReadTPMode(const ToolChain &TC, const ArgList &Args) {
return ReadTPMode::Soft;
}
+arm::FloatABI arm::getARMFloatABI(const ToolChain &TC, const ArgList &Args) {
+ return arm::getARMFloatABI(TC.getDriver(), TC.getEffectiveTriple(), Args);
+}
+
// Select the float ABI as determined by -msoft-float, -mhard-float, and
// -mfloat-abi=.
-arm::FloatABI arm::getARMFloatABI(const ToolChain &TC, const ArgList &Args) {
- const Driver &D = TC.getDriver();
- const llvm::Triple &Triple = TC.getEffectiveTriple();
+arm::FloatABI arm::getARMFloatABI(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args) {
auto SubArch = getARMSubArchVersionNumber(Triple);
arm::FloatABI ABI = FloatABI::Invalid;
if (Arg *A =
@@ -276,18 +278,20 @@ arm::FloatABI arm::getARMFloatABI(const ToolChain &TC, const ArgList &Args) {
return ABI;
}
-void arm::getARMTargetFeatures(const ToolChain &TC,
- const llvm::Triple &Triple,
- const ArgList &Args,
- ArgStringList &CmdArgs,
- std::vector<StringRef> &Features,
- bool ForAS) {
- const Driver &D = TC.getDriver();
+static bool hasIntegerMVE(const std::vector<StringRef> &F) {
+ auto MVE = llvm::find(llvm::reverse(F), "+mve");
+ auto NoMVE = llvm::find(llvm::reverse(F), "-mve");
+ return MVE != F.rend() &&
+ (NoMVE == F.rend() || std::distance(MVE, NoMVE) > 0);
+}
+void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args, ArgStringList &CmdArgs,
+ std::vector<StringRef> &Features, bool ForAS) {
bool KernelOrKext =
Args.hasArg(options::OPT_mkernel, options::OPT_fapple_kext);
- arm::FloatABI ABI = arm::getARMFloatABI(TC, Args);
- arm::ReadTPMode ThreadPointer = arm::getReadTPMode(TC, Args);
+ arm::FloatABI ABI = arm::getARMFloatABI(D, Triple, Args);
+ arm::ReadTPMode ThreadPointer = arm::getReadTPMode(D, Args);
const Arg *WaCPU = nullptr, *WaFPU = nullptr;
const Arg *WaHDiv = nullptr, *WaArch = nullptr;
@@ -459,18 +463,13 @@ fp16_fml_fallthrough:
// Disable all features relating to hardware FP, not already disabled by the
// above call.
- Features.insert(Features.end(), {"-neon", "-crypto", "-dotprod", "-fp16fml",
- "-mve", "-mve.fp", "-fpregs"});
+ Features.insert(Features.end(),
+ {"-dotprod", "-fp16fml", "-mve", "-mve.fp", "-fpregs"});
} else if (FPUID == llvm::ARM::FK_NONE) {
// -mfpu=none is *very* similar to -mfloat-abi=soft, only that it should not
// disable MVE-I.
- Features.insert(Features.end(),
- {"-neon", "-crypto", "-dotprod", "-fp16fml", "-mve.fp"});
- // Even though we remove MVE-FP, we still need to check if it was originally
- // present among the requested extensions, because it implies MVE-I, which
- // should not be disabled by -mfpu-none.
- if (!llvm::is_contained(Features, "+mve") &&
- !llvm::is_contained(Features, "+mve.fp"))
+ Features.insert(Features.end(), {"-dotprod", "-fp16fml", "-mve.fp"});
+ if (!hasIntegerMVE(Features))
Features.emplace_back("-fpregs");
}
@@ -612,14 +611,14 @@ fp16_fml_fallthrough:
const std::string arm::getARMArch(StringRef Arch, const llvm::Triple &Triple) {
std::string MArch;
if (!Arch.empty())
- MArch = Arch;
+ MArch = std::string(Arch);
else
- MArch = Triple.getArchName();
+ MArch = std::string(Triple.getArchName());
MArch = StringRef(MArch).split("+").first.lower();
// Handle -march=native.
if (MArch == "native") {
- std::string CPU = llvm::sys::getHostCPUName();
+ std::string CPU = std::string(llvm::sys::getHostCPUName());
if (CPU != "generic") {
// Translate the native cpu into the architecture suffix for that CPU.
StringRef Suffix = arm::getLLVMArchSuffixForARM(CPU, MArch, Triple);
@@ -657,12 +656,12 @@ std::string arm::getARMTargetCPU(StringRef CPU, StringRef Arch,
std::string MCPU = StringRef(CPU).split("+").first.lower();
// Handle -mcpu=native.
if (MCPU == "native")
- return llvm::sys::getHostCPUName();
+ return std::string(llvm::sys::getHostCPUName());
else
return MCPU;
}
- return getARMCPUForMArch(Arch, Triple);
+ return std::string(getARMCPUForMArch(Arch, Triple));
}
/// getLLVMArchSuffixForARM - Get the LLVM ArchKind value to use for a
diff --git a/clang/lib/Driver/ToolChains/Arch/ARM.h b/clang/lib/Driver/ToolChains/Arch/ARM.h
index 5640f8371262..0ba1a59852aa 100644
--- a/clang/lib/Driver/ToolChains/Arch/ARM.h
+++ b/clang/lib/Driver/ToolChains/Arch/ARM.h
@@ -48,13 +48,15 @@ enum class FloatABI {
};
FloatABI getARMFloatABI(const ToolChain &TC, const llvm::opt::ArgList &Args);
-ReadTPMode getReadTPMode(const ToolChain &TC, const llvm::opt::ArgList &Args);
+FloatABI getARMFloatABI(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+ReadTPMode getReadTPMode(const Driver &D, const llvm::opt::ArgList &Args);
bool useAAPCSForMachO(const llvm::Triple &T);
void getARMArchCPUFromArgs(const llvm::opt::ArgList &Args,
llvm::StringRef &Arch, llvm::StringRef &CPU,
bool FromAs = false);
-void getARMTargetFeatures(const ToolChain &TC, const llvm::Triple &Triple,
+void getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs,
std::vector<llvm::StringRef> &Features, bool ForAS);
diff --git a/clang/lib/Driver/ToolChains/Arch/PPC.cpp b/clang/lib/Driver/ToolChains/Arch/PPC.cpp
index f1baadaebf41..144e276a6bd8 100644
--- a/clang/lib/Driver/ToolChains/Arch/PPC.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/PPC.cpp
@@ -26,7 +26,7 @@ std::string ppc::getPPCTargetCPU(const ArgList &Args) {
StringRef CPUName = A->getValue();
if (CPUName == "native") {
- std::string CPU = llvm::sys::getHostCPUName();
+ std::string CPU = std::string(llvm::sys::getHostCPUName());
if (!CPU.empty() && CPU != "generic")
return CPU;
else
@@ -70,6 +70,7 @@ std::string ppc::getPPCTargetCPU(const ArgList &Args) {
.Case("power7", "pwr7")
.Case("power8", "pwr8")
.Case("power9", "pwr9")
+ .Case("power10", "pwr10")
.Case("future", "future")
.Case("pwr3", "pwr3")
.Case("pwr4", "pwr4")
@@ -80,6 +81,7 @@ std::string ppc::getPPCTargetCPU(const ArgList &Args) {
.Case("pwr7", "pwr7")
.Case("pwr8", "pwr8")
.Case("pwr9", "pwr9")
+ .Case("pwr10", "pwr10")
.Case("powerpc", "ppc")
.Case("powerpc64", "ppc64")
.Case("powerpc64le", "ppc64le")
@@ -91,14 +93,16 @@ std::string ppc::getPPCTargetCPU(const ArgList &Args) {
const char *ppc::getPPCAsmModeForCPU(StringRef Name) {
return llvm::StringSwitch<const char *>(Name)
- .Case("pwr7", "-mpower7")
- .Case("power7", "-mpower7")
- .Case("pwr8", "-mpower8")
- .Case("power8", "-mpower8")
- .Case("ppc64le", "-mpower8")
- .Case("pwr9", "-mpower9")
- .Case("power9", "-mpower9")
- .Default("-many");
+ .Case("pwr7", "-mpower7")
+ .Case("power7", "-mpower7")
+ .Case("pwr8", "-mpower8")
+ .Case("power8", "-mpower8")
+ .Case("ppc64le", "-mpower8")
+ .Case("pwr9", "-mpower9")
+ .Case("power9", "-mpower9")
+ .Case("pwr10", "-mpower10")
+ .Case("power10", "-mpower10")
+ .Default("-many");
}
void ppc::getPPCTargetFeatures(const Driver &D, const llvm::Triple &Triple,
diff --git a/clang/lib/Driver/ToolChains/Arch/RISCV.cpp b/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
index 8c343b8693f3..80d12e5aa8da 100644
--- a/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
@@ -22,6 +22,14 @@ using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
+namespace {
+// Represents the major and version number components of a RISC-V extension
+struct RISCVExtensionVersion {
+ StringRef Major;
+ StringRef Minor;
+};
+} // end anonymous namespace
+
static StringRef getExtensionTypeDesc(StringRef Ext) {
if (Ext.startswith("sx"))
return "non-standard supervisor-level extension";
@@ -29,6 +37,8 @@ static StringRef getExtensionTypeDesc(StringRef Ext) {
return "standard supervisor-level extension";
if (Ext.startswith("x"))
return "non-standard user-level extension";
+ if (Ext.startswith("z"))
+ return "standard user-level extension";
return StringRef();
}
@@ -39,10 +49,29 @@ static StringRef getExtensionType(StringRef Ext) {
return "s";
if (Ext.startswith("x"))
return "x";
+ if (Ext.startswith("z"))
+ return "z";
return StringRef();
}
+// If the extension is supported as experimental, return the version of that
+// extension that the compiler currently supports.
+static Optional<RISCVExtensionVersion>
+isExperimentalExtension(StringRef Ext) {
+ if (Ext == "b" || Ext == "zbb" || Ext == "zbc" || Ext == "zbe" ||
+ Ext == "zbf" || Ext == "zbm" || Ext == "zbp" || Ext == "zbr" ||
+ Ext == "zbs" || Ext == "zbt" || Ext == "zbproposedc")
+ return RISCVExtensionVersion{"0", "92"};
+ if (Ext == "v")
+ return RISCVExtensionVersion{"0", "8"};
+ return None;
+}
+
static bool isSupportedExtension(StringRef Ext) {
+ // LLVM supports "z" extensions which are marked as experimental.
+ if (isExperimentalExtension(Ext))
+ return true;
+
// LLVM does not support "sx", "s" nor "x" extensions.
return false;
}
@@ -52,17 +81,15 @@ static bool isSupportedExtension(StringRef Ext) {
// Version number is divided into major and minor version numbers,
// separated by a 'p'. If the minor version is 0 then 'p0' can be
// omitted from the version string. E.g., rv32i2p0, rv32i2, rv32i2p1.
-static bool getExtensionVersion(const Driver &D, StringRef MArch,
- StringRef Ext, StringRef In,
+static bool getExtensionVersion(const Driver &D, const ArgList &Args,
+ StringRef MArch, StringRef Ext, StringRef In,
std::string &Major, std::string &Minor) {
- Major = In.take_while(isDigit);
+ Major = std::string(In.take_while(isDigit));
In = In.substr(Major.size());
- if (Major.empty())
- return true;
- if (In.consume_front("p")) {
- Minor = In.take_while(isDigit);
- In = In.substr(Major.size());
+ if (Major.size() && In.consume_front("p")) {
+ Minor = std::string(In.take_while(isDigit));
+ In = In.substr(Major.size() + 1);
// Expected 'p' to be followed by minor version number.
if (Minor.empty()) {
@@ -74,7 +101,53 @@ static bool getExtensionVersion(const Driver &D, StringRef MArch,
}
}
- // TODO: Handle extensions with version number.
+ // A multi-character extension with a version number must have no
+ // subsequent characters (i.e. it must either end the string or be
+ // followed by an underscore).
+ if (Ext.size() > 1 && In.size()) {
+ std::string Error =
+ "multi-character extensions must be separated by underscores";
+ D.Diag(diag::err_drv_invalid_riscv_ext_arch_name) << MArch << Error << In;
+ return false;
+ }
+
+ // If an experimental extension, require use of the current version number
+ if (auto ExperimentalExtension = isExperimentalExtension(Ext)) {
+ if (!Args.hasArg(options::OPT_menable_experimental_extensions)) {
+ std::string Error =
+ "requires '-menable-experimental-extensions' for experimental extension";
+ D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
+ << MArch << Error << Ext;
+ return false;
+ } else if (Major.empty() && Minor.empty()) {
+ std::string Error =
+ "experimental extension requires explicit version number";
+ D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
+ << MArch << Error << Ext;
+ return false;
+ }
+ auto SupportedVers = *ExperimentalExtension;
+ if (Major != SupportedVers.Major || Minor != SupportedVers.Minor) {
+ std::string Error =
+ "unsupported version number " + Major;
+ if (!Minor.empty())
+ Error += "." + Minor;
+ Error += " for experimental extension (this compiler supports "
+ + SupportedVers.Major.str() + "."
+ + SupportedVers.Minor.str() + ")";
+
+ D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
+ << MArch << Error << Ext;
+ return false;
+ }
+ return true;
+ }
+
+ // Allow extensions to declare no version number
+ if (Major.empty() && Minor.empty())
+ return true;
+
+ // TODO: Handle supported extensions with version number.
std::string Error = "unsupported version number " + Major;
if (!Minor.empty())
Error += "." + Minor;
@@ -89,7 +162,7 @@ static bool getExtensionVersion(const Driver &D, StringRef MArch,
// Parse the ISA string containing non-standard user-level
// extensions, standard supervisor-level extensions and
// non-standard supervisor-level extensions.
-// These extensions start with 'x', 's', 'sx' prefixes, follow a
+// These extensions start with 'z', 'x', 's', 'sx' prefixes, follow a
// canonical order, might have a version number (major, minor)
// and are separated by a single underscore '_'.
// Set the hardware features for the extensions that are supported.
@@ -105,7 +178,7 @@ static void getExtensionFeatures(const Driver &D,
SmallVector<StringRef, 8> Split;
Exts.split(Split, StringRef("_"));
- SmallVector<StringRef, 3> Prefix{"x", "s", "sx"};
+ SmallVector<StringRef, 4> Prefix{"z", "x", "s", "sx"};
auto I = Prefix.begin();
auto E = Prefix.end();
@@ -119,8 +192,10 @@ static void getExtensionFeatures(const Driver &D,
}
StringRef Type = getExtensionType(Ext);
- StringRef Name(Ext.substr(Type.size()));
StringRef Desc = getExtensionTypeDesc(Ext);
+ auto Pos = Ext.find_if(isDigit);
+ StringRef Name(Ext.substr(0, Pos));
+ StringRef Vers(Ext.substr(Pos));
if (Type.empty()) {
D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
@@ -133,7 +208,7 @@ static void getExtensionFeatures(const Driver &D,
++I;
if (I == E) {
- std::string Error = Desc;
+ std::string Error = std::string(Desc);
Error += " not given in canonical order";
D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
<< MArch << Error << Ext;
@@ -143,35 +218,30 @@ static void getExtensionFeatures(const Driver &D,
// The order is OK, do not advance I to the next prefix
// to allow repeated extension type, e.g.: rv32ixabc_xdef.
- if (Name.empty()) {
- std::string Error = Desc;
+ if (Name.size() == Type.size()) {
+ std::string Error = std::string(Desc);
Error += " name missing after";
D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
- << MArch << Error << Ext;
+ << MArch << Error << Type;
return;
}
std::string Major, Minor;
- auto Pos = Name.find_if(isDigit);
- if (Pos != StringRef::npos) {
- auto Next = Name.substr(Pos);
- Name = Name.substr(0, Pos);
- if (!getExtensionVersion(D, MArch, Ext, Next, Major, Minor))
- return;
- }
+ if (!getExtensionVersion(D, Args, MArch, Name, Vers, Major, Minor))
+ return;
// Check if duplicated extension.
- if (llvm::is_contained(AllExts, Ext)) {
+ if (llvm::is_contained(AllExts, Name)) {
std::string Error = "duplicated ";
Error += Desc;
D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
- << MArch << Error << Ext;
+ << MArch << Error << Name;
return;
}
// Extension format is correct, keep parsing the extensions.
// TODO: Save Type, Name, Major, Minor to avoid parsing them later.
- AllExts.push_back(Ext);
+ AllExts.push_back(Name);
}
// Set target features.
@@ -186,7 +256,10 @@ static void getExtensionFeatures(const Driver &D,
<< MArch << Error << Ext;
return;
}
- Features.push_back(Args.MakeArgString("+" + Ext));
+ if (isExperimentalExtension(Ext))
+ Features.push_back(Args.MakeArgString("+experimental-" + Ext));
+ else
+ Features.push_back(Args.MakeArgString("+" + Ext));
}
}
@@ -251,28 +324,35 @@ static bool getArchFeatures(const Driver &D, StringRef MArch,
// Skip rvxxx
StringRef Exts = MArch.substr(5);
- // Remove non-standard extensions and supervisor-level extensions.
- // They have 'x', 's', 'sx' prefixes. Parse them at the end.
- // Find the very first occurrence of 's' or 'x'.
+ // Remove multi-letter standard extensions, non-standard extensions and
+ // supervisor-level extensions. They have 'z', 'x', 's', 'sx' prefixes.
+ // Parse them at the end.
+ // Find the very first occurrence of 's', 'x' or 'z'.
StringRef OtherExts;
- size_t Pos = Exts.find_first_of("sx");
+ size_t Pos = Exts.find_first_of("zsx");
if (Pos != StringRef::npos) {
OtherExts = Exts.substr(Pos);
Exts = Exts.substr(0, Pos);
}
std::string Major, Minor;
- if (!getExtensionVersion(D, MArch, std::string(1, Baseline), Exts, Major,
- Minor))
+ if (!getExtensionVersion(D, Args, MArch, std::string(1, Baseline), Exts,
+ Major, Minor))
return false;
+ // Consume the base ISA version number and any '_' between rvxxx and the
+ // first extension
+ Exts = Exts.drop_front(Major.size());
+ if (!Minor.empty())
+ Exts = Exts.drop_front(Minor.size() + 1 /*'p'*/);
+ Exts.consume_front("_");
+
// TODO: Use version number when setting target features
- // and consume the underscore '_' that might follow.
auto StdExtsItr = StdExts.begin();
auto StdExtsEnd = StdExts.end();
- for (auto I = Exts.begin(), E = Exts.end(); I != E; ++I) {
+ for (auto I = Exts.begin(), E = Exts.end(); I != E; ) {
char c = *I;
// Check ISA extensions are specified in the canonical order.
@@ -295,18 +375,15 @@ static bool getArchFeatures(const Driver &D, StringRef MArch,
// Move to next char to prevent repeated letter.
++StdExtsItr;
- if (std::next(I) != E) {
- // Skip c.
- std::string Next = std::string(std::next(I), E);
- std::string Major, Minor;
- if (!getExtensionVersion(D, MArch, std::string(1, c), Next, Major, Minor))
- return false;
-
- // TODO: Use version number when setting target features
- // and consume the underscore '_' that might follow.
- }
+ std::string Next, Major, Minor;
+ if (std::next(I) != E)
+ Next = std::string(std::next(I), E);
+ if (!getExtensionVersion(D, Args, MArch, std::string(1, c), Next, Major,
+ Minor))
+ return false;
// The order is OK, then push it into features.
+ // TODO: Use version number when setting target features
switch (c) {
default:
// Currently LLVM supports only "mafdc".
@@ -331,7 +408,22 @@ static bool getArchFeatures(const Driver &D, StringRef MArch,
case 'c':
Features.push_back("+c");
break;
+ case 'b':
+ Features.push_back("+experimental-b");
+ break;
+ case 'v':
+ Features.push_back("+experimental-v");
+ break;
}
+
+ // Consume full extension name and version, including any optional '_'
+ // between this extension and the next
+ ++I;
+ I += Major.size();
+ if (Minor.size())
+ I += Minor.size() + 1 /*'p'*/;
+ if (*I == '_')
+ ++I;
}
// Dependency check.
@@ -433,12 +525,11 @@ void riscv::getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple,
Features.push_back("-relax");
// GCC Compatibility: -mno-save-restore is default, unless -msave-restore is
- // specified...
- if (Args.hasFlag(options::OPT_msave_restore, options::OPT_mno_save_restore, false)) {
- // ... but we don't support -msave-restore, so issue a warning.
- D.Diag(diag::warn_drv_clang_unsupported)
- << Args.getLastArg(options::OPT_msave_restore)->getAsString(Args);
- }
+ // specified.
+ if (Args.hasFlag(options::OPT_msave_restore, options::OPT_mno_save_restore, false))
+ Features.push_back("+save-restore");
+ else
+ Features.push_back("-save-restore");
// Now add any that the user explicitly requested on the command line,
// which may override the defaults.
diff --git a/clang/lib/Driver/ToolChains/Arch/SystemZ.cpp b/clang/lib/Driver/ToolChains/Arch/SystemZ.cpp
index 2b77d59fdc66..f81bf68172de 100644
--- a/clang/lib/Driver/ToolChains/Arch/SystemZ.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/SystemZ.cpp
@@ -7,6 +7,8 @@
//===----------------------------------------------------------------------===//
#include "SystemZ.h"
+#include "clang/Config/config.h"
+#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/Host.h"
@@ -16,24 +18,40 @@ using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
+systemz::FloatABI systemz::getSystemZFloatABI(const Driver &D,
+ const ArgList &Args) {
+ // Hard float is the default.
+ systemz::FloatABI ABI = systemz::FloatABI::Hard;
+ if (Args.hasArg(options::OPT_mfloat_abi_EQ))
+ D.Diag(diag::err_drv_unsupported_opt)
+ << Args.getLastArg(options::OPT_mfloat_abi_EQ)->getAsString(Args);
+
+ if (Arg *A = Args.getLastArg(clang::driver::options::OPT_msoft_float,
+ options::OPT_mhard_float))
+ if (A->getOption().matches(clang::driver::options::OPT_msoft_float))
+ ABI = systemz::FloatABI::Soft;
+
+ return ABI;
+}
+
std::string systemz::getSystemZTargetCPU(const ArgList &Args) {
if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_march_EQ)) {
llvm::StringRef CPUName = A->getValue();
if (CPUName == "native") {
- std::string CPU = llvm::sys::getHostCPUName();
+ std::string CPU = std::string(llvm::sys::getHostCPUName());
if (!CPU.empty() && CPU != "generic")
return CPU;
else
return "";
}
- return CPUName;
+ return std::string(CPUName);
}
- return "z10";
+ return CLANG_SYSTEMZ_DEFAULT_ARCH;
}
-void systemz::getSystemZTargetFeatures(const ArgList &Args,
+void systemz::getSystemZTargetFeatures(const Driver &D, const ArgList &Args,
std::vector<llvm::StringRef> &Features) {
// -m(no-)htm overrides use of the transactional-execution facility.
if (Arg *A = Args.getLastArg(options::OPT_mhtm, options::OPT_mno_htm)) {
@@ -49,4 +67,8 @@ void systemz::getSystemZTargetFeatures(const ArgList &Args,
else
Features.push_back("-vector");
}
+
+ systemz::FloatABI FloatABI = systemz::getSystemZFloatABI(D, Args);
+ if (FloatABI == systemz::FloatABI::Soft)
+ Features.push_back("+soft-float");
}
diff --git a/clang/lib/Driver/ToolChains/Arch/SystemZ.h b/clang/lib/Driver/ToolChains/Arch/SystemZ.h
index 77dcbc47be5c..1e42b68a8f3c 100644
--- a/clang/lib/Driver/ToolChains/Arch/SystemZ.h
+++ b/clang/lib/Driver/ToolChains/Arch/SystemZ.h
@@ -9,6 +9,7 @@
#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_SYSTEMZ_H
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_SYSTEMZ_H
+#include "clang/Driver/Driver.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Option/Option.h"
#include <string>
@@ -19,9 +20,16 @@ namespace driver {
namespace tools {
namespace systemz {
+enum class FloatABI {
+ Soft,
+ Hard,
+};
+
+FloatABI getSystemZFloatABI(const Driver &D, const llvm::opt::ArgList &Args);
+
std::string getSystemZTargetCPU(const llvm::opt::ArgList &Args);
-void getSystemZTargetFeatures(const llvm::opt::ArgList &Args,
+void getSystemZTargetFeatures(const Driver &D, const llvm::opt::ArgList &Args,
std::vector<llvm::StringRef> &Features);
} // end namespace systemz
diff --git a/clang/lib/Driver/ToolChains/Arch/VE.cpp b/clang/lib/Driver/ToolChains/Arch/VE.cpp
new file mode 100644
index 000000000000..fa10e4810f1c
--- /dev/null
+++ b/clang/lib/Driver/ToolChains/Arch/VE.cpp
@@ -0,0 +1,26 @@
+//===--- VE.cpp - Tools Implementations -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "VE.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Options.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Option/ArgList.h"
+
+using namespace clang::driver;
+using namespace clang::driver::tools;
+using namespace clang;
+using namespace llvm::opt;
+
+const char *ve::getVEAsmModeForCPU(StringRef Name, const llvm::Triple &Triple) {
+ return "";
+}
+
+void ve::getVETargetFeatures(const Driver &D, const ArgList &Args,
+ std::vector<StringRef> &Features) {}
diff --git a/clang/lib/Driver/ToolChains/Arch/VE.h b/clang/lib/Driver/ToolChains/Arch/VE.h
new file mode 100644
index 000000000000..713e3e7d042f
--- /dev/null
+++ b/clang/lib/Driver/ToolChains/Arch/VE.h
@@ -0,0 +1,33 @@
+//===--- VE.h - VE-specific Tool Helpers ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_VE_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_VE_H
+
+#include "clang/Driver/Driver.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Option/Option.h"
+#include <string>
+#include <vector>
+
+namespace clang {
+namespace driver {
+namespace tools {
+namespace ve {
+
+void getVETargetFeatures(const Driver &D, const llvm::opt::ArgList &Args,
+ std::vector<llvm::StringRef> &Features);
+const char *getVEAsmModeForCPU(llvm::StringRef Name,
+ const llvm::Triple &Triple);
+
+} // end namespace ve
+} // namespace tools
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_VE_H
diff --git a/clang/lib/Driver/ToolChains/Arch/X86.cpp b/clang/lib/Driver/ToolChains/Arch/X86.cpp
index d1e0c8253b79..2cc44c09917f 100644
--- a/clang/lib/Driver/ToolChains/Arch/X86.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/X86.cpp
@@ -31,7 +31,7 @@ const char *x86::getX86TargetCPU(const ArgList &Args,
//
// FIXME: We should also incorporate the detected target features for use
// with -native.
- std::string CPU = llvm::sys::getHostCPUName();
+ std::string CPU = std::string(llvm::sys::getHostCPUName());
if (!CPU.empty() && CPU != "generic")
return Args.MakeArgString(CPU);
}
@@ -94,6 +94,7 @@ const char *x86::getX86TargetCPU(const ArgList &Args,
switch (Triple.getOS()) {
case llvm::Triple::FreeBSD:
+ return "i686";
case llvm::Triple::NetBSD:
case llvm::Triple::OpenBSD:
return "i486";
@@ -146,6 +147,7 @@ void x86::getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
// flags). This is a bit hacky but keeps existing usages working. We should
// consider deprecating this and instead warn if the user requests external
// retpoline thunks and *doesn't* request some form of retpolines.
+ auto SpectreOpt = clang::driver::options::ID::OPT_INVALID;
if (Args.hasArgNoClaim(options::OPT_mretpoline, options::OPT_mno_retpoline,
options::OPT_mspeculative_load_hardening,
options::OPT_mno_speculative_load_hardening)) {
@@ -153,12 +155,14 @@ void x86::getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
false)) {
Features.push_back("+retpoline-indirect-calls");
Features.push_back("+retpoline-indirect-branches");
+ SpectreOpt = options::OPT_mretpoline;
} else if (Args.hasFlag(options::OPT_mspeculative_load_hardening,
options::OPT_mno_speculative_load_hardening,
false)) {
// On x86, speculative load hardening relies on at least using retpolines
// for indirect calls.
Features.push_back("+retpoline-indirect-calls");
+ SpectreOpt = options::OPT_mspeculative_load_hardening;
}
} else if (Args.hasFlag(options::OPT_mretpoline_external_thunk,
options::OPT_mno_retpoline_external_thunk, false)) {
@@ -166,6 +170,44 @@ void x86::getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
// eventually switch to an error here.
Features.push_back("+retpoline-indirect-calls");
Features.push_back("+retpoline-indirect-branches");
+ SpectreOpt = options::OPT_mretpoline_external_thunk;
+ }
+
+ auto LVIOpt = clang::driver::options::ID::OPT_INVALID;
+ if (Args.hasFlag(options::OPT_mlvi_hardening, options::OPT_mno_lvi_hardening,
+ false)) {
+ Features.push_back("+lvi-load-hardening");
+ Features.push_back("+lvi-cfi"); // load hardening implies CFI protection
+ LVIOpt = options::OPT_mlvi_hardening;
+ } else if (Args.hasFlag(options::OPT_mlvi_cfi, options::OPT_mno_lvi_cfi,
+ false)) {
+ Features.push_back("+lvi-cfi");
+ LVIOpt = options::OPT_mlvi_cfi;
+ }
+
+ if (Args.hasFlag(options::OPT_m_seses, options::OPT_mno_seses, false)) {
+ if (LVIOpt == options::OPT_mlvi_hardening)
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << D.getOpts().getOptionName(options::OPT_mlvi_hardening)
+ << D.getOpts().getOptionName(options::OPT_m_seses);
+
+ if (SpectreOpt != clang::driver::options::ID::OPT_INVALID)
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << D.getOpts().getOptionName(SpectreOpt)
+ << D.getOpts().getOptionName(options::OPT_m_seses);
+
+ Features.push_back("+seses");
+ if (!Args.hasArg(options::OPT_mno_lvi_cfi)) {
+ Features.push_back("+lvi-cfi");
+ LVIOpt = options::OPT_mlvi_cfi;
+ }
+ }
+
+ if (SpectreOpt != clang::driver::options::ID::OPT_INVALID &&
+ LVIOpt != clang::driver::options::ID::OPT_INVALID) {
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << D.getOpts().getOptionName(SpectreOpt)
+ << D.getOpts().getOptionName(LVIOpt);
}
// Now add any that the user explicitly requested on the command line,
diff --git a/clang/lib/Driver/ToolChains/BareMetal.cpp b/clang/lib/Driver/ToolChains/BareMetal.cpp
index dff0e04183ef..97cfa7d0e156 100644
--- a/clang/lib/Driver/ToolChains/BareMetal.cpp
+++ b/clang/lib/Driver/ToolChains/BareMetal.cpp
@@ -67,7 +67,7 @@ Tool *BareMetal::buildLinker() const {
std::string BareMetal::getRuntimesDir() const {
SmallString<128> Dir(getDriver().ResourceDir);
llvm::sys::path::append(Dir, "lib", "baremetal");
- return Dir.str();
+ return std::string(Dir.str());
}
void BareMetal::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
@@ -157,7 +157,7 @@ void BareMetal::AddCXXStdlibLibArgs(const ArgList &Args,
void BareMetal::AddLinkRuntimeLib(const ArgList &Args,
ArgStringList &CmdArgs) const {
CmdArgs.push_back(Args.MakeArgString("-lclang_rt.builtins-" +
- getTriple().getArchName() + ".a"));
+ getTriple().getArchName()));
}
void baremetal::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -191,7 +191,7 @@ void baremetal::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- C.addCommand(std::make_unique<Command>(JA, *this,
- Args.MakeArgString(TC.GetLinkerPath()),
- CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Args.MakeArgString(TC.GetLinkerPath()),
+ CmdArgs, Inputs));
}
diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
index 9b3055413e9e..9d6333bb5f1d 100644
--- a/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/clang/lib/Driver/ToolChains/Clang.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "Clang.h"
+#include "AMDGPU.h"
#include "Arch/AArch64.h"
#include "Arch/ARM.h"
#include "Arch/Mips.h"
@@ -14,12 +15,12 @@
#include "Arch/RISCV.h"
#include "Arch/Sparc.h"
#include "Arch/SystemZ.h"
+#include "Arch/VE.h"
#include "Arch/X86.h"
-#include "AMDGPU.h"
#include "CommonArgs.h"
#include "Hexagon.h"
-#include "MSP430.h"
#include "InputInfo.h"
+#include "MSP430.h"
#include "PS4CPU.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/CodeGenOptions.h"
@@ -35,6 +36,7 @@
#include "llvm/Config/llvm-config.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Compression.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
@@ -307,10 +309,9 @@ static void getWebAssemblyTargetFeatures(const ArgList &Args,
handleTargetFeaturesGroup(Args, Features, options::OPT_m_wasm_Features_Group);
}
-static void getTargetFeatures(const ToolChain &TC, const llvm::Triple &Triple,
+static void getTargetFeatures(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args, ArgStringList &CmdArgs,
- bool ForAS) {
- const Driver &D = TC.getDriver();
+ bool ForAS, bool IsAux = false) {
std::vector<StringRef> Features;
switch (Triple.getArch()) {
default:
@@ -326,7 +327,7 @@ static void getTargetFeatures(const ToolChain &TC, const llvm::Triple &Triple,
case llvm::Triple::armeb:
case llvm::Triple::thumb:
case llvm::Triple::thumbeb:
- arm::getARMTargetFeatures(TC, Triple, Args, CmdArgs, Features, ForAS);
+ arm::getARMTargetFeatures(D, Triple, Args, CmdArgs, Features, ForAS);
break;
case llvm::Triple::ppc:
@@ -339,7 +340,7 @@ static void getTargetFeatures(const ToolChain &TC, const llvm::Triple &Triple,
riscv::getRISCVTargetFeatures(D, Triple, Args, Features);
break;
case llvm::Triple::systemz:
- systemz::getSystemZTargetFeatures(Args, Features);
+ systemz::getSystemZTargetFeatures(D, Args, Features);
break;
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_32:
@@ -368,27 +369,14 @@ static void getTargetFeatures(const ToolChain &TC, const llvm::Triple &Triple,
break;
case llvm::Triple::msp430:
msp430::getMSP430TargetFeatures(D, Args, Features);
+ break;
+ case llvm::Triple::ve:
+ ve::getVETargetFeatures(D, Args, Features);
}
- // Find the last of each feature.
- llvm::StringMap<unsigned> LastOpt;
- for (unsigned I = 0, N = Features.size(); I < N; ++I) {
- StringRef Name = Features[I];
- assert(Name[0] == '-' || Name[0] == '+');
- LastOpt[Name.drop_front(1)] = I;
- }
-
- for (unsigned I = 0, N = Features.size(); I < N; ++I) {
- // If this feature was overridden, ignore it.
- StringRef Name = Features[I];
- llvm::StringMap<unsigned>::iterator LastI = LastOpt.find(Name.drop_front(1));
- assert(LastI != LastOpt.end());
- unsigned Last = LastI->second;
- if (Last != I)
- continue;
-
- CmdArgs.push_back("-target-feature");
- CmdArgs.push_back(Name.data());
+ for (auto Feature : unifyTargetFeatures(Features)) {
+ CmdArgs.push_back(IsAux ? "-aux-target-feature" : "-target-feature");
+ CmdArgs.push_back(Feature.data());
}
}
@@ -464,6 +452,11 @@ static void addExceptionArgs(const ArgList &Args, types::ID InputType,
}
}
+ // OPT_fignore_exceptions means exceptions could still be thrown,
+ // but no cleanup or catch would happen in the current module.
+ // So we do not set EH to false.
+ Args.AddLastArg(CmdArgs, options::OPT_fignore_exceptions);
+
if (EH)
CmdArgs.push_back("-fexceptions");
}
@@ -505,7 +498,7 @@ static codegenoptions::DebugInfoKind DebugLevelToInfoKind(const Arg &A) {
return codegenoptions::DebugLineTablesOnly;
if (A.getOption().matches(options::OPT_gline_directives_only))
return codegenoptions::DebugDirectivesOnly;
- return codegenoptions::LimitedDebugInfo;
+ return codegenoptions::DebugInfoConstructor;
}
static bool mustUseNonLeafFramePointerForTarget(const llvm::Triple &Triple) {
@@ -522,7 +515,7 @@ static bool mustUseNonLeafFramePointerForTarget(const llvm::Triple &Triple) {
static bool useFramePointerForTargetByDefault(const ArgList &Args,
const llvm::Triple &Triple) {
- if (Args.hasArg(options::OPT_pg))
+ if (Args.hasArg(options::OPT_pg) && !Args.hasArg(options::OPT_mfentry))
return true;
switch (Triple.getArch()) {
@@ -553,6 +546,13 @@ static bool useFramePointerForTargetByDefault(const ArgList &Args,
Triple.isOSHurd()) {
switch (Triple.getArch()) {
// Don't use a frame pointer on linux if optimizing for certain targets.
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ if (Triple.isAndroid())
+ return true;
+ LLVM_FALLTHROUGH;
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
case llvm::Triple::mips:
@@ -721,38 +721,6 @@ static void addDashXForInput(const ArgList &Args, const InputInfo &Input,
}
}
-static void appendUserToPath(SmallVectorImpl<char> &Result) {
-#ifdef LLVM_ON_UNIX
- const char *Username = getenv("LOGNAME");
-#else
- const char *Username = getenv("USERNAME");
-#endif
- if (Username) {
- // Validate that LoginName can be used in a path, and get its length.
- size_t Len = 0;
- for (const char *P = Username; *P; ++P, ++Len) {
- if (!clang::isAlphanumeric(*P) && *P != '_') {
- Username = nullptr;
- break;
- }
- }
-
- if (Username && Len > 0) {
- Result.append(Username, Username + Len);
- return;
- }
- }
-
-// Fallback to user id.
-#ifdef LLVM_ON_UNIX
- std::string UID = llvm::utostr(getuid());
-#else
- // FIXME: Windows seems to have an 'SID' that might work.
- std::string UID = "9999";
-#endif
- Result.append(UID.begin(), UID.end());
-}
-
static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
const Driver &D, const InputInfo &Output,
const ArgList &Args,
@@ -808,8 +776,8 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
CmdArgs.push_back("-fprofile-instrument=clang");
if (TC.getTriple().isWindowsMSVCEnvironment()) {
// Add dependent lib for clang_rt.profile
- CmdArgs.push_back(Args.MakeArgString("--dependent-lib=" +
- TC.getCompilerRT(Args, "profile")));
+ CmdArgs.push_back(Args.MakeArgString(
+ "--dependent-lib=" + TC.getCompilerRTBasename(Args, "profile")));
}
}
@@ -826,8 +794,9 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
}
if (PGOGenArg) {
if (TC.getTriple().isWindowsMSVCEnvironment()) {
- CmdArgs.push_back(Args.MakeArgString("--dependent-lib=" +
- TC.getCompilerRT(Args, "profile")));
+ // Add dependent lib for clang_rt.profile
+ CmdArgs.push_back(Args.MakeArgString(
+ "--dependent-lib=" + TC.getCompilerRTBasename(Args, "profile")));
}
if (PGOGenArg->getOption().matches(
PGOGenerateArg ? options::OPT_fprofile_generate_EQ
@@ -856,11 +825,10 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
}
}
- bool EmitCovNotes = Args.hasArg(options::OPT_ftest_coverage) ||
+ bool EmitCovNotes = Args.hasFlag(options::OPT_ftest_coverage,
+ options::OPT_fno_test_coverage, false) ||
Args.hasArg(options::OPT_coverage);
- bool EmitCovData = Args.hasFlag(options::OPT_fprofile_arcs,
- options::OPT_fno_profile_arcs, false) ||
- Args.hasArg(options::OPT_coverage);
+ bool EmitCovData = TC.needsGCovInstrumentation(Args);
if (EmitCovNotes)
CmdArgs.push_back("-femit-coverage-notes");
if (EmitCovData)
@@ -1190,12 +1158,14 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_MP);
Args.AddLastArg(CmdArgs, options::OPT_MV);
- // Add offload include arguments specific for CUDA. This must happen before
- // we -I or -include anything else, because we must pick up the CUDA headers
- // from the particular CUDA installation, rather than from e.g.
- // /usr/local/include.
+ // Add offload include arguments specific for CUDA/HIP. This must happen
+ // before we -I or -include anything else, because we must pick up the
+ // CUDA/HIP headers from the particular CUDA/ROCm installation, rather than
+ // from e.g. /usr/local/include.
if (JA.isOffloading(Action::OFK_Cuda))
getToolChain().AddCudaIncludeArgs(Args, CmdArgs);
+ if (JA.isOffloading(Action::OFK_HIP))
+ getToolChain().AddHIPIncludeArgs(Args, CmdArgs);
// If we are offloading to a target via OpenMP we need to include the
// openmp_wrappers folder which contains alternative system headers.
@@ -1212,7 +1182,7 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
}
CmdArgs.push_back("-include");
- CmdArgs.push_back("__clang_openmp_math_declares.h");
+ CmdArgs.push_back("__clang_openmp_device_functions.h");
}
// Add -i* options, and automatically translate to
@@ -1227,6 +1197,7 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
if (YcArg && JA.getKind() >= Action::PrecompileJobClass &&
JA.getKind() <= Action::AssembleJobClass) {
CmdArgs.push_back(Args.MakeArgString("-building-pch-with-obj"));
+ CmdArgs.push_back(Args.MakeArgString("-fpch-instantiate-templates"));
}
if (YcArg || YuArg) {
StringRef ThroughHeader = YcArg ? YcArg->getValue() : YuArg->getValue();
@@ -1404,20 +1375,6 @@ static bool isSignedCharDefault(const llvm::Triple &Triple) {
}
}
-static bool isNoCommonDefault(const llvm::Triple &Triple) {
- switch (Triple.getArch()) {
- default:
- if (Triple.isOSFuchsia())
- return true;
- return false;
-
- case llvm::Triple::xcore:
- case llvm::Triple::wasm32:
- case llvm::Triple::wasm64:
- return true;
- }
-}
-
static bool hasMultipleInvocations(const llvm::Triple &Triple,
const ArgList &Args) {
// Supported only on Darwin where we invoke the compiler multiple times
@@ -1594,7 +1551,7 @@ void Clang::RenderTargetOptions(const llvm::Triple &EffectiveTriple,
const ToolChain &TC = getToolChain();
// Add the target features
- getTargetFeatures(TC, EffectiveTriple, Args, CmdArgs, false);
+ getTargetFeatures(TC.getDriver(), EffectiveTriple, Args, CmdArgs, false);
// Add target specific flags.
switch (TC.getArch()) {
@@ -1662,6 +1619,10 @@ void Clang::RenderTargetOptions(const llvm::Triple &EffectiveTriple,
case llvm::Triple::wasm64:
AddWebAssemblyTargetArgs(Args, CmdArgs);
break;
+
+ case llvm::Triple::ve:
+ AddVETargetArgs(Args, CmdArgs);
+ break;
}
}
@@ -1970,6 +1931,36 @@ void Clang::AddPPCTargetArgs(const ArgList &Args,
}
}
+static void SetRISCVSmallDataLimit(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ const Driver &D = TC.getDriver();
+ const llvm::Triple &Triple = TC.getTriple();
+ // The default small data limit is eight.
+ const char *SmallDataLimit = "8";
+ // Get the small data limit.
+ if (Args.getLastArg(options::OPT_shared, options::OPT_fpic,
+ options::OPT_fPIC)) {
+ // Linker relaxation is not supported for PIC.
+ SmallDataLimit = "0";
+ if (Args.hasArg(options::OPT_G)) {
+ D.Diag(diag::warn_drv_unsupported_sdata);
+ }
+ } else if (Args.getLastArgValue(options::OPT_mcmodel_EQ)
+ .equals_lower("large") &&
+ (Triple.getArch() == llvm::Triple::riscv64)) {
+ // Linker relaxation is not supported for RV64 with the large code model.
+ SmallDataLimit = "0";
+ if (Args.hasArg(options::OPT_G)) {
+ D.Diag(diag::warn_drv_unsupported_sdata);
+ }
+ } else if (Arg *A = Args.getLastArg(options::OPT_G)) {
+ SmallDataLimit = A->getValue();
+ }
+ // Forward the -msmall-data-limit= option.
+ CmdArgs.push_back("-msmall-data-limit");
+ CmdArgs.push_back(SmallDataLimit);
+}
+
void Clang::AddRISCVTargetArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
const llvm::Triple &Triple = getToolChain().getTriple();
@@ -1977,6 +1968,8 @@ void Clang::AddRISCVTargetArgs(const ArgList &Args,
CmdArgs.push_back("-target-abi");
CmdArgs.push_back(ABIName.data());
+
+ SetRISCVSmallDataLimit(getToolChain(), Args, CmdArgs);
}
void Clang::AddSparcTargetArgs(const ArgList &Args,
@@ -2003,70 +1996,30 @@ void Clang::AddSystemZTargetArgs(const ArgList &Args,
options::OPT_mno_backchain, false);
bool HasPackedStack = Args.hasFlag(options::OPT_mpacked_stack,
options::OPT_mno_packed_stack, false);
- if (HasBackchain && HasPackedStack) {
+ systemz::FloatABI FloatABI =
+ systemz::getSystemZFloatABI(getToolChain().getDriver(), Args);
+ bool HasSoftFloat = (FloatABI == systemz::FloatABI::Soft);
+ if (HasBackchain && HasPackedStack && !HasSoftFloat) {
const Driver &D = getToolChain().getDriver();
D.Diag(diag::err_drv_unsupported_opt)
- << Args.getLastArg(options::OPT_mpacked_stack)->getAsString(Args) +
- " " + Args.getLastArg(options::OPT_mbackchain)->getAsString(Args);
+ << "-mpacked-stack -mbackchain -mhard-float";
}
if (HasBackchain)
CmdArgs.push_back("-mbackchain");
if (HasPackedStack)
CmdArgs.push_back("-mpacked-stack");
-}
-
-static void addX86AlignBranchArgs(const Driver &D, const ArgList &Args,
- ArgStringList &CmdArgs) {
- if (Args.hasArg(options::OPT_mbranches_within_32B_boundaries)) {
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back("-x86-branches-within-32B-boundaries");
- }
- if (const Arg *A = Args.getLastArg(options::OPT_malign_branch_boundary_EQ)) {
- StringRef Value = A->getValue();
- unsigned Boundary;
- if (Value.getAsInteger(10, Boundary) || Boundary < 16 ||
- !llvm::isPowerOf2_64(Boundary)) {
- D.Diag(diag::err_drv_invalid_argument_to_option)
- << Value << A->getOption().getName();
- } else {
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back(
- Args.MakeArgString("-x86-align-branch-boundary=" + Twine(Boundary)));
- }
- }
- if (const Arg *A = Args.getLastArg(options::OPT_malign_branch_EQ)) {
- std::string AlignBranch;
- for (StringRef T : A->getValues()) {
- if (T != "fused" && T != "jcc" && T != "jmp" && T != "call" &&
- T != "ret" && T != "indirect")
- D.Diag(diag::err_drv_invalid_malign_branch_EQ)
- << T << "fused, jcc, jmp, call, ret, indirect";
- if (!AlignBranch.empty())
- AlignBranch += '+';
- AlignBranch += T;
- }
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back(Args.MakeArgString("-x86-align-branch=" + AlignBranch));
- }
- if (const Arg *A =
- Args.getLastArg(options::OPT_malign_branch_prefix_size_EQ)) {
- StringRef Value = A->getValue();
- unsigned PrefixSize;
- if (Value.getAsInteger(10, PrefixSize) || PrefixSize > 5) {
- D.Diag(diag::err_drv_invalid_argument_to_option)
- << Value << A->getOption().getName();
- } else {
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back(Args.MakeArgString("-x86-align-branch-prefix-size=" +
- Twine(PrefixSize)));
- }
+ if (HasSoftFloat) {
+ // Floating-point operations and argument passing use the soft-float ABI.
+ CmdArgs.push_back("-msoft-float");
+ CmdArgs.push_back("-mfloat-abi");
+ CmdArgs.push_back("soft");
}
}
void Clang::AddX86TargetArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
const Driver &D = getToolChain().getDriver();
- addX86AlignBranchArgs(D, Args, CmdArgs);
+ addX86AlignBranchArgs(D, Args, CmdArgs, /*IsLTO=*/false);
if (!Args.hasFlag(options::OPT_mred_zone, options::OPT_mno_red_zone, true) ||
Args.hasArg(options::OPT_mkernel) ||
@@ -2167,6 +2120,12 @@ void Clang::AddWebAssemblyTargetArgs(const ArgList &Args,
}
}
+void Clang::AddVETargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const {
+ // Floating point operations and argument passing are hard.
+ CmdArgs.push_back("-mfloat-abi");
+ CmdArgs.push_back("hard");
+}
+
void Clang::DumpCompilationDatabase(Compilation &C, StringRef Filename,
StringRef Target, const InputInfo &Output,
const InputInfo &Input, const ArgList &Args) const {
@@ -2421,7 +2380,7 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
CmdArgs.push_back(Value.data());
} else {
RenderDebugEnablingArgs(Args, CmdArgs,
- codegenoptions::LimitedDebugInfo,
+ codegenoptions::DebugInfoConstructor,
DwarfVersion, llvm::DebuggerKind::Default);
}
} else if (Value.startswith("-mcpu") || Value.startswith("-mfpu") ||
@@ -2480,7 +2439,8 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
bool OFastEnabled, const ArgList &Args,
- ArgStringList &CmdArgs) {
+ ArgStringList &CmdArgs,
+ const JobAction &JA) {
// Handle various floating point optimization flags, mapping them to the
// appropriate LLVM code generation flags. This is complicated by several
// "umbrella" flags, so we do this by stepping through the flags incrementally
@@ -2502,10 +2462,17 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
StringRef FPModel = "";
// -ffp-exception-behavior options: strict, maytrap, ignore
StringRef FPExceptionBehavior = "";
- StringRef DenormalFPMath = "";
+ const llvm::DenormalMode DefaultDenormalFPMath =
+ TC.getDefaultDenormalModeForType(Args, JA);
+ const llvm::DenormalMode DefaultDenormalFP32Math =
+ TC.getDefaultDenormalModeForType(Args, JA, &llvm::APFloat::IEEEsingle());
+
+ llvm::DenormalMode DenormalFPMath = DefaultDenormalFPMath;
+ llvm::DenormalMode DenormalFP32Math = DefaultDenormalFP32Math;
StringRef FPContract = "";
bool StrictFPModel = false;
+
if (const Arg *A = Args.getLastArg(options::OPT_flimited_precision_EQ)) {
CmdArgs.push_back("-mlimit-float-precision");
CmdArgs.push_back(A->getValue());
@@ -2527,8 +2494,13 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
ReciprocalMath = false;
SignedZeros = true;
// -fno_fast_math restores default denormal and fpcontract handling
- DenormalFPMath = "";
FPContract = "";
+ DenormalFPMath = llvm::DenormalMode::getIEEE();
+
+ // FIXME: The target may have picked a non-IEEE default mode here based on
+ // -cl-denorms-are-zero. Should the target consider -fp-model interaction?
+ DenormalFP32Math = llvm::DenormalMode::getIEEE();
+
StringRef Val = A->getValue();
if (OFastEnabled && !Val.equals("fast")) {
// Only -ffp-model=fast is compatible with OFast, ignore.
@@ -2562,6 +2534,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
optID = options::OPT_frounding_math;
FPExceptionBehavior = "strict";
FPModel = Val;
+ FPContract = "off";
TrappingMath = true;
} else
D.Diag(diag::err_drv_unsupported_option_argument)
@@ -2621,7 +2594,19 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
break;
case options::OPT_fdenormal_fp_math_EQ:
- DenormalFPMath = A->getValue();
+ DenormalFPMath = llvm::parseDenormalFPAttribute(A->getValue());
+ if (!DenormalFPMath.isValid()) {
+ D.Diag(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue();
+ }
+ break;
+
+ case options::OPT_fdenormal_fp_math_f32_EQ:
+ DenormalFP32Math = llvm::parseDenormalFPAttribute(A->getValue());
+ if (!DenormalFP32Math.isValid()) {
+ D.Diag(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue();
+ }
break;
// Validate and pass through -ffp-contract option.
@@ -2690,8 +2675,10 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
SignedZeros = true;
TrappingMath = true;
FPExceptionBehavior = "strict";
- // -fno_unsafe_math_optimizations restores default denormal handling
- DenormalFPMath = "";
+
+ // The target may have opted to flush by default, so force IEEE.
+ DenormalFPMath = llvm::DenormalMode::getIEEE();
+ DenormalFP32Math = llvm::DenormalMode::getIEEE();
break;
case options::OPT_Ofast:
@@ -2724,7 +2711,8 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
TrappingMath = false;
RoundingFPMath = false;
// -fno_fast_math restores default denormal and fpcontract handling
- DenormalFPMath = "";
+ DenormalFPMath = DefaultDenormalFPMath;
+ DenormalFP32Math = llvm::DenormalMode::getIEEE();
FPContract = "";
break;
}
@@ -2734,7 +2722,9 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
if (HonorINFs && HonorNaNs &&
!AssociativeMath && !ReciprocalMath &&
SignedZeros && TrappingMath && RoundingFPMath &&
- DenormalFPMath.empty() && FPContract.empty())
+ (FPContract.equals("off") || FPContract.empty()) &&
+ DenormalFPMath == llvm::DenormalMode::getIEEE() &&
+ DenormalFP32Math == llvm::DenormalMode::getIEEE())
// OK: Current Arg doesn't conflict with -ffp-model=strict
;
else {
@@ -2780,9 +2770,21 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
} else if (TrappingMathPresent)
CmdArgs.push_back("-fno-trapping-math");
- if (!DenormalFPMath.empty())
- CmdArgs.push_back(
- Args.MakeArgString("-fdenormal-fp-math=" + DenormalFPMath));
+ // The default is IEEE.
+ if (DenormalFPMath != llvm::DenormalMode::getIEEE()) {
+ llvm::SmallString<64> DenormFlag;
+ llvm::raw_svector_ostream ArgStr(DenormFlag);
+ ArgStr << "-fdenormal-fp-math=" << DenormalFPMath;
+ CmdArgs.push_back(Args.MakeArgString(ArgStr.str()));
+ }
+
+ // Add f32 specific denormal mode flag if it's different.
+ if (DenormalFP32Math != DenormalFPMath) {
+ llvm::SmallString<64> DenormFlag;
+ llvm::raw_svector_ostream ArgStr(DenormFlag);
+ ArgStr << "-fdenormal-fp-math-f32=" << DenormalFP32Math;
+ CmdArgs.push_back(Args.MakeArgString(ArgStr.str()));
+ }
if (!FPContract.empty())
CmdArgs.push_back(Args.MakeArgString("-ffp-contract=" + FPContract));
@@ -2957,6 +2959,22 @@ static void RenderSSPOptions(const ToolChain &TC, const ArgList &Args,
}
}
+static void RenderSCPOptions(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ const llvm::Triple &EffectiveTriple = TC.getEffectiveTriple();
+
+ if (!EffectiveTriple.isOSLinux())
+ return;
+
+ if (!EffectiveTriple.isX86() && !EffectiveTriple.isSystemZ() &&
+ !EffectiveTriple.isPPC64())
+ return;
+
+ if (Args.hasFlag(options::OPT_fstack_clash_protection,
+ options::OPT_fnostack_clash_protection, false))
+ CmdArgs.push_back("-fstack-clash-protection");
+}
+
static void RenderTrivialAutoVarInitOptions(const Driver &D,
const ToolChain &TC,
const ArgList &Args,
@@ -2999,9 +3017,26 @@ static void RenderTrivialAutoVarInitOptions(const Driver &D,
CmdArgs.push_back(
Args.MakeArgString("-ftrivial-auto-var-init=" + TrivialAutoVarInit));
}
+
+ if (Arg *A =
+ Args.getLastArg(options::OPT_ftrivial_auto_var_init_stop_after)) {
+ if (!Args.hasArg(options::OPT_ftrivial_auto_var_init) ||
+ StringRef(
+ Args.getLastArg(options::OPT_ftrivial_auto_var_init)->getValue()) ==
+ "uninitialized")
+ D.Diag(diag::err_drv_trivial_auto_var_init_stop_after_missing_dependency);
+ A->claim();
+ StringRef Val = A->getValue();
+ if (std::stoi(Val.str()) <= 0)
+ D.Diag(diag::err_drv_trivial_auto_var_init_stop_after_invalid_value);
+ CmdArgs.push_back(
+ Args.MakeArgString("-ftrivial-auto-var-init-stop-after=" + Val));
+ }
}
static void RenderOpenCLOptions(const ArgList &Args, ArgStringList &CmdArgs) {
+ // cl-denorms-are-zero is not forwarded. It is translated into a generic flag
+ // for denormal flushing handling based on the target.
const unsigned ForwardedArguments[] = {
options::OPT_cl_opt_disable,
options::OPT_cl_strict_aliasing,
@@ -3012,7 +3047,6 @@ static void RenderOpenCLOptions(const ArgList &Args, ArgStringList &CmdArgs) {
options::OPT_cl_fast_relaxed_math,
options::OPT_cl_mad_enable,
options::OPT_cl_no_signed_zeros,
- options::OPT_cl_denorms_are_zero,
options::OPT_cl_fp32_correctly_rounded_divide_sqrt,
options::OPT_cl_uniform_work_group_size
};
@@ -3136,11 +3170,13 @@ static void RenderBuiltinOptions(const ToolChain &TC, const llvm::Triple &T,
CmdArgs.push_back("-fno-math-builtin");
}
-void Driver::getDefaultModuleCachePath(SmallVectorImpl<char> &Result) {
- llvm::sys::path::system_temp_directory(/*erasedOnReboot=*/false, Result);
- llvm::sys::path::append(Result, "org.llvm.clang.");
- appendUserToPath(Result);
- llvm::sys::path::append(Result, "ModuleCache");
+bool Driver::getDefaultModuleCachePath(SmallVectorImpl<char> &Result) {
+ if (llvm::sys::path::cache_directory(Result)) {
+ llvm::sys::path::append(Result, "clang");
+ llvm::sys::path::append(Result, "ModuleCache");
+ return true;
+ }
+ return false;
}
static void RenderModulesOptions(Compilation &C, const Driver &D,
@@ -3197,6 +3233,7 @@ static void RenderModulesOptions(Compilation &C, const Driver &D,
if (Arg *A = Args.getLastArg(options::OPT_fmodules_cache_path))
Path = A->getValue();
+ bool HasPath = true;
if (C.isForDiagnostics()) {
// When generating crash reports, we want to emit the modules along with
// the reproduction sources, so we ignore any provided module path.
@@ -3205,12 +3242,16 @@ static void RenderModulesOptions(Compilation &C, const Driver &D,
llvm::sys::path::append(Path, "modules");
} else if (Path.empty()) {
// No module path was provided: use the default.
- Driver::getDefaultModuleCachePath(Path);
+ HasPath = Driver::getDefaultModuleCachePath(Path);
}
- const char Arg[] = "-fmodules-cache-path=";
- Path.insert(Path.begin(), Arg, Arg + strlen(Arg));
- CmdArgs.push_back(Args.MakeArgString(Path));
+ // `HasPath` will only be false if getDefaultModuleCachePath() fails.
+ // That being said, that failure is unlikely and not caching is harmless.
+ if (HasPath) {
+ const char Arg[] = "-fmodules-cache-path=";
+ Path.insert(Path.begin(), Arg, Arg + strlen(Arg));
+ CmdArgs.push_back(Args.MakeArgString(Path));
+ }
}
if (HaveModules) {
@@ -3469,9 +3510,9 @@ static void RenderDiagnosticsOptions(const Driver &D, const ArgList &Args,
CmdArgs.push_back("-fno-diagnostics-fixit-info");
// Enable -fdiagnostics-show-option by default.
- if (Args.hasFlag(options::OPT_fdiagnostics_show_option,
- options::OPT_fno_diagnostics_show_option))
- CmdArgs.push_back("-fdiagnostics-show-option");
+ if (!Args.hasFlag(options::OPT_fdiagnostics_show_option,
+ options::OPT_fno_diagnostics_show_option, true))
+ CmdArgs.push_back("-fno-diagnostics-show-option");
if (const Arg *A =
Args.getLastArg(options::OPT_fdiagnostics_show_category_EQ)) {
@@ -3574,8 +3615,7 @@ static DwarfFissionKind getDebugFissionKind(const Driver &D,
static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
const llvm::Triple &T, const ArgList &Args,
- bool EmitCodeView, bool IsWindowsMSVC,
- ArgStringList &CmdArgs,
+ bool EmitCodeView, ArgStringList &CmdArgs,
codegenoptions::DebugInfoKind &DebugInfoKind,
DwarfFissionKind &DwarfFission) {
if (Args.hasFlag(options::OPT_fdebug_info_for_profiling,
@@ -3613,7 +3653,7 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
if (const Arg *A =
Args.getLastArg(options::OPT_g_Group, options::OPT_gsplit_dwarf,
options::OPT_gsplit_dwarf_EQ)) {
- DebugInfoKind = codegenoptions::LimitedDebugInfo;
+ DebugInfoKind = codegenoptions::DebugInfoConstructor;
// If the last option explicitly specified a debug-info level, use it.
if (checkDebugInfoOption(A, Args, D, TC) &&
@@ -3708,10 +3748,9 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
// not to include any column info.
if (const Arg *A = Args.getLastArg(options::OPT_gcolumn_info))
(void)checkDebugInfoOption(A, Args, D, TC);
- if (Args.hasFlag(options::OPT_gcolumn_info, options::OPT_gno_column_info,
- /*Default=*/!EmitCodeView &&
- DebuggerTuning != llvm::DebuggerKind::SCE))
- CmdArgs.push_back("-dwarf-column-info");
+ if (!Args.hasFlag(options::OPT_gcolumn_info, options::OPT_gno_column_info,
+ !EmitCodeView && DebuggerTuning != llvm::DebuggerKind::SCE))
+ CmdArgs.push_back("-gno-column-info");
// FIXME: Move backend command line options to the module.
// If -gline-tables-only or -gline-directives-only is the last option it wins.
@@ -3719,7 +3758,7 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
if (checkDebugInfoOption(A, Args, D, TC)) {
if (DebugInfoKind != codegenoptions::DebugLineTablesOnly &&
DebugInfoKind != codegenoptions::DebugDirectivesOnly) {
- DebugInfoKind = codegenoptions::LimitedDebugInfo;
+ DebugInfoKind = codegenoptions::DebugInfoConstructor;
CmdArgs.push_back("-dwarf-ext-refs");
CmdArgs.push_back("-fmodule-format=obj");
}
@@ -3739,7 +3778,9 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
TC.GetDefaultStandaloneDebug());
if (const Arg *A = Args.getLastArg(options::OPT_fstandalone_debug))
(void)checkDebugInfoOption(A, Args, D, TC);
- if (DebugInfoKind == codegenoptions::LimitedDebugInfo && NeedFullDebug)
+ if ((DebugInfoKind == codegenoptions::LimitedDebugInfo ||
+ DebugInfoKind == codegenoptions::DebugInfoConstructor) &&
+ NeedFullDebug)
DebugInfoKind = codegenoptions::FullDebugInfo;
if (Args.hasFlag(options::OPT_gembed_source, options::OPT_gno_embed_source,
@@ -3912,7 +3953,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- const llvm::Triple *AuxTriple = IsCuda ? TC.getAuxTriple() : nullptr;
+ const llvm::Triple *AuxTriple =
+ (IsCuda || IsHIP) ? TC.getAuxTriple() : nullptr;
bool IsWindowsMSVC = RawTriple.isWindowsMSVCEnvironment();
bool IsIAMCU = RawTriple.isOSIAMCU();
@@ -3977,6 +4019,18 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(NormalizedTriple));
}
+ if (Args.hasFlag(options::OPT_fsycl, options::OPT_fno_sycl, false)) {
+ CmdArgs.push_back("-fsycl");
+ CmdArgs.push_back("-fsycl-is-device");
+
+ if (Arg *A = Args.getLastArg(options::OPT_sycl_std_EQ)) {
+ A->render(Args, CmdArgs);
+ } else {
+ // Ensure the default version in SYCL mode is 1.2.1 (aka 2017)
+ CmdArgs.push_back("-sycl-std=2017");
+ }
+ }
+
if (IsOpenMPDevice) {
// We have to pass the triple of the host if compiling for an OpenMP device.
std::string NormalizedTriple =
@@ -3990,9 +4044,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Triple.isOSWindows() && (Triple.getArch() == llvm::Triple::arm ||
Triple.getArch() == llvm::Triple::thumb)) {
unsigned Offset = Triple.getArch() == llvm::Triple::arm ? 4 : 6;
- unsigned Version;
- Triple.getArchName().substr(Offset).getAsInteger(10, Version);
- if (Version < 7)
+ unsigned Version = 0;
+ bool Failure =
+ Triple.getArchName().substr(Offset).consumeInteger(10, Version);
+ if (Failure || Version < 7)
D.Diag(diag::err_target_unsupported_arch) << Triple.getArchName()
<< TripleStr;
}
@@ -4083,7 +4138,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
StringRef ArgStr =
Args.hasArg(options::OPT_interface_stub_version_EQ)
? Args.getLastArgValue(options::OPT_interface_stub_version_EQ)
- : "experimental-ifs-v1";
+ : "experimental-ifs-v2";
CmdArgs.push_back("-emit-interface-stubs");
CmdArgs.push_back(
Args.MakeArgString(Twine("-interface-stub-version=") + ArgStr.str()));
@@ -4153,8 +4208,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_function_sections,
options::OPT_fdata_sections,
options::OPT_fno_data_sections,
+ options::OPT_fbasic_block_sections_EQ,
+ options::OPT_funique_internal_linkage_names,
+ options::OPT_fno_unique_internal_linkage_names,
options::OPT_funique_section_names,
options::OPT_fno_unique_section_names,
+ options::OPT_funique_basic_block_section_names,
+ options::OPT_fno_unique_basic_block_section_names,
options::OPT_mrestrict_it,
options::OPT_mno_restrict_it,
options::OPT_mstackrealign,
@@ -4195,7 +4255,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-mdisable-tail-calls");
RenderFloatingPointOptions(TC, D, isOptimizationLevelFast(Args), Args,
- CmdArgs);
+ CmdArgs, JA);
// Render ABI arguments
switch (TC.getArch()) {
@@ -4240,8 +4300,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
II.getInputArg().renderAsInput(Args, CmdArgs);
}
- C.addCommand(std::make_unique<Command>(JA, *this, D.getClangProgramPath(),
- CmdArgs, Inputs));
+ C.addCommand(
+ std::make_unique<Command>(JA, *this, ResponseFileSupport::AtFileUTF8(),
+ D.getClangProgramPath(), CmdArgs, Inputs));
return;
}
@@ -4266,8 +4327,16 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Discard value names in assert builds unless otherwise specified.
if (Args.hasFlag(options::OPT_fdiscard_value_names,
- options::OPT_fno_discard_value_names, !IsAssertBuild))
+ options::OPT_fno_discard_value_names, !IsAssertBuild)) {
+ if (Args.hasArg(options::OPT_fdiscard_value_names) &&
+ (std::any_of(Inputs.begin(), Inputs.end(),
+ [](const clang::driver::InputInfo &II) {
+ return types::isLLVMIR(II.getType());
+ }))) {
+ D.Diag(diag::warn_ignoring_fdiscard_for_bitcode);
+ }
CmdArgs.push_back("-discard-value-names");
+ }
// Set the main file name, so that debug info works even with
// -save-temps.
@@ -4321,14 +4390,24 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
bool IsPIE;
std::tie(RelocationModel, PICLevel, IsPIE) = ParsePICArgs(TC, Args);
- const char *RMName = RelocationModelName(RelocationModel);
+ bool IsROPI = RelocationModel == llvm::Reloc::ROPI ||
+ RelocationModel == llvm::Reloc::ROPI_RWPI;
+ bool IsRWPI = RelocationModel == llvm::Reloc::RWPI ||
+ RelocationModel == llvm::Reloc::ROPI_RWPI;
- if ((RelocationModel == llvm::Reloc::ROPI ||
- RelocationModel == llvm::Reloc::ROPI_RWPI) &&
- types::isCXX(Input.getType()) &&
+ if (Args.hasArg(options::OPT_mcmse) &&
+ !Args.hasArg(options::OPT_fallow_unsupported)) {
+ if (IsROPI)
+ D.Diag(diag::err_cmse_pi_are_incompatible) << IsROPI;
+ if (IsRWPI)
+ D.Diag(diag::err_cmse_pi_are_incompatible) << !IsRWPI;
+ }
+
+ if (IsROPI && types::isCXX(Input.getType()) &&
!Args.hasArg(options::OPT_fallow_unsupported))
D.Diag(diag::err_drv_ropi_incompatible_with_cxx);
+ const char *RMName = RelocationModelName(RelocationModel);
if (RMName) {
CmdArgs.push_back("-mrelocation-model");
CmdArgs.push_back(RMName);
@@ -4352,15 +4431,27 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(A->getValue());
}
- CmdArgs.push_back("-mthread-model");
- if (Arg *A = Args.getLastArg(options::OPT_mthread_model)) {
- if (!TC.isThreadModelSupported(A->getValue()))
- D.Diag(diag::err_drv_invalid_thread_model_for_target)
- << A->getValue() << A->getAsString(Args);
- CmdArgs.push_back(A->getValue());
+ // The default is -fno-semantic-interposition. We render it just because we
+ // require explicit -fno-semantic-interposition to infer dso_local.
+ if (Arg *A = Args.getLastArg(options::OPT_fsemantic_interposition,
+ options::OPT_fno_semantic_interposition))
+ if (RelocationModel != llvm::Reloc::Static && !IsPIE)
+ A->render(Args, CmdArgs);
+
+ {
+ std::string Model;
+ if (Arg *A = Args.getLastArg(options::OPT_mthread_model)) {
+ if (!TC.isThreadModelSupported(A->getValue()))
+ D.Diag(diag::err_drv_invalid_thread_model_for_target)
+ << A->getValue() << A->getAsString(Args);
+ Model = A->getValue();
+ } else
+ Model = TC.getThreadModel();
+ if (Model != "posix") {
+ CmdArgs.push_back("-mthread-model");
+ CmdArgs.push_back(Args.MakeArgString(Model));
+ }
}
- else
- CmdArgs.push_back(Args.MakeArgString(TC.getThreadModel()));
Args.AddLastArg(CmdArgs, options::OPT_fveclib);
@@ -4413,6 +4504,19 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(A->getValue());
}
+ if (Arg *A = Args.getLastArg(options::OPT_maix_struct_return,
+ options::OPT_msvr4_struct_return)) {
+ if (TC.getArch() != llvm::Triple::ppc) {
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << RawTriple.str();
+ } else if (A->getOption().matches(options::OPT_maix_struct_return)) {
+ CmdArgs.push_back("-maix-struct-return");
+ } else {
+ assert(A->getOption().matches(options::OPT_msvr4_struct_return));
+ CmdArgs.push_back("-msvr4-struct-return");
+ }
+ }
+
if (Arg *A = Args.getLastArg(options::OPT_fpcc_struct_return,
options::OPT_freg_struct_return)) {
if (TC.getArch() != llvm::Triple::x86) {
@@ -4429,6 +4533,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasFlag(options::OPT_mrtd, options::OPT_mno_rtd, false))
CmdArgs.push_back("-fdefault-calling-conv=stdcall");
+ if (Args.hasArg(options::OPT_fenable_matrix)) {
+ // enable-matrix is needed by both the LangOpts and by LLVM.
+ CmdArgs.push_back("-fenable-matrix");
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-enable-matrix");
+ }
+
CodeGenOptions::FramePointerKind FPKeepKind =
getFramePointerKind(Args, RawTriple);
const char *FPKeepKindStr = nullptr;
@@ -4447,8 +4558,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(FPKeepKindStr);
if (!Args.hasFlag(options::OPT_fzero_initialized_in_bss,
- options::OPT_fno_zero_initialized_in_bss))
- CmdArgs.push_back("-mno-zero-initialized-in-bss");
+ options::OPT_fno_zero_initialized_in_bss, true))
+ CmdArgs.push_back("-fno-zero-initialized-in-bss");
bool OFastEnabled = isOptimizationLevelFast(Args);
// If -Ofast is the optimization level, then -fstrict-aliasing should be
@@ -4495,7 +4606,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_fsplit_stack))
CmdArgs.push_back("-split-stacks");
- RenderFloatingPointOptions(TC, D, OFastEnabled, Args, CmdArgs);
+ RenderFloatingPointOptions(TC, D, OFastEnabled, Args, CmdArgs, JA);
+
+ if (Arg *A = Args.getLastArg(options::OPT_mdouble_EQ)) {
+ if (TC.getArch() == llvm::Triple::avr)
+ A->render(Args, CmdArgs);
+ else
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(Args) << TripleStr;
+ }
if (Arg *A = Args.getLastArg(options::OPT_LongDouble_Group)) {
if (TC.getTriple().isX86())
@@ -4511,9 +4630,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Decide whether to use verbose asm. Verbose assembly is the default on
// toolchains which have the integrated assembler on by default.
bool IsIntegratedAssemblerDefault = TC.IsIntegratedAssemblerDefault();
- if (Args.hasFlag(options::OPT_fverbose_asm, options::OPT_fno_verbose_asm,
- IsIntegratedAssemblerDefault))
- CmdArgs.push_back("-masm-verbose");
+ if (!Args.hasFlag(options::OPT_fverbose_asm, options::OPT_fno_verbose_asm,
+ IsIntegratedAssemblerDefault))
+ CmdArgs.push_back("-fno-verbose-asm");
if (!TC.useIntegratedAs())
CmdArgs.push_back("-no-integrated-as");
@@ -4529,8 +4648,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Enable -mconstructor-aliases except on darwin, where we have to work around
// a linker bug (see <rdar://problem/7651567>), and CUDA device code, where
- // aliases aren't supported.
- if (!RawTriple.isOSDarwin() && !RawTriple.isNVPTX())
+ // aliases aren't supported. Similarly, aliases aren't yet supported for AIX.
+ if (!RawTriple.isOSDarwin() && !RawTriple.isNVPTX() && !RawTriple.isOSAIX())
CmdArgs.push_back("-mconstructor-aliases");
// Darwin's kernel doesn't support guard variables; just die if we
@@ -4539,7 +4658,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fforbid-guard-variables");
if (Args.hasFlag(options::OPT_mms_bitfields, options::OPT_mno_ms_bitfields,
- false)) {
+ Triple.isWindowsGNUEnvironment())) {
CmdArgs.push_back("-mms-bitfields");
}
@@ -4575,14 +4694,36 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
AsynchronousUnwindTables))
CmdArgs.push_back("-munwind-tables");
+ // Prepare `-aux-target-cpu` and `-aux-target-feature` unless
+ // `--gpu-use-aux-triple-only` is specified.
+ if (!Args.getLastArg(options::OPT_gpu_use_aux_triple_only) &&
+ ((IsCuda && JA.isDeviceOffloading(Action::OFK_Cuda)) ||
+ (IsHIP && JA.isDeviceOffloading(Action::OFK_HIP)))) {
+ const ArgList &HostArgs =
+ C.getArgsForToolChain(nullptr, StringRef(), Action::OFK_None);
+ std::string HostCPU =
+ getCPUName(HostArgs, *TC.getAuxTriple(), /*FromAs*/ false);
+ if (!HostCPU.empty()) {
+ CmdArgs.push_back("-aux-target-cpu");
+ CmdArgs.push_back(Args.MakeArgString(HostCPU));
+ }
+ getTargetFeatures(D, *TC.getAuxTriple(), HostArgs, CmdArgs,
+ /*ForAS*/ false, /*IsAux*/ true);
+ }
+
TC.addClangTargetOptions(Args, CmdArgs, JA.getOffloadingDeviceKind());
// FIXME: Handle -mtune=.
(void)Args.hasArg(options::OPT_mtune_EQ);
if (Arg *A = Args.getLastArg(options::OPT_mcmodel_EQ)) {
- CmdArgs.push_back("-mcode-model");
- CmdArgs.push_back(A->getValue());
+ StringRef CM = A->getValue();
+ if (CM == "small" || CM == "kernel" || CM == "medium" || CM == "large" ||
+ CM == "tiny")
+ A->render(Args, CmdArgs);
+ else
+ D.Diag(diag::err_drv_invalid_argument_to_option)
+ << CM << A->getOption().getName();
}
if (Arg *A = Args.getLastArg(options::OPT_mtls_size_EQ)) {
@@ -4617,8 +4758,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
AddClangCLArgs(Args, InputType, CmdArgs, &DebugInfoKind, &EmitCodeView);
DwarfFissionKind DwarfFission;
- RenderDebugOptions(TC, D, RawTriple, Args, EmitCodeView, IsWindowsMSVC,
- CmdArgs, DebugInfoKind, DwarfFission);
+ RenderDebugOptions(TC, D, RawTriple, Args, EmitCodeView, CmdArgs,
+ DebugInfoKind, DwarfFission);
// Add the split debug info name to the command lines here so we
// can propagate it to the backend.
@@ -4664,11 +4805,17 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
Args.AddAllArgs(CmdArgs, options::OPT_v);
- Args.AddLastArg(CmdArgs, options::OPT_H);
+
+ if (Args.getLastArg(options::OPT_H)) {
+ CmdArgs.push_back("-H");
+ CmdArgs.push_back("-sys-header-deps");
+ }
+
if (D.CCPrintHeaders && !D.CCGenDiagnostics) {
CmdArgs.push_back("-header-include-file");
CmdArgs.push_back(D.CCPrintHeadersFilename ? D.CCPrintHeadersFilename
: "-");
+ CmdArgs.push_back("-sys-header-deps");
}
Args.AddLastArg(CmdArgs, options::OPT_P);
Args.AddLastArg(CmdArgs, options::OPT_print_ivar_layout);
@@ -4679,6 +4826,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
: "-");
}
+ // Give the gen diagnostics more chances to succeed, by avoiding intentional
+ // crashes.
+ if (D.CCGenDiagnostics)
+ CmdArgs.push_back("-disable-pragma-debug-crash");
+
bool UseSeparateSections = isUseSeparateSections(Triple);
if (Args.hasFlag(options::OPT_ffunction_sections,
@@ -4686,6 +4838,16 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-ffunction-sections");
}
+ if (Arg *A = Args.getLastArg(options::OPT_fbasic_block_sections_EQ)) {
+ StringRef Val = A->getValue();
+ if (Val != "all" && Val != "labels" && Val != "none" &&
+ !(Val.startswith("list=") && llvm::sys::fs::exists(Val.substr(5))))
+ D.Diag(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue();
+ else
+ A->render(Args, CmdArgs);
+ }
+
if (Args.hasFlag(options::OPT_fdata_sections, options::OPT_fno_data_sections,
UseSeparateSections)) {
CmdArgs.push_back("-fdata-sections");
@@ -4695,14 +4857,22 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_unique_section_names, true))
CmdArgs.push_back("-fno-unique-section-names");
+ if (Args.hasFlag(options::OPT_funique_internal_linkage_names,
+ options::OPT_fno_unique_internal_linkage_names, false))
+ CmdArgs.push_back("-funique-internal-linkage-names");
+
+ if (Args.hasFlag(options::OPT_funique_basic_block_section_names,
+ options::OPT_fno_unique_basic_block_section_names, false))
+ CmdArgs.push_back("-funique-basic-block-section-names");
+
Args.AddLastArg(CmdArgs, options::OPT_finstrument_functions,
options::OPT_finstrument_functions_after_inlining,
options::OPT_finstrument_function_entry_bare);
- // NVPTX doesn't support PGO or coverage. There's no runtime support for
- // sampling, overhead of call arc collection is way too high and there's no
- // way to collect the output.
- if (!Triple.isNVPTX())
+ // NVPTX/AMDGCN doesn't support PGO or coverage. There's no runtime support
+ // for sampling, overhead of call arc collection is way too high and there's
+ // no way to collect the output.
+ if (!Triple.isNVPTX() && !Triple.isAMDGCN())
addPGOAndCoverageFlags(TC, C, D, Output, Args, CmdArgs);
Args.AddLastArg(CmdArgs, options::OPT_fclang_abi_compat_EQ);
@@ -4821,6 +4991,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_ftrigraphs,
options::OPT_fno_trigraphs);
+
+ // HIP headers has minimum C++ standard requirements. Therefore set the
+ // default language standard.
+ if (IsHIP)
+ CmdArgs.push_back(IsWindowsMSVC ? "-std=c++14" : "-std=c++11");
}
// GCC's behavior for -Wwrite-strings is a bit strange:
@@ -4964,15 +5139,20 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
// Pass -fmessage-length=.
- CmdArgs.push_back("-fmessage-length");
+ unsigned MessageLength = 0;
if (Arg *A = Args.getLastArg(options::OPT_fmessage_length_EQ)) {
- CmdArgs.push_back(A->getValue());
+ StringRef V(A->getValue());
+ if (V.getAsInteger(0, MessageLength))
+ D.Diag(diag::err_drv_invalid_argument_to_option)
+ << V << A->getOption().getName();
} else {
// If -fmessage-length=N was not specified, determine whether this is a
// terminal and, if so, implicitly define -fmessage-length appropriately.
- unsigned N = llvm::sys::Process::StandardErrColumns();
- CmdArgs.push_back(Args.MakeArgString(Twine(N)));
+ MessageLength = llvm::sys::Process::StandardErrColumns();
}
+ if (MessageLength != 0)
+ CmdArgs.push_back(
+ Args.MakeArgString("-fmessage-length=" + Twine(MessageLength)));
// -fvisibility= and -fvisibility-ms-compat are of a piece.
if (const Arg *A = Args.getLastArg(options::OPT_fvisibility_EQ,
@@ -5001,7 +5181,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fno_operator_names);
Args.AddLastArg(CmdArgs, options::OPT_femulated_tls,
options::OPT_fno_emulated_tls);
- Args.AddLastArg(CmdArgs, options::OPT_fkeep_static_consts);
// AltiVec-like language extensions aren't relevant for assembling.
if (!isa<PreprocessJobAction>(JA) || Output.getType() != types::TY_PP_Asm)
@@ -5047,6 +5226,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_openmp_cuda_mode, /*Default=*/false))
CmdArgs.push_back("-fopenmp-cuda-mode");
+ // When in OpenMP offloading mode with NVPTX target, forward
+ // cuda-parallel-target-regions flag
+ if (Args.hasFlag(options::OPT_fopenmp_cuda_parallel_target_regions,
+ options::OPT_fno_openmp_cuda_parallel_target_regions,
+ /*Default=*/true))
+ CmdArgs.push_back("-fopenmp-cuda-parallel-target-regions");
+
// When in OpenMP offloading mode with NVPTX target, check if full runtime
// is required.
if (Args.hasFlag(options::OPT_fopenmp_cuda_force_full_runtime,
@@ -5077,20 +5263,23 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Arg *A = Args.getLastArg(options::OPT_fpatchable_function_entry_EQ)) {
StringRef S0 = A->getValue(), S = S0;
- unsigned Size, Start = 0;
+ unsigned Size, Offset = 0;
if (!Triple.isAArch64() && Triple.getArch() != llvm::Triple::x86 &&
Triple.getArch() != llvm::Triple::x86_64)
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< A->getAsString(Args) << TripleStr;
else if (S.consumeInteger(10, Size) ||
(!S.empty() && (!S.consume_front(",") ||
- S.consumeInteger(10, Start) || !S.empty())))
+ S.consumeInteger(10, Offset) || !S.empty())))
D.Diag(diag::err_drv_invalid_argument_to_option)
<< S0 << A->getOption().getName();
- else if (Start)
+ else if (Size < Offset)
D.Diag(diag::err_drv_unsupported_fpatchable_function_entry_argument);
- else
+ else {
CmdArgs.push_back(Args.MakeArgString(A->getSpelling() + Twine(Size)));
+ CmdArgs.push_back(Args.MakeArgString(
+ "-fpatchable-function-entry-offset=" + Twine(Offset)));
+ }
}
if (TC.SupportsProfiling()) {
@@ -5164,11 +5353,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_pthread);
- if (Args.hasFlag(options::OPT_mspeculative_load_hardening, options::OPT_mno_speculative_load_hardening,
- false))
+ if (Args.hasFlag(options::OPT_mspeculative_load_hardening,
+ options::OPT_mno_speculative_load_hardening, false))
CmdArgs.push_back(Args.MakeArgString("-mspeculative-load-hardening"));
RenderSSPOptions(TC, Args, CmdArgs, KernelOrKext);
+ RenderSCPOptions(TC, Args, CmdArgs);
RenderTrivialAutoVarInitOptions(D, TC, Args, CmdArgs);
// Translate -mstackrealign
@@ -5214,8 +5404,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Forward -cl options to -cc1
RenderOpenCLOptions(Args, CmdArgs);
- if (Args.hasFlag(options::OPT_fhip_new_launch_api,
- options::OPT_fno_hip_new_launch_api, false))
+ if (IsHIP && Args.hasFlag(options::OPT_fhip_new_launch_api,
+ options::OPT_fno_hip_new_launch_api, true))
CmdArgs.push_back("-fhip-new-launch-api");
if (Arg *A = Args.getLastArg(options::OPT_fcf_protection_EQ)) {
@@ -5299,7 +5489,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// -fuse-cxa-atexit is default.
if (!Args.hasFlag(
options::OPT_fuse_cxa_atexit, options::OPT_fno_use_cxa_atexit,
- !RawTriple.isOSWindows() &&
+ !RawTriple.isOSAIX() && !RawTriple.isOSWindows() &&
TC.getArch() != llvm::Triple::xcore &&
((RawTriple.getVendor() != llvm::Triple::MipsTechnologies) ||
RawTriple.hasEnvironment())) ||
@@ -5311,16 +5501,16 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
RawTriple.isOSDarwin() && !KernelOrKext))
CmdArgs.push_back("-fregister-global-dtors-with-atexit");
- // -fms-extensions=0 is default.
- if (Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions,
- IsWindowsMSVC))
- CmdArgs.push_back("-fms-extensions");
-
// -fno-use-line-directives is default.
if (Args.hasFlag(options::OPT_fuse_line_directives,
options::OPT_fno_use_line_directives, false))
CmdArgs.push_back("-fuse-line-directives");
+ // -fms-extensions=0 is default.
+ if (Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions,
+ IsWindowsMSVC))
+ CmdArgs.push_back("-fms-extensions");
+
// -fms-compatibility=0 is default.
bool IsMSVCCompat = Args.hasFlag(
options::OPT_fms_compatibility, options::OPT_fno_ms_compatibility,
@@ -5434,11 +5624,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasFlag(options::OPT_fpch_validate_input_files_content,
options::OPT_fno_pch_validate_input_files_content, false))
CmdArgs.push_back("-fvalidate-ast-input-files-content");
+ if (Args.hasFlag(options::OPT_fpch_instantiate_templates,
+ options::OPT_fno_pch_instantiate_templates, false))
+ CmdArgs.push_back("-fpch-instantiate-templates");
Args.AddLastArg(CmdArgs, options::OPT_fexperimental_new_pass_manager,
options::OPT_fno_experimental_new_pass_manager);
- ObjCRuntime Runtime = AddObjCRuntimeArgs(Args, CmdArgs, rewriteKind);
+ ObjCRuntime Runtime = AddObjCRuntimeArgs(Args, Inputs, CmdArgs, rewriteKind);
RenderObjCOptions(TC, D, RawTriple, Args, Runtime, rewriteKind != RK_None,
Input, CmdArgs);
@@ -5558,11 +5751,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasFlag(options::OPT_Qy, options::OPT_Qn, true))
CmdArgs.push_back("-Qn");
- // -fcommon is the default unless compiling kernel code or the target says so
- bool NoCommonDefault = KernelOrKext || isNoCommonDefault(RawTriple);
- if (!Args.hasFlag(options::OPT_fcommon, options::OPT_fno_common,
- !NoCommonDefault))
- CmdArgs.push_back("-fno-common");
+ // -fno-common is the default, set -fcommon only when that flag is set.
+ if (Args.hasFlag(options::OPT_fcommon, options::OPT_fno_common, false))
+ CmdArgs.push_back("-fcommon");
// -fsigned-bitfields is default, and clang doesn't yet support
// -funsigned-bitfields.
@@ -5680,6 +5871,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_dM);
Args.AddLastArg(CmdArgs, options::OPT_dD);
+ Args.AddLastArg(CmdArgs, options::OPT_fmax_tokens_EQ);
+
// Handle serialized diagnostics.
if (Arg *A = Args.getLastArg(options::OPT__serialize_diags)) {
CmdArgs.push_back("-serialize-diagnostic-file");
@@ -5749,7 +5942,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// FIXME: -fembed-bitcode -save-temps will save optimized bitcode instead of
// pristine IR generated by the frontend. Ideally, a new compile action should
// be added so both IR can be captured.
- if (C.getDriver().isSaveTempsEnabled() &&
+ if ((C.getDriver().isSaveTempsEnabled() ||
+ JA.isHostOffloading(Action::OFK_OpenMP)) &&
!(C.getDriver().embedBitcodeInObject() && !C.getDriver().isUsingLTO()) &&
isa<CompileJobAction>(JA))
CmdArgs.push_back("-disable-llvm-passes");
@@ -5779,7 +5973,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Arg->render(Args, OriginalArgs);
SmallString<256> Flags;
- Flags += Exec;
+ EscapeSpacesAndBackslashes(Exec, Flags);
for (const char *OriginalArg : OriginalArgs) {
SmallString<128> EscapedArg;
EscapeSpacesAndBackslashes(OriginalArg, EscapedArg);
@@ -5891,10 +6085,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (SplitLTOUnit)
CmdArgs.push_back("-fsplit-lto-unit");
- if (Arg *A = Args.getLastArg(options::OPT_fexperimental_isel,
- options::OPT_fno_experimental_isel)) {
+ if (Arg *A = Args.getLastArg(options::OPT_fglobal_isel,
+ options::OPT_fno_global_isel)) {
CmdArgs.push_back("-mllvm");
- if (A->getOption().matches(options::OPT_fexperimental_isel)) {
+ if (A->getOption().matches(options::OPT_fglobal_isel)) {
CmdArgs.push_back("-global-isel=1");
// GISel is on by default on AArch64 -O0, so don't bother adding
@@ -5913,9 +6107,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-global-isel-abort=2");
if (!IsArchSupported)
- D.Diag(diag::warn_drv_experimental_isel_incomplete) << Triple.getArchName();
+ D.Diag(diag::warn_drv_global_isel_incomplete) << Triple.getArchName();
else
- D.Diag(diag::warn_drv_experimental_isel_incomplete_opt);
+ D.Diag(diag::warn_drv_global_isel_incomplete_opt);
}
} else {
CmdArgs.push_back("-global-isel=0");
@@ -5939,6 +6133,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fforce-enable-int128");
}
+ if (Args.hasFlag(options::OPT_fkeep_static_consts,
+ options::OPT_fno_keep_static_consts, false))
+ CmdArgs.push_back("-fkeep-static-consts");
+
if (Args.hasFlag(options::OPT_fcomplete_member_pointers,
options::OPT_fno_complete_member_pointers, false))
CmdArgs.push_back("-fcomplete-member-pointers");
@@ -5950,11 +6148,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Arg *A = Args.getLastArg(options::OPT_moutline,
options::OPT_mno_outline)) {
if (A->getOption().matches(options::OPT_moutline)) {
- // We only support -moutline in AArch64 right now. If we're not compiling
- // for AArch64, emit a warning and ignore the flag. Otherwise, add the
- // proper mllvm flags.
- if (Triple.getArch() != llvm::Triple::aarch64 &&
- Triple.getArch() != llvm::Triple::aarch64_32) {
+ // We only support -moutline in AArch64 and ARM targets right now. If
+ // we're not compiling for these, emit a warning and ignore the flag.
+ // Otherwise, add the proper mllvm flags.
+ if (!(Triple.isARM() || Triple.isThumb() ||
+ Triple.getArch() == llvm::Triple::aarch64 ||
+ Triple.getArch() == llvm::Triple::aarch64_32)) {
D.Diag(diag::warn_drv_moutline_unsupported_opt) << Triple.getArchName();
} else {
CmdArgs.push_back("-mllvm");
@@ -6026,30 +6225,33 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
auto CLCommand =
getCLFallback()->GetCommand(C, JA, Output, Inputs, Args, LinkingOutput);
C.addCommand(std::make_unique<FallbackCommand>(
- JA, *this, Exec, CmdArgs, Inputs, std::move(CLCommand)));
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs,
+ std::move(CLCommand)));
} else if (Args.hasArg(options::OPT__SLASH_fallback) &&
isa<PrecompileJobAction>(JA)) {
// In /fallback builds, run the main compilation even if the pch generation
// fails, so that the main compilation's fallback to cl.exe runs.
- C.addCommand(std::make_unique<ForceSuccessCommand>(JA, *this, Exec,
- CmdArgs, Inputs));
+ C.addCommand(std::make_unique<ForceSuccessCommand>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
} else if (D.CC1Main && !D.CCGenDiagnostics) {
// Invoke the CC1 directly in this process
- C.addCommand(
- std::make_unique<CC1Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<CC1Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
} else {
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
}
// Make the compile command echo its inputs for /showFilenames.
if (Output.getType() == types::TY_Object &&
Args.hasFlag(options::OPT__SLASH_showFilenames,
options::OPT__SLASH_showFilenames_, false)) {
- C.getJobs().getJobs().back()->setPrintInputFilenames(true);
+ C.getJobs().getJobs().back()->PrintInputFilenames = true;
}
if (Arg *A = Args.getLastArg(options::OPT_pg))
- if (FPKeepKind == CodeGenOptions::FramePointerKind::None)
+ if (FPKeepKind == CodeGenOptions::FramePointerKind::None &&
+ !Args.hasArg(options::OPT_mfentry))
D.Diag(diag::err_drv_argument_not_allowed_with) << "-fomit-frame-pointer"
<< A->getAsString(Args);
@@ -6072,7 +6274,7 @@ Clang::Clang(const ToolChain &TC)
// CAUTION! The first constructor argument ("clang") is not arbitrary,
// as it is for other tools. Some operations on a Tool actually test
// whether that tool is Clang based on the Tool's Name as a string.
- : Tool("clang", "clang frontend", TC, RF_Full) {}
+ : Tool("clang", "clang frontend", TC) {}
Clang::~Clang() {}
@@ -6080,6 +6282,7 @@ Clang::~Clang() {}
///
/// Returns true if the runtime is non-fragile.
ObjCRuntime Clang::AddObjCRuntimeArgs(const ArgList &args,
+ const InputInfoList &inputs,
ArgStringList &cmdArgs,
RewriteKind rewriteKind) const {
// Look for the controlling runtime option.
@@ -6203,8 +6406,11 @@ ObjCRuntime Clang::AddObjCRuntimeArgs(const ArgList &args,
runtime = ObjCRuntime(ObjCRuntime::GCC, VersionTuple());
}
- cmdArgs.push_back(
- args.MakeArgString("-fobjc-runtime=" + runtime.getAsString()));
+ if (llvm::any_of(inputs, [](const InputInfo &input) {
+ return types::isObjC(input.getType());
+ }))
+ cmdArgs.push_back(
+ args.MakeArgString("-fobjc-runtime=" + runtime.getAsString()));
return runtime;
}
@@ -6274,6 +6480,7 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
codegenoptions::DebugInfoKind *DebugInfoKind,
bool *EmitCodeView) const {
unsigned RTOptionID = options::OPT__SLASH_MT;
+ bool isNVPTX = getToolChain().getTriple().isNVPTX();
if (Args.hasArg(options::OPT__SLASH_LDd))
// The /LDd option implies /MTd. The dependent lib part can be overridden,
@@ -6326,7 +6533,13 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
CmdArgs.push_back("--dependent-lib=oldnames");
}
- Args.AddLastArg(CmdArgs, options::OPT_show_includes);
+ if (Arg *ShowIncludes =
+ Args.getLastArg(options::OPT__SLASH_showIncludes,
+ options::OPT__SLASH_showIncludes_user)) {
+ CmdArgs.push_back("--show-includes");
+ if (ShowIncludes->getOption().matches(options::OPT__SLASH_showIncludes))
+ CmdArgs.push_back("-sys-header-deps");
+ }
// This controls whether or not we emit RTTI data for polymorphic types.
if (Args.hasFlag(options::OPT__SLASH_GR_, options::OPT__SLASH_GR,
@@ -6335,8 +6548,8 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
// This controls whether or not we emit stack-protector instrumentation.
// In MSVC, Buffer Security Check (/GS) is on by default.
- if (Args.hasFlag(options::OPT__SLASH_GS, options::OPT__SLASH_GS_,
- /*Default=*/true)) {
+ if (!isNVPTX && Args.hasFlag(options::OPT__SLASH_GS, options::OPT__SLASH_GS_,
+ /*Default=*/true)) {
CmdArgs.push_back("-stack-protector");
CmdArgs.push_back(Args.MakeArgString(Twine(LangOptions::SSPStrong)));
}
@@ -6347,7 +6560,7 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
options::OPT_gline_tables_only)) {
*EmitCodeView = true;
if (DebugInfoArg->getOption().matches(options::OPT__SLASH_Z7))
- *DebugInfoKind = codegenoptions::LimitedDebugInfo;
+ *DebugInfoKind = codegenoptions::DebugInfoConstructor;
else
*DebugInfoKind = codegenoptions::DebugLineTablesOnly;
} else {
@@ -6356,7 +6569,7 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
const Driver &D = getToolChain().getDriver();
EHFlags EH = parseClangCLEHFlags(D, Args);
- if (EH.Synch || EH.Asynch) {
+ if (!isNVPTX && (EH.Synch || EH.Asynch)) {
if (types::isCXX(InputType))
CmdArgs.push_back("-fcxx-exceptions");
CmdArgs.push_back("-fexceptions");
@@ -6425,7 +6638,7 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
options::OPT__SLASH_Gregcall)) {
unsigned DCCOptId = CCArg->getOption().getID();
const char *DCCFlag = nullptr;
- bool ArchSupported = true;
+ bool ArchSupported = !isNVPTX;
llvm::Triple::ArchType Arch = getToolChain().getArch();
switch (DCCOptId) {
case options::OPT__SLASH_Gd:
@@ -6531,7 +6744,8 @@ void ClangAs::AddMIPSTargetArgs(const ArgList &Args,
void ClangAs::AddX86TargetArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
- addX86AlignBranchArgs(getToolChain().getDriver(), Args, CmdArgs);
+ addX86AlignBranchArgs(getToolChain().getDriver(), Args, CmdArgs,
+ /*IsLTO=*/false);
if (Arg *A = Args.getLastArg(options::OPT_masm_EQ)) {
StringRef Value = A->getValue();
@@ -6601,7 +6815,7 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
}
// Add the target features
- getTargetFeatures(getToolChain(), Triple, Args, CmdArgs, true);
+ getTargetFeatures(D, Triple, Args, CmdArgs, true);
// Ignore explicit -force_cpusubtype_ALL option.
(void)Args.hasArg(options::OPT_force__cpusubtype__ALL);
@@ -6643,7 +6857,7 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
// the guard for source type, however there is a test which asserts
// that some assembler invocation receives no -debug-info-kind,
// and it's not clear whether that test is just overly restrictive.
- DebugInfoKind = (WantDebug ? codegenoptions::LimitedDebugInfo
+ DebugInfoKind = (WantDebug ? codegenoptions::DebugInfoConstructor
: codegenoptions::NoDebugInfo);
// Add the -fdebug-compilation-dir flag if needed.
addDebugCompDirArg(Args, CmdArgs, C.getDriver().getVFS());
@@ -6686,7 +6900,7 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
SmallString<256> Flags;
const char *Exec = getToolChain().getDriver().getClangProgramPath();
- Flags += Exec;
+ EscapeSpacesAndBackslashes(Exec, Flags);
for (const char *OriginalArg : OriginalArgs) {
SmallString<128> EscapedArg;
EscapeSpacesAndBackslashes(OriginalArg, EscapedArg);
@@ -6763,7 +6977,8 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Input.getFilename());
const char *Exec = getToolChain().getDriver().getClangProgramPath();
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
}
// Begin OffloadBundler
@@ -6847,7 +7062,7 @@ void OffloadBundler::ConstructJob(Compilation &C, const JobAction &JA,
// All the inputs are encoded as commands.
C.addCommand(std::make_unique<Command>(
- JA, *this,
+ JA, *this, ResponseFileSupport::None(),
TCArgs.MakeArgString(getToolChain().GetProgramPath(getShortName())),
CmdArgs, None));
}
@@ -6913,7 +7128,7 @@ void OffloadBundler::ConstructJobMultipleOutputs(
// All the inputs are encoded as commands.
C.addCommand(std::make_unique<Command>(
- JA, *this,
+ JA, *this, ResponseFileSupport::None(),
TCArgs.MakeArgString(getToolChain().GetProgramPath(getShortName())),
CmdArgs, None));
}
@@ -6943,7 +7158,7 @@ void OffloadWrapper::ConstructJob(Compilation &C, const JobAction &JA,
}
C.addCommand(std::make_unique<Command>(
- JA, *this,
+ JA, *this, ResponseFileSupport::None(),
Args.MakeArgString(getToolChain().GetProgramPath(getShortName())),
CmdArgs, Inputs));
}
diff --git a/clang/lib/Driver/ToolChains/Clang.h b/clang/lib/Driver/ToolChains/Clang.h
index b345c02489d4..a607e3c27de9 100644
--- a/clang/lib/Driver/ToolChains/Clang.h
+++ b/clang/lib/Driver/ToolChains/Clang.h
@@ -73,10 +73,13 @@ private:
llvm::opt::ArgStringList &CmdArgs) const;
void AddWebAssemblyTargetArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
+ void AddVETargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
enum RewriteKind { RK_None, RK_Fragile, RK_NonFragile };
ObjCRuntime AddObjCRuntimeArgs(const llvm::opt::ArgList &args,
+ const InputInfoList &inputs,
llvm::opt::ArgStringList &cmdArgs,
RewriteKind rewrite) const;
@@ -118,7 +121,7 @@ public:
class LLVM_LIBRARY_VISIBILITY ClangAs : public Tool {
public:
ClangAs(const ToolChain &TC)
- : Tool("clang::as", "clang integrated assembler", TC, RF_Full) {}
+ : Tool("clang::as", "clang integrated assembler", TC) {}
void AddMIPSTargetArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
void AddX86TargetArgs(const llvm::opt::ArgList &Args,
diff --git a/clang/lib/Driver/ToolChains/CloudABI.cpp b/clang/lib/Driver/ToolChains/CloudABI.cpp
index cf1d0d551e57..8dcfd4951bbf 100644
--- a/clang/lib/Driver/ToolChains/CloudABI.cpp
+++ b/clang/lib/Driver/ToolChains/CloudABI.cpp
@@ -75,7 +75,7 @@ void cloudabi::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (D.isUsingLTO()) {
assert(!Inputs.empty() && "Must have at least one input.");
- AddGoldPlugin(ToolChain, Args, CmdArgs, Output, Inputs[0],
+ addLTOOptions(ToolChain, Args, CmdArgs, Output, Inputs[0],
D.getLTOMode() == LTOK_Thin);
}
@@ -92,7 +92,8 @@ void cloudabi::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtend.o")));
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
// CloudABI - CloudABI tool chain which can call ld(1) directly.
@@ -102,7 +103,7 @@ CloudABI::CloudABI(const Driver &D, const llvm::Triple &Triple,
: Generic_ELF(D, Triple, Args) {
SmallString<128> P(getDriver().Dir);
llvm::sys::path::append(P, "..", getTriple().str(), "lib");
- getFilePaths().push_back(P.str());
+ getFilePaths().push_back(std::string(P.str()));
}
void CloudABI::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
diff --git a/clang/lib/Driver/ToolChains/CloudABI.h b/clang/lib/Driver/ToolChains/CloudABI.h
index cc381c2b1e1f..98bf23127706 100644
--- a/clang/lib/Driver/ToolChains/CloudABI.h
+++ b/clang/lib/Driver/ToolChains/CloudABI.h
@@ -19,9 +19,9 @@ namespace tools {
/// cloudabi -- Directly call GNU Binutils linker
namespace cloudabi {
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("cloudabi::Linker", "linker", TC) {}
+ Linker(const ToolChain &TC) : Tool("cloudabi::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp
index 37ec73468570..1cac5a0822a4 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -12,6 +12,7 @@
#include "Arch/Mips.h"
#include "Arch/PPC.h"
#include "Arch/SystemZ.h"
+#include "Arch/VE.h"
#include "Arch/X86.h"
#include "HIP.h"
#include "Hexagon.h"
@@ -50,6 +51,7 @@
#include "llvm/Support/Program.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/Threading.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/YAMLParser.h"
@@ -82,6 +84,31 @@ void tools::handleTargetFeaturesGroup(const ArgList &Args,
}
}
+std::vector<StringRef>
+tools::unifyTargetFeatures(const std::vector<StringRef> &Features) {
+ std::vector<StringRef> UnifiedFeatures;
+ // Find the last of each feature.
+ llvm::StringMap<unsigned> LastOpt;
+ for (unsigned I = 0, N = Features.size(); I < N; ++I) {
+ StringRef Name = Features[I];
+ assert(Name[0] == '-' || Name[0] == '+');
+ LastOpt[Name.drop_front(1)] = I;
+ }
+
+ for (unsigned I = 0, N = Features.size(); I < N; ++I) {
+ // If this feature was overridden, ignore it.
+ StringRef Name = Features[I];
+ llvm::StringMap<unsigned>::iterator LastI = LastOpt.find(Name.drop_front(1));
+ assert(LastI != LastOpt.end());
+ unsigned Last = LastI->second;
+ if (Last != I)
+ continue;
+
+ UnifiedFeatures.push_back(Name);
+ }
+ return UnifiedFeatures;
+}
+
void tools::addDirectoryList(const ArgList &Args, ArgStringList &CmdArgs,
const char *ArgName, const char *EnvVar) {
const char *DirList = ::getenv(EnvVar);
@@ -91,7 +118,7 @@ void tools::addDirectoryList(const ArgList &Args, ArgStringList &CmdArgs,
return; // Nothing to do.
StringRef Name(ArgName);
- if (Name.equals("-I") || Name.equals("-L"))
+ if (Name.equals("-I") || Name.equals("-L") || Name.empty())
CombinedArg = true;
StringRef Dirs(DirList);
@@ -151,14 +178,12 @@ void tools::AddLinkerInputs(const ToolChain &TC, const InputInfoList &Inputs,
addDirectoryList(Args, CmdArgs, "-L", "LIBRARY_PATH");
for (const auto &II : Inputs) {
- // If the current tool chain refers to an OpenMP or HIP offloading host, we
- // should ignore inputs that refer to OpenMP or HIP offloading devices -
+ // If the current tool chain refers to an OpenMP offloading host, we
+ // should ignore inputs that refer to OpenMP offloading devices -
// they will be embedded according to a proper linker script.
if (auto *IA = II.getAction())
if ((JA.isHostOffloading(Action::OFK_OpenMP) &&
- IA->isDeviceOffloading(Action::OFK_OpenMP)) ||
- (JA.isHostOffloading(Action::OFK_HIP) &&
- IA->isDeviceOffloading(Action::OFK_HIP)))
+ IA->isDeviceOffloading(Action::OFK_OpenMP)))
continue;
if (!TC.HasNativeLLVMSupport() && types::isLLVMIR(II.getType()))
@@ -278,7 +303,7 @@ std::string tools::getCPUName(const ArgList &Args, const llvm::Triple &T,
StringRef CPUName;
StringRef ABIName;
mips::getMipsCPUAndABI(Args, T, CPUName, ABIName);
- return CPUName;
+ return std::string(CPUName);
}
case llvm::Triple::nvptx:
@@ -293,15 +318,19 @@ std::string tools::getCPUName(const ArgList &Args, const llvm::Triple &T,
std::string TargetCPUName = ppc::getPPCTargetCPU(Args);
// LLVM may default to generating code for the native CPU,
// but, like gcc, we default to a more generic option for
- // each architecture. (except on Darwin)
- if (TargetCPUName.empty() && !T.isOSDarwin()) {
- if (T.getArch() == llvm::Triple::ppc64)
- TargetCPUName = "ppc64";
- else if (T.getArch() == llvm::Triple::ppc64le)
- TargetCPUName = "ppc64le";
- else
- TargetCPUName = "ppc";
- }
+ // each architecture. (except on AIX)
+ if (!TargetCPUName.empty())
+ return TargetCPUName;
+
+ if (T.isOSAIX())
+ TargetCPUName = "pwr4";
+ else if (T.getArch() == llvm::Triple::ppc64le)
+ TargetCPUName = "ppc64le";
+ else if (T.getArch() == llvm::Triple::ppc64)
+ TargetCPUName = "ppc64";
+ else
+ TargetCPUName = "ppc";
+
return TargetCPUName;
}
@@ -334,18 +363,18 @@ std::string tools::getCPUName(const ArgList &Args, const llvm::Triple &T,
case llvm::Triple::wasm32:
case llvm::Triple::wasm64:
- return getWebAssemblyTargetCPU(Args);
+ return std::string(getWebAssemblyTargetCPU(Args));
}
}
-unsigned tools::getLTOParallelism(const ArgList &Args, const Driver &D) {
- unsigned Parallelism = 0;
+llvm::StringRef tools::getLTOParallelism(const ArgList &Args, const Driver &D) {
Arg *LtoJobsArg = Args.getLastArg(options::OPT_flto_jobs_EQ);
- if (LtoJobsArg &&
- StringRef(LtoJobsArg->getValue()).getAsInteger(10, Parallelism))
- D.Diag(diag::err_drv_invalid_int_value) << LtoJobsArg->getAsString(Args)
- << LtoJobsArg->getValue();
- return Parallelism;
+ if (!LtoJobsArg)
+ return {};
+ if (!llvm::get_threadpool_strategy(LtoJobsArg->getValue()))
+ D.Diag(diag::err_drv_invalid_int_value)
+ << LtoJobsArg->getAsString(Args) << LtoJobsArg->getValue();
+ return LtoJobsArg->getValue();
}
// CloudABI uses -ffunction-sections and -fdata-sections by default.
@@ -353,28 +382,32 @@ bool tools::isUseSeparateSections(const llvm::Triple &Triple) {
return Triple.getOS() == llvm::Triple::CloudABI;
}
-void tools::AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args,
+void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
ArgStringList &CmdArgs, const InputInfo &Output,
const InputInfo &Input, bool IsThinLTO) {
- // Tell the linker to load the plugin. This has to come before AddLinkerInputs
- // as gold requires -plugin to come before any -plugin-opt that -Wl might
- // forward.
- CmdArgs.push_back("-plugin");
+ const char *Linker = Args.MakeArgString(ToolChain.GetLinkerPath());
+ const Driver &D = ToolChain.getDriver();
+ if (llvm::sys::path::filename(Linker) != "ld.lld" &&
+ llvm::sys::path::stem(Linker) != "ld.lld") {
+ // Tell the linker to load the plugin. This has to come before
+ // AddLinkerInputs as gold requires -plugin to come before any -plugin-opt
+ // that -Wl might forward.
+ CmdArgs.push_back("-plugin");
#if defined(_WIN32)
- const char *Suffix = ".dll";
+ const char *Suffix = ".dll";
#elif defined(__APPLE__)
- const char *Suffix = ".dylib";
+ const char *Suffix = ".dylib";
#else
- const char *Suffix = ".so";
+ const char *Suffix = ".so";
#endif
- SmallString<1024> Plugin;
- llvm::sys::path::native(Twine(ToolChain.getDriver().Dir) +
- "/../lib" CLANG_LIBDIR_SUFFIX "/LLVMgold" +
- Suffix,
- Plugin);
- CmdArgs.push_back(Args.MakeArgString(Plugin));
+ SmallString<1024> Plugin;
+ llvm::sys::path::native(
+ Twine(D.Dir) + "/../lib" CLANG_LIBDIR_SUFFIX "/LLVMgold" + Suffix,
+ Plugin);
+ CmdArgs.push_back(Args.MakeArgString(Plugin));
+ }
// Try to pass driver level flags relevant to LTO code generation down to
// the plugin.
@@ -385,13 +418,19 @@ void tools::AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args,
CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=mcpu=") + CPU));
if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
+ // The optimization level matches
+ // CompilerInvocation.cpp:getOptimizationLevel().
StringRef OOpt;
if (A->getOption().matches(options::OPT_O4) ||
A->getOption().matches(options::OPT_Ofast))
OOpt = "3";
- else if (A->getOption().matches(options::OPT_O))
+ else if (A->getOption().matches(options::OPT_O)) {
OOpt = A->getValue();
- else if (A->getOption().matches(options::OPT_O0))
+ if (OOpt == "g")
+ OOpt = "1";
+ else if (OOpt == "s" || OOpt == "z")
+ OOpt = "2";
+ } else if (A->getOption().matches(options::OPT_O0))
OOpt = "0";
if (!OOpt.empty())
CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=O") + OOpt));
@@ -406,7 +445,8 @@ void tools::AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args,
if (IsThinLTO)
CmdArgs.push_back("-plugin-opt=thinlto");
- if (unsigned Parallelism = getLTOParallelism(Args, ToolChain.getDriver()))
+ StringRef Parallelism = getLTOParallelism(Args, D);
+ if (!Parallelism.empty())
CmdArgs.push_back(
Args.MakeArgString("-plugin-opt=jobs=" + Twine(Parallelism)));
@@ -437,7 +477,7 @@ void tools::AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args,
if (Arg *A = getLastProfileSampleUseArg(Args)) {
StringRef FName = A->getValue();
if (!llvm::sys::fs::exists(FName))
- ToolChain.getDriver().Diag(diag::err_drv_no_such_file) << FName;
+ D.Diag(diag::err_drv_no_such_file) << FName;
else
CmdArgs.push_back(
Args.MakeArgString(Twine("-plugin-opt=sample-profile=") + FName));
@@ -480,17 +520,21 @@ void tools::AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args,
}
// Setup statistics file output.
- SmallString<128> StatsFile =
- getStatsFileName(Args, Output, Input, ToolChain.getDriver());
+ SmallString<128> StatsFile = getStatsFileName(Args, Output, Input, D);
if (!StatsFile.empty())
CmdArgs.push_back(
Args.MakeArgString(Twine("-plugin-opt=stats-file=") + StatsFile));
+
+ addX86AlignBranchArgs(D, Args, CmdArgs, /*IsLTO=*/true);
}
void tools::addArchSpecificRPath(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs) {
+ // Enable -frtlib-add-rpath by default for the case of VE.
+ const bool IsVE = TC.getTriple().isVE();
+ bool DefaultValue = IsVE;
if (!Args.hasFlag(options::OPT_frtlib_add_rpath,
- options::OPT_fno_rtlib_add_rpath, false))
+ options::OPT_fno_rtlib_add_rpath, DefaultValue))
return;
std::string CandidateRPath = TC.getArchSpecificLibPath();
@@ -583,6 +627,11 @@ static bool addSanitizerDynamicList(const ToolChain &TC, const ArgList &Args,
void tools::linkSanitizerRuntimeDeps(const ToolChain &TC,
ArgStringList &CmdArgs) {
+ // Fuchsia never needs these. Any sanitizer runtimes with system
+ // dependencies use the `.deplibs` feature instead.
+ if (TC.getTriple().isOSFuchsia())
+ return;
+
// Force linking against the system libraries sanitizers depends on
// (see PR15823 why this is necessary).
CmdArgs.push_back("--no-as-needed");
@@ -642,17 +691,21 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
StaticRuntimes.push_back("stats_client");
// Collect static runtimes.
- if (Args.hasArg(options::OPT_shared) || SanArgs.needsSharedRt()) {
- // Don't link static runtimes into DSOs or if -shared-libasan.
+ if (Args.hasArg(options::OPT_shared)) {
+ // Don't link static runtimes into DSOs.
return;
}
- if (SanArgs.needsAsanRt() && SanArgs.linkRuntimes()) {
+
+ // Each static runtime that has a DSO counterpart above is excluded below,
+ // but runtimes that exist only as static are not affected by needsSharedRt.
+
+ if (!SanArgs.needsSharedRt() && SanArgs.needsAsanRt() && SanArgs.linkRuntimes()) {
StaticRuntimes.push_back("asan");
if (SanArgs.linkCXXRuntimes())
StaticRuntimes.push_back("asan_cxx");
}
- if (SanArgs.needsHwasanRt() && SanArgs.linkRuntimes()) {
+ if (!SanArgs.needsSharedRt() && SanArgs.needsHwasanRt() && SanArgs.linkRuntimes()) {
StaticRuntimes.push_back("hwasan");
if (SanArgs.linkCXXRuntimes())
StaticRuntimes.push_back("hwasan_cxx");
@@ -671,7 +724,7 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
if (SanArgs.linkCXXRuntimes())
StaticRuntimes.push_back("tsan_cxx");
}
- if (SanArgs.needsUbsanRt() && SanArgs.linkRuntimes()) {
+ if (!SanArgs.needsSharedRt() && SanArgs.needsUbsanRt() && SanArgs.linkRuntimes()) {
if (SanArgs.requiresMinimalRuntime()) {
StaticRuntimes.push_back("ubsan_minimal");
} else {
@@ -684,18 +737,20 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
NonWholeStaticRuntimes.push_back("safestack");
RequiredSymbols.push_back("__safestack_init");
}
- if (SanArgs.needsCfiRt() && SanArgs.linkRuntimes())
- StaticRuntimes.push_back("cfi");
- if (SanArgs.needsCfiDiagRt() && SanArgs.linkRuntimes()) {
- StaticRuntimes.push_back("cfi_diag");
- if (SanArgs.linkCXXRuntimes())
- StaticRuntimes.push_back("ubsan_standalone_cxx");
+ if (!(SanArgs.needsSharedRt() && SanArgs.needsUbsanRt() && SanArgs.linkRuntimes())) {
+ if (SanArgs.needsCfiRt() && SanArgs.linkRuntimes())
+ StaticRuntimes.push_back("cfi");
+ if (SanArgs.needsCfiDiagRt() && SanArgs.linkRuntimes()) {
+ StaticRuntimes.push_back("cfi_diag");
+ if (SanArgs.linkCXXRuntimes())
+ StaticRuntimes.push_back("ubsan_standalone_cxx");
+ }
}
if (SanArgs.needsStatsRt() && SanArgs.linkRuntimes()) {
NonWholeStaticRuntimes.push_back("stats");
RequiredSymbols.push_back("__sanitizer_stats_register");
}
- if (SanArgs.needsScudoRt() && SanArgs.linkRuntimes()) {
+ if (!SanArgs.needsSharedRt() && SanArgs.needsScudoRt() && SanArgs.linkRuntimes()) {
if (SanArgs.requiresMinimalRuntime()) {
StaticRuntimes.push_back("scudo_minimal");
if (SanArgs.linkCXXRuntimes())
@@ -751,7 +806,7 @@ bool tools::addSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
CmdArgs.push_back("--export-dynamic");
if (SanArgs.hasCrossDsoCfi() && !AddExportDynamic)
- CmdArgs.push_back("-export-dynamic-symbol=__cfi_check");
+ CmdArgs.push_back("--export-dynamic-symbol=__cfi_check");
return !StaticRuntimes.empty() || !NonWholeStaticRuntimes.empty();
}
@@ -834,10 +889,12 @@ void tools::SplitDebugInfo(const ToolChain &TC, Compilation &C, const Tool &T,
InputInfo II(types::TY_Object, Output.getFilename(), Output.getFilename());
// First extract the dwo sections.
- C.addCommand(std::make_unique<Command>(JA, T, Exec, ExtractArgs, II));
+ C.addCommand(std::make_unique<Command>(
+ JA, T, ResponseFileSupport::AtFileCurCP(), Exec, ExtractArgs, II));
// Then remove them from the original .o file.
- C.addCommand(std::make_unique<Command>(JA, T, Exec, StripArgs, II));
+ C.addCommand(std::make_unique<Command>(
+ JA, T, ResponseFileSupport::AtFileCurCP(), Exec, StripArgs, II));
}
// Claim options we don't want to warn if they are unused. We do this for
@@ -1211,7 +1268,14 @@ static void AddUnwindLibrary(const ToolChain &TC, const Driver &D,
case ToolChain::UNW_CompilerRT:
if (LGT == LibGccType::StaticLibGcc)
CmdArgs.push_back("-l:libunwind.a");
- else
+ else if (TC.getTriple().isOSCygMing()) {
+ if (LGT == LibGccType::SharedLibGcc)
+ CmdArgs.push_back("-l:libunwind.dll.a");
+ else
+ // Let the linker choose between libunwind.dll.a and libunwind.a,
+ // depending on what is available and on the -static flag.
+ CmdArgs.push_back("-lunwind");
+ } else
CmdArgs.push_back("-l:libunwind.so");
break;
}
@@ -1263,114 +1327,6 @@ void tools::AddRunTimeLibs(const ToolChain &TC, const Driver &D,
}
}
-/// Add HIP linker script arguments at the end of the argument list so that
-/// the fat binary is built by embedding the device images into the host. The
-/// linker script also defines a symbol required by the code generation so that
-/// the image can be retrieved at runtime. This should be used only in tool
-/// chains that support linker scripts.
-void tools::AddHIPLinkerScript(const ToolChain &TC, Compilation &C,
- const InputInfo &Output,
- const InputInfoList &Inputs, const ArgList &Args,
- ArgStringList &CmdArgs, const JobAction &JA,
- const Tool &T) {
-
- // If this is not a HIP host toolchain, we don't need to do anything.
- if (!JA.isHostOffloading(Action::OFK_HIP))
- return;
-
- InputInfoList DeviceInputs;
- for (const auto &II : Inputs) {
- const Action *A = II.getAction();
- // Is this a device linking action?
- if (A && isa<LinkJobAction>(A) && A->isDeviceOffloading(Action::OFK_HIP)) {
- DeviceInputs.push_back(II);
- }
- }
-
- if (DeviceInputs.empty())
- return;
-
- // Create temporary linker script. Keep it if save-temps is enabled.
- const char *LKS;
- std::string Name = llvm::sys::path::filename(Output.getFilename());
- if (C.getDriver().isSaveTempsEnabled()) {
- LKS = C.getArgs().MakeArgString(Name + ".lk");
- } else {
- auto TmpName = C.getDriver().GetTemporaryPath(Name, "lk");
- LKS = C.addTempFile(C.getArgs().MakeArgString(TmpName));
- }
-
- // Add linker script option to the command.
- CmdArgs.push_back("-T");
- CmdArgs.push_back(LKS);
-
- // Create a buffer to write the contents of the linker script.
- std::string LksBuffer;
- llvm::raw_string_ostream LksStream(LksBuffer);
-
- // Get the HIP offload tool chain.
- auto *HIPTC = static_cast<const toolchains::CudaToolChain *>(
- C.getSingleOffloadToolChain<Action::OFK_HIP>());
- assert(HIPTC->getTriple().getArch() == llvm::Triple::amdgcn &&
- "Wrong platform");
- (void)HIPTC;
-
- const char *BundleFile;
- if (C.getDriver().isSaveTempsEnabled()) {
- BundleFile = C.getArgs().MakeArgString(Name + ".hipfb");
- } else {
- auto TmpName = C.getDriver().GetTemporaryPath(Name, "hipfb");
- BundleFile = C.addTempFile(C.getArgs().MakeArgString(TmpName));
- }
- AMDGCN::constructHIPFatbinCommand(C, JA, BundleFile, DeviceInputs, Args, T);
-
- // Add commands to embed target binaries. We ensure that each section and
- // image is 16-byte aligned. This is not mandatory, but increases the
- // likelihood of data to be aligned with a cache block in several main host
- // machines.
- LksStream << "/*\n";
- LksStream << " HIP Offload Linker Script\n";
- LksStream << " *** Automatically generated by Clang ***\n";
- LksStream << "*/\n";
- LksStream << "TARGET(binary)\n";
- LksStream << "INPUT(" << BundleFile << ")\n";
- LksStream << "SECTIONS\n";
- LksStream << "{\n";
- LksStream << " .hip_fatbin :\n";
- LksStream << " ALIGN(0x10)\n";
- LksStream << " {\n";
- LksStream << " PROVIDE_HIDDEN(__hip_fatbin = .);\n";
- LksStream << " " << BundleFile << "\n";
- LksStream << " }\n";
- LksStream << " /DISCARD/ :\n";
- LksStream << " {\n";
- LksStream << " * ( __CLANG_OFFLOAD_BUNDLE__* )\n";
- LksStream << " }\n";
- LksStream << "}\n";
- LksStream << "INSERT BEFORE .data\n";
- LksStream.flush();
-
- // Dump the contents of the linker script if the user requested that. We
- // support this option to enable testing of behavior with -###.
- if (C.getArgs().hasArg(options::OPT_fhip_dump_offload_linker_script))
- llvm::errs() << LksBuffer;
-
- // If this is a dry run, do not create the linker script file.
- if (C.getArgs().hasArg(options::OPT__HASH_HASH_HASH))
- return;
-
- // Open script file and write the contents.
- std::error_code EC;
- llvm::raw_fd_ostream Lksf(LKS, EC, llvm::sys::fs::OF_None);
-
- if (EC) {
- C.getDriver().Diag(clang::diag::err_unable_to_make_temp) << EC.message();
- return;
- }
-
- Lksf << LksBuffer;
-}
-
SmallString<128> tools::getStatsFileName(const llvm::opt::ArgList &Args,
const InputInfo &Output,
const InputInfo &Input,
@@ -1399,3 +1355,53 @@ void tools::addMultilibFlag(bool Enabled, const char *const Flag,
Multilib::flags_list &Flags) {
Flags.push_back(std::string(Enabled ? "+" : "-") + Flag);
}
+
+void tools::addX86AlignBranchArgs(const Driver &D, const ArgList &Args,
+ ArgStringList &CmdArgs, bool IsLTO) {
+ auto addArg = [&, IsLTO](const Twine &Arg) {
+ if (IsLTO) {
+ CmdArgs.push_back(Args.MakeArgString("-plugin-opt=" + Arg));
+ } else {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back(Args.MakeArgString(Arg));
+ }
+ };
+
+ if (Args.hasArg(options::OPT_mbranches_within_32B_boundaries)) {
+ addArg(Twine("-x86-branches-within-32B-boundaries"));
+ }
+ if (const Arg *A = Args.getLastArg(options::OPT_malign_branch_boundary_EQ)) {
+ StringRef Value = A->getValue();
+ unsigned Boundary;
+ if (Value.getAsInteger(10, Boundary) || Boundary < 16 ||
+ !llvm::isPowerOf2_64(Boundary)) {
+ D.Diag(diag::err_drv_invalid_argument_to_option)
+ << Value << A->getOption().getName();
+ } else {
+ addArg("-x86-align-branch-boundary=" + Twine(Boundary));
+ }
+ }
+ if (const Arg *A = Args.getLastArg(options::OPT_malign_branch_EQ)) {
+ std::string AlignBranch;
+ for (StringRef T : A->getValues()) {
+ if (T != "fused" && T != "jcc" && T != "jmp" && T != "call" &&
+ T != "ret" && T != "indirect")
+ D.Diag(diag::err_drv_invalid_malign_branch_EQ)
+ << T << "fused, jcc, jmp, call, ret, indirect";
+ if (!AlignBranch.empty())
+ AlignBranch += '+';
+ AlignBranch += T;
+ }
+ addArg("-x86-align-branch=" + Twine(AlignBranch));
+ }
+ if (const Arg *A = Args.getLastArg(options::OPT_mpad_max_prefix_size_EQ)) {
+ StringRef Value = A->getValue();
+ unsigned PrefixSize;
+ if (Value.getAsInteger(10, PrefixSize)) {
+ D.Diag(diag::err_drv_invalid_argument_to_option)
+ << Value << A->getOption().getName();
+ } else {
+ addArg("-x86-pad-max-prefix-size=" + Twine(PrefixSize));
+ }
+ }
+}
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.h b/clang/lib/Driver/ToolChains/CommonArgs.h
index 84b9d2cf59b4..29dedec9b09c 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.h
+++ b/clang/lib/Driver/ToolChains/CommonArgs.h
@@ -45,12 +45,6 @@ void AddRunTimeLibs(const ToolChain &TC, const Driver &D,
llvm::opt::ArgStringList &CmdArgs,
const llvm::opt::ArgList &Args);
-void AddHIPLinkerScript(const ToolChain &TC, Compilation &C,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs, const JobAction &JA,
- const Tool &T);
-
const char *SplitDebugName(const llvm::opt::ArgList &Args,
const InputInfo &Input, const InputInfo &Output);
@@ -58,7 +52,7 @@ void SplitDebugInfo(const ToolChain &TC, Compilation &C, const Tool &T,
const JobAction &JA, const llvm::opt::ArgList &Args,
const InputInfo &Output, const char *OutFile);
-void AddGoldPlugin(const ToolChain &ToolChain, const llvm::opt::ArgList &Args,
+void addLTOOptions(const ToolChain &ToolChain, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs, const InputInfo &Output,
const InputInfo &Input, bool IsThinLTO);
@@ -88,12 +82,18 @@ llvm::opt::Arg *getLastProfileSampleUseArg(const llvm::opt::ArgList &Args);
bool isObjCAutoRefCount(const llvm::opt::ArgList &Args);
-unsigned getLTOParallelism(const llvm::opt::ArgList &Args, const Driver &D);
+llvm::StringRef getLTOParallelism(const llvm::opt::ArgList &Args,
+ const Driver &D);
bool areOptimizationsEnabled(const llvm::opt::ArgList &Args);
bool isUseSeparateSections(const llvm::Triple &Triple);
+/// \p EnvVar is split using the system's path-list delimiter for environment variables.
+/// If \p ArgName is "-I", "-L", or an empty string, each entry from \p EnvVar
+/// is prefixed by \p ArgName then added to \p Args. Otherwise, for each
+/// entry of \p EnvVar, \p ArgName is added to \p Args first, then the entry
+/// itself is added.
void addDirectoryList(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs, const char *ArgName,
const char *EnvVar);
@@ -106,10 +106,20 @@ void AddTargetFeature(const llvm::opt::ArgList &Args,
std::string getCPUName(const llvm::opt::ArgList &Args, const llvm::Triple &T,
bool FromAs = false);
+/// Iterate \p Args and convert -mxxx to +xxx and -mno-xxx to -xxx and
+/// append it to \p Features.
+///
+/// Note: Since \p Features may contain default values before calling
+/// this function, or may be appended with entries to override arguments,
+/// entries in \p Features are not unique.
void handleTargetFeaturesGroup(const llvm::opt::ArgList &Args,
std::vector<StringRef> &Features,
llvm::opt::OptSpecifier Group);
+/// If there are multiple +xxx or -xxx features, keep the last one.
+std::vector<StringRef>
+unifyTargetFeatures(const std::vector<StringRef> &Features);
+
/// Handles the -save-stats option and returns the filename to save statistics
/// to.
SmallString<128> getStatsFileName(const llvm::opt::ArgList &Args,
@@ -121,6 +131,8 @@ SmallString<128> getStatsFileName(const llvm::opt::ArgList &Args,
void addMultilibFlag(bool Enabled, const char *const Flag,
Multilib::flags_list &Flags);
+void addX86AlignBranchArgs(const Driver &D, const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs, bool IsLTO);
} // end namespace tools
} // end namespace driver
} // end namespace clang
diff --git a/clang/lib/Driver/ToolChains/CrossWindows.cpp b/clang/lib/Driver/ToolChains/CrossWindows.cpp
index dbf6114eb2ec..127a8a5f24cc 100644
--- a/clang/lib/Driver/ToolChains/CrossWindows.cpp
+++ b/clang/lib/Driver/ToolChains/CrossWindows.cpp
@@ -57,7 +57,8 @@ void tools::CrossWindows::Assembler::ConstructJob(
const std::string Assembler = TC.GetProgramPath("as");
Exec = Args.MakeArgString(Assembler);
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
}
void tools::CrossWindows::Linker::ConstructJob(
@@ -202,7 +203,8 @@ void tools::CrossWindows::Linker::ConstructJob(
Exec = Args.MakeArgString(TC.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
}
CrossWindowsToolChain::CrossWindowsToolChain(const Driver &D,
diff --git a/clang/lib/Driver/ToolChains/CrossWindows.h b/clang/lib/Driver/ToolChains/CrossWindows.h
index 7267a35d48b9..df9a7f71bf9f 100644
--- a/clang/lib/Driver/ToolChains/CrossWindows.h
+++ b/clang/lib/Driver/ToolChains/CrossWindows.h
@@ -33,8 +33,7 @@ public:
class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC)
- : Tool("CrossWindows::Linker", "ld", TC, RF_Full) {}
+ Linker(const ToolChain &TC) : Tool("CrossWindows::Linker", "ld", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
diff --git a/clang/lib/Driver/ToolChains/Cuda.cpp b/clang/lib/Driver/ToolChains/Cuda.cpp
index 02871d2ce411..110a0bca9bc1 100644
--- a/clang/lib/Driver/ToolChains/Cuda.cpp
+++ b/clang/lib/Driver/ToolChains/Cuda.cpp
@@ -18,9 +18,11 @@
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Host.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
+#include "llvm/Support/TargetParser.h"
#include "llvm/Support/VirtualFileSystem.h"
#include <system_error>
@@ -32,37 +34,33 @@ using namespace llvm::opt;
// Parses the contents of version.txt in an CUDA installation. It should
// contain one line of the from e.g. "CUDA Version 7.5.2".
-static CudaVersion ParseCudaVersionFile(llvm::StringRef V) {
+void CudaInstallationDetector::ParseCudaVersionFile(llvm::StringRef V) {
+ Version = CudaVersion::UNKNOWN;
if (!V.startswith("CUDA Version "))
- return CudaVersion::UNKNOWN;
+ return;
V = V.substr(strlen("CUDA Version "));
- int Major = -1, Minor = -1;
- auto First = V.split('.');
- auto Second = First.second.split('.');
- if (First.first.getAsInteger(10, Major) ||
- Second.first.getAsInteger(10, Minor))
- return CudaVersion::UNKNOWN;
-
- if (Major == 7 && Minor == 0) {
- // This doesn't appear to ever happen -- version.txt doesn't exist in the
- // CUDA 7 installs I've seen. But no harm in checking.
- return CudaVersion::CUDA_70;
+ SmallVector<StringRef,4> VersionParts;
+ V.split(VersionParts, '.');
+ if (VersionParts.size() < 2)
+ return;
+ DetectedVersion = join_items(".", VersionParts[0], VersionParts[1]);
+ Version = CudaStringToVersion(DetectedVersion);
+ if (Version != CudaVersion::UNKNOWN) {
+ // TODO(tra): remove the warning once we have all features of 10.2 and 11.0
+ // implemented.
+ DetectedVersionIsNotSupported = Version > CudaVersion::LATEST_SUPPORTED;
+ return;
}
- if (Major == 7 && Minor == 5)
- return CudaVersion::CUDA_75;
- if (Major == 8 && Minor == 0)
- return CudaVersion::CUDA_80;
- if (Major == 9 && Minor == 0)
- return CudaVersion::CUDA_90;
- if (Major == 9 && Minor == 1)
- return CudaVersion::CUDA_91;
- if (Major == 9 && Minor == 2)
- return CudaVersion::CUDA_92;
- if (Major == 10 && Minor == 0)
- return CudaVersion::CUDA_100;
- if (Major == 10 && Minor == 1)
- return CudaVersion::CUDA_101;
- return CudaVersion::UNKNOWN;
+
+ Version = CudaVersion::LATEST_SUPPORTED;
+ DetectedVersionIsNotSupported = true;
+}
+
+void CudaInstallationDetector::WarnIfUnsupportedVersion() {
+ if (DetectedVersionIsNotSupported)
+ D.Diag(diag::warn_drv_unknown_cuda_version)
+ << DetectedVersion
+ << CudaVersionToString(CudaVersion::LATEST_SUPPORTED);
}
CudaInstallationDetector::CudaInstallationDetector(
@@ -80,6 +78,7 @@ CudaInstallationDetector::CudaInstallationDetector(
// In decreasing order so we prefer newer versions to older versions.
std::initializer_list<const char *> Versions = {"8.0", "7.5", "7.0"};
+ auto &FS = D.getVFS();
if (Args.hasArg(clang::driver::options::OPT_cuda_path_EQ)) {
Candidates.emplace_back(
@@ -106,8 +105,9 @@ CudaInstallationDetector::CudaInstallationDetector(
StringRef ptxasDir = llvm::sys::path::parent_path(ptxasAbsolutePath);
if (llvm::sys::path::filename(ptxasDir) == "bin")
- Candidates.emplace_back(llvm::sys::path::parent_path(ptxasDir),
- /*StrictChecking=*/true);
+ Candidates.emplace_back(
+ std::string(llvm::sys::path::parent_path(ptxasDir)),
+ /*StrictChecking=*/true);
}
}
@@ -115,7 +115,7 @@ CudaInstallationDetector::CudaInstallationDetector(
for (const char *Ver : Versions)
Candidates.emplace_back(D.SysRoot + "/usr/local/cuda-" + Ver);
- Distro Dist(D.getVFS(), llvm::Triple(llvm::sys::getProcessTriple()));
+ Distro Dist(FS, llvm::Triple(llvm::sys::getProcessTriple()));
if (Dist.IsDebian() || Dist.IsUbuntu())
// Special case for Debian to have nvidia-cuda-toolkit work
// out of the box. More info on http://bugs.debian.org/882505
@@ -126,14 +126,13 @@ CudaInstallationDetector::CudaInstallationDetector(
for (const auto &Candidate : Candidates) {
InstallPath = Candidate.Path;
- if (InstallPath.empty() || !D.getVFS().exists(InstallPath))
+ if (InstallPath.empty() || !FS.exists(InstallPath))
continue;
BinPath = InstallPath + "/bin";
IncludePath = InstallPath + "/include";
LibDevicePath = InstallPath + "/nvvm/libdevice";
- auto &FS = D.getVFS();
if (!(FS.exists(IncludePath) && FS.exists(BinPath)))
continue;
bool CheckLibDevice = (!NoCudaLib || Candidate.StrictChecking);
@@ -160,25 +159,26 @@ CudaInstallationDetector::CudaInstallationDetector(
// version.txt isn't present.
Version = CudaVersion::CUDA_70;
} else {
- Version = ParseCudaVersionFile((*VersionFile)->getBuffer());
+ ParseCudaVersionFile((*VersionFile)->getBuffer());
}
if (Version >= CudaVersion::CUDA_90) {
// CUDA-9+ uses single libdevice file for all GPU variants.
std::string FilePath = LibDevicePath + "/libdevice.10.bc";
if (FS.exists(FilePath)) {
- for (const char *GpuArchName :
- {"sm_30", "sm_32", "sm_35", "sm_37", "sm_50", "sm_52", "sm_53",
- "sm_60", "sm_61", "sm_62", "sm_70", "sm_72", "sm_75"}) {
- const CudaArch GpuArch = StringToCudaArch(GpuArchName);
- if (Version >= MinVersionForCudaArch(GpuArch) &&
- Version <= MaxVersionForCudaArch(GpuArch))
- LibDeviceMap[GpuArchName] = FilePath;
+ for (int Arch = (int)CudaArch::SM_30, E = (int)CudaArch::LAST; Arch < E;
+ ++Arch) {
+ CudaArch GpuArch = static_cast<CudaArch>(Arch);
+ if (!IsNVIDIAGpuArch(GpuArch))
+ continue;
+ std::string GpuArchName(CudaArchToString(GpuArch));
+ LibDeviceMap[GpuArchName] = FilePath;
}
}
} else {
std::error_code EC;
- for (llvm::sys::fs::directory_iterator LI(LibDevicePath, EC), LE;
+ for (llvm::vfs::directory_iterator LI = FS.dir_begin(LibDevicePath, EC),
+ LE;
!EC && LI != LE; LI = LI.increment(EC)) {
StringRef FilePath = LI->path();
StringRef FileName = llvm::sys::path::filename(FilePath);
@@ -194,27 +194,27 @@ CudaInstallationDetector::CudaInstallationDetector(
// capability. NVCC's choice of the libdevice library version is
// rather peculiar and depends on the CUDA version.
if (GpuArch == "compute_20") {
- LibDeviceMap["sm_20"] = FilePath;
- LibDeviceMap["sm_21"] = FilePath;
- LibDeviceMap["sm_32"] = FilePath;
+ LibDeviceMap["sm_20"] = std::string(FilePath);
+ LibDeviceMap["sm_21"] = std::string(FilePath);
+ LibDeviceMap["sm_32"] = std::string(FilePath);
} else if (GpuArch == "compute_30") {
- LibDeviceMap["sm_30"] = FilePath;
+ LibDeviceMap["sm_30"] = std::string(FilePath);
if (Version < CudaVersion::CUDA_80) {
- LibDeviceMap["sm_50"] = FilePath;
- LibDeviceMap["sm_52"] = FilePath;
- LibDeviceMap["sm_53"] = FilePath;
+ LibDeviceMap["sm_50"] = std::string(FilePath);
+ LibDeviceMap["sm_52"] = std::string(FilePath);
+ LibDeviceMap["sm_53"] = std::string(FilePath);
}
- LibDeviceMap["sm_60"] = FilePath;
- LibDeviceMap["sm_61"] = FilePath;
- LibDeviceMap["sm_62"] = FilePath;
+ LibDeviceMap["sm_60"] = std::string(FilePath);
+ LibDeviceMap["sm_61"] = std::string(FilePath);
+ LibDeviceMap["sm_62"] = std::string(FilePath);
} else if (GpuArch == "compute_35") {
- LibDeviceMap["sm_35"] = FilePath;
- LibDeviceMap["sm_37"] = FilePath;
+ LibDeviceMap["sm_35"] = std::string(FilePath);
+ LibDeviceMap["sm_37"] = std::string(FilePath);
} else if (GpuArch == "compute_50") {
if (Version >= CudaVersion::CUDA_80) {
- LibDeviceMap["sm_50"] = FilePath;
- LibDeviceMap["sm_52"] = FilePath;
- LibDeviceMap["sm_53"] = FilePath;
+ LibDeviceMap["sm_50"] = std::string(FilePath);
+ LibDeviceMap["sm_52"] = std::string(FilePath);
+ LibDeviceMap["sm_53"] = std::string(FilePath);
}
}
}
@@ -242,7 +242,7 @@ void CudaInstallationDetector::AddCudaIncludeArgs(
CC1Args.push_back(DriverArgs.MakeArgString(P));
}
- if (DriverArgs.hasArg(options::OPT_nocudainc))
+ if (DriverArgs.hasArg(options::OPT_nogpuinc))
return;
if (!isValid()) {
@@ -423,7 +423,11 @@ void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
Exec = A->getValue();
else
Exec = Args.MakeArgString(TC.GetProgramPath("ptxas"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this,
+ ResponseFileSupport{ResponseFileSupport::RF_Full, llvm::sys::WEM_UTF8,
+ "--options-file"},
+ Exec, CmdArgs, Inputs));
}
static bool shouldIncludePTX(const ArgList &Args, const char *gpu_arch) {
@@ -477,10 +481,9 @@ void NVPTX::Linker::ConstructJob(Compilation &C, const JobAction &JA,
continue;
// We need to pass an Arch of the form "sm_XX" for cubin files and
// "compute_XX" for ptx.
- const char *Arch =
- (II.getType() == types::TY_PP_Asm)
- ? CudaVirtualArchToString(VirtualArchForCudaArch(gpu_arch))
- : gpu_arch_str;
+ const char *Arch = (II.getType() == types::TY_PP_Asm)
+ ? CudaArchToVirtualArchString(gpu_arch)
+ : gpu_arch_str;
CmdArgs.push_back(Args.MakeArgString(llvm::Twine("--image=profile=") +
Arch + ",file=" + II.getFilename()));
}
@@ -489,7 +492,11 @@ void NVPTX::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(A));
const char *Exec = Args.MakeArgString(TC.GetProgramPath("fatbinary"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this,
+ ResponseFileSupport{ResponseFileSupport::RF_Full, llvm::sys::WEM_UTF8,
+ "--options-file"},
+ Exec, CmdArgs, Inputs));
}
void NVPTX::OpenMPLinker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -566,7 +573,11 @@ void NVPTX::OpenMPLinker::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("nvlink"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this,
+ ResponseFileSupport{ResponseFileSupport::RF_Full, llvm::sys::WEM_UTF8,
+ "--options-file"},
+ Exec, CmdArgs, Inputs));
}
/// CUDA toolchain. Our assembler is ptxas, and our "linker" is fatbinary,
@@ -578,8 +589,10 @@ CudaToolChain::CudaToolChain(const Driver &D, const llvm::Triple &Triple,
const Action::OffloadKind OK)
: ToolChain(D, Triple, Args), HostTC(HostTC),
CudaInstallation(D, HostTC.getTriple(), Args), OK(OK) {
- if (CudaInstallation.isValid())
- getProgramPaths().push_back(CudaInstallation.getBinPath());
+ if (CudaInstallation.isValid()) {
+ CudaInstallation.WarnIfUnsupportedVersion();
+ getProgramPaths().push_back(std::string(CudaInstallation.getBinPath()));
+ }
// Lookup binaries into the driver directory, this is used to
// discover the clang-offload-bundler executable.
getProgramPaths().push_back(getDriver().Dir);
@@ -596,7 +609,7 @@ std::string CudaToolChain::getInputFilename(const InputInfo &Input) const {
// these particular file names.
SmallString<256> Filename(ToolChain::getInputFilename(Input));
llvm::sys::path::replace_extension(Filename, "cubin");
- return Filename.str();
+ return std::string(Filename.str());
}
void CudaToolChain::addClangTargetOptions(
@@ -614,10 +627,6 @@ void CudaToolChain::addClangTargetOptions(
if (DeviceOffloadingKind == Action::OFK_Cuda) {
CC1Args.push_back("-fcuda-is-device");
- if (DriverArgs.hasFlag(options::OPT_fcuda_flush_denormals_to_zero,
- options::OPT_fno_cuda_flush_denormals_to_zero, false))
- CC1Args.push_back("-fcuda-flush-denormals-to-zero");
-
if (DriverArgs.hasFlag(options::OPT_fcuda_approx_transcendentals,
options::OPT_fno_cuda_approx_transcendentals, false))
CC1Args.push_back("-fcuda-approx-transcendentals");
@@ -648,24 +657,30 @@ void CudaToolChain::addClangTargetOptions(
// by new PTX version, so we need to raise PTX level to enable them in NVPTX
// back-end.
const char *PtxFeature = nullptr;
- switch(CudaInstallation.version()) {
- case CudaVersion::CUDA_101:
- PtxFeature = "+ptx64";
- break;
- case CudaVersion::CUDA_100:
- PtxFeature = "+ptx63";
- break;
- case CudaVersion::CUDA_92:
- PtxFeature = "+ptx61";
- break;
- case CudaVersion::CUDA_91:
- PtxFeature = "+ptx61";
- break;
- case CudaVersion::CUDA_90:
- PtxFeature = "+ptx60";
- break;
- default:
- PtxFeature = "+ptx42";
+ switch (CudaInstallation.version()) {
+ case CudaVersion::CUDA_110:
+ PtxFeature = "+ptx70";
+ break;
+ case CudaVersion::CUDA_102:
+ PtxFeature = "+ptx65";
+ break;
+ case CudaVersion::CUDA_101:
+ PtxFeature = "+ptx64";
+ break;
+ case CudaVersion::CUDA_100:
+ PtxFeature = "+ptx63";
+ break;
+ case CudaVersion::CUDA_92:
+ PtxFeature = "+ptx61";
+ break;
+ case CudaVersion::CUDA_91:
+ PtxFeature = "+ptx61";
+ break;
+ case CudaVersion::CUDA_90:
+ PtxFeature = "+ptx60";
+ break;
+ default:
+ PtxFeature = "+ptx42";
}
CC1Args.append({"-target-feature", PtxFeature});
if (DriverArgs.hasFlag(options::OPT_fcuda_short_ptr,
@@ -718,6 +733,21 @@ void CudaToolChain::addClangTargetOptions(
}
}
+llvm::DenormalMode CudaToolChain::getDefaultDenormalModeForType(
+ const llvm::opt::ArgList &DriverArgs, const JobAction &JA,
+ const llvm::fltSemantics *FPType) const {
+ if (JA.getOffloadingDeviceKind() == Action::OFK_Cuda) {
+ if (FPType && FPType == &llvm::APFloat::IEEEsingle() &&
+ DriverArgs.hasFlag(options::OPT_fcuda_flush_denormals_to_zero,
+ options::OPT_fno_cuda_flush_denormals_to_zero,
+ false))
+ return llvm::DenormalMode::getPreserveSign();
+ }
+
+ assert(JA.getOffloadingDeviceKind() != Action::OFK_Host);
+ return llvm::DenormalMode::getIEEE();
+}
+
bool CudaToolChain::supportsDebugInfoOption(const llvm::opt::Arg *A) const {
const Option &O = A->getOption();
return (O.matches(options::OPT_gN_Group) &&
@@ -748,7 +778,7 @@ void CudaToolChain::adjustDebugInfoKind(
void CudaToolChain::AddCudaIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
// Check our CUDA version if we're going to include the CUDA headers.
- if (!DriverArgs.hasArg(options::OPT_nocudainc) &&
+ if (!DriverArgs.hasArg(options::OPT_nogpuinc) &&
!DriverArgs.hasArg(options::OPT_no_cuda_version_check)) {
StringRef Arch = DriverArgs.getLastArgValue(options::OPT_march_EQ);
assert(!Arch.empty() && "Must have an explicit GPU arch.");
@@ -793,36 +823,6 @@ CudaToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
}
for (Arg *A : Args) {
- if (A->getOption().matches(options::OPT_Xarch__)) {
- // Skip this argument unless the architecture matches BoundArch
- if (BoundArch.empty() || A->getValue(0) != BoundArch)
- continue;
-
- unsigned Index = Args.getBaseArgs().MakeIndex(A->getValue(1));
- unsigned Prev = Index;
- std::unique_ptr<Arg> XarchArg(Opts.ParseOneArg(Args, Index));
-
- // If the argument parsing failed or more than one argument was
- // consumed, the -Xarch_ argument's parameter tried to consume
- // extra arguments. Emit an error and ignore.
- //
- // We also want to disallow any options which would alter the
- // driver behavior; that isn't going to work in our model. We
- // use isDriverOption() as an approximation, although things
- // like -O4 are going to slip through.
- if (!XarchArg || Index > Prev + 1) {
- getDriver().Diag(diag::err_drv_invalid_Xarch_argument_with_args)
- << A->getAsString(Args);
- continue;
- } else if (XarchArg->getOption().hasFlag(options::DriverOption)) {
- getDriver().Diag(diag::err_drv_invalid_Xarch_argument_isdriver)
- << A->getAsString(Args);
- continue;
- }
- XarchArg->setBaseArg(A);
- A = XarchArg.release();
- DAL->AddSynthesizedArg(A);
- }
DAL->append(A);
}
diff --git a/clang/lib/Driver/ToolChains/Cuda.h b/clang/lib/Driver/ToolChains/Cuda.h
index 4ee8b6f1fea9..873eb7338a30 100644
--- a/clang/lib/Driver/ToolChains/Cuda.h
+++ b/clang/lib/Driver/ToolChains/Cuda.h
@@ -30,6 +30,8 @@ private:
const Driver &D;
bool IsValid = false;
CudaVersion Version = CudaVersion::UNKNOWN;
+ std::string DetectedVersion;
+ bool DetectedVersionIsNotSupported = false;
std::string InstallPath;
std::string BinPath;
std::string LibPath;
@@ -75,6 +77,10 @@ public:
std::string getLibDeviceFile(StringRef Gpu) const {
return LibDeviceMap.lookup(Gpu);
}
+ void WarnIfUnsupportedVersion();
+
+private:
+ void ParseCudaVersionFile(llvm::StringRef V);
};
namespace tools {
@@ -83,9 +89,7 @@ namespace NVPTX {
// Run ptxas, the NVPTX assembler.
class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
public:
- Assembler(const ToolChain &TC)
- : Tool("NVPTX::Assembler", "ptxas", TC, RF_Full, llvm::sys::WEM_UTF8,
- "--options-file") {}
+ Assembler(const ToolChain &TC) : Tool("NVPTX::Assembler", "ptxas", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -99,9 +103,7 @@ class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
// assembly into a single output file.
class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC)
- : Tool("NVPTX::Linker", "fatbinary", TC, RF_Full, llvm::sys::WEM_UTF8,
- "--options-file") {}
+ Linker(const ToolChain &TC) : Tool("NVPTX::Linker", "fatbinary", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -114,8 +116,7 @@ class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
class LLVM_LIBRARY_VISIBILITY OpenMPLinker : public Tool {
public:
OpenMPLinker(const ToolChain &TC)
- : Tool("NVPTX::OpenMPLinker", "nvlink", TC, RF_Full, llvm::sys::WEM_UTF8,
- "--options-file") {}
+ : Tool("NVPTX::OpenMPLinker", "nvlink", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -149,6 +150,10 @@ public:
llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadKind) const override;
+ llvm::DenormalMode getDefaultDenormalModeForType(
+ const llvm::opt::ArgList &DriverArgs, const JobAction &JA,
+ const llvm::fltSemantics *FPType = nullptr) const override;
+
// Never try to use the integrated assembler with CUDA; always fork out to
// ptxas.
bool useIntegratedAs() const override { return false; }
diff --git a/clang/lib/Driver/ToolChains/Darwin.cpp b/clang/lib/Driver/ToolChains/Darwin.cpp
index 344a14fe1ea7..7b879f8cb652 100644
--- a/clang/lib/Driver/ToolChains/Darwin.cpp
+++ b/clang/lib/Driver/ToolChains/Darwin.cpp
@@ -23,6 +23,7 @@
#include "llvm/Support/Path.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/Threading.h"
#include "llvm/Support/VirtualFileSystem.h"
#include <cstdlib> // ::getenv
@@ -147,7 +148,8 @@ void darwin::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
// asm_final spec is empty.
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
}
void darwin::MachOTool::anchor() {}
@@ -201,16 +203,11 @@ static bool shouldLinkerNotDedup(bool IsLinkerOnlyAction, const ArgList &Args) {
void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
ArgStringList &CmdArgs,
- const InputInfoList &Inputs) const {
+ const InputInfoList &Inputs,
+ unsigned Version[5]) const {
const Driver &D = getToolChain().getDriver();
const toolchains::MachO &MachOTC = getMachOToolChain();
- unsigned Version[5] = {0, 0, 0, 0, 0};
- if (Arg *A = Args.getLastArg(options::OPT_mlinker_version_EQ)) {
- if (!Driver::GetReleaseVersion(A->getValue(), Version))
- D.Diag(diag::err_drv_invalid_version_number) << A->getAsString(Args);
- }
-
// Newer linkers support -demangle. Pass it if supported and not disabled by
// the user.
if (Version[0] >= 100 && !Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
@@ -335,7 +332,7 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
Args.AddAllArgs(CmdArgs, options::OPT_init);
// Add the deployment target.
- if (!Version[0] || Version[0] >= 520)
+ if (Version[0] >= 520)
MachOTC.addPlatformVersionArgs(Args, CmdArgs);
else
MachOTC.addMinVersionArgs(Args, CmdArgs);
@@ -429,6 +426,75 @@ static bool isObjCRuntimeLinked(const ArgList &Args) {
return Args.hasArg(options::OPT_fobjc_link_runtime);
}
+static bool checkRemarksOptions(const Driver &D, const ArgList &Args,
+ const llvm::Triple &Triple) {
+ // When enabling remarks, we need to error if:
+ // * The remark file is specified but we're targeting multiple architectures,
+ // which means more than one remark file is being generated.
+ bool hasMultipleInvocations =
+ Args.getAllArgValues(options::OPT_arch).size() > 1;
+ bool hasExplicitOutputFile =
+ Args.getLastArg(options::OPT_foptimization_record_file_EQ);
+ if (hasMultipleInvocations && hasExplicitOutputFile) {
+ D.Diag(diag::err_drv_invalid_output_with_multiple_archs)
+ << "-foptimization-record-file";
+ return false;
+ }
+ return true;
+}
+
+static void renderRemarksOptions(const ArgList &Args, ArgStringList &CmdArgs,
+ const llvm::Triple &Triple,
+ const InputInfo &Output, const JobAction &JA) {
+ StringRef Format = "yaml";
+ if (const Arg *A = Args.getLastArg(options::OPT_fsave_optimization_record_EQ))
+ Format = A->getValue();
+
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-lto-pass-remarks-output");
+ CmdArgs.push_back("-mllvm");
+
+ const Arg *A = Args.getLastArg(options::OPT_foptimization_record_file_EQ);
+ if (A) {
+ CmdArgs.push_back(A->getValue());
+ } else {
+ assert(Output.isFilename() && "Unexpected ld output.");
+ SmallString<128> F;
+ F = Output.getFilename();
+ F += ".opt.";
+ F += Format;
+
+ CmdArgs.push_back(Args.MakeArgString(F));
+ }
+
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_foptimization_record_passes_EQ)) {
+ CmdArgs.push_back("-mllvm");
+ std::string Passes =
+ std::string("-lto-pass-remarks-filter=") + A->getValue();
+ CmdArgs.push_back(Args.MakeArgString(Passes));
+ }
+
+ if (!Format.empty()) {
+ CmdArgs.push_back("-mllvm");
+ Twine FormatArg = Twine("-lto-pass-remarks-format=") + Format;
+ CmdArgs.push_back(Args.MakeArgString(FormatArg));
+ }
+
+ if (getLastProfileUseArg(Args)) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-lto-pass-remarks-with-hotness");
+
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fdiagnostics_hotness_threshold_EQ)) {
+ CmdArgs.push_back("-mllvm");
+ std::string Opt =
+ std::string("-lto-pass-remarks-hotness-threshold=") + A->getValue();
+ CmdArgs.push_back(Args.MakeArgString(Opt));
+ }
+ }
+}
+
void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -455,63 +521,26 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("touch"));
CmdArgs.push_back(Output.getFilename());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, None));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::None(), Exec, CmdArgs, None));
return;
}
+ unsigned Version[5] = {0, 0, 0, 0, 0};
+ if (Arg *A = Args.getLastArg(options::OPT_mlinker_version_EQ)) {
+ if (!Driver::GetReleaseVersion(A->getValue(), Version))
+ getToolChain().getDriver().Diag(diag::err_drv_invalid_version_number)
+ << A->getAsString(Args);
+ }
+
// I'm not sure why this particular decomposition exists in gcc, but
// we follow suite for ease of comparison.
- AddLinkArgs(C, Args, CmdArgs, Inputs);
+ AddLinkArgs(C, Args, CmdArgs, Inputs, Version);
- // For LTO, pass the name of the optimization record file and other
- // opt-remarks flags.
- if (Args.hasFlag(options::OPT_fsave_optimization_record,
- options::OPT_fsave_optimization_record_EQ,
- options::OPT_fno_save_optimization_record, false)) {
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back("-lto-pass-remarks-output");
- CmdArgs.push_back("-mllvm");
-
- SmallString<128> F;
- F = Output.getFilename();
- F += ".opt.";
- if (const Arg *A =
- Args.getLastArg(options::OPT_fsave_optimization_record_EQ))
- F += A->getValue();
- else
- F += "yaml";
-
- CmdArgs.push_back(Args.MakeArgString(F));
-
- if (getLastProfileUseArg(Args)) {
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back("-lto-pass-remarks-with-hotness");
-
- if (const Arg *A =
- Args.getLastArg(options::OPT_fdiagnostics_hotness_threshold_EQ)) {
- CmdArgs.push_back("-mllvm");
- std::string Opt =
- std::string("-lto-pass-remarks-hotness-threshold=") + A->getValue();
- CmdArgs.push_back(Args.MakeArgString(Opt));
- }
- }
-
- if (const Arg *A =
- Args.getLastArg(options::OPT_foptimization_record_passes_EQ)) {
- CmdArgs.push_back("-mllvm");
- std::string Passes =
- std::string("-lto-pass-remarks-filter=") + A->getValue();
- CmdArgs.push_back(Args.MakeArgString(Passes));
- }
-
- if (const Arg *A =
- Args.getLastArg(options::OPT_fsave_optimization_record_EQ)) {
- CmdArgs.push_back("-mllvm");
- std::string Format =
- std::string("-lto-pass-remarks-format=") + A->getValue();
- CmdArgs.push_back(Args.MakeArgString(Format));
- }
- }
+ if (willEmitRemarks(Args) &&
+ checkRemarksOptions(getToolChain().getDriver(), Args,
+ getToolChain().getTriple()))
+ renderRemarksOptions(Args, CmdArgs, getToolChain().getTriple(), Output, JA);
// Propagate the -moutline flag to the linker in LTO.
if (Arg *A =
@@ -605,10 +634,12 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
getMachOToolChain().addProfileRTLibs(Args, CmdArgs);
- if (unsigned Parallelism =
- getLTOParallelism(Args, getToolChain().getDriver())) {
+ StringRef Parallelism = getLTOParallelism(Args, getToolChain().getDriver());
+ if (!Parallelism.empty()) {
CmdArgs.push_back("-mllvm");
- CmdArgs.push_back(Args.MakeArgString("-threads=" + Twine(Parallelism)));
+ unsigned NumThreads =
+ llvm::get_threadpool_strategy(Parallelism)->compute_thread_count();
+ CmdArgs.push_back(Args.MakeArgString("-threads=" + Twine(NumThreads)));
}
if (getToolChain().ShouldLinkCXXStdlib(Args))
@@ -655,9 +686,16 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ ResponseFileSupport ResponseSupport = ResponseFileSupport::AtFileUTF8();
+ if (Version[0] < 607) {
+ // For older versions of the linker, use the legacy filelist method instead.
+ ResponseSupport = {ResponseFileSupport::RF_FileList, llvm::sys::WEM_UTF8,
+ "-filelist"};
+ }
+
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
- std::unique_ptr<Command> Cmd =
- std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs);
+ std::unique_ptr<Command> Cmd = std::make_unique<Command>(
+ JA, *this, ResponseSupport, Exec, CmdArgs, Inputs);
Cmd->setInputFileList(std::move(InputFileList));
C.addCommand(std::move(Cmd));
}
@@ -681,7 +719,8 @@ void darwin::Lipo::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("lipo"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
}
void darwin::Dsymutil::ConstructJob(Compilation &C, const JobAction &JA,
@@ -701,7 +740,8 @@ void darwin::Dsymutil::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("dsymutil"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
}
void darwin::VerifyDebug::ConstructJob(Compilation &C, const JobAction &JA,
@@ -724,7 +764,8 @@ void darwin::VerifyDebug::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("dwarfdump"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
}
MachO::MachO(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
@@ -738,7 +779,7 @@ MachO::MachO(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
/// Darwin - Darwin tool chain for i386 and x86_64.
Darwin::Darwin(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: MachO(D, Triple, Args), TargetInitialized(false),
- CudaInstallation(D, Triple, Args) {}
+ CudaInstallation(D, Triple, Args), RocmInstallation(D, Triple, Args) {}
types::ID MachO::LookupTypeForExtension(StringRef Ext) const {
types::ID Ty = ToolChain::LookupTypeForExtension(Ext);
@@ -790,6 +831,11 @@ void Darwin::AddCudaIncludeArgs(const ArgList &DriverArgs,
CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
}
+void Darwin::AddHIPIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ RocmInstallation.AddHIPIncludeArgs(DriverArgs, CC1Args);
+}
+
// This is just a MachO name translation routine and there's no
// way to join this into ARMTargetParser without breaking all
// other assumptions. Maybe MachO should consider standardising
@@ -913,6 +959,10 @@ DarwinClang::DarwinClang(const Driver &D, const llvm::Triple &Triple,
: Darwin(D, Triple, Args) {}
void DarwinClang::addClangWarningOptions(ArgStringList &CC1Args) const {
+ // Always error about undefined 'TARGET_OS_*' macros.
+ CC1Args.push_back("-Wundef-prefix=TARGET_OS_");
+ CC1Args.push_back("-Werror=undef-prefix");
+
// For modern targets, promote certain warnings to errors.
if (isTargetWatchOSBased() || getTriple().isArch64Bit()) {
// Always enable -Wdeprecated-objc-isa-usage and promote it
@@ -944,6 +994,8 @@ void DarwinClang::AddLinkARCArgs(const ArgList &Args,
// Avoid linking compatibility stubs on i386 mac.
if (isTargetMacOS() && getArch() == llvm::Triple::x86)
return;
+ if (isTargetAppleSiliconMac())
+ return;
ObjCRuntime runtime = getDefaultObjCRuntime(/*nonfragile*/ true);
@@ -1068,8 +1120,8 @@ StringRef Darwin::getPlatformFamily() const {
StringRef Darwin::getSDKName(StringRef isysroot) {
// Assume SDK has path: SOME_PATH/SDKs/PlatformXX.YY.sdk
- auto BeginSDK = llvm::sys::path::begin(isysroot);
- auto EndSDK = llvm::sys::path::end(isysroot);
+ auto BeginSDK = llvm::sys::path::rbegin(isysroot);
+ auto EndSDK = llvm::sys::path::rend(isysroot);
for (auto IT = BeginSDK; IT != EndSDK; ++IT) {
StringRef SDK = *IT;
if (SDK.endswith(".sdk"))
@@ -1131,7 +1183,8 @@ static void addSectalignToPage(const ArgList &Args, ArgStringList &CmdArgs,
void Darwin::addProfileRTLibs(const ArgList &Args,
ArgStringList &CmdArgs) const {
- if (!needsProfileRT(Args)) return;
+ if (!needsProfileRT(Args) && !needsGCovInstrumentation(Args))
+ return;
AddLinkRuntimeLib(Args, CmdArgs, "profile",
RuntimeLinkOptions(RLO_AlwaysLink | RLO_FirstLink));
@@ -1146,6 +1199,7 @@ void Darwin::addProfileRTLibs(const ArgList &Args,
addExportedSymbol(CmdArgs, "___gcov_flush");
addExportedSymbol(CmdArgs, "_flush_fn_list");
addExportedSymbol(CmdArgs, "_writeout_fn_list");
+ addExportedSymbol(CmdArgs, "_reset_fn_list");
} else {
addExportedSymbol(CmdArgs, "___llvm_profile_filename");
addExportedSymbol(CmdArgs, "___llvm_profile_raw_version");
@@ -1270,17 +1324,17 @@ static std::string getSystemOrSDKMacOSVersion(StringRef MacOSSDKVersion) {
unsigned Major, Minor, Micro;
llvm::Triple SystemTriple(llvm::sys::getProcessTriple());
if (!SystemTriple.isMacOSX())
- return MacOSSDKVersion;
+ return std::string(MacOSSDKVersion);
SystemTriple.getMacOSXVersion(Major, Minor, Micro);
VersionTuple SystemVersion(Major, Minor, Micro);
bool HadExtra;
if (!Driver::GetReleaseVersion(MacOSSDKVersion, Major, Minor, Micro,
HadExtra))
- return MacOSSDKVersion;
+ return std::string(MacOSSDKVersion);
VersionTuple SDKVersion(Major, Minor, Micro);
if (SDKVersion > SystemVersion)
return SystemVersion.getAsString();
- return MacOSSDKVersion;
+ return std::string(MacOSSDKVersion);
}
namespace {
@@ -1320,7 +1374,7 @@ struct DarwinPlatform {
void setOSVersion(StringRef S) {
assert(Kind == TargetArg && "Unexpected kind!");
- OSVersion = S;
+ OSVersion = std::string(S);
}
bool hasOSVersion() const { return HasOSVersion; }
@@ -1577,7 +1631,7 @@ inferDeploymentTargetFromSDK(DerivedArgList &Args,
size_t StartVer = SDK.find_first_of("0123456789");
size_t EndVer = SDK.find_last_of("0123456789");
if (StartVer != StringRef::npos && EndVer > StartVer)
- Version = SDK.slice(StartVer, EndVer + 1);
+ Version = std::string(SDK.slice(StartVer, EndVer + 1));
}
if (Version.empty())
return None;
@@ -1643,8 +1697,16 @@ inferDeploymentTargetFromArch(DerivedArgList &Args, const Darwin &Toolchain,
llvm::Triple::OSType OSTy = llvm::Triple::UnknownOS;
StringRef MachOArchName = Toolchain.getMachOArchName(Args);
- if (MachOArchName == "armv7" || MachOArchName == "armv7s" ||
- MachOArchName == "arm64")
+ if (MachOArchName == "arm64") {
+#if __arm64__
+ // A clang running on an Apple Silicon mac defaults
+ // to building for mac when building for arm64 rather than
+ // defaulting to iOS.
+ OSTy = llvm::Triple::MacOSX;
+#else
+ OSTy = llvm::Triple::IOS;
+#endif
+ } else if (MachOArchName == "armv7" || MachOArchName == "armv7s")
OSTy = llvm::Triple::IOS;
else if (MachOArchName == "armv7k" || MachOArchName == "arm64_32")
OSTy = llvm::Triple::WatchOS;
@@ -1793,7 +1855,7 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
if (Platform == MacOS) {
if (!Driver::GetReleaseVersion(OSTarget->getOSVersion(), Major, Minor,
Micro, HadExtra) ||
- HadExtra || Major != 10 || Minor >= 100 || Micro >= 100)
+ HadExtra || Major < 10 || Major >= 100 || Minor >= 100 || Micro >= 100)
getDriver().Diag(diag::err_drv_invalid_version_number)
<< OSTarget->getAsString(Args, Opts);
} else if (Platform == IPhoneOS) {
@@ -1870,7 +1932,10 @@ void DarwinClang::AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs
bool NoStdInc = DriverArgs.hasArg(options::OPT_nostdinc);
bool NoStdlibInc = DriverArgs.hasArg(options::OPT_nostdlibinc);
- bool NoBuiltinInc = DriverArgs.hasArg(options::OPT_nobuiltininc);
+ bool NoBuiltinInc = DriverArgs.hasFlag(
+ options::OPT_nobuiltininc, options::OPT_ibuiltininc, /*Default=*/false);
+ bool ForceBuiltinInc = DriverArgs.hasFlag(
+ options::OPT_ibuiltininc, options::OPT_nobuiltininc, /*Default=*/false);
// Add <sysroot>/usr/local/include
if (!NoStdInc && !NoStdlibInc) {
@@ -1880,7 +1945,7 @@ void DarwinClang::AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs
}
// Add the Clang builtin headers (<resource>/include)
- if (!NoStdInc && !NoBuiltinInc) {
+ if (!(NoStdInc && !ForceBuiltinInc) && !NoBuiltinInc) {
SmallString<128> P(D.ResourceDir);
llvm::sys::path::append(P, "include");
addSystemInclude(DriverArgs, CC1Args, P);
@@ -1896,7 +1961,7 @@ void DarwinClang::AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs
CIncludeDirs.split(dirs, ":");
for (llvm::StringRef dir : dirs) {
llvm::StringRef Prefix =
- llvm::sys::path::is_absolute(dir) ? llvm::StringRef(Sysroot) : "";
+ llvm::sys::path::is_absolute(dir) ? "" : llvm::StringRef(Sysroot);
addExternCSystemInclude(DriverArgs, CC1Args, Prefix + dir);
}
} else {
@@ -2129,32 +2194,7 @@ DerivedArgList *MachO::TranslateArgs(const DerivedArgList &Args,
continue;
Arg *OriginalArg = A;
- unsigned Index = Args.getBaseArgs().MakeIndex(A->getValue(1));
- unsigned Prev = Index;
- std::unique_ptr<Arg> XarchArg(Opts.ParseOneArg(Args, Index));
-
- // If the argument parsing failed or more than one argument was
- // consumed, the -Xarch_ argument's parameter tried to consume
- // extra arguments. Emit an error and ignore.
- //
- // We also want to disallow any options which would alter the
- // driver behavior; that isn't going to work in our model. We
- // use isDriverOption() as an approximation, although things
- // like -O4 are going to slip through.
- if (!XarchArg || Index > Prev + 1) {
- getDriver().Diag(diag::err_drv_invalid_Xarch_argument_with_args)
- << A->getAsString(Args);
- continue;
- } else if (XarchArg->getOption().hasFlag(options::DriverOption)) {
- getDriver().Diag(diag::err_drv_invalid_Xarch_argument_isdriver)
- << A->getAsString(Args);
- continue;
- }
-
- XarchArg->setBaseArg(A);
-
- A = XarchArg.release();
- DAL->AddSynthesizedArg(A);
+ TranslateXarchArgs(Args, A, DAL);
// Linker input arguments require custom handling. The problem is that we
// have already constructed the phase actions, so we can not treat them as
@@ -2369,6 +2409,10 @@ void Darwin::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
OS << "-target-sdk-version=" << SDKInfo->getVersion();
CC1Args.push_back(DriverArgs.MakeArgString(OS.str()));
}
+
+ // Enable compatibility mode for NSItemProviderCompletionHandler in
+ // Foundation/NSItemProvider.h.
+ CC1Args.push_back("-fcompatibility-qualified-id-block-type-checking");
}
DerivedArgList *
@@ -2511,6 +2555,9 @@ void Darwin::addMinVersionArgs(const ArgList &Args,
CmdArgs.push_back("-macosx_version_min");
}
+ VersionTuple MinTgtVers = getEffectiveTriple().getMinimumSupportedOSVersion();
+ if (!MinTgtVers.empty() && MinTgtVers > TargetVersion)
+ TargetVersion = MinTgtVers;
CmdArgs.push_back(Args.MakeArgString(TargetVersion.getAsString()));
}
@@ -2543,6 +2590,9 @@ void Darwin::addPlatformVersionArgs(const llvm::opt::ArgList &Args,
PlatformName += "-simulator";
CmdArgs.push_back(Args.MakeArgString(PlatformName));
VersionTuple TargetVersion = getTargetVersion().withoutBuild();
+ VersionTuple MinTgtVers = getEffectiveTriple().getMinimumSupportedOSVersion();
+ if (!MinTgtVers.empty() && MinTgtVers > TargetVersion)
+ TargetVersion = MinTgtVers;
CmdArgs.push_back(Args.MakeArgString(TargetVersion.getAsString()));
if (SDKInfo) {
VersionTuple SDKVersion = SDKInfo->getVersion().withoutBuild();
@@ -2553,98 +2603,102 @@ void Darwin::addPlatformVersionArgs(const llvm::opt::ArgList &Args,
}
}
-void Darwin::addStartObjectFileArgs(const ArgList &Args,
- ArgStringList &CmdArgs) const {
- // Derived from startfile spec.
- if (Args.hasArg(options::OPT_dynamiclib)) {
- // Derived from darwin_dylib1 spec.
- if (isTargetWatchOSBased()) {
- ; // watchOS does not need dylib1.o.
- } else if (isTargetIOSSimulator()) {
- ; // iOS simulator does not need dylib1.o.
- } else if (isTargetIPhoneOS()) {
- if (isIPhoneOSVersionLT(3, 1))
- CmdArgs.push_back("-ldylib1.o");
+// Add additional link args for the -dynamiclib option.
+static void addDynamicLibLinkArgs(const Darwin &D, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ // Derived from darwin_dylib1 spec.
+ if (D.isTargetIPhoneOS()) {
+ if (D.isIPhoneOSVersionLT(3, 1))
+ CmdArgs.push_back("-ldylib1.o");
+ return;
+ }
+
+ if (!D.isTargetMacOS())
+ return;
+ if (D.isMacosxVersionLT(10, 5))
+ CmdArgs.push_back("-ldylib1.o");
+ else if (D.isMacosxVersionLT(10, 6))
+ CmdArgs.push_back("-ldylib1.10.5.o");
+}
+
+// Add additional link args for the -bundle option.
+static void addBundleLinkArgs(const Darwin &D, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ if (Args.hasArg(options::OPT_static))
+ return;
+ // Derived from darwin_bundle1 spec.
+ if ((D.isTargetIPhoneOS() && D.isIPhoneOSVersionLT(3, 1)) ||
+ (D.isTargetMacOS() && D.isMacosxVersionLT(10, 6)))
+ CmdArgs.push_back("-lbundle1.o");
+}
+
+// Add additional link args for the -pg option.
+static void addPgProfilingLinkArgs(const Darwin &D, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ if (D.isTargetMacOS() && D.isMacosxVersionLT(10, 9)) {
+ if (Args.hasArg(options::OPT_static) || Args.hasArg(options::OPT_object) ||
+ Args.hasArg(options::OPT_preload)) {
+ CmdArgs.push_back("-lgcrt0.o");
} else {
- if (isMacosxVersionLT(10, 5))
- CmdArgs.push_back("-ldylib1.o");
- else if (isMacosxVersionLT(10, 6))
- CmdArgs.push_back("-ldylib1.10.5.o");
+ CmdArgs.push_back("-lgcrt1.o");
+
+ // darwin_crt2 spec is empty.
}
+ // By default on OS X 10.8 and later, we don't link with a crt1.o
+ // file and the linker knows to use _main as the entry point. But,
+ // when compiling with -pg, we need to link with the gcrt1.o file,
+ // so pass the -no_new_main option to tell the linker to use the
+ // "start" symbol as the entry point.
+ if (!D.isMacosxVersionLT(10, 8))
+ CmdArgs.push_back("-no_new_main");
} else {
- if (Args.hasArg(options::OPT_bundle)) {
- if (!Args.hasArg(options::OPT_static)) {
- // Derived from darwin_bundle1 spec.
- if (isTargetWatchOSBased()) {
- ; // watchOS does not need bundle1.o.
- } else if (isTargetIOSSimulator()) {
- ; // iOS simulator does not need bundle1.o.
- } else if (isTargetIPhoneOS()) {
- if (isIPhoneOSVersionLT(3, 1))
- CmdArgs.push_back("-lbundle1.o");
- } else {
- if (isMacosxVersionLT(10, 6))
- CmdArgs.push_back("-lbundle1.o");
- }
- }
- } else {
- if (Args.hasArg(options::OPT_pg) && SupportsProfiling()) {
- if (isTargetMacOS() && isMacosxVersionLT(10, 9)) {
- if (Args.hasArg(options::OPT_static) ||
- Args.hasArg(options::OPT_object) ||
- Args.hasArg(options::OPT_preload)) {
- CmdArgs.push_back("-lgcrt0.o");
- } else {
- CmdArgs.push_back("-lgcrt1.o");
-
- // darwin_crt2 spec is empty.
- }
- // By default on OS X 10.8 and later, we don't link with a crt1.o
- // file and the linker knows to use _main as the entry point. But,
- // when compiling with -pg, we need to link with the gcrt1.o file,
- // so pass the -no_new_main option to tell the linker to use the
- // "start" symbol as the entry point.
- if (isTargetMacOS() && !isMacosxVersionLT(10, 8))
- CmdArgs.push_back("-no_new_main");
- } else {
- getDriver().Diag(diag::err_drv_clang_unsupported_opt_pg_darwin)
- << isTargetMacOS();
- }
- } else {
- if (Args.hasArg(options::OPT_static) ||
- Args.hasArg(options::OPT_object) ||
- Args.hasArg(options::OPT_preload)) {
- CmdArgs.push_back("-lcrt0.o");
- } else {
- // Derived from darwin_crt1 spec.
- if (isTargetWatchOSBased()) {
- ; // watchOS does not need crt1.o.
- } else if (isTargetIOSSimulator()) {
- ; // iOS simulator does not need crt1.o.
- } else if (isTargetIPhoneOS()) {
- if (getArch() == llvm::Triple::aarch64)
- ; // iOS does not need any crt1 files for arm64
- else if (isIPhoneOSVersionLT(3, 1))
- CmdArgs.push_back("-lcrt1.o");
- else if (isIPhoneOSVersionLT(6, 0))
- CmdArgs.push_back("-lcrt1.3.1.o");
- } else {
- if (isMacosxVersionLT(10, 5))
- CmdArgs.push_back("-lcrt1.o");
- else if (isMacosxVersionLT(10, 6))
- CmdArgs.push_back("-lcrt1.10.5.o");
- else if (isMacosxVersionLT(10, 8))
- CmdArgs.push_back("-lcrt1.10.6.o");
-
- // darwin_crt2 spec is empty.
- }
- }
- }
- }
+ D.getDriver().Diag(diag::err_drv_clang_unsupported_opt_pg_darwin)
+ << D.isTargetMacOS();
}
+}
+
+static void addDefaultCRTLinkArgs(const Darwin &D, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ // Derived from darwin_crt1 spec.
+ if (D.isTargetIPhoneOS()) {
+ if (D.getArch() == llvm::Triple::aarch64)
+ ; // iOS does not need any crt1 files for arm64
+ else if (D.isIPhoneOSVersionLT(3, 1))
+ CmdArgs.push_back("-lcrt1.o");
+ else if (D.isIPhoneOSVersionLT(6, 0))
+ CmdArgs.push_back("-lcrt1.3.1.o");
+ return;
+ }
+
+ if (!D.isTargetMacOS())
+ return;
+ if (D.isMacosxVersionLT(10, 5))
+ CmdArgs.push_back("-lcrt1.o");
+ else if (D.isMacosxVersionLT(10, 6))
+ CmdArgs.push_back("-lcrt1.10.5.o");
+ else if (D.isMacosxVersionLT(10, 8))
+ CmdArgs.push_back("-lcrt1.10.6.o");
+ // darwin_crt2 spec is empty.
+}
+
+void Darwin::addStartObjectFileArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ // Derived from startfile spec.
+ if (Args.hasArg(options::OPT_dynamiclib))
+ addDynamicLibLinkArgs(*this, Args, CmdArgs);
+ else if (Args.hasArg(options::OPT_bundle))
+ addBundleLinkArgs(*this, Args, CmdArgs);
+ else if (Args.hasArg(options::OPT_pg) && SupportsProfiling())
+ addPgProfilingLinkArgs(*this, Args, CmdArgs);
+ else if (Args.hasArg(options::OPT_static) ||
+ Args.hasArg(options::OPT_object) ||
+ Args.hasArg(options::OPT_preload))
+ CmdArgs.push_back("-lcrt0.o");
+ else
+ addDefaultCRTLinkArgs(*this, Args, CmdArgs);
- if (!isTargetIPhoneOS() && Args.hasArg(options::OPT_shared_libgcc) &&
- !isTargetWatchOS() && isMacosxVersionLT(10, 5)) {
+ if (isTargetMacOS() && Args.hasArg(options::OPT_shared_libgcc) &&
+ isMacosxVersionLT(10, 5)) {
const char *Str = Args.MakeArgString(GetFilePath("crt3.o"));
CmdArgs.push_back(Str);
}
@@ -2667,6 +2721,7 @@ SanitizerMask Darwin::getSupportedSanitizers() const {
Res |= SanitizerKind::Fuzzer;
Res |= SanitizerKind::FuzzerNoLink;
Res |= SanitizerKind::Function;
+ Res |= SanitizerKind::ObjCCast;
// Prior to 10.9, macOS shipped a version of the C++ standard library without
// C++11 support. The same is true of iOS prior to version 5. These OS'es are
@@ -2687,4 +2742,5 @@ SanitizerMask Darwin::getSupportedSanitizers() const {
void Darwin::printVerboseInfo(raw_ostream &OS) const {
CudaInstallation.print(OS);
+ RocmInstallation.print(OS);
}
diff --git a/clang/lib/Driver/ToolChains/Darwin.h b/clang/lib/Driver/ToolChains/Darwin.h
index 1b193a4c4eb9..64c252efea7d 100644
--- a/clang/lib/Driver/ToolChains/Darwin.h
+++ b/clang/lib/Driver/ToolChains/Darwin.h
@@ -10,6 +10,7 @@
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_DARWIN_H
#include "Cuda.h"
+#include "ROCm.h"
#include "clang/Driver/DarwinSDKInfo.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
@@ -40,13 +41,8 @@ protected:
}
public:
- MachOTool(
- const char *Name, const char *ShortName, const ToolChain &TC,
- ResponseFileSupport ResponseSupport = RF_None,
- llvm::sys::WindowsEncodingMethod ResponseEncoding = llvm::sys::WEM_UTF8,
- const char *ResponseFlag = "@")
- : Tool(Name, ShortName, TC, ResponseSupport, ResponseEncoding,
- ResponseFlag) {}
+ MachOTool(const char *Name, const char *ShortName, const ToolChain &TC)
+ : Tool(Name, ShortName, TC) {}
};
class LLVM_LIBRARY_VISIBILITY Assembler : public MachOTool {
@@ -66,12 +62,10 @@ class LLVM_LIBRARY_VISIBILITY Linker : public MachOTool {
bool NeedsTempPath(const InputInfoList &Inputs) const;
void AddLinkArgs(Compilation &C, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs,
- const InputInfoList &Inputs) const;
+ const InputInfoList &Inputs, unsigned Version[5]) const;
public:
- Linker(const ToolChain &TC)
- : MachOTool("darwin::Linker", "linker", TC, RF_FileList,
- llvm::sys::WEM_UTF8, "-filelist") {}
+ Linker(const ToolChain &TC) : MachOTool("darwin::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -300,6 +294,7 @@ public:
mutable Optional<DarwinSDKInfo> SDKInfo;
CudaInstallationDetector CudaInstallation;
+ RocmInstallationDetector RocmInstallation;
private:
void AddDeploymentTarget(llvm::opt::DerivedArgList &Args) const;
@@ -357,6 +352,7 @@ protected:
const_cast<Darwin *>(this)->setTripleEnvironment(llvm::Triple::Simulator);
}
+public:
bool isTargetIPhoneOS() const {
assert(TargetInitialized && "Target not initialized!");
return (TargetPlatform == IPhoneOS || TargetPlatform == TvOS) &&
@@ -409,6 +405,17 @@ protected:
return TargetPlatform == MacOS;
}
+ bool isTargetMacOSBased() const {
+ assert(TargetInitialized && "Target not initialized!");
+ // FIXME (Alex L): Add remaining MacCatalyst suppport.
+ return TargetPlatform == MacOS;
+ }
+
+ bool isTargetAppleSiliconMac() const {
+ assert(TargetInitialized && "Target not initialized!");
+ return isTargetMacOSBased() && getArch() == llvm::Triple::aarch64;
+ }
+
bool isTargetInitialized() const { return TargetInitialized; }
VersionTuple getTargetVersion() const {
@@ -422,11 +429,20 @@ protected:
return TargetVersion < VersionTuple(V0, V1, V2);
}
+ /// Returns true if the minimum supported macOS version for the slice that's
+ /// being built is less than the specified version. If there's no minimum
+ /// supported macOS version, the deployment target version is compared to the
+ /// specifed version instead.
bool isMacosxVersionLT(unsigned V0, unsigned V1 = 0, unsigned V2 = 0) const {
- assert(isTargetMacOS() && "Unexpected call for non OS X target!");
- return TargetVersion < VersionTuple(V0, V1, V2);
+ assert(isTargetMacOS() && getTriple().isMacOSX() &&
+ "Unexpected call for non OS X target!");
+ VersionTuple MinVers = getTriple().getMinimumSupportedOSVersion();
+ return (!MinVers.empty() && MinVers > TargetVersion
+ ? MinVers
+ : TargetVersion) < VersionTuple(V0, V1, V2);
}
+protected:
/// Return true if c++17 aligned allocation/deallocation functions are not
/// implemented in the c++ standard library of the deployment target we are
/// targeting.
@@ -461,6 +477,8 @@ public:
void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
bool UseObjCMixedDispatch() const override {
// This is only used with the non-fragile ABI and non-legacy dispatch.
diff --git a/clang/lib/Driver/ToolChains/DragonFly.cpp b/clang/lib/Driver/ToolChains/DragonFly.cpp
index 424331fbc6fe..88dd0c899d8a 100644
--- a/clang/lib/Driver/ToolChains/DragonFly.cpp
+++ b/clang/lib/Driver/ToolChains/DragonFly.cpp
@@ -45,7 +45,8 @@ void dragonfly::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
void dragonfly::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -169,7 +170,8 @@ void dragonfly::Linker::ConstructJob(Compilation &C, const JobAction &JA,
getToolChain().addProfileRTLibs(Args, CmdArgs);
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
/// DragonFly - DragonFly tool chain which can call as(1) and ld(1) directly.
diff --git a/clang/lib/Driver/ToolChains/DragonFly.h b/clang/lib/Driver/ToolChains/DragonFly.h
index 7e76904f1055..3ed5acefaefb 100644
--- a/clang/lib/Driver/ToolChains/DragonFly.h
+++ b/clang/lib/Driver/ToolChains/DragonFly.h
@@ -18,10 +18,10 @@ namespace driver {
namespace tools {
/// dragonfly -- Directly call GNU Binutils assembler and linker
namespace dragonfly {
-class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
public:
Assembler(const ToolChain &TC)
- : GnuTool("dragonfly::Assembler", "assembler", TC) {}
+ : Tool("dragonfly::Assembler", "assembler", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -31,9 +31,9 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("dragonfly::Linker", "linker", TC) {}
+ Linker(const ToolChain &TC) : Tool("dragonfly::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
diff --git a/clang/lib/Driver/ToolChains/Flang.cpp b/clang/lib/Driver/ToolChains/Flang.cpp
index 9b9eb81fa111..80f6db7ea642 100644
--- a/clang/lib/Driver/ToolChains/Flang.cpp
+++ b/clang/lib/Driver/ToolChains/Flang.cpp
@@ -70,10 +70,10 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
const auto& D = C.getDriver();
const char* Exec = Args.MakeArgString(D.GetProgramPath("flang", TC));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
}
-Flang::Flang(const ToolChain &TC)
- : Tool("flang", "flang frontend", TC, RF_Full) {}
+Flang::Flang(const ToolChain &TC) : Tool("flang", "flang frontend", TC) {}
Flang::~Flang() {}
diff --git a/clang/lib/Driver/ToolChains/FreeBSD.cpp b/clang/lib/Driver/ToolChains/FreeBSD.cpp
index c5c6f530f48c..909ac5e99212 100644
--- a/clang/lib/Driver/ToolChains/FreeBSD.cpp
+++ b/clang/lib/Driver/ToolChains/FreeBSD.cpp
@@ -99,7 +99,8 @@ void freebsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
case llvm::Triple::sparcel:
case llvm::Triple::sparcv9: {
std::string CPU = getCPUName(Args, getToolChain().getTriple());
- CmdArgs.push_back(sparc::getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
+ CmdArgs.push_back(
+ sparc::getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
break;
}
@@ -127,7 +128,8 @@ void freebsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -275,7 +277,7 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (D.isUsingLTO()) {
assert(!Inputs.empty() && "Must have at least one input.");
- AddGoldPlugin(ToolChain, Args, CmdArgs, Output, Inputs[0],
+ addLTOOptions(ToolChain, Args, CmdArgs, Output, Inputs[0],
D.getLTOMode() == LTOK_Thin);
}
@@ -357,7 +359,8 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
ToolChain.addProfileRTLibs(Args, CmdArgs);
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
/// FreeBSD - FreeBSD tool chain which can call as(1) and ld(1) directly.
@@ -388,6 +391,12 @@ unsigned FreeBSD::GetDefaultDwarfVersion() const {
return 4;
}
+void FreeBSD::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/c++/v1");
+}
+
void FreeBSD::addLibStdCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
@@ -416,6 +425,11 @@ void FreeBSD::AddCudaIncludeArgs(const ArgList &DriverArgs,
CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
}
+void FreeBSD::AddHIPIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ RocmInstallation.AddHIPIncludeArgs(DriverArgs, CC1Args);
+}
+
Tool *FreeBSD::buildAssembler() const {
return new tools::freebsd::Assembler(*this);
}
diff --git a/clang/lib/Driver/ToolChains/FreeBSD.h b/clang/lib/Driver/ToolChains/FreeBSD.h
index 84bdbfd9a312..abc0876cef26 100644
--- a/clang/lib/Driver/ToolChains/FreeBSD.h
+++ b/clang/lib/Driver/ToolChains/FreeBSD.h
@@ -19,10 +19,10 @@ namespace tools {
/// freebsd -- Directly call GNU Binutils assembler and linker
namespace freebsd {
-class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
public:
Assembler(const ToolChain &TC)
- : GnuTool("freebsd::Assembler", "assembler", TC) {}
+ : Tool("freebsd::Assembler", "assembler", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -32,9 +32,9 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("freebsd::Linker", "linker", TC) {}
+ Linker(const ToolChain &TC) : Tool("freebsd::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -59,16 +59,20 @@ public:
bool IsObjCNonFragileABIDefault() const override { return true; }
CXXStdlibType GetDefaultCXXStdlibType() const override;
- void addLibStdCxxIncludePaths(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const override;
+ void addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void
+ addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
- llvm::ExceptionHandling GetExceptionModel(
- const llvm::opt::ArgList &Args) const override;
+ llvm::ExceptionHandling
+ GetExceptionModel(const llvm::opt::ArgList &Args) const override;
bool IsUnwindTablesDefault(const llvm::opt::ArgList &Args) const override;
bool isPIEDefault() const override;
SanitizerMask getSupportedSanitizers() const override;
diff --git a/clang/lib/Driver/ToolChains/Fuchsia.cpp b/clang/lib/Driver/ToolChains/Fuchsia.cpp
index 808d0408d0d4..94e025e3055a 100644
--- a/clang/lib/Driver/ToolChains/Fuchsia.cpp
+++ b/clang/lib/Driver/ToolChains/Fuchsia.cpp
@@ -15,6 +15,7 @@
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/VirtualFileSystem.h"
@@ -47,6 +48,9 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.ClaimAllArgs(options::OPT_w);
CmdArgs.push_back("-z");
+ CmdArgs.push_back("max-page-size=4096");
+
+ CmdArgs.push_back("-z");
CmdArgs.push_back("now");
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
@@ -56,6 +60,7 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("rodynamic");
CmdArgs.push_back("-z");
CmdArgs.push_back("separate-loadable-segments");
+ CmdArgs.push_back("--pack-dyn-relocs=relr");
}
if (!D.SysRoot.empty())
@@ -111,7 +116,7 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (D.isUsingLTO()) {
assert(!Inputs.empty() && "Must have at least one input.");
- AddGoldPlugin(ToolChain, Args, CmdArgs, Output, Inputs[0],
+ addLTOOptions(ToolChain, Args, CmdArgs, Output, Inputs[0],
D.getLTOMode() == LTOK_Thin);
}
@@ -159,7 +164,8 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-lc");
}
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
}
/// Fuchsia - Fuchsia tool chain which can call as(1) and ld(1) directly.
@@ -174,7 +180,7 @@ Fuchsia::Fuchsia(const Driver &D, const llvm::Triple &Triple,
if (!D.SysRoot.empty()) {
SmallString<128> P(D.SysRoot);
llvm::sys::path::append(P, "lib");
- getFilePaths().push_back(P.str());
+ getFilePaths().push_back(std::string(P.str()));
}
auto FilePaths = [&](const Multilib &M) -> std::vector<std::string> {
@@ -183,7 +189,7 @@ Fuchsia::Fuchsia(const Driver &D, const llvm::Triple &Triple,
if (auto CXXStdlibPath = getCXXStdlibPath()) {
SmallString<128> P(*CXXStdlibPath);
llvm::sys::path::append(P, M.gccSuffix());
- FP.push_back(P.str());
+ FP.push_back(std::string(P.str()));
}
}
return FP;
@@ -289,7 +295,7 @@ void Fuchsia::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
CIncludeDirs.split(dirs, ":");
for (StringRef dir : dirs) {
StringRef Prefix =
- llvm::sys::path::is_absolute(dir) ? StringRef(D.SysRoot) : "";
+ llvm::sys::path::is_absolute(dir) ? "" : StringRef(D.SysRoot);
addExternCSystemInclude(DriverArgs, CC1Args, Prefix + dir);
}
return;
@@ -340,6 +346,7 @@ SanitizerMask Fuchsia::getSupportedSanitizers() const {
Res |= SanitizerKind::PointerSubtract;
Res |= SanitizerKind::Fuzzer;
Res |= SanitizerKind::FuzzerNoLink;
+ Res |= SanitizerKind::Leak;
Res |= SanitizerKind::SafeStack;
Res |= SanitizerKind::Scudo;
return Res;
@@ -360,3 +367,13 @@ SanitizerMask Fuchsia::getDefaultSanitizers() const {
}
return Res;
}
+
+void Fuchsia::addProfileRTLibs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const {
+ // Add linker option -u__llvm_profile_runtime to cause the runtime
+ // initialization module to be linked in.
+ if (needsProfileRT(Args))
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine("-u", llvm::getInstrProfRuntimeHookVarName())));
+ ToolChain::addProfileRTLibs(Args, CmdArgs);
+}
diff --git a/clang/lib/Driver/ToolChains/Fuchsia.h b/clang/lib/Driver/ToolChains/Fuchsia.h
index fee0e018f3ce..3159a54bda06 100644
--- a/clang/lib/Driver/ToolChains/Fuchsia.h
+++ b/clang/lib/Driver/ToolChains/Fuchsia.h
@@ -69,6 +69,9 @@ public:
SanitizerMask getSupportedSanitizers() const override;
SanitizerMask getDefaultSanitizers() const override;
+ void addProfileRTLibs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
RuntimeLibType
GetRuntimeLibType(const llvm::opt::ArgList &Args) const override;
CXXStdlibType
diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp
index da197e476621..c8a7fce07ef1 100644
--- a/clang/lib/Driver/ToolChains/Gnu.cpp
+++ b/clang/lib/Driver/ToolChains/Gnu.cpp
@@ -35,8 +35,7 @@ using namespace clang;
using namespace llvm::opt;
using tools::addMultilibFlag;
-
-void tools::GnuTool::anchor() {}
+using tools::addPathIfExists;
static bool forwardToGCC(const Option &O) {
// Don't forward inputs from the original command line. They are added from
@@ -189,7 +188,8 @@ void tools::gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
GCCName = "gcc";
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath(GCCName));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
void tools::gcc::Preprocessor::RenderExtraToolArgs(
@@ -304,12 +304,14 @@ static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
if (T.getEnvironment() == llvm::Triple::GNUX32)
return "elf32_x86_64";
return "elf_x86_64";
+ case llvm::Triple::ve:
+ return "elf64ve";
default:
return nullptr;
}
}
-static bool getPIE(const ArgList &Args, const toolchains::Linux &ToolChain) {
+static bool getPIE(const ArgList &Args, const ToolChain &TC) {
if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_static) ||
Args.hasArg(options::OPT_r) || Args.hasArg(options::OPT_static_pie))
return false;
@@ -317,17 +319,16 @@ static bool getPIE(const ArgList &Args, const toolchains::Linux &ToolChain) {
Arg *A = Args.getLastArg(options::OPT_pie, options::OPT_no_pie,
options::OPT_nopie);
if (!A)
- return ToolChain.isPIEDefault();
+ return TC.isPIEDefault();
return A->getOption().matches(options::OPT_pie);
}
-static bool getStaticPIE(const ArgList &Args,
- const toolchains::Linux &ToolChain) {
+static bool getStaticPIE(const ArgList &Args, const ToolChain &TC) {
bool HasStaticPIE = Args.hasArg(options::OPT_static_pie);
// -no-pie is an alias for -nopie. So, handling -nopie takes care of
// -no-pie as well.
if (HasStaticPIE && Args.hasArg(options::OPT_nopie)) {
- const Driver &D = ToolChain.getDriver();
+ const Driver &D = TC.getDriver();
const llvm::opt::OptTable &Opts = D.getOpts();
const char *StaticPIEName = Opts.getOptionName(options::OPT_static_pie);
const char *NoPIEName = Opts.getOptionName(options::OPT_nopie);
@@ -341,13 +342,55 @@ static bool getStatic(const ArgList &Args) {
!Args.hasArg(options::OPT_static_pie);
}
+void tools::gnutools::StaticLibTool::ConstructJob(
+ Compilation &C, const JobAction &JA, const InputInfo &Output,
+ const InputInfoList &Inputs, const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+
+ // Silence warning for "clang -g foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ // and "clang -emit-llvm foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+ // and for "clang -w foo.o -o foo". Other warning options are already
+ // handled somewhere else.
+ Args.ClaimAllArgs(options::OPT_w);
+ // Silence warnings when linking C code with a C++ '-stdlib' argument.
+ Args.ClaimAllArgs(options::OPT_stdlib_EQ);
+
+ // GNU ar tool command "ar <options> <output_file> <input_files>".
+ ArgStringList CmdArgs;
+ // Create and insert file members with a deterministic index.
+ CmdArgs.push_back("rcsD");
+ CmdArgs.push_back(Output.getFilename());
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
+
+ // Delete old output archive file if it already exists before generating a new
+ // archive file.
+ auto OutputFileName = Output.getFilename();
+ if (Output.isFilename() && llvm::sys::fs::exists(OutputFileName)) {
+ if (std::error_code EC = llvm::sys::fs::remove(OutputFileName)) {
+ D.Diag(diag::err_drv_unable_to_remove_file) << EC.message();
+ return;
+ }
+ }
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetStaticLibToolPath());
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
+}
+
void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
- const toolchains::Linux &ToolChain =
- static_cast<const toolchains::Linux &>(getToolChain());
+ // FIXME: The Linker class constructor takes a ToolChain and not a
+ // Generic_ELF, so the static_cast might return a reference to an invalid
+ // instance (see PR45061). Ideally, the Linker constructor needs to take a
+ // Generic_ELF instead.
+ const toolchains::Generic_ELF &ToolChain =
+ static_cast<const toolchains::Generic_ELF &>(getToolChain());
const Driver &D = ToolChain.getDriver();
const llvm::Triple &Triple = getToolChain().getEffectiveTriple();
@@ -355,6 +398,7 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const llvm::Triple::ArchType Arch = ToolChain.getArch();
const bool isAndroid = ToolChain.getTriple().isAndroid();
const bool IsIAMCU = ToolChain.getTriple().isOSIAMCU();
+ const bool IsVE = ToolChain.getTriple().isVE();
const bool IsPIE = getPIE(Args, ToolChain);
const bool IsStaticPIE = getStaticPIE(Args, ToolChain);
const bool IsStatic = getStatic(Args);
@@ -418,8 +462,7 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (isAndroid)
CmdArgs.push_back("--warn-shared-textrel");
- for (const auto &Opt : ToolChain.ExtraOpts)
- CmdArgs.push_back(Opt.c_str());
+ ToolChain.addExtraOpts(CmdArgs);
CmdArgs.push_back("--eh-frame-hdr");
@@ -446,10 +489,9 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-export-dynamic");
if (!Args.hasArg(options::OPT_shared) && !IsStaticPIE) {
- const std::string Loader =
- D.DyldPrefix + ToolChain.getDynamicLinker(Args);
CmdArgs.push_back("-dynamic-linker");
- CmdArgs.push_back(Args.MakeArgString(Loader));
+ CmdArgs.push_back(Args.MakeArgString(Twine(D.DyldPrefix) +
+ ToolChain.getDynamicLinker(Args)));
}
}
@@ -475,6 +517,11 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
}
+ if (IsVE) {
+ CmdArgs.push_back("-z");
+ CmdArgs.push_back("max-page-size=0x4000000");
+ }
+
if (IsIAMCU)
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o")));
else if (HasCRTBeginEndFiles) {
@@ -502,7 +549,7 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
// Add crtfastmath.o if available and fast math is enabled.
- ToolChain.AddFastMathRuntimeIfAvailable(Args, CmdArgs);
+ ToolChain.addFastMathRuntimeIfAvailable(Args, CmdArgs);
}
Args.AddAllArgs(CmdArgs, options::OPT_L);
@@ -512,7 +559,7 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (D.isUsingLTO()) {
assert(!Inputs.empty() && "Must have at least one input.");
- AddGoldPlugin(ToolChain, Args, CmdArgs, Output, Inputs[0],
+ addLTOOptions(ToolChain, Args, CmdArgs, Output, Inputs[0],
D.getLTOMode() == LTOK_Thin);
}
@@ -623,12 +670,11 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- // Add HIP offloading linker script args if required.
- AddHIPLinkerScript(getToolChain(), C, Output, Inputs, Args, CmdArgs, JA,
- *this);
+ Args.AddAllArgs(CmdArgs, options::OPT_T);
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
void tools::gnutools::Assembler::ConstructJob(Compilation &C,
@@ -646,6 +692,7 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
llvm::Reloc::Model RelocationModel;
unsigned PICLevel;
bool IsPIE;
+ const char *DefaultAssembler = "as";
std::tie(RelocationModel, PICLevel, IsPIE) =
ParsePICArgs(getToolChain(), Args);
@@ -866,6 +913,8 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
CmdArgs.push_back(Args.MakeArgString("-march=" + CPUName));
break;
}
+ case llvm::Triple::ve:
+ DefaultAssembler = "nas";
}
for (const Arg *A : Args.filtered(options::OPT_ffile_prefix_map_EQ,
@@ -890,8 +939,10 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
for (const auto &II : Inputs)
CmdArgs.push_back(II.getFilename());
- const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath(DefaultAssembler));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
// Handle the debug info splitting at object creation time if we're
// creating an object.
@@ -1527,7 +1578,7 @@ static void findRISCVBareMetalMultilibs(const Driver &D,
};
// currently only support the set of multilibs like riscv-gnu-toolchain does.
// TODO: support MULTILIB_REUSE
- SmallVector<RiscvMultilib, 8> RISCVMultilibSet = {
+ constexpr RiscvMultilib RISCVMultilibSet[] = {
{"rv32i", "ilp32"}, {"rv32im", "ilp32"}, {"rv32iac", "ilp32"},
{"rv32imac", "ilp32"}, {"rv32imafc", "ilp32f"}, {"rv64imac", "lp64"},
{"rv64imafdc", "lp64d"}};
@@ -1767,7 +1818,7 @@ Generic_GCC::GCCVersion Generic_GCC::GCCVersion::Parse(StringRef VersionText) {
StringRef MinorStr = Second.first;
if (Second.second.empty()) {
if (size_t EndNumber = MinorStr.find_first_not_of("0123456789")) {
- GoodVersion.PatchSuffix = MinorStr.substr(EndNumber);
+ GoodVersion.PatchSuffix = std::string(MinorStr.substr(EndNumber));
MinorStr = MinorStr.slice(0, EndNumber);
}
}
@@ -1793,7 +1844,7 @@ Generic_GCC::GCCVersion Generic_GCC::GCCVersion::Parse(StringRef VersionText) {
if (PatchText.slice(0, EndNumber).getAsInteger(10, GoodVersion.Patch) ||
GoodVersion.Patch < 0)
return BadVersion;
- GoodVersion.PatchSuffix = PatchText.substr(EndNumber);
+ GoodVersion.PatchSuffix = std::string(PatchText.substr(EndNumber));
}
}
@@ -1848,7 +1899,7 @@ void Generic_GCC::GCCInstallationDetector::init(
if (GCCToolchainDir.back() == '/')
GCCToolchainDir = GCCToolchainDir.drop_back(); // remove the /
- Prefixes.push_back(GCCToolchainDir);
+ Prefixes.push_back(std::string(GCCToolchainDir));
} else {
// If we have a SysRoot, try that first.
if (!D.SysRoot.empty()) {
@@ -1975,6 +2026,7 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
// Non-Solaris is much simpler - most systems just go with "/usr".
if (SysRoot.empty() && TargetTriple.getOS() == llvm::Triple::Linux) {
// Yet, still look for RHEL devtoolsets.
+ Prefixes.push_back("/opt/rh/devtoolset-9/root/usr");
Prefixes.push_back("/opt/rh/devtoolset-8/root/usr");
Prefixes.push_back("/opt/rh/devtoolset-7/root/usr");
Prefixes.push_back("/opt/rh/devtoolset-6/root/usr");
@@ -2090,6 +2142,7 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
static const char *const RISCV64Triples[] = {"riscv64-unknown-linux-gnu",
"riscv64-linux-gnu",
"riscv64-unknown-elf",
+ "riscv64-redhat-linux",
"riscv64-suse-linux"};
static const char *const SPARCv8LibDirs[] = {"/lib32", "/lib"};
@@ -2460,7 +2513,7 @@ void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
StringRef VersionText = llvm::sys::path::filename(LI->path());
GCCVersion CandidateVersion = GCCVersion::Parse(VersionText);
if (CandidateVersion.Major != -1) // Filter obviously bad entries.
- if (!CandidateGCCInstallPaths.insert(LI->path()).second)
+ if (!CandidateGCCInstallPaths.insert(std::string(LI->path())).second)
continue; // Saw this path before; no need to look at it again.
if (CandidateVersion.isOlderThan(4, 1, 1))
continue;
@@ -2572,7 +2625,7 @@ bool Generic_GCC::GCCInstallationDetector::ScanGentooGccConfig(
Generic_GCC::Generic_GCC(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: ToolChain(D, Triple, Args), GCCInstallation(D),
- CudaInstallation(D, Triple, Args) {
+ CudaInstallation(D, Triple, Args), RocmInstallation(D, Triple, Args) {
getProgramPaths().push_back(getDriver().getInstalledDir());
if (getDriver().getInstalledDir() != getDriver().Dir)
getProgramPaths().push_back(getDriver().Dir);
@@ -2605,6 +2658,7 @@ void Generic_GCC::printVerboseInfo(raw_ostream &OS) const {
// Print the information about how we detected the GCC installation.
GCCInstallation.print(OS);
CudaInstallation.print(OS);
+ RocmInstallation.print(OS);
}
bool Generic_GCC::IsUnwindTablesDefault(const ArgList &Args) const {
@@ -2666,6 +2720,140 @@ bool Generic_GCC::IsIntegratedAssemblerDefault() const {
}
}
+static void addMultilibsFilePaths(const Driver &D, const MultilibSet &Multilibs,
+ const Multilib &Multilib,
+ StringRef InstallPath,
+ ToolChain::path_list &Paths) {
+ if (const auto &PathsCallback = Multilibs.filePathsCallback())
+ for (const auto &Path : PathsCallback(Multilib))
+ addPathIfExists(D, InstallPath + Path, Paths);
+}
+
+void Generic_GCC::PushPPaths(ToolChain::path_list &PPaths) {
+ // Cross-compiling binutils and GCC installations (vanilla and openSUSE at
+ // least) put various tools in a triple-prefixed directory off of the parent
+ // of the GCC installation. We use the GCC triple here to ensure that we end
+ // up with tools that support the same amount of cross compiling as the
+ // detected GCC installation. For example, if we find a GCC installation
+ // targeting x86_64, but it is a bi-arch GCC installation, it can also be
+ // used to target i386.
+ if (GCCInstallation.isValid()) {
+ PPaths.push_back(Twine(GCCInstallation.getParentLibPath() + "/../" +
+ GCCInstallation.getTriple().str() + "/bin")
+ .str());
+ }
+}
+
+void Generic_GCC::AddMultilibPaths(const Driver &D,
+ const std::string &SysRoot,
+ const std::string &OSLibDir,
+ const std::string &MultiarchTriple,
+ path_list &Paths) {
+ // Add the multilib suffixed paths where they are available.
+ if (GCCInstallation.isValid()) {
+ const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
+ const std::string &LibPath =
+ std::string(GCCInstallation.getParentLibPath());
+
+ // Add toolchain / multilib specific file paths.
+ addMultilibsFilePaths(D, Multilibs, SelectedMultilib,
+ GCCInstallation.getInstallPath(), Paths);
+
+ // Sourcery CodeBench MIPS toolchain holds some libraries under
+ // a biarch-like suffix of the GCC installation.
+ addPathIfExists(
+ D, GCCInstallation.getInstallPath() + SelectedMultilib.gccSuffix(),
+ Paths);
+
+ // GCC cross compiling toolchains will install target libraries which ship
+ // as part of the toolchain under <prefix>/<triple>/<libdir> rather than as
+ // any part of the GCC installation in
+ // <prefix>/<libdir>/gcc/<triple>/<version>. This decision is somewhat
+ // debatable, but is the reality today. We need to search this tree even
+ // when we have a sysroot somewhere else. It is the responsibility of
+ // whoever is doing the cross build targeting a sysroot using a GCC
+ // installation that is *not* within the system root to ensure two things:
+ //
+ // 1) Any DSOs that are linked in from this tree or from the install path
+ // above must be present on the system root and found via an
+ // appropriate rpath.
+ // 2) There must not be libraries installed into
+ // <prefix>/<triple>/<libdir> unless they should be preferred over
+ // those within the system root.
+ //
+ // Note that this matches the GCC behavior. See the below comment for where
+ // Clang diverges from GCC's behavior.
+ addPathIfExists(D,
+ LibPath + "/../" + GCCTriple.str() + "/lib/../" + OSLibDir +
+ SelectedMultilib.osSuffix(),
+ Paths);
+
+ // If the GCC installation we found is inside of the sysroot, we want to
+ // prefer libraries installed in the parent prefix of the GCC installation.
+ // It is important to *not* use these paths when the GCC installation is
+ // outside of the system root as that can pick up unintended libraries.
+ // This usually happens when there is an external cross compiler on the
+ // host system, and a more minimal sysroot available that is the target of
+ // the cross. Note that GCC does include some of these directories in some
+ // configurations but this seems somewhere between questionable and simply
+ // a bug.
+ if (StringRef(LibPath).startswith(SysRoot)) {
+ addPathIfExists(D, LibPath + "/" + MultiarchTriple, Paths);
+ addPathIfExists(D, LibPath + "/../" + OSLibDir, Paths);
+ }
+ }
+}
+
+void Generic_GCC::AddMultiarchPaths(const Driver &D,
+ const std::string &SysRoot,
+ const std::string &OSLibDir,
+ path_list &Paths) {
+ // Try walking via the GCC triple path in case of biarch or multiarch GCC
+ // installations with strange symlinks.
+ if (GCCInstallation.isValid()) {
+ addPathIfExists(D,
+ SysRoot + "/usr/lib/" + GCCInstallation.getTriple().str() +
+ "/../../" + OSLibDir,
+ Paths);
+
+ // Add the 'other' biarch variant path
+ Multilib BiarchSibling;
+ if (GCCInstallation.getBiarchSibling(BiarchSibling)) {
+ addPathIfExists(
+ D, GCCInstallation.getInstallPath() + BiarchSibling.gccSuffix(),
+ Paths);
+ }
+
+ // See comments above on the multilib variant for details of why this is
+ // included even from outside the sysroot.
+ const std::string &LibPath =
+ std::string(GCCInstallation.getParentLibPath());
+ const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
+ const Multilib &Multilib = GCCInstallation.getMultilib();
+ addPathIfExists(
+ D, LibPath + "/../" + GCCTriple.str() + "/lib" + Multilib.osSuffix(),
+ Paths);
+
+ // See comments above on the multilib variant for details of why this is
+ // only included from within the sysroot.
+ if (StringRef(LibPath).startswith(SysRoot))
+ addPathIfExists(D, LibPath, Paths);
+ }
+}
+
+void Generic_GCC::AddMultilibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ // Add include directories specific to the selected multilib set and multilib.
+ if (GCCInstallation.isValid()) {
+ const auto &Callback = Multilibs.includeDirsCallback();
+ if (Callback) {
+ for (const auto &Path : Callback(GCCInstallation.getMultilib()))
+ addExternCSystemIncludeIfExists(
+ DriverArgs, CC1Args, GCCInstallation.getInstallPath() + Path);
+ }
+ }
+}
+
void Generic_GCC::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
@@ -2696,7 +2884,7 @@ static std::string DetectLibcxxIncludePath(llvm::vfs::FileSystem &vfs,
!VersionText.slice(1, StringRef::npos).getAsInteger(10, Version)) {
if (Version > MaxVersion) {
MaxVersion = Version;
- MaxVersionString = VersionText;
+ MaxVersionString = std::string(VersionText);
}
}
}
@@ -2706,7 +2894,6 @@ static std::string DetectLibcxxIncludePath(llvm::vfs::FileSystem &vfs,
void
Generic_GCC::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
- const std::string& SysRoot = getDriver().SysRoot;
auto AddIncludePath = [&](std::string Path) {
std::string IncludePath = DetectLibcxxIncludePath(getVFS(), Path);
if (IncludePath.empty() || !getVFS().exists(IncludePath))
@@ -2722,6 +2909,7 @@ Generic_GCC::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
// If this is a development, non-installed, clang, libcxx will
// not be found at ../include/c++ but it likely to be found at
// one of the following two locations:
+ std::string SysRoot = computeSysRoot();
if (AddIncludePath(SysRoot + "/usr/local/include/c++"))
return;
if (AddIncludePath(SysRoot + "/usr/include/c++"))
diff --git a/clang/lib/Driver/ToolChains/Gnu.h b/clang/lib/Driver/ToolChains/Gnu.h
index 083f74c05477..52690ab4b83c 100644
--- a/clang/lib/Driver/ToolChains/Gnu.h
+++ b/clang/lib/Driver/ToolChains/Gnu.h
@@ -10,6 +10,7 @@
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_GNU_H
#include "Cuda.h"
+#include "ROCm.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
#include <set>
@@ -35,23 +36,26 @@ bool findMIPSMultilibs(const Driver &D, const llvm::Triple &TargetTriple,
namespace tools {
-/// Base class for all GNU tools that provide the same behavior when
-/// it comes to response files support
-class LLVM_LIBRARY_VISIBILITY GnuTool : public Tool {
- virtual void anchor();
-
+/// Directly call GNU Binutils' assembler and linker.
+namespace gnutools {
+class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
public:
- GnuTool(const char *Name, const char *ShortName, const ToolChain &TC)
- : Tool(Name, ShortName, TC, RF_Full, llvm::sys::WEM_CurrentCodePage) {}
+ Assembler(const ToolChain &TC) : Tool("GNU::Assembler", "assembler", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
};
-/// Directly call GNU Binutils' assembler and linker.
-namespace gnutools {
-class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Assembler(const ToolChain &TC) : GnuTool("GNU::Assembler", "assembler", TC) {}
+ Linker(const ToolChain &TC) : Tool("GNU::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
void ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output, const InputInfoList &Inputs,
@@ -59,9 +63,10 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY StaticLibTool : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("GNU::Linker", "linker", TC) {}
+ StaticLibTool(const ToolChain &TC)
+ : Tool("GNU::StaticLibTool", "static-lib-linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -75,10 +80,10 @@ public:
/// gcc - Generic GCC tool implementations.
namespace gcc {
-class LLVM_LIBRARY_VISIBILITY Common : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Common : public Tool {
public:
Common(const char *Name, const char *ShortName, const ToolChain &TC)
- : GnuTool(Name, ShortName, TC) {}
+ : Tool(Name, ShortName, TC) {}
// A gcc tool has an "integrated" assembler that it will call to produce an
// object. Let it use that assembler so that we don't have to deal with
@@ -278,6 +283,7 @@ public:
protected:
GCCInstallationDetector GCCInstallation;
CudaInstallationDetector CudaInstallation;
+ RocmInstallationDetector RocmInstallation;
public:
Generic_GCC(const Driver &D, const llvm::Triple &Triple,
@@ -314,6 +320,16 @@ protected:
/// Check whether the target triple's architecture is 32-bits.
bool isTarget32Bit() const { return getTriple().isArch32Bit(); }
+ void PushPPaths(ToolChain::path_list &PPaths);
+ void AddMultilibPaths(const Driver &D, const std::string &SysRoot,
+ const std::string &OSLibDir,
+ const std::string &MultiarchTriple,
+ path_list &Paths);
+ void AddMultiarchPaths(const Driver &D, const std::string &SysRoot,
+ const std::string &OSLibDir, path_list &Paths);
+ void AddMultilibIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const;
+
// FIXME: This should be final, but the CrossWindows toolchain does weird
// things that can't be easily generalized.
void AddClangCXXStdlibIncludeArgs(
@@ -356,6 +372,12 @@ public:
void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadKind) const override;
+
+ virtual std::string getDynamicLinker(const llvm::opt::ArgList &Args) const {
+ return {};
+ }
+
+ virtual void addExtraOpts(llvm::opt::ArgStringList &CmdArgs) const {}
};
} // end namespace toolchains
diff --git a/clang/lib/Driver/ToolChains/HIP.cpp b/clang/lib/Driver/ToolChains/HIP.cpp
index f89e648948ab..7d17f809690e 100644
--- a/clang/lib/Driver/ToolChains/HIP.cpp
+++ b/clang/lib/Driver/ToolChains/HIP.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "HIP.h"
+#include "AMDGPU.h"
#include "CommonArgs.h"
#include "InputInfo.h"
#include "clang/Basic/Cuda.h"
@@ -16,6 +17,7 @@
#include "clang/Driver/Options.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/TargetParser.h"
using namespace clang::driver;
using namespace clang::driver::toolchains;
@@ -47,159 +49,51 @@ static void addBCLib(const Driver &D, const ArgList &Args,
}
D.Diag(diag::err_drv_no_such_file) << BCName;
}
-
-static const char *getOutputFileName(Compilation &C, StringRef Base,
- const char *Postfix,
- const char *Extension) {
- const char *OutputFileName;
- if (C.getDriver().isSaveTempsEnabled()) {
- OutputFileName =
- C.getArgs().MakeArgString(Base.str() + Postfix + "." + Extension);
- } else {
- std::string TmpName =
- C.getDriver().GetTemporaryPath(Base.str() + Postfix, Extension);
- OutputFileName = C.addTempFile(C.getArgs().MakeArgString(TmpName));
- }
- return OutputFileName;
-}
-
-static void addOptLevelArgs(const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs,
- bool IsLlc = false) {
- if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
- StringRef OOpt = "3";
- if (A->getOption().matches(options::OPT_O4) ||
- A->getOption().matches(options::OPT_Ofast))
- OOpt = "3";
- else if (A->getOption().matches(options::OPT_O0))
- OOpt = "0";
- else if (A->getOption().matches(options::OPT_O)) {
- // Clang and opt support -Os/-Oz; llc only supports -O0, -O1, -O2 and -O3
- // so we map -Os/-Oz to -O2.
- // Only clang supports -Og, and maps it to -O1.
- // We map anything else to -O2.
- OOpt = llvm::StringSwitch<const char *>(A->getValue())
- .Case("1", "1")
- .Case("2", "2")
- .Case("3", "3")
- .Case("s", IsLlc ? "2" : "s")
- .Case("z", IsLlc ? "2" : "z")
- .Case("g", "1")
- .Default("2");
- }
- CmdArgs.push_back(Args.MakeArgString("-O" + OOpt));
- }
-}
} // namespace
-const char *AMDGCN::Linker::constructLLVMLinkCommand(
- Compilation &C, const JobAction &JA, const InputInfoList &Inputs,
- const ArgList &Args, StringRef SubArchName,
- StringRef OutputFilePrefix) const {
- ArgStringList CmdArgs;
- // Add the input bc's created by compile step.
- for (const auto &II : Inputs)
- CmdArgs.push_back(II.getFilename());
-
- // Add an intermediate output file.
- CmdArgs.push_back("-o");
- auto OutputFileName = getOutputFileName(C, OutputFilePrefix, "-linked", "bc");
- CmdArgs.push_back(OutputFileName);
- SmallString<128> ExecPath(C.getDriver().Dir);
- llvm::sys::path::append(ExecPath, "llvm-link");
- const char *Exec = Args.MakeArgString(ExecPath);
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
- return OutputFileName;
-}
-
-const char *AMDGCN::Linker::constructOptCommand(
- Compilation &C, const JobAction &JA, const InputInfoList &Inputs,
- const llvm::opt::ArgList &Args, llvm::StringRef SubArchName,
- llvm::StringRef OutputFilePrefix, const char *InputFileName) const {
- // Construct opt command.
- ArgStringList OptArgs;
- // The input to opt is the output from llvm-link.
- OptArgs.push_back(InputFileName);
- // Pass optimization arg to opt.
- addOptLevelArgs(Args, OptArgs);
- OptArgs.push_back("-mtriple=amdgcn-amd-amdhsa");
- OptArgs.push_back(Args.MakeArgString("-mcpu=" + SubArchName));
-
- for (const Arg *A : Args.filtered(options::OPT_mllvm)) {
- OptArgs.push_back(A->getValue(0));
- }
-
- OptArgs.push_back("-o");
- auto OutputFileName =
- getOutputFileName(C, OutputFilePrefix, "-optimized", "bc");
- OptArgs.push_back(OutputFileName);
- SmallString<128> OptPath(C.getDriver().Dir);
- llvm::sys::path::append(OptPath, "opt");
- const char *OptExec = Args.MakeArgString(OptPath);
- C.addCommand(std::make_unique<Command>(JA, *this, OptExec, OptArgs, Inputs));
- return OutputFileName;
-}
+void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA,
+ const InputInfoList &Inputs,
+ const InputInfo &Output,
+ const llvm::opt::ArgList &Args) const {
+ // Construct lld command.
+ // The output from ld.lld is an HSA code object file.
+ ArgStringList LldArgs{"-flavor", "gnu", "--no-undefined", "-shared",
+ "-plugin-opt=-amdgpu-internalize-symbols"};
-const char *AMDGCN::Linker::constructLlcCommand(
- Compilation &C, const JobAction &JA, const InputInfoList &Inputs,
- const llvm::opt::ArgList &Args, llvm::StringRef SubArchName,
- llvm::StringRef OutputFilePrefix, const char *InputFileName,
- bool OutputIsAsm) const {
- // Construct llc command.
- ArgStringList LlcArgs;
- // The input to llc is the output from opt.
- LlcArgs.push_back(InputFileName);
- // Pass optimization arg to llc.
- addOptLevelArgs(Args, LlcArgs, /*IsLlc=*/true);
- LlcArgs.push_back("-mtriple=amdgcn-amd-amdhsa");
- LlcArgs.push_back(Args.MakeArgString("-mcpu=" + SubArchName));
- LlcArgs.push_back(
- Args.MakeArgString(Twine("-filetype=") + (OutputIsAsm ? "asm" : "obj")));
+ auto &TC = getToolChain();
+ auto &D = TC.getDriver();
+ assert(!Inputs.empty() && "Must have at least one input.");
+ addLTOOptions(TC, Args, LldArgs, Output, Inputs[0],
+ D.getLTOMode() == LTOK_Thin);
// Extract all the -m options
std::vector<llvm::StringRef> Features;
- handleTargetFeaturesGroup(
- Args, Features, options::OPT_m_amdgpu_Features_Group);
+ amdgpu::getAMDGPUTargetFeatures(D, Args, Features);
- // Add features to mattr such as xnack
- std::string MAttrString = "-mattr=";
- for(auto OneFeature : Features) {
+ // Add features to mattr such as cumode
+ std::string MAttrString = "-plugin-opt=-mattr=";
+ for (auto OneFeature : unifyTargetFeatures(Features)) {
MAttrString.append(Args.MakeArgString(OneFeature));
if (OneFeature != Features.back())
MAttrString.append(",");
}
- if(!Features.empty())
- LlcArgs.push_back(Args.MakeArgString(MAttrString));
+ if (!Features.empty())
+ LldArgs.push_back(Args.MakeArgString(MAttrString));
for (const Arg *A : Args.filtered(options::OPT_mllvm)) {
- LlcArgs.push_back(A->getValue(0));
+ LldArgs.push_back(
+ Args.MakeArgString(Twine("-plugin-opt=") + A->getValue(0)));
}
- // Add output filename
- LlcArgs.push_back("-o");
- auto LlcOutputFile =
- getOutputFileName(C, OutputFilePrefix, "", OutputIsAsm ? "s" : "o");
- LlcArgs.push_back(LlcOutputFile);
- SmallString<128> LlcPath(C.getDriver().Dir);
- llvm::sys::path::append(LlcPath, "llc");
- const char *Llc = Args.MakeArgString(LlcPath);
- C.addCommand(std::make_unique<Command>(JA, *this, Llc, LlcArgs, Inputs));
- return LlcOutputFile;
-}
-
-void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA,
- const InputInfoList &Inputs,
- const InputInfo &Output,
- const llvm::opt::ArgList &Args,
- const char *InputFileName) const {
- // Construct lld command.
- // The output from ld.lld is an HSA code object file.
- ArgStringList LldArgs{
- "-flavor", "gnu", "-shared", "-o", Output.getFilename(), InputFileName};
- SmallString<128> LldPath(C.getDriver().Dir);
- llvm::sys::path::append(LldPath, "lld");
- const char *Lld = Args.MakeArgString(LldPath);
- C.addCommand(std::make_unique<Command>(JA, *this, Lld, LldArgs, Inputs));
+ if (C.getDriver().isSaveTempsEnabled())
+ LldArgs.push_back("-save-temps");
+
+ LldArgs.append({"-o", Output.getFilename()});
+ for (auto Input : Inputs)
+ LldArgs.push_back(Input.getFilename());
+ const char *Lld = Args.MakeArgString(getToolChain().GetProgramPath("lld"));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Lld, LldArgs, Inputs));
}
// Construct a clang-offload-bundler command to bundle code objects for
@@ -226,14 +120,84 @@ void AMDGCN::constructHIPFatbinCommand(Compilation &C, const JobAction &JA,
BundlerArgs.push_back(Args.MakeArgString(BundlerTargetArg));
BundlerArgs.push_back(Args.MakeArgString(BundlerInputArg));
- auto BundlerOutputArg =
- Args.MakeArgString(std::string("-outputs=").append(OutputFileName));
+ auto BundlerOutputArg = Args.MakeArgString(
+ std::string("-outputs=").append(std::string(OutputFileName)));
BundlerArgs.push_back(BundlerOutputArg);
- SmallString<128> BundlerPath(C.getDriver().Dir);
- llvm::sys::path::append(BundlerPath, "clang-offload-bundler");
- const char *Bundler = Args.MakeArgString(BundlerPath);
- C.addCommand(std::make_unique<Command>(JA, T, Bundler, BundlerArgs, Inputs));
+ const char *Bundler = Args.MakeArgString(
+ T.getToolChain().GetProgramPath("clang-offload-bundler"));
+ C.addCommand(std::make_unique<Command>(JA, T, ResponseFileSupport::None(),
+ Bundler, BundlerArgs, Inputs));
+}
+
+/// Add Generated HIP Object File which has device images embedded into the
+/// host to the argument list for linking. Using MC directives, embed the
+/// device code and also define symbols required by the code generation so that
+/// the image can be retrieved at runtime.
+void AMDGCN::Linker::constructGenerateObjFileFromHIPFatBinary(
+ Compilation &C, const InputInfo &Output,
+ const InputInfoList &Inputs, const ArgList &Args,
+ const JobAction &JA) const {
+ const ToolChain &TC = getToolChain();
+ std::string Name =
+ std::string(llvm::sys::path::stem(Output.getFilename()));
+
+ // Create Temp Object File Generator,
+ // Offload Bundled file and Bundled Object file.
+ // Keep them if save-temps is enabled.
+ const char *McinFile;
+ const char *BundleFile;
+ if (C.getDriver().isSaveTempsEnabled()) {
+ McinFile = C.getArgs().MakeArgString(Name + ".mcin");
+ BundleFile = C.getArgs().MakeArgString(Name + ".hipfb");
+ } else {
+ auto TmpNameMcin = C.getDriver().GetTemporaryPath(Name, "mcin");
+ McinFile = C.addTempFile(C.getArgs().MakeArgString(TmpNameMcin));
+ auto TmpNameFb = C.getDriver().GetTemporaryPath(Name, "hipfb");
+ BundleFile = C.addTempFile(C.getArgs().MakeArgString(TmpNameFb));
+ }
+ constructHIPFatbinCommand(C, JA, BundleFile, Inputs, Args, *this);
+
+ // Create a buffer to write the contents of the temp obj generator.
+ std::string ObjBuffer;
+ llvm::raw_string_ostream ObjStream(ObjBuffer);
+
+ // Add MC directives to embed target binaries. We ensure that each
+ // section and image is 8-byte aligned (.p2align 3). This is not
+ // mandatory, but increases the likelihood of data being aligned with
+ // a cache block on common host machines.
+ ObjStream << "# HIP Object Generator\n";
+ ObjStream << "# *** Automatically generated by Clang ***\n";
+ ObjStream << " .type __hip_fatbin,@object\n";
+ ObjStream << " .section .hip_fatbin,\"aMS\",@progbits,1\n";
+ ObjStream << " .data\n";
+ ObjStream << " .globl __hip_fatbin\n";
+ ObjStream << " .p2align 3\n";
+ ObjStream << "__hip_fatbin:\n";
+ ObjStream << " .incbin \"" << BundleFile << "\"\n";
+ ObjStream.flush();
+
+ // Dump the contents of the temp object file gen if the user requested that.
+ // We support this option to enable testing of behavior with -###.
+ if (C.getArgs().hasArg(options::OPT_fhip_dump_offload_linker_script))
+ llvm::errs() << ObjBuffer;
+
+ // Open script file and write the contents.
+ std::error_code EC;
+ llvm::raw_fd_ostream Objf(McinFile, EC, llvm::sys::fs::OF_None);
+
+ if (EC) {
+ C.getDriver().Diag(clang::diag::err_unable_to_make_temp) << EC.message();
+ return;
+ }
+
+ Objf << ObjBuffer;
+
+ ArgStringList McArgs{"-o", Output.getFilename(),
+ McinFile, "--filetype=obj"};
+ const char *Mc = Args.MakeArgString(TC.GetProgramPath("llvm-mc"));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Mc, McArgs, Inputs));
}
// For amdgcn the inputs of the linker job are device bitcode and output is
@@ -243,37 +207,20 @@ void AMDGCN::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
+ if (Inputs.size() > 0 &&
+ Inputs[0].getType() == types::TY_Image &&
+ JA.getType() == types::TY_Object)
+ return constructGenerateObjFileFromHIPFatBinary(C, Output, Inputs, Args, JA);
if (JA.getType() == types::TY_HIP_FATBIN)
return constructHIPFatbinCommand(C, JA, Output.getFilename(), Inputs, Args, *this);
- assert(getToolChain().getTriple().getArch() == llvm::Triple::amdgcn &&
- "Unsupported target");
-
- std::string SubArchName = JA.getOffloadingArch();
- assert(StringRef(SubArchName).startswith("gfx") && "Unsupported sub arch");
-
- // Prefix for temporary file name.
- std::string Prefix = llvm::sys::path::stem(Inputs[0].getFilename()).str();
- if (!C.getDriver().isSaveTempsEnabled())
- Prefix += "-" + SubArchName;
-
- // Each command outputs different files.
- const char *LLVMLinkCommand =
- constructLLVMLinkCommand(C, JA, Inputs, Args, SubArchName, Prefix);
- const char *OptCommand = constructOptCommand(C, JA, Inputs, Args, SubArchName,
- Prefix, LLVMLinkCommand);
- if (C.getDriver().isSaveTempsEnabled())
- constructLlcCommand(C, JA, Inputs, Args, SubArchName, Prefix, OptCommand,
- /*OutputIsAsm=*/true);
- const char *LlcCommand =
- constructLlcCommand(C, JA, Inputs, Args, SubArchName, Prefix, OptCommand);
- constructLldCommand(C, JA, Inputs, Output, Args, LlcCommand);
+ return constructLldCommand(C, JA, Inputs, Output, Args);
}
HIPToolChain::HIPToolChain(const Driver &D, const llvm::Triple &Triple,
const ToolChain &HostTC, const ArgList &Args)
- : ToolChain(D, Triple, Args), HostTC(HostTC) {
+ : ROCMToolChain(D, Triple, Args), HostTC(HostTC) {
// Lookup binaries into the driver directory, this is used to
// discover the clang-offload-bundler executable.
getProgramPaths().push_back(getDriver().Dir);
@@ -285,20 +232,16 @@ void HIPToolChain::addClangTargetOptions(
Action::OffloadKind DeviceOffloadingKind) const {
HostTC.addClangTargetOptions(DriverArgs, CC1Args, DeviceOffloadingKind);
- StringRef GpuArch = DriverArgs.getLastArgValue(options::OPT_march_EQ);
+ StringRef GpuArch = DriverArgs.getLastArgValue(options::OPT_mcpu_EQ);
assert(!GpuArch.empty() && "Must have an explicit GPU arch.");
(void) GpuArch;
assert(DeviceOffloadingKind == Action::OFK_HIP &&
"Only HIP offloading kinds are supported for GPUs.");
+ auto Kind = llvm::AMDGPU::parseArchAMDGCN(GpuArch);
+ const StringRef CanonArch = llvm::AMDGPU::getArchNameAMDGCN(Kind);
- CC1Args.push_back("-target-cpu");
- CC1Args.push_back(DriverArgs.MakeArgStringRef(GpuArch));
CC1Args.push_back("-fcuda-is-device");
- if (DriverArgs.hasFlag(options::OPT_fcuda_flush_denormals_to_zero,
- options::OPT_fno_cuda_flush_denormals_to_zero, false))
- CC1Args.push_back("-fcuda-flush-denormals-to-zero");
-
if (DriverArgs.hasFlag(options::OPT_fcuda_approx_transcendentals,
options::OPT_fno_cuda_approx_transcendentals, false))
CC1Args.push_back("-fcuda-approx-transcendentals");
@@ -306,6 +249,8 @@ void HIPToolChain::addClangTargetOptions(
if (DriverArgs.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
false))
CC1Args.push_back("-fgpu-rdc");
+ else
+ CC1Args.append({"-mllvm", "-amdgpu-internalize-symbols"});
StringRef MaxThreadsPerBlock =
DriverArgs.getLastArgValue(options::OPT_gpu_max_threads_per_block_EQ);
@@ -334,46 +279,50 @@ void HIPToolChain::addClangTargetOptions(
ArgStringList LibraryPaths;
// Find in --hip-device-lib-path and HIP_LIBRARY_PATH.
- for (auto Path :
- DriverArgs.getAllArgValues(options::OPT_hip_device_lib_path_EQ))
+ for (auto Path : RocmInstallation.getRocmDeviceLibPathArg())
LibraryPaths.push_back(DriverArgs.MakeArgString(Path));
- addDirectoryList(DriverArgs, LibraryPaths, "-L", "HIP_DEVICE_LIB_PATH");
+ addDirectoryList(DriverArgs, LibraryPaths, "", "HIP_DEVICE_LIB_PATH");
- llvm::SmallVector<std::string, 10> BCLibs;
+ // Maintain compatibility with --hip-device-lib.
+ auto BCLibs = DriverArgs.getAllArgValues(options::OPT_hip_device_lib_EQ);
+ if (!BCLibs.empty()) {
+ for (auto Lib : BCLibs)
+ addBCLib(getDriver(), DriverArgs, CC1Args, LibraryPaths, Lib);
+ } else {
+ if (!RocmInstallation.hasDeviceLibrary()) {
+ getDriver().Diag(diag::err_drv_no_rocm_device_lib) << 0;
+ return;
+ }
- // Add bitcode library in --hip-device-lib.
- for (auto Lib : DriverArgs.getAllArgValues(options::OPT_hip_device_lib_EQ)) {
- BCLibs.push_back(DriverArgs.MakeArgString(Lib));
- }
+ std::string LibDeviceFile = RocmInstallation.getLibDeviceFile(CanonArch);
+ if (LibDeviceFile.empty()) {
+ getDriver().Diag(diag::err_drv_no_rocm_device_lib) << 1 << GpuArch;
+ return;
+ }
- // If --hip-device-lib is not set, add the default bitcode libraries.
- if (BCLibs.empty()) {
- // Get the bc lib file name for ISA version. For example,
- // gfx803 => oclc_isa_version_803.amdgcn.bc.
- std::string GFXVersion = GpuArch.drop_front(3).str();
- std::string ISAVerBC = "oclc_isa_version_" + GFXVersion + ".amdgcn.bc";
-
- llvm::StringRef FlushDenormalControlBC;
- if (DriverArgs.hasArg(options::OPT_fcuda_flush_denormals_to_zero))
- FlushDenormalControlBC = "oclc_daz_opt_on.amdgcn.bc";
- else
- FlushDenormalControlBC = "oclc_daz_opt_off.amdgcn.bc";
-
- llvm::StringRef WaveFrontSizeBC;
- if (stoi(GFXVersion) < 1000)
- WaveFrontSizeBC = "oclc_wavefrontsize64_on.amdgcn.bc";
- else
- WaveFrontSizeBC = "oclc_wavefrontsize64_off.amdgcn.bc";
-
- BCLibs.append({"hip.amdgcn.bc", "ocml.amdgcn.bc", "ockl.amdgcn.bc",
- "oclc_finite_only_off.amdgcn.bc", FlushDenormalControlBC,
- "oclc_correctly_rounded_sqrt_on.amdgcn.bc",
- "oclc_unsafe_math_off.amdgcn.bc", ISAVerBC,
- WaveFrontSizeBC});
+ // If --hip-device-lib is not set, add the default bitcode libraries.
+ // TODO: There are way too many flags that change this. Do we need to check
+ // them all?
+ bool DAZ = DriverArgs.hasFlag(options::OPT_fcuda_flush_denormals_to_zero,
+ options::OPT_fno_cuda_flush_denormals_to_zero,
+ getDefaultDenormsAreZeroForTarget(Kind));
+ // TODO: Check standard C++ flags?
+ bool FiniteOnly = false;
+ bool UnsafeMathOpt = false;
+ bool FastRelaxedMath = false;
+ bool CorrectSqrt = true;
+ bool Wave64 = isWave64(DriverArgs, Kind);
+
+ // Add the HIP specific bitcode library.
+ CC1Args.push_back("-mlink-builtin-bitcode");
+ CC1Args.push_back(DriverArgs.MakeArgString(RocmInstallation.getHIPPath()));
+
+ // Add the generic set of libraries.
+ RocmInstallation.addCommonBitcodeLibCC1Args(
+ DriverArgs, CC1Args, LibDeviceFile, Wave64, DAZ, FiniteOnly,
+ UnsafeMathOpt, FastRelaxedMath, CorrectSqrt);
}
- for (auto Lib : BCLibs)
- addBCLib(getDriver(), DriverArgs, CC1Args, LibraryPaths, Lib);
}
llvm::opt::DerivedArgList *
@@ -388,42 +337,12 @@ HIPToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
const OptTable &Opts = getDriver().getOpts();
for (Arg *A : Args) {
- if (A->getOption().matches(options::OPT_Xarch__)) {
- // Skip this argument unless the architecture matches BoundArch.
- if (BoundArch.empty() || A->getValue(0) != BoundArch)
- continue;
-
- unsigned Index = Args.getBaseArgs().MakeIndex(A->getValue(1));
- unsigned Prev = Index;
- std::unique_ptr<Arg> XarchArg(Opts.ParseOneArg(Args, Index));
-
- // If the argument parsing failed or more than one argument was
- // consumed, the -Xarch_ argument's parameter tried to consume
- // extra arguments. Emit an error and ignore.
- //
- // We also want to disallow any options which would alter the
- // driver behavior; that isn't going to work in our model. We
- // use isDriverOption() as an approximation, although things
- // like -O4 are going to slip through.
- if (!XarchArg || Index > Prev + 1) {
- getDriver().Diag(diag::err_drv_invalid_Xarch_argument_with_args)
- << A->getAsString(Args);
- continue;
- } else if (XarchArg->getOption().hasFlag(options::DriverOption)) {
- getDriver().Diag(diag::err_drv_invalid_Xarch_argument_isdriver)
- << A->getAsString(Args);
- continue;
- }
- XarchArg->setBaseArg(A);
- A = XarchArg.release();
- DAL->AddSynthesizedArg(A);
- }
DAL->append(A);
}
if (!BoundArch.empty()) {
- DAL->eraseArg(options::OPT_march_EQ);
- DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ), BoundArch);
+ DAL->eraseArg(options::OPT_mcpu_EQ);
+ DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_mcpu_EQ), BoundArch);
}
return DAL;
@@ -458,6 +377,11 @@ void HIPToolChain::AddIAMCUIncludeArgs(const ArgList &Args,
HostTC.AddIAMCUIncludeArgs(Args, CC1Args);
}
+void HIPToolChain::AddHIPIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ RocmInstallation.AddHIPIncludeArgs(DriverArgs, CC1Args);
+}
+
SanitizerMask HIPToolChain::getSupportedSanitizers() const {
// The HIPToolChain only supports sanitizers in the sense that it allows
// sanitizer arguments on the command line if they are supported by the host
diff --git a/clang/lib/Driver/ToolChains/HIP.h b/clang/lib/Driver/ToolChains/HIP.h
index c4f944e458bf..5e2be7138579 100644
--- a/clang/lib/Driver/ToolChains/HIP.h
+++ b/clang/lib/Driver/ToolChains/HIP.h
@@ -11,6 +11,7 @@
#include "clang/Driver/ToolChain.h"
#include "clang/Driver/Tool.h"
+#include "AMDGPU.h"
namespace clang {
namespace driver {
@@ -37,34 +38,17 @@ public:
const char *LinkingOutput) const override;
private:
- /// \return llvm-link output file name.
- const char *constructLLVMLinkCommand(Compilation &C, const JobAction &JA,
- const InputInfoList &Inputs,
- const llvm::opt::ArgList &Args,
- llvm::StringRef SubArchName,
- llvm::StringRef OutputFilePrefix) const;
-
- /// \return opt output file name.
- const char *constructOptCommand(Compilation &C, const JobAction &JA,
- const InputInfoList &Inputs,
- const llvm::opt::ArgList &Args,
- llvm::StringRef SubArchName,
- llvm::StringRef OutputFilePrefix,
- const char *InputFileName) const;
-
- /// \return llc output file name.
- const char *constructLlcCommand(Compilation &C, const JobAction &JA,
- const InputInfoList &Inputs,
- const llvm::opt::ArgList &Args,
- llvm::StringRef SubArchName,
- llvm::StringRef OutputFilePrefix,
- const char *InputFileName,
- bool OutputIsAsm = false) const;
void constructLldCommand(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs, const InputInfo &Output,
- const llvm::opt::ArgList &Args,
- const char *InputFileName) const;
+ const llvm::opt::ArgList &Args) const;
+
+ // Construct command for creating Object from HIP fatbin.
+ void constructGenerateObjFileFromHIPFatBinary(Compilation &C,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &Args,
+ const JobAction &JA) const;
};
} // end namespace AMDGCN
@@ -72,7 +56,7 @@ private:
namespace toolchains {
-class LLVM_LIBRARY_VISIBILITY HIPToolChain : public ToolChain {
+class LLVM_LIBRARY_VISIBILITY HIPToolChain final : public ROCMToolChain {
public:
HIPToolChain(const Driver &D, const llvm::Triple &Triple,
const ToolChain &HostTC, const llvm::opt::ArgList &Args);
@@ -106,6 +90,8 @@ public:
llvm::opt::ArgStringList &CC1Args) const override;
void AddIAMCUIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
SanitizerMask getSupportedSanitizers() const override;
diff --git a/clang/lib/Driver/ToolChains/Hexagon.cpp b/clang/lib/Driver/ToolChains/Hexagon.cpp
index e4d9ea8a70f9..775f6e1094fa 100644
--- a/clang/lib/Driver/ToolChains/Hexagon.cpp
+++ b/clang/lib/Driver/ToolChains/Hexagon.cpp
@@ -31,7 +31,6 @@ static StringRef getDefaultHvxLength(StringRef Cpu) {
.Case("v60", "64b")
.Case("v62", "64b")
.Case("v65", "64b")
- .Case("v66", "128b")
.Default("128b");
}
@@ -48,13 +47,12 @@ static void handleHVXWarnings(const Driver &D, const ArgList &Args) {
// Handle hvx target features explicitly.
static void handleHVXTargetFeatures(const Driver &D, const ArgList &Args,
std::vector<StringRef> &Features,
- bool &HasHVX) {
+ StringRef Cpu, bool &HasHVX) {
// Handle HVX warnings.
handleHVXWarnings(D, Args);
// Add the +hvx* features based on commandline flags.
StringRef HVXFeature, HVXLength;
- StringRef Cpu(toolchains::HexagonToolChain::GetTargetCPUVersion(Args));
// Handle -mhvx, -mhvx=, -mno-hvx.
if (Arg *A = Args.getLastArg(options::OPT_mno_hexagon_hvx,
@@ -108,7 +106,15 @@ void hexagon::getHexagonTargetFeatures(const Driver &D, const ArgList &Args,
Features.push_back(UseLongCalls ? "+long-calls" : "-long-calls");
bool HasHVX = false;
- handleHVXTargetFeatures(D, Args, Features, HasHVX);
+ StringRef Cpu(toolchains::HexagonToolChain::GetTargetCPUVersion(Args));
+ // 't' in Cpu denotes tiny-core micro-architecture. For now, the co-processors
+ // have no dependency on micro-architecture.
+ const bool TinyCore = Cpu.contains('t');
+
+ if (TinyCore)
+ Cpu = Cpu.take_front(Cpu.size() - 1);
+
+ handleHVXTargetFeatures(D, Args, Features, Cpu, HasHVX);
if (HexagonToolChain::isAutoHVXEnabled(Args) && !HasHVX)
D.Diag(diag::warn_drv_vectorize_needs_hvx);
@@ -183,7 +189,8 @@ void hexagon::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
}
auto *Exec = Args.MakeArgString(HTC.GetProgramPath(AsName));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
void hexagon::Linker::RenderExtraToolArgs(const JobAction &JA,
@@ -258,18 +265,43 @@ constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
UseG0 = G.getValue() == 0;
}
- //----------------------------------------------------------------------------
- //
- //----------------------------------------------------------------------------
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
+ if (HTC.getTriple().isMusl()) {
+ if (!Args.hasArg(options::OPT_shared, options::OPT_static))
+ CmdArgs.push_back("-dynamic-linker=/lib/ld-musl-hexagon.so.1");
+
+ if (!Args.hasArg(options::OPT_shared, options::OPT_nostartfiles,
+ options::OPT_nostdlib))
+ CmdArgs.push_back(Args.MakeArgString(D.SysRoot + "/usr/lib/crt1.o"));
+ else if (Args.hasArg(options::OPT_shared) &&
+ !Args.hasArg(options::OPT_nostartfiles, options::OPT_nostdlib))
+ CmdArgs.push_back(Args.MakeArgString(D.SysRoot + "/usr/lib/crti.o"));
+
+ CmdArgs.push_back(
+ Args.MakeArgString(StringRef("-L") + D.SysRoot + "/usr/lib"));
+ Args.AddAllArgs(CmdArgs,
+ {options::OPT_T_Group, options::OPT_e, options::OPT_s,
+ options::OPT_t, options::OPT_u_Group});
+ AddLinkerInputs(HTC, Inputs, Args, CmdArgs, JA);
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ CmdArgs.push_back("-lclang_rt.builtins-hexagon");
+ CmdArgs.push_back("-lc");
+ }
+ if (D.CCCIsCXX()) {
+ if (HTC.ShouldLinkCXXStdlib(Args))
+ HTC.AddCXXStdlibLibArgs(Args, CmdArgs);
+ }
+ return;
+ }
+
//----------------------------------------------------------------------------
// moslib
//----------------------------------------------------------------------------
std::vector<std::string> OsLibs;
bool HasStandalone = false;
-
for (const Arg *A : Args.filtered(options::OPT_moslib_EQ)) {
A->claim();
OsLibs.emplace_back(A->getValue());
@@ -375,7 +407,8 @@ void hexagon::Linker::ConstructJob(Compilation &C, const JobAction &JA,
LinkingOutput);
const char *Exec = Args.MakeArgString(HTC.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
// Hexagon tools end.
@@ -481,6 +514,22 @@ HexagonToolChain::HexagonToolChain(const Driver &D, const llvm::Triple &Triple,
HexagonToolChain::~HexagonToolChain() {}
+void HexagonToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ CXXStdlibType Type = GetCXXStdlibType(Args);
+ switch (Type) {
+ case ToolChain::CST_Libcxx:
+ CmdArgs.push_back("-lc++");
+ CmdArgs.push_back("-lc++abi");
+ CmdArgs.push_back("-lunwind");
+ break;
+
+ case ToolChain::CST_Libstdcxx:
+ CmdArgs.push_back("-lstdc++");
+ break;
+ }
+}
+
Tool *HexagonToolChain::buildAssembler() const {
return new tools::hexagon::Assembler(*this);
}
@@ -517,6 +566,14 @@ unsigned HexagonToolChain::getOptimizationLevel(
void HexagonToolChain::addClangTargetOptions(const ArgList &DriverArgs,
ArgStringList &CC1Args,
Action::OffloadKind) const {
+
+ bool UseInitArrayDefault = getTriple().isMusl();
+
+ if (!DriverArgs.hasFlag(options::OPT_fuse_init_array,
+ options::OPT_fno_use_init_array,
+ UseInitArrayDefault))
+ CC1Args.push_back("-fno-use-init-array");
+
if (DriverArgs.hasArg(options::OPT_ffixed_r19)) {
CC1Args.push_back("-target-feature");
CC1Args.push_back("+reserved-r19");
@@ -534,12 +591,37 @@ void HexagonToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
return;
const Driver &D = getDriver();
+ if (!D.SysRoot.empty()) {
+ SmallString<128> P(D.SysRoot);
+ if (getTriple().isMusl())
+ llvm::sys::path::append(P, "usr/include");
+ else
+ llvm::sys::path::append(P, "include");
+ addExternCSystemInclude(DriverArgs, CC1Args, P.str());
+ return;
+ }
+
std::string TargetDir = getHexagonTargetDir(D.getInstalledDir(),
D.PrefixDirs);
addExternCSystemInclude(DriverArgs, CC1Args, TargetDir + "/hexagon/include");
}
-
+void HexagonToolChain::addLibCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
+ if (!D.SysRoot.empty() && getTriple().isMusl())
+ addLibStdCXXIncludePaths(D.SysRoot + "/usr/include/c++/v1", "", "", "", "",
+ "", DriverArgs, CC1Args);
+ else if (getTriple().isMusl())
+ addLibStdCXXIncludePaths("/usr/include/c++/v1", "", "", "", "", "",
+ DriverArgs, CC1Args);
+ else {
+ std::string TargetDir = getHexagonTargetDir(D.InstalledDir, D.PrefixDirs);
+ addLibStdCXXIncludePaths(TargetDir, "/hexagon/include/c++/v1", "", "", "",
+ "", DriverArgs, CC1Args);
+ }
+}
void HexagonToolChain::addLibStdCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
@@ -552,14 +634,22 @@ void HexagonToolChain::addLibStdCxxIncludePaths(
ToolChain::CXXStdlibType
HexagonToolChain::GetCXXStdlibType(const ArgList &Args) const {
Arg *A = Args.getLastArg(options::OPT_stdlib_EQ);
- if (!A)
- return ToolChain::CST_Libstdcxx;
-
+ if (!A) {
+ if (getTriple().isMusl())
+ return ToolChain::CST_Libcxx;
+ else
+ return ToolChain::CST_Libstdcxx;
+ }
StringRef Value = A->getValue();
- if (Value != "libstdc++")
+ if (Value != "libstdc++" && Value != "libc++")
getDriver().Diag(diag::err_drv_invalid_stdlib_name) << A->getAsString(Args);
- return ToolChain::CST_Libstdcxx;
+ if (Value == "libstdc++")
+ return ToolChain::CST_Libstdcxx;
+ else if (Value == "libc++")
+ return ToolChain::CST_Libcxx;
+ else
+ return ToolChain::CST_Libstdcxx;
}
bool HexagonToolChain::isAutoHVXEnabled(const llvm::opt::ArgList &Args) {
diff --git a/clang/lib/Driver/ToolChains/Hexagon.h b/clang/lib/Driver/ToolChains/Hexagon.h
index d7b4a13d3a4f..c32cb7f09591 100644
--- a/clang/lib/Driver/ToolChains/Hexagon.h
+++ b/clang/lib/Driver/ToolChains/Hexagon.h
@@ -20,10 +20,10 @@ namespace hexagon {
// For Hexagon, we do not need to instantiate tools for PreProcess, PreCompile
// and Compile.
// We simply use "clang -cc1" for those actions.
-class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
public:
Assembler(const ToolChain &TC)
- : GnuTool("hexagon::Assembler", "hexagon-as", TC) {}
+ : Tool("hexagon::Assembler", "hexagon-as", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -35,9 +35,9 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("hexagon::Linker", "hexagon-ld", TC) {}
+ Linker(const ToolChain &TC) : Tool("hexagon::Linker", "hexagon-ld", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -81,10 +81,18 @@ public:
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- const char *getDefaultLinker() const override { return "hexagon-link"; }
+ void addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ const char *getDefaultLinker() const override {
+ return getTriple().isMusl() ? "ld.lld" : "hexagon-link";
+ }
CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
+ void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
StringRef GetGCCLibAndIncVersion() const { return GCCLibAndIncVersion.Text; }
bool IsIntegratedAssemblerDefault() const override {
return true;
diff --git a/clang/lib/Driver/ToolChains/Hurd.cpp b/clang/lib/Driver/ToolChains/Hurd.cpp
index 72166ca9f359..a700d7b9064c 100644
--- a/clang/lib/Driver/ToolChains/Hurd.cpp
+++ b/clang/lib/Driver/ToolChains/Hurd.cpp
@@ -61,21 +61,35 @@ static StringRef getOSLibDir(const llvm::Triple &Triple, const ArgList &Args) {
return Triple.isArch32Bit() ? "lib" : "lib64";
}
-Hurd::Hurd(const Driver &D, const llvm::Triple &Triple,
- const ArgList &Args)
+Hurd::Hurd(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
+ GCCInstallation.init(Triple, Args);
+ Multilibs = GCCInstallation.getMultilibs();
+ SelectedMultilib = GCCInstallation.getMultilib();
std::string SysRoot = computeSysRoot();
+ ToolChain::path_list &PPaths = getProgramPaths();
+
+ Generic_GCC::PushPPaths(PPaths);
+
+ // The selection of paths to try here is designed to match the patterns which
+ // the GCC driver itself uses, as this is part of the GCC-compatible driver.
+ // This was determined by running GCC in a fake filesystem, creating all
+ // possible permutations of these directories, and seeing which ones it added
+ // to the link paths.
path_list &Paths = getFilePaths();
- const std::string OSLibDir = getOSLibDir(Triple, Args);
+ const std::string OSLibDir = std::string(getOSLibDir(Triple, Args));
const std::string MultiarchTriple = getMultiarchTriple(D, Triple, SysRoot);
#ifdef ENABLE_LINKER_BUILD_ID
ExtraOpts.push_back("--build-id");
#endif
- // If we are currently running Clang inside of the requested system root, add
- // its parent library paths to those searched.
+ Generic_GCC::AddMultilibPaths(D, SysRoot, OSLibDir, MultiarchTriple, Paths);
+
+ // Similar to the logic for GCC above, if we currently running Clang inside
+ // of the requested system root, add its parent library paths to
+ // those searched.
// FIXME: It's not clear whether we should use the driver's installed
// directory ('Dir' below) or the ResourceDir.
if (StringRef(D.Dir).startswith(SysRoot)) {
@@ -89,8 +103,11 @@ Hurd::Hurd(const Driver &D, const llvm::Triple &Triple,
addPathIfExists(D, SysRoot + "/usr/lib/" + MultiarchTriple, Paths);
addPathIfExists(D, SysRoot + "/usr/lib/../" + OSLibDir, Paths);
- // If we are currently running Clang inside of the requested system root, add
- // its parent library path to those searched.
+ Generic_GCC::AddMultiarchPaths(D, SysRoot, OSLibDir, Paths);
+
+ // Similar to the logic for GCC above, if we are currently running Clang
+ // inside of the requested system root, add its parent library path to those
+ // searched.
// FIXME: It's not clear whether we should use the driver's installed
// directory ('Dir' below) or the ResourceDir.
if (StringRef(D.Dir).startswith(SysRoot))
@@ -108,13 +125,6 @@ Tool *Hurd::buildAssembler() const {
return new tools::gnutools::Assembler(*this);
}
-std::string Hurd::computeSysRoot() const {
- if (!getDriver().SysRoot.empty())
- return getDriver().SysRoot;
-
- return std::string();
-}
-
std::string Hurd::getDynamicLinker(const ArgList &Args) const {
if (getArch() == llvm::Triple::x86)
return "/lib/ld.so";
@@ -149,7 +159,7 @@ void Hurd::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
CIncludeDirs.split(Dirs, ":");
for (StringRef Dir : Dirs) {
StringRef Prefix =
- llvm::sys::path::is_absolute(Dir) ? StringRef(SysRoot) : "";
+ llvm::sys::path::is_absolute(Dir) ? "" : StringRef(SysRoot);
addExternCSystemInclude(DriverArgs, CC1Args, Prefix + Dir);
}
return;
@@ -157,6 +167,9 @@ void Hurd::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
// Lacking those, try to detect the correct set of system includes for the
// target triple.
+
+ AddMultilibIncludeArgs(DriverArgs, CC1Args);
+
if (getTriple().getArch() == llvm::Triple::x86) {
std::string Path = SysRoot + "/usr/include/i386-gnu";
if (D.getVFS().exists(Path))
@@ -170,3 +183,8 @@ void Hurd::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/include");
}
+
+void Hurd::addExtraOpts(llvm::opt::ArgStringList &CmdArgs) const {
+ for (const auto &Opt : ExtraOpts)
+ CmdArgs.push_back(Opt.c_str());
+}
diff --git a/clang/lib/Driver/ToolChains/Hurd.h b/clang/lib/Driver/ToolChains/Hurd.h
index 86c6c3f734dd..0612a55280a8 100644
--- a/clang/lib/Driver/ToolChains/Hurd.h
+++ b/clang/lib/Driver/ToolChains/Hurd.h
@@ -27,9 +27,9 @@ public:
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- virtual std::string computeSysRoot() const;
+ std::string getDynamicLinker(const llvm::opt::ArgList &Args) const override;
- virtual std::string getDynamicLinker(const llvm::opt::ArgList &Args) const;
+ void addExtraOpts(llvm::opt::ArgStringList &CmdArgs) const override;
std::vector<std::string> ExtraOpts;
diff --git a/clang/lib/Driver/ToolChains/InterfaceStubs.cpp b/clang/lib/Driver/ToolChains/InterfaceStubs.cpp
index 8f947e79bd1f..f7c11421e809 100644
--- a/clang/lib/Driver/ToolChains/InterfaceStubs.cpp
+++ b/clang/lib/Driver/ToolChains/InterfaceStubs.cpp
@@ -54,8 +54,9 @@ void Merger::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(InputFilename.c_str()));
}
- C.addCommand(std::make_unique<Command>(JA, *this, Args.MakeArgString(Merger),
- CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Args.MakeArgString(Merger), CmdArgs,
+ Inputs));
}
} // namespace ifstool
} // namespace tools
diff --git a/clang/lib/Driver/ToolChains/Linux.cpp b/clang/lib/Driver/ToolChains/Linux.cpp
index bff1ab1009be..180350476c38 100644
--- a/clang/lib/Driver/ToolChains/Linux.cpp
+++ b/clang/lib/Driver/ToolChains/Linux.cpp
@@ -208,15 +208,6 @@ static StringRef getOSLibDir(const llvm::Triple &Triple, const ArgList &Args) {
return Triple.isArch32Bit() ? "lib" : "lib64";
}
-static void addMultilibsFilePaths(const Driver &D, const MultilibSet &Multilibs,
- const Multilib &Multilib,
- StringRef InstallPath,
- ToolChain::path_list &Paths) {
- if (const auto &PathsCallback = Multilibs.filePathsCallback())
- for (const auto &Path : PathsCallback(Multilib))
- addPathIfExists(D, InstallPath + Path, Paths);
-}
-
Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
GCCInstallation.init(Triple, Args);
@@ -224,21 +215,9 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
SelectedMultilib = GCCInstallation.getMultilib();
llvm::Triple::ArchType Arch = Triple.getArch();
std::string SysRoot = computeSysRoot();
-
- // Cross-compiling binutils and GCC installations (vanilla and openSUSE at
- // least) put various tools in a triple-prefixed directory off of the parent
- // of the GCC installation. We use the GCC triple here to ensure that we end
- // up with tools that support the same amount of cross compiling as the
- // detected GCC installation. For example, if we find a GCC installation
- // targeting x86_64, but it is a bi-arch GCC installation, it can also be
- // used to target i386.
- // FIXME: This seems unlikely to be Linux-specific.
ToolChain::path_list &PPaths = getProgramPaths();
- if (GCCInstallation.isValid()) {
- PPaths.push_back(Twine(GCCInstallation.getParentLibPath() + "/../" +
- GCCInstallation.getTriple().str() + "/bin")
- .str());
- }
+
+ Generic_GCC::PushPPaths(PPaths);
Distro Distro(D.getVFS(), Triple);
@@ -253,10 +232,9 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
ExtraOpts.push_back("relro");
}
- // The lld default page size is too large for Aarch64, which produces much
- // larger .so files and images for arm64 device targets. Use 4KB page size
- // for Android arm64 targets instead.
- if (Triple.isAArch64() && Triple.isAndroid()) {
+ // Android ARM/AArch64 use max-page-size=4096 to reduce VMA usage. Note, lld
+ // from 11 onwards default max-page-size to 65536 for both ARM and AArch64.
+ if ((Triple.isARM() || Triple.isAArch64()) && Triple.isAndroid()) {
ExtraOpts.push_back("-z");
ExtraOpts.push_back("max-page-size=4096");
}
@@ -314,60 +292,10 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
// to the link paths.
path_list &Paths = getFilePaths();
- const std::string OSLibDir = getOSLibDir(Triple, Args);
+ const std::string OSLibDir = std::string(getOSLibDir(Triple, Args));
const std::string MultiarchTriple = getMultiarchTriple(D, Triple, SysRoot);
- // Add the multilib suffixed paths where they are available.
- if (GCCInstallation.isValid()) {
- const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
- const std::string &LibPath = GCCInstallation.getParentLibPath();
-
- // Add toolchain / multilib specific file paths.
- addMultilibsFilePaths(D, Multilibs, SelectedMultilib,
- GCCInstallation.getInstallPath(), Paths);
-
- // Sourcery CodeBench MIPS toolchain holds some libraries under
- // a biarch-like suffix of the GCC installation.
- addPathIfExists(
- D, GCCInstallation.getInstallPath() + SelectedMultilib.gccSuffix(),
- Paths);
-
- // GCC cross compiling toolchains will install target libraries which ship
- // as part of the toolchain under <prefix>/<triple>/<libdir> rather than as
- // any part of the GCC installation in
- // <prefix>/<libdir>/gcc/<triple>/<version>. This decision is somewhat
- // debatable, but is the reality today. We need to search this tree even
- // when we have a sysroot somewhere else. It is the responsibility of
- // whomever is doing the cross build targeting a sysroot using a GCC
- // installation that is *not* within the system root to ensure two things:
- //
- // 1) Any DSOs that are linked in from this tree or from the install path
- // above must be present on the system root and found via an
- // appropriate rpath.
- // 2) There must not be libraries installed into
- // <prefix>/<triple>/<libdir> unless they should be preferred over
- // those within the system root.
- //
- // Note that this matches the GCC behavior. See the below comment for where
- // Clang diverges from GCC's behavior.
- addPathIfExists(D, LibPath + "/../" + GCCTriple.str() + "/lib/../" +
- OSLibDir + SelectedMultilib.osSuffix(),
- Paths);
-
- // If the GCC installation we found is inside of the sysroot, we want to
- // prefer libraries installed in the parent prefix of the GCC installation.
- // It is important to *not* use these paths when the GCC installation is
- // outside of the system root as that can pick up unintended libraries.
- // This usually happens when there is an external cross compiler on the
- // host system, and a more minimal sysroot available that is the target of
- // the cross. Note that GCC does include some of these directories in some
- // configurations but this seems somewhere between questionable and simply
- // a bug.
- if (StringRef(LibPath).startswith(SysRoot)) {
- addPathIfExists(D, LibPath + "/" + MultiarchTriple, Paths);
- addPathIfExists(D, LibPath + "/../" + OSLibDir, Paths);
- }
- }
+ Generic_GCC::AddMultilibPaths(D, SysRoot, OSLibDir, MultiarchTriple, Paths);
// Similar to the logic for GCC above, if we currently running Clang inside
// of the requested system root, add its parent library paths to
@@ -411,36 +339,7 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
addPathIfExists(D, SysRoot + "/usr/" + OSLibDir + "/" + ABIName, Paths);
}
- // Try walking via the GCC triple path in case of biarch or multiarch GCC
- // installations with strange symlinks.
- if (GCCInstallation.isValid()) {
- addPathIfExists(D,
- SysRoot + "/usr/lib/" + GCCInstallation.getTriple().str() +
- "/../../" + OSLibDir,
- Paths);
-
- // Add the 'other' biarch variant path
- Multilib BiarchSibling;
- if (GCCInstallation.getBiarchSibling(BiarchSibling)) {
- addPathIfExists(D, GCCInstallation.getInstallPath() +
- BiarchSibling.gccSuffix(),
- Paths);
- }
-
- // See comments above on the multilib variant for details of why this is
- // included even from outside the sysroot.
- const std::string &LibPath = GCCInstallation.getParentLibPath();
- const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
- const Multilib &Multilib = GCCInstallation.getMultilib();
- addPathIfExists(D, LibPath + "/../" + GCCTriple.str() + "/lib" +
- Multilib.osSuffix(),
- Paths);
-
- // See comments above on the multilib variant for details of why this is
- // only included from within the sysroot.
- if (StringRef(LibPath).startswith(SysRoot))
- addPathIfExists(D, LibPath, Paths);
- }
+ Generic_GCC::AddMultiarchPaths(D, SysRoot, OSLibDir, Paths);
// Similar to the logic for GCC above, if we are currently running Clang
// inside of the requested system root, add its parent library path to those
@@ -464,6 +363,10 @@ bool Linux::HasNativeLLVMSupport() const { return true; }
Tool *Linux::buildLinker() const { return new tools::gnutools::Linker(*this); }
+Tool *Linux::buildStaticLibTool() const {
+ return new tools::gnutools::StaticLibTool(*this);
+}
+
Tool *Linux::buildAssembler() const {
return new tools::gnutools::Assembler(*this);
}
@@ -638,6 +541,8 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
Loader = X32 ? "ld-linux-x32.so.2" : "ld-linux-x86-64.so.2";
break;
}
+ case llvm::Triple::ve:
+ return "/opt/nec/ve/lib/ld-linux-ve.so.1";
}
if (Distro == Distro::Exherbo &&
@@ -674,7 +579,7 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
CIncludeDirs.split(dirs, ":");
for (StringRef dir : dirs) {
StringRef Prefix =
- llvm::sys::path::is_absolute(dir) ? StringRef(SysRoot) : "";
+ llvm::sys::path::is_absolute(dir) ? "" : StringRef(SysRoot);
addExternCSystemInclude(DriverArgs, CC1Args, Prefix + dir);
}
return;
@@ -683,15 +588,7 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
// Lacking those, try to detect the correct set of system includes for the
// target triple.
- // Add include directories specific to the selected multilib set and multilib.
- if (GCCInstallation.isValid()) {
- const auto &Callback = Multilibs.includeDirsCallback();
- if (Callback) {
- for (const auto &Path : Callback(GCCInstallation.getMultilib()))
- addExternCSystemIncludeIfExists(
- DriverArgs, CC1Args, GCCInstallation.getInstallPath() + Path);
- }
- }
+ AddMultilibIncludeArgs(DriverArgs, CC1Args);
// Implement generic Debian multiarch support.
const StringRef X86_64MultiarchIncludeDirs[] = {
@@ -906,6 +803,11 @@ void Linux::AddCudaIncludeArgs(const ArgList &DriverArgs,
CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
}
+void Linux::AddHIPIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ RocmInstallation.AddHIPIncludeArgs(DriverArgs, CC1Args);
+}
+
void Linux::AddIAMCUIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
if (GCCInstallation.isValid()) {
@@ -944,6 +846,7 @@ SanitizerMask Linux::getSupportedSanitizers() const {
getTriple().getArch() == llvm::Triple::thumb ||
getTriple().getArch() == llvm::Triple::armeb ||
getTriple().getArch() == llvm::Triple::thumbeb;
+ const bool IsSystemZ = getTriple().getArch() == llvm::Triple::systemz;
SanitizerMask Res = ToolChain::getSupportedSanitizers();
Res |= SanitizerKind::Address;
Res |= SanitizerKind::PointerCompare;
@@ -956,7 +859,8 @@ SanitizerMask Linux::getSupportedSanitizers() const {
Res |= SanitizerKind::SafeStack;
if (IsX86_64 || IsMIPS64 || IsAArch64)
Res |= SanitizerKind::DataFlow;
- if (IsX86_64 || IsMIPS64 || IsAArch64 || IsX86 || IsArmArch || IsPowerPC64)
+ if (IsX86_64 || IsMIPS64 || IsAArch64 || IsX86 || IsArmArch || IsPowerPC64 ||
+ IsSystemZ)
Res |= SanitizerKind::Leak;
if (IsX86_64 || IsMIPS64 || IsAArch64 || IsPowerPC64)
Res |= SanitizerKind::Thread;
@@ -976,13 +880,34 @@ SanitizerMask Linux::getSupportedSanitizers() const {
void Linux::addProfileRTLibs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const {
- if (!needsProfileRT(Args)) return;
-
- // Add linker option -u__llvm_runtime_variable to cause runtime
+ // Add linker option -u__llvm_profile_runtime to cause runtime
// initialization module to be linked in.
- if ((!Args.hasArg(options::OPT_coverage)) &&
- (!Args.hasArg(options::OPT_ftest_coverage)))
+ if (needsProfileRT(Args))
CmdArgs.push_back(Args.MakeArgString(
Twine("-u", llvm::getInstrProfRuntimeHookVarName())));
ToolChain::addProfileRTLibs(Args, CmdArgs);
}
+
+llvm::DenormalMode
+Linux::getDefaultDenormalModeForType(const llvm::opt::ArgList &DriverArgs,
+ const JobAction &JA,
+ const llvm::fltSemantics *FPType) const {
+ switch (getTriple().getArch()) {
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64: {
+ std::string Unused;
+ // DAZ and FTZ are turned on in crtfastmath.o
+ if (!DriverArgs.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles) &&
+ isFastMathRuntimeAvailable(DriverArgs, Unused))
+ return llvm::DenormalMode::getPreserveSign();
+ return llvm::DenormalMode::getIEEE();
+ }
+ default:
+ return llvm::DenormalMode::getIEEE();
+ }
+}
+
+void Linux::addExtraOpts(llvm::opt::ArgStringList &CmdArgs) const {
+ for (const auto &Opt : ExtraOpts)
+ CmdArgs.push_back(Opt.c_str());
+}
diff --git a/clang/lib/Driver/ToolChains/Linux.h b/clang/lib/Driver/ToolChains/Linux.h
index f5518eac218a..6b16b0e64990 100644
--- a/clang/lib/Driver/ToolChains/Linux.h
+++ b/clang/lib/Driver/ToolChains/Linux.h
@@ -31,6 +31,8 @@ public:
llvm::opt::ArgStringList &CC1Args) const override;
void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
void AddIAMCUIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
CXXStdlibType GetDefaultCXXStdlibType() const override;
@@ -40,15 +42,22 @@ public:
SanitizerMask getSupportedSanitizers() const override;
void addProfileRTLibs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
- virtual std::string computeSysRoot() const;
+ std::string computeSysRoot() const override;
- virtual std::string getDynamicLinker(const llvm::opt::ArgList &Args) const;
+ std::string getDynamicLinker(const llvm::opt::ArgList &Args) const override;
+
+ void addExtraOpts(llvm::opt::ArgStringList &CmdArgs) const override;
std::vector<std::string> ExtraOpts;
+ llvm::DenormalMode getDefaultDenormalModeForType(
+ const llvm::opt::ArgList &DriverArgs, const JobAction &JA,
+ const llvm::fltSemantics *FPType = nullptr) const override;
+
protected:
Tool *buildAssembler() const override;
Tool *buildLinker() const override;
+ Tool *buildStaticLibTool() const override;
std::string getMultiarchTriple(const Driver &D,
const llvm::Triple &TargetTriple,
diff --git a/clang/lib/Driver/ToolChains/MSP430.cpp b/clang/lib/Driver/ToolChains/MSP430.cpp
index bc77f015915d..b0bc2e014b48 100644
--- a/clang/lib/Driver/ToolChains/MSP430.cpp
+++ b/clang/lib/Driver/ToolChains/MSP430.cpp
@@ -143,7 +143,7 @@ std::string MSP430ToolChain::computeSysRoot() const {
else
llvm::sys::path::append(Dir, getDriver().Dir, "..", getTriple().str());
- return Dir.str();
+ return std::string(Dir.str());
}
void MSP430ToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
@@ -227,6 +227,7 @@ void msp430::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- C.addCommand(std::make_unique<Command>(JA, *this, Args.MakeArgString(Linker),
- CmdArgs, Inputs));
+ C.addCommand(
+ std::make_unique<Command>(JA, *this, ResponseFileSupport::AtFileCurCP(),
+ Args.MakeArgString(Linker), CmdArgs, Inputs));
}
diff --git a/clang/lib/Driver/ToolChains/MSP430.h b/clang/lib/Driver/ToolChains/MSP430.h
index b5308a8dd687..58fd158cd12f 100644
--- a/clang/lib/Driver/ToolChains/MSP430.h
+++ b/clang/lib/Driver/ToolChains/MSP430.h
@@ -44,7 +44,7 @@ protected:
Tool *buildLinker() const override;
private:
- std::string computeSysRoot() const;
+ std::string computeSysRoot() const override;
};
} // end namespace toolchains
@@ -52,10 +52,9 @@ private:
namespace tools {
namespace msp430 {
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC)
- : GnuTool("MSP430::Linker", "msp430-elf-ld", TC) {}
+ Linker(const ToolChain &TC) : Tool("MSP430::Linker", "msp430-elf-ld", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
void ConstructJob(Compilation &C, const JobAction &JA,
diff --git a/clang/lib/Driver/ToolChains/MSVC.cpp b/clang/lib/Driver/ToolChains/MSVC.cpp
index 4e143f6a5d3f..6b3c00e2ab6d 100644
--- a/clang/lib/Driver/ToolChains/MSVC.cpp
+++ b/clang/lib/Driver/ToolChains/MSVC.cpp
@@ -128,13 +128,13 @@ static bool findVCToolChainViaEnvironment(std::string &Path,
llvm::StringRef ParentPath = llvm::sys::path::parent_path(TestPath);
llvm::StringRef ParentFilename = llvm::sys::path::filename(ParentPath);
if (ParentFilename == "VC") {
- Path = ParentPath;
+ Path = std::string(ParentPath);
VSLayout = MSVCToolChain::ToolsetLayout::OlderVS;
return true;
}
if (ParentFilename == "x86ret" || ParentFilename == "x86chk"
|| ParentFilename == "amd64ret" || ParentFilename == "amd64chk") {
- Path = ParentPath;
+ Path = std::string(ParentPath);
VSLayout = MSVCToolChain::ToolsetLayout::DevDivInternal;
return true;
}
@@ -163,7 +163,7 @@ static bool findVCToolChainViaEnvironment(std::string &Path,
for (int i = 0; i < 3; ++i)
ToolChainPath = llvm::sys::path::parent_path(ToolChainPath);
- Path = ToolChainPath;
+ Path = std::string(ToolChainPath);
VSLayout = MSVCToolChain::ToolsetLayout::VS2017OrNewer;
return true;
}
@@ -261,7 +261,7 @@ static bool findVCToolChainViaSetupConfig(std::string &Path,
if (!llvm::sys::fs::is_directory(ToolchainPath))
return false;
- Path = ToolchainPath.str();
+ Path = std::string(ToolchainPath.str());
VSLayout = MSVCToolChain::ToolsetLayout::VS2017OrNewer;
return true;
#endif
@@ -282,7 +282,7 @@ static bool findVCToolChainViaRegistry(std::string &Path,
VSInstallPath.c_str(), VSInstallPath.find(R"(\Common7\IDE)")));
llvm::sys::path::append(VCPath, "VC");
- Path = VCPath.str();
+ Path = std::string(VCPath.str());
VSLayout = MSVCToolChain::ToolsetLayout::OlderVS;
return true;
}
@@ -300,7 +300,8 @@ static std::string FindVisualStudioExecutable(const ToolChain &TC,
SmallString<128> FilePath(MSVC.getSubDirectoryPath(
toolchains::MSVCToolChain::SubDirectoryType::Bin));
llvm::sys::path::append(FilePath, Exe);
- return llvm::sys::fs::can_execute(FilePath) ? FilePath.str() : Exe;
+ return std::string(llvm::sys::fs::can_execute(FilePath) ? FilePath.str()
+ : Exe);
}
void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -349,6 +350,16 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.MakeArgString(std::string("-libpath:") + WindowsSdkLibPath));
}
+ // Add the compiler-rt library directories to libpath if they exist to help
+ // the linker find the various sanitizer, builtin, and profiling runtimes.
+ for (const auto &LibPath : TC.getLibraryPaths()) {
+ if (TC.getVFS().exists(LibPath))
+ CmdArgs.push_back(Args.MakeArgString("-libpath:" + LibPath));
+ }
+ auto CRTPath = TC.getCompilerRTPath();
+ if (TC.getVFS().exists(CRTPath))
+ CmdArgs.push_back(Args.MakeArgString("-libpath:" + CRTPath));
+
if (!C.getDriver().IsCLMode() && Args.hasArg(options::OPT_L))
for (const auto &LibPath : Args.getAllArgValues(options::OPT_L))
CmdArgs.push_back(Args.MakeArgString("-libpath:" + LibPath));
@@ -581,8 +592,9 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
linkPath = TC.GetProgramPath(Linker.str().c_str());
}
- auto LinkCmd = std::make_unique<Command>(
- JA, *this, Args.MakeArgString(linkPath), CmdArgs, Inputs);
+ auto LinkCmd =
+ std::make_unique<Command>(JA, *this, ResponseFileSupport::AtFileUTF16(),
+ Args.MakeArgString(linkPath), CmdArgs, Inputs);
if (!Environment.empty())
LinkCmd->setEnvironment(Environment);
C.addCommand(std::move(LinkCmd));
@@ -722,13 +734,15 @@ std::unique_ptr<Command> visualstudio::Compiler::GetCommand(
CmdArgs.push_back(Fo);
std::string Exec = FindVisualStudioExecutable(getToolChain(), "cl.exe");
- return std::make_unique<Command>(JA, *this, Args.MakeArgString(Exec),
- CmdArgs, Inputs);
+ return std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileUTF16(),
+ Args.MakeArgString(Exec), CmdArgs, Inputs);
}
MSVCToolChain::MSVCToolChain(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
- : ToolChain(D, Triple, Args), CudaInstallation(D, Triple, Args) {
+ : ToolChain(D, Triple, Args), CudaInstallation(D, Triple, Args),
+ RocmInstallation(D, Triple, Args) {
getProgramPaths().push_back(getDriver().getInstalledDir());
if (getDriver().getInstalledDir() != getDriver().Dir)
getProgramPaths().push_back(getDriver().Dir);
@@ -786,8 +800,14 @@ void MSVCToolChain::AddCudaIncludeArgs(const ArgList &DriverArgs,
CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
}
+void MSVCToolChain::AddHIPIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ RocmInstallation.AddHIPIncludeArgs(DriverArgs, CC1Args);
+}
+
void MSVCToolChain::printVerboseInfo(raw_ostream &OS) const {
CudaInstallation.print(OS);
+ RocmInstallation.print(OS);
}
// Windows SDKs and VC Toolchains group their contents into subdirectories based
@@ -892,7 +912,7 @@ MSVCToolChain::getSubDirectoryPath(SubDirectoryType Type,
llvm::sys::path::append(Path, "lib", SubdirName);
break;
}
- return Path.str();
+ return std::string(Path.str());
}
#ifdef _WIN32
@@ -1046,7 +1066,7 @@ static bool getWindows10SDKVersionFromPath(const std::string &SDKPath,
if (!CandidateName.startswith("10."))
continue;
if (CandidateName > SDKVersion)
- SDKVersion = CandidateName;
+ SDKVersion = std::string(CandidateName);
}
return !SDKVersion.empty();
@@ -1129,7 +1149,7 @@ bool MSVCToolChain::getWindowsSDKLibraryPath(std::string &path) const {
}
}
- path = libPath.str();
+ path = std::string(libPath.str());
return true;
}
@@ -1168,7 +1188,7 @@ bool MSVCToolChain::getUniversalCRTLibraryPath(std::string &Path) const {
llvm::SmallString<128> LibPath(UniversalCRTSdkPath);
llvm::sys::path::append(LibPath, "Lib", UCRTVersion, "ucrt", ArchName);
- Path = LibPath.str();
+ Path = std::string(LibPath.str());
return true;
}
@@ -1475,14 +1495,15 @@ static void TranslateDArg(Arg *A, llvm::opt::DerivedArgList &DAL,
return;
}
- std::string NewVal = Val;
+ std::string NewVal = std::string(Val);
NewVal[Hash] = '=';
DAL.AddJoinedArg(A, Opts.getOption(options::OPT_D), NewVal);
}
llvm::opt::DerivedArgList *
MSVCToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
- StringRef BoundArch, Action::OffloadKind) const {
+ StringRef BoundArch,
+ Action::OffloadKind OFK) const {
DerivedArgList *DAL = new DerivedArgList(Args.getBaseArgs());
const OptTable &Opts = getDriver().getOpts();
@@ -1521,7 +1542,8 @@ MSVCToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
} else if (A->getOption().matches(options::OPT_D)) {
// Translate -Dfoo#bar into -Dfoo=bar.
TranslateDArg(A, *DAL, Opts);
- } else {
+ } else if (OFK != Action::OFK_HIP) {
+ // HIP Toolchain translates input args by itself.
DAL->append(A);
}
}
diff --git a/clang/lib/Driver/ToolChains/MSVC.h b/clang/lib/Driver/ToolChains/MSVC.h
index 41a69a82fecf..dba99ed77246 100644
--- a/clang/lib/Driver/ToolChains/MSVC.h
+++ b/clang/lib/Driver/ToolChains/MSVC.h
@@ -9,6 +9,7 @@
#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_MSVC_H
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_MSVC_H
+#include "AMDGPU.h"
#include "Cuda.h"
#include "clang/Basic/DebugInfoOptions.h"
#include "clang/Driver/Compilation.h"
@@ -23,9 +24,7 @@ namespace tools {
namespace visualstudio {
class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC)
- : Tool("visualstudio::Linker", "linker", TC, RF_Full,
- llvm::sys::WEM_UTF16) {}
+ Linker(const ToolChain &TC) : Tool("visualstudio::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -39,8 +38,7 @@ public:
class LLVM_LIBRARY_VISIBILITY Compiler : public Tool {
public:
Compiler(const ToolChain &TC)
- : Tool("visualstudio::Compiler", "compiler", TC, RF_Full,
- llvm::sys::WEM_UTF16) {}
+ : Tool("visualstudio::Compiler", "compiler", TC) {}
bool hasIntegratedAssembler() const override { return true; }
bool hasIntegratedCPP() const override { return true; }
@@ -125,6 +123,9 @@ public:
void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
bool getWindowsSDKLibraryPath(std::string &path) const;
/// Check if Universal CRT should be used if available
bool getUniversalCRTLibraryPath(std::string &path) const;
@@ -155,6 +156,7 @@ private:
std::string VCToolChainPath;
ToolsetLayout VSLayout = ToolsetLayout::OlderVS;
CudaInstallationDetector CudaInstallation;
+ RocmInstallationDetector RocmInstallation;
};
} // end namespace toolchains
diff --git a/clang/lib/Driver/ToolChains/MinGW.cpp b/clang/lib/Driver/ToolChains/MinGW.cpp
index 8f24384e688b..a1a1b413fb6c 100644
--- a/clang/lib/Driver/ToolChains/MinGW.cpp
+++ b/clang/lib/Driver/ToolChains/MinGW.cpp
@@ -18,6 +18,7 @@
#include "llvm/Option/ArgList.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <system_error>
using namespace clang::diag;
@@ -49,7 +50,8 @@ void tools::MinGW::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
if (Args.hasArg(options::OPT_gsplit_dwarf))
SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output,
@@ -198,6 +200,17 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
TC.AddFilePathLibArgs(Args, CmdArgs);
+
+ // Add the compiler-rt library directories if they exist to help
+ // the linker find the various sanitizer, builtin, and profiling runtimes.
+ for (const auto &LibPath : TC.getLibraryPaths()) {
+ if (TC.getVFS().exists(LibPath))
+ CmdArgs.push_back(Args.MakeArgString("-L" + LibPath));
+ }
+ auto CRTPath = TC.getCompilerRTPath();
+ if (TC.getVFS().exists(CRTPath))
+ CmdArgs.push_back(Args.MakeArgString("-L" + CRTPath));
+
AddLinkerInputs(TC, Inputs, Args, CmdArgs, JA);
// TODO: Add profile stuff here
@@ -292,21 +305,25 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-lkernel32");
}
- if (Args.hasArg(options::OPT_static))
+ if (Args.hasArg(options::OPT_static)) {
CmdArgs.push_back("--end-group");
- else
+ } else {
AddLibGCC(Args, CmdArgs);
+ if (!HasWindowsApp)
+ CmdArgs.push_back("-lkernel32");
+ }
}
if (!Args.hasArg(options::OPT_nostartfiles)) {
// Add crtfastmath.o if available and fast math is enabled.
- TC.AddFastMathRuntimeIfAvailable(Args, CmdArgs);
+ TC.addFastMathRuntimeIfAvailable(Args, CmdArgs);
CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crtend.o")));
}
}
const char *Exec = Args.MakeArgString(TC.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
}
// Simplified from Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple.
@@ -323,7 +340,7 @@ static bool findGccVersion(StringRef LibDir, std::string &GccLibDir,
continue;
if (CandidateVersion <= Version)
continue;
- Ver = VersionText;
+ Ver = std::string(VersionText);
GccLibDir = LI->path();
}
return Ver.size();
@@ -335,7 +352,7 @@ void toolchains::MinGW::findGccLibDir() {
Archs[0] += "-w64-mingw32";
Archs.emplace_back("mingw32");
if (Arch.empty())
- Arch = Archs[0].str();
+ Arch = std::string(Archs[0].str());
// lib: Arch Linux, Ubuntu, Windows
// lib64: openSUSE Linux
for (StringRef CandidateLib : {"lib", "lib64"}) {
@@ -343,7 +360,7 @@ void toolchains::MinGW::findGccLibDir() {
llvm::SmallString<1024> LibDir(Base);
llvm::sys::path::append(LibDir, CandidateLib, "gcc", CandidateArch);
if (findGccVersion(LibDir, GccLibDir, Ver)) {
- Arch = CandidateArch;
+ Arch = std::string(CandidateArch);
return;
}
}
@@ -372,7 +389,7 @@ llvm::ErrorOr<std::string> toolchains::MinGW::findClangRelativeSysroot() {
StringRef Sep = llvm::sys::path::get_separator();
for (StringRef CandidateSubdir : Subdirs) {
if (llvm::sys::fs::is_directory(ClangRoot + Sep + CandidateSubdir)) {
- Arch = CandidateSubdir;
+ Arch = std::string(CandidateSubdir);
return (ClangRoot + Sep + CandidateSubdir).str();
}
}
@@ -381,7 +398,8 @@ llvm::ErrorOr<std::string> toolchains::MinGW::findClangRelativeSysroot() {
toolchains::MinGW::MinGW(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
- : ToolChain(D, Triple, Args), CudaInstallation(D, Triple, Args) {
+ : ToolChain(D, Triple, Args), CudaInstallation(D, Triple, Args),
+ RocmInstallation(D, Triple, Args) {
getProgramPaths().push_back(getDriver().getInstalledDir());
if (getDriver().SysRoot.size())
@@ -389,12 +407,13 @@ toolchains::MinGW::MinGW(const Driver &D, const llvm::Triple &Triple,
// Look for <clang-bin>/../<triplet>; if found, use <clang-bin>/.. as the
// base as it could still be a base for a gcc setup with libgcc.
else if (llvm::ErrorOr<std::string> TargetSubdir = findClangRelativeSysroot())
- Base = llvm::sys::path::parent_path(TargetSubdir.get());
+ Base = std::string(llvm::sys::path::parent_path(TargetSubdir.get()));
else if (llvm::ErrorOr<std::string> GPPName = findGcc())
- Base = llvm::sys::path::parent_path(
- llvm::sys::path::parent_path(GPPName.get()));
+ Base = std::string(llvm::sys::path::parent_path(
+ llvm::sys::path::parent_path(GPPName.get())));
else
- Base = llvm::sys::path::parent_path(getDriver().getInstalledDir());
+ Base = std::string(
+ llvm::sys::path::parent_path(getDriver().getInstalledDir()));
Base += llvm::sys::path::get_separator();
findGccLibDir();
@@ -482,8 +501,14 @@ void toolchains::MinGW::AddCudaIncludeArgs(const ArgList &DriverArgs,
CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
}
+void toolchains::MinGW::AddHIPIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ RocmInstallation.AddHIPIncludeArgs(DriverArgs, CC1Args);
+}
+
void toolchains::MinGW::printVerboseInfo(raw_ostream &OS) const {
CudaInstallation.print(OS);
+ RocmInstallation.print(OS);
}
// Include directories for various hosts:
diff --git a/clang/lib/Driver/ToolChains/MinGW.h b/clang/lib/Driver/ToolChains/MinGW.h
index 6752a405be87..2f1559fcf34c 100644
--- a/clang/lib/Driver/ToolChains/MinGW.h
+++ b/clang/lib/Driver/ToolChains/MinGW.h
@@ -11,8 +11,10 @@
#include "Cuda.h"
#include "Gnu.h"
+#include "ROCm.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
+#include "llvm/Support/ErrorOr.h"
namespace clang {
namespace driver {
@@ -34,8 +36,7 @@ public:
class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC)
- : Tool("MinGW::Linker", "linker", TC, Tool::RF_Full) {}
+ Linker(const ToolChain &TC) : Tool("MinGW::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -81,6 +82,8 @@ public:
void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
void printVerboseInfo(raw_ostream &OS) const override;
@@ -91,6 +94,7 @@ protected:
private:
CudaInstallationDetector CudaInstallation;
+ RocmInstallationDetector RocmInstallation;
std::string Base;
std::string GccLibDir;
diff --git a/clang/lib/Driver/ToolChains/Minix.cpp b/clang/lib/Driver/ToolChains/Minix.cpp
index 6947049ea52e..d0314795620c 100644
--- a/clang/lib/Driver/ToolChains/Minix.cpp
+++ b/clang/lib/Driver/ToolChains/Minix.cpp
@@ -36,7 +36,8 @@ void tools::minix::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
void tools::minix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -88,7 +89,8 @@ void tools::minix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
/// Minix - Minix tool chain which can call as(1) and ld(1) directly.
diff --git a/clang/lib/Driver/ToolChains/Minix.h b/clang/lib/Driver/ToolChains/Minix.h
index 1ed6acebab9c..af8d59c5085a 100644
--- a/clang/lib/Driver/ToolChains/Minix.h
+++ b/clang/lib/Driver/ToolChains/Minix.h
@@ -18,10 +18,9 @@ namespace driver {
namespace tools {
/// minix -- Directly call GNU Binutils assembler and linker
namespace minix {
-class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
public:
- Assembler(const ToolChain &TC)
- : GnuTool("minix::Assembler", "assembler", TC) {}
+ Assembler(const ToolChain &TC) : Tool("minix::Assembler", "assembler", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -31,9 +30,9 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("minix::Linker", "linker", TC) {}
+ Linker(const ToolChain &TC) : Tool("minix::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
diff --git a/clang/lib/Driver/ToolChains/MipsLinux.cpp b/clang/lib/Driver/ToolChains/MipsLinux.cpp
index cfda7f4bb4df..41b7b839f3b3 100644
--- a/clang/lib/Driver/ToolChains/MipsLinux.cpp
+++ b/clang/lib/Driver/ToolChains/MipsLinux.cpp
@@ -136,5 +136,5 @@ std::string MipsLLVMToolChain::getCompilerRT(const ArgList &Args,
}
llvm::sys::path::append(
Path, Twine("libclang_rt." + Component + "-" + "mips" + Suffix));
- return Path.str();
+ return std::string(Path.str());
}
diff --git a/clang/lib/Driver/ToolChains/Myriad.cpp b/clang/lib/Driver/ToolChains/Myriad.cpp
index 2ce0f13ce3d1..84fe4748b6fa 100644
--- a/clang/lib/Driver/ToolChains/Myriad.cpp
+++ b/clang/lib/Driver/ToolChains/Myriad.cpp
@@ -77,8 +77,9 @@ void tools::SHAVE::Compiler::ConstructJob(Compilation &C, const JobAction &JA,
std::string Exec =
Args.MakeArgString(getToolChain().GetProgramPath("moviCompile"));
- C.addCommand(std::make_unique<Command>(JA, *this, Args.MakeArgString(Exec),
- CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Args.MakeArgString(Exec), CmdArgs,
+ Inputs));
}
void tools::SHAVE::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
@@ -112,8 +113,9 @@ void tools::SHAVE::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
std::string Exec =
Args.MakeArgString(getToolChain().GetProgramPath("moviAsm"));
- C.addCommand(std::make_unique<Command>(JA, *this, Args.MakeArgString(Exec),
- CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Args.MakeArgString(Exec), CmdArgs,
+ Inputs));
}
void tools::Myriad::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -198,8 +200,9 @@ void tools::Myriad::Linker::ConstructJob(Compilation &C, const JobAction &JA,
std::string Exec =
Args.MakeArgString(TC.GetProgramPath("sparc-myriad-rtems-ld"));
- C.addCommand(std::make_unique<Command>(JA, *this, Args.MakeArgString(Exec),
- CmdArgs, Inputs));
+ C.addCommand(
+ std::make_unique<Command>(JA, *this, ResponseFileSupport::AtFileCurCP(),
+ Args.MakeArgString(Exec), CmdArgs, Inputs));
}
MyriadToolChain::MyriadToolChain(const Driver &D, const llvm::Triple &Triple,
diff --git a/clang/lib/Driver/ToolChains/Myriad.h b/clang/lib/Driver/ToolChains/Myriad.h
index 9f5225fbc62c..cae574bdcfea 100644
--- a/clang/lib/Driver/ToolChains/Myriad.h
+++ b/clang/lib/Driver/ToolChains/Myriad.h
@@ -49,9 +49,9 @@ public:
/// whereas the linker, which accepts code for a mixture of Sparc and SHAVE,
/// is in the Myriad namespace.
namespace Myriad {
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("shave::Linker", "ld", TC) {}
+ Linker(const ToolChain &TC) : Tool("shave::Linker", "ld", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
void ConstructJob(Compilation &C, const JobAction &JA,
diff --git a/clang/lib/Driver/ToolChains/NaCl.cpp b/clang/lib/Driver/ToolChains/NaCl.cpp
index 97241c884027..15a773675299 100644
--- a/clang/lib/Driver/ToolChains/NaCl.cpp
+++ b/clang/lib/Driver/ToolChains/NaCl.cpp
@@ -193,7 +193,8 @@ void nacltools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
/// NaCl Toolchain
diff --git a/clang/lib/Driver/ToolChains/NaCl.h b/clang/lib/Driver/ToolChains/NaCl.h
index ab243f8087bb..5e5fdb583bb6 100644
--- a/clang/lib/Driver/ToolChains/NaCl.h
+++ b/clang/lib/Driver/ToolChains/NaCl.h
@@ -27,9 +27,9 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("NaCl::Linker", "linker", TC) {}
+ Linker(const ToolChain &TC) : Tool("NaCl::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
diff --git a/clang/lib/Driver/ToolChains/NetBSD.cpp b/clang/lib/Driver/ToolChains/NetBSD.cpp
index 0100a387d6c3..253ee6ce0f72 100644
--- a/clang/lib/Driver/ToolChains/NetBSD.cpp
+++ b/clang/lib/Driver/ToolChains/NetBSD.cpp
@@ -103,7 +103,8 @@ void netbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString((getToolChain().GetProgramPath("as")));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -337,7 +338,8 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
ToolChain.addProfileRTLibs(Args, CmdArgs);
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
/// NetBSD - NetBSD tool chain which can call as(1) and ld(1) directly.
diff --git a/clang/lib/Driver/ToolChains/NetBSD.h b/clang/lib/Driver/ToolChains/NetBSD.h
index 6d404263f625..8348554fd149 100644
--- a/clang/lib/Driver/ToolChains/NetBSD.h
+++ b/clang/lib/Driver/ToolChains/NetBSD.h
@@ -19,10 +19,9 @@ namespace tools {
/// netbsd -- Directly call GNU Binutils assembler and linker
namespace netbsd {
-class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
public:
- Assembler(const ToolChain &TC)
- : GnuTool("netbsd::Assembler", "assembler", TC) {}
+ Assembler(const ToolChain &TC) : Tool("netbsd::Assembler", "assembler", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -32,9 +31,9 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("netbsd::Linker", "linker", TC) {}
+ Linker(const ToolChain &TC) : Tool("netbsd::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
diff --git a/clang/lib/Driver/ToolChains/OpenBSD.cpp b/clang/lib/Driver/ToolChains/OpenBSD.cpp
index 80343c0394cb..9c1a9c5f8228 100644
--- a/clang/lib/Driver/ToolChains/OpenBSD.cpp
+++ b/clang/lib/Driver/ToolChains/OpenBSD.cpp
@@ -89,7 +89,8 @@ void openbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -227,7 +228,8 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs));
}
SanitizerMask OpenBSD::getSupportedSanitizers() const {
diff --git a/clang/lib/Driver/ToolChains/OpenBSD.h b/clang/lib/Driver/ToolChains/OpenBSD.h
index 9f1ee0f66402..897eee57ab68 100644
--- a/clang/lib/Driver/ToolChains/OpenBSD.h
+++ b/clang/lib/Driver/ToolChains/OpenBSD.h
@@ -19,10 +19,10 @@ namespace tools {
/// openbsd -- Directly call GNU Binutils assembler and linker
namespace openbsd {
-class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
public:
Assembler(const ToolChain &TC)
- : GnuTool("openbsd::Assembler", "assembler", TC) {}
+ : Tool("openbsd::Assembler", "assembler", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -32,9 +32,9 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("openbsd::Linker", "linker", TC) {}
+ Linker(const ToolChain &TC) : Tool("openbsd::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
diff --git a/clang/lib/Driver/ToolChains/PS4CPU.cpp b/clang/lib/Driver/ToolChains/PS4CPU.cpp
index 4e8840296205..6dc81899cbaa 100644
--- a/clang/lib/Driver/ToolChains/PS4CPU.cpp
+++ b/clang/lib/Driver/ToolChains/PS4CPU.cpp
@@ -30,13 +30,17 @@ void tools::PS4cpu::addProfileRTArgs(const ToolChain &TC, const ArgList &Args,
if ((Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
false) ||
Args.hasFlag(options::OPT_fprofile_generate,
- options::OPT_fno_profile_instr_generate, false) ||
+ options::OPT_fno_profile_generate, false) ||
Args.hasFlag(options::OPT_fprofile_generate_EQ,
- options::OPT_fno_profile_instr_generate, false) ||
+ options::OPT_fno_profile_generate, false) ||
Args.hasFlag(options::OPT_fprofile_instr_generate,
options::OPT_fno_profile_instr_generate, false) ||
Args.hasFlag(options::OPT_fprofile_instr_generate_EQ,
options::OPT_fno_profile_instr_generate, false) ||
+ Args.hasFlag(options::OPT_fcs_profile_generate,
+ options::OPT_fno_profile_generate, false) ||
+ Args.hasFlag(options::OPT_fcs_profile_generate_EQ,
+ options::OPT_fno_profile_generate, false) ||
Args.hasArg(options::OPT_fcreate_profile) ||
Args.hasArg(options::OPT_coverage)))
CmdArgs.push_back("--dependent-lib=libclang_rt.profile-x86_64.a");
@@ -62,7 +66,8 @@ void tools::PS4cpu::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("orbis-as"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
}
static void AddPS4SanitizerArgs(const ToolChain &TC, ArgStringList &CmdArgs) {
@@ -84,13 +89,13 @@ void tools::PS4cpu::addSanitizerArgs(const ToolChain &TC,
CmdArgs.push_back("--dependent-lib=libSceDbgAddressSanitizer_stub_weak.a");
}
-static void ConstructPS4LinkJob(const Tool &T, Compilation &C,
- const JobAction &JA, const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) {
+void tools::PS4cpu::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
const toolchains::FreeBSD &ToolChain =
- static_cast<const toolchains::FreeBSD &>(T.getToolChain());
+ static_cast<const toolchains::FreeBSD &>(getToolChain());
const Driver &D = ToolChain.getDriver();
ArgStringList CmdArgs;
@@ -139,216 +144,16 @@ static void ConstructPS4LinkJob(const Tool &T, Compilation &C,
CmdArgs.push_back("-lpthread");
}
- const char *Exec = Args.MakeArgString(ToolChain.GetProgramPath("orbis-ld"));
-
- C.addCommand(std::make_unique<Command>(JA, T, Exec, CmdArgs, Inputs));
-}
-
-static void ConstructGoldLinkJob(const Tool &T, Compilation &C,
- const JobAction &JA, const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) {
- const toolchains::FreeBSD &ToolChain =
- static_cast<const toolchains::FreeBSD &>(T.getToolChain());
- const Driver &D = ToolChain.getDriver();
- ArgStringList CmdArgs;
-
- // Silence warning for "clang -g foo.o -o foo"
- Args.ClaimAllArgs(options::OPT_g_Group);
- // and "clang -emit-llvm foo.o -o foo"
- Args.ClaimAllArgs(options::OPT_emit_llvm);
- // and for "clang -w foo.o -o foo". Other warning options are already
- // handled somewhere else.
- Args.ClaimAllArgs(options::OPT_w);
-
- if (!D.SysRoot.empty())
- CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
-
- if (Args.hasArg(options::OPT_pie))
- CmdArgs.push_back("-pie");
-
- if (Args.hasArg(options::OPT_static)) {
- CmdArgs.push_back("-Bstatic");
- } else {
- if (Args.hasArg(options::OPT_rdynamic))
- CmdArgs.push_back("-export-dynamic");
- CmdArgs.push_back("--eh-frame-hdr");
- if (Args.hasArg(options::OPT_shared)) {
- CmdArgs.push_back("-Bshareable");
- } else {
- CmdArgs.push_back("-dynamic-linker");
- CmdArgs.push_back("/libexec/ld-elf.so.1");
- }
- CmdArgs.push_back("--enable-new-dtags");
- }
-
- if (Output.isFilename()) {
- CmdArgs.push_back("-o");
- CmdArgs.push_back(Output.getFilename());
- } else {
- assert(Output.isNothing() && "Invalid output.");
- }
-
- if(!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs))
- AddPS4SanitizerArgs(ToolChain, CmdArgs);
-
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
- const char *crt1 = nullptr;
- if (!Args.hasArg(options::OPT_shared)) {
- if (Args.hasArg(options::OPT_pg))
- crt1 = "gcrt1.o";
- else if (Args.hasArg(options::OPT_pie))
- crt1 = "Scrt1.o";
- else
- crt1 = "crt1.o";
- }
- if (crt1)
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crt1)));
-
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
-
- const char *crtbegin = nullptr;
- if (Args.hasArg(options::OPT_static))
- crtbegin = "crtbeginT.o";
- else if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
- crtbegin = "crtbeginS.o";
- else
- crtbegin = "crtbegin.o";
-
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
- }
-
- Args.AddAllArgs(CmdArgs, options::OPT_L);
- ToolChain.AddFilePathLibArgs(Args, CmdArgs);
- Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
- Args.AddAllArgs(CmdArgs, options::OPT_e);
- Args.AddAllArgs(CmdArgs, options::OPT_s);
- Args.AddAllArgs(CmdArgs, options::OPT_t);
- Args.AddAllArgs(CmdArgs, options::OPT_r);
-
- if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
- CmdArgs.push_back("--no-demangle");
-
- AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
-
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
- // For PS4, we always want to pass libm, libstdc++ and libkernel
- // libraries for both C and C++ compilations.
- CmdArgs.push_back("-lkernel");
- if (D.CCCIsCXX()) {
- if (ToolChain.ShouldLinkCXXStdlib(Args))
- ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
- if (Args.hasArg(options::OPT_pg))
- CmdArgs.push_back("-lm_p");
- else
- CmdArgs.push_back("-lm");
- }
- // FIXME: For some reason GCC passes -lgcc and -lgcc_s before adding
- // the default system libraries. Just mimic this for now.
- if (Args.hasArg(options::OPT_pg))
- CmdArgs.push_back("-lgcc_p");
- else
- CmdArgs.push_back("-lcompiler_rt");
- if (Args.hasArg(options::OPT_static)) {
- CmdArgs.push_back("-lstdc++");
- } else if (Args.hasArg(options::OPT_pg)) {
- CmdArgs.push_back("-lgcc_eh_p");
- } else {
- CmdArgs.push_back("--as-needed");
- CmdArgs.push_back("-lstdc++");
- CmdArgs.push_back("--no-as-needed");
- }
-
- if (Args.hasArg(options::OPT_pthread)) {
- if (Args.hasArg(options::OPT_pg))
- CmdArgs.push_back("-lpthread_p");
- else
- CmdArgs.push_back("-lpthread");
- }
-
- if (Args.hasArg(options::OPT_pg)) {
- if (Args.hasArg(options::OPT_shared))
- CmdArgs.push_back("-lc");
- else {
- if (Args.hasArg(options::OPT_static)) {
- CmdArgs.push_back("--start-group");
- CmdArgs.push_back("-lc_p");
- CmdArgs.push_back("-lpthread_p");
- CmdArgs.push_back("--end-group");
- } else {
- CmdArgs.push_back("-lc_p");
- }
- }
- CmdArgs.push_back("-lgcc_p");
- } else {
- if (Args.hasArg(options::OPT_static)) {
- CmdArgs.push_back("--start-group");
- CmdArgs.push_back("-lc");
- CmdArgs.push_back("-lpthread");
- CmdArgs.push_back("--end-group");
- } else {
- CmdArgs.push_back("-lc");
- }
- CmdArgs.push_back("-lcompiler_rt");
- }
-
- if (Args.hasArg(options::OPT_static)) {
- CmdArgs.push_back("-lstdc++");
- } else if (Args.hasArg(options::OPT_pg)) {
- CmdArgs.push_back("-lgcc_eh_p");
- } else {
- CmdArgs.push_back("--as-needed");
- CmdArgs.push_back("-lstdc++");
- CmdArgs.push_back("--no-as-needed");
- }
- }
-
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
- if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtendS.o")));
- else
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtend.o")));
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
+ if (Args.hasArg(options::OPT_fuse_ld_EQ)) {
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << "-fuse-ld" << getToolChain().getTriple().str();
}
const char *Exec =
-#ifdef _WIN32
- Args.MakeArgString(ToolChain.GetProgramPath("orbis-ld.gold"));
-#else
Args.MakeArgString(ToolChain.GetProgramPath("orbis-ld"));
-#endif
-
- C.addCommand(std::make_unique<Command>(JA, T, Exec, CmdArgs, Inputs));
-}
-
-void tools::PS4cpu::Link::ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) const {
- const toolchains::FreeBSD &ToolChain =
- static_cast<const toolchains::FreeBSD &>(getToolChain());
- const Driver &D = ToolChain.getDriver();
- bool PS4Linker;
- StringRef LinkerOptName;
- if (const Arg *A = Args.getLastArg(options::OPT_fuse_ld_EQ)) {
- LinkerOptName = A->getValue();
- if (LinkerOptName != "ps4" && LinkerOptName != "gold")
- D.Diag(diag::err_drv_unsupported_linker) << LinkerOptName;
- }
- if (LinkerOptName == "gold")
- PS4Linker = false;
- else if (LinkerOptName == "ps4")
- PS4Linker = true;
- else
- PS4Linker = !Args.hasArg(options::OPT_shared);
-
- if (PS4Linker)
- ConstructPS4LinkJob(*this, C, JA, Output, Inputs, Args, LinkingOutput);
- else
- ConstructGoldLinkJob(*this, C, JA, Output, Inputs, Args, LinkingOutput);
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs));
}
toolchains::PS4CPU::PS4CPU(const Driver &D, const llvm::Triple &Triple,
@@ -382,7 +187,7 @@ toolchains::PS4CPU::PS4CPU(const Driver &D, const llvm::Triple &Triple,
if (!llvm::sys::fs::exists(PrefixDir))
getDriver().Diag(clang::diag::warn_missing_sysroot) << PrefixDir;
} else
- PrefixDir = PS4SDKDir.str();
+ PrefixDir = std::string(PS4SDKDir.str());
SmallString<512> PS4SDKIncludeDir(PrefixDir);
llvm::sys::path::append(PS4SDKIncludeDir, "target/include");
@@ -407,7 +212,7 @@ toolchains::PS4CPU::PS4CPU(const Driver &D, const llvm::Triple &Triple,
<< "PS4 system libraries" << PS4SDKLibDir;
return;
}
- getFilePaths().push_back(PS4SDKLibDir.str());
+ getFilePaths().push_back(std::string(PS4SDKLibDir.str()));
}
Tool *toolchains::PS4CPU::buildAssembler() const {
@@ -430,3 +235,17 @@ SanitizerMask toolchains::PS4CPU::getSupportedSanitizers() const {
Res |= SanitizerKind::Vptr;
return Res;
}
+
+void toolchains::PS4CPU::addClangTargetOptions(
+ const ArgList &DriverArgs,
+ ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadingKind) const {
+ // PS4 does not use init arrays.
+ if (DriverArgs.hasArg(options::OPT_fuse_init_array)) {
+ Arg *A = DriverArgs.getLastArg(options::OPT_fuse_init_array);
+ getDriver().Diag(clang::diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(DriverArgs) << getTriple().str();
+ }
+
+ CC1Args.push_back("-fno-use-init-array");
+}
diff --git a/clang/lib/Driver/ToolChains/PS4CPU.h b/clang/lib/Driver/ToolChains/PS4CPU.h
index 18852b2808cb..968be015d411 100644
--- a/clang/lib/Driver/ToolChains/PS4CPU.h
+++ b/clang/lib/Driver/ToolChains/PS4CPU.h
@@ -26,8 +26,7 @@ void addSanitizerArgs(const ToolChain &TC, llvm::opt::ArgStringList &CmdArgs);
class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
public:
- Assemble(const ToolChain &TC)
- : Tool("PS4cpu::Assemble", "assembler", TC, RF_Full) {}
+ Assemble(const ToolChain &TC) : Tool("PS4cpu::Assemble", "assembler", TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -40,7 +39,7 @@ public:
class LLVM_LIBRARY_VISIBILITY Link : public Tool {
public:
- Link(const ToolChain &TC) : Tool("PS4cpu::Link", "linker", TC, RF_Full) {}
+ Link(const ToolChain &TC) : Tool("PS4cpu::Link", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -88,6 +87,20 @@ public:
// capable of unit splitting.
bool canSplitThinLTOUnit() const override { return false; }
+ void addClangTargetOptions(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadingKind) const override;
+
+ llvm::DenormalMode getDefaultDenormalModeForType(
+ const llvm::opt::ArgList &DriverArgs, const JobAction &JA,
+ const llvm::fltSemantics *FPType) const override {
+ // DAZ and FTZ are on by default.
+ return llvm::DenormalMode::getPreserveSign();
+ }
+
+ bool useRelaxRelocations() const override { return true; }
+
protected:
Tool *buildAssembler() const override;
Tool *buildLinker() const override;
diff --git a/clang/lib/Driver/ToolChains/RISCVToolchain.cpp b/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
index ddc329e3c722..cc912d94cb92 100644
--- a/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
+++ b/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
@@ -119,7 +119,7 @@ std::string RISCVToolChain::computeSysRoot() const {
if (!llvm::sys::fs::exists(SysRootDir))
return std::string();
- return SysRootDir.str();
+ return std::string(SysRootDir.str());
}
void RISCV::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -142,7 +142,7 @@ void RISCV::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("elf32lriscv");
}
- std::string Linker = getToolChain().GetProgramPath(getShortName());
+ std::string Linker = getToolChain().GetLinkerPath();
bool WantCRTs =
!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles);
@@ -191,7 +191,8 @@ void RISCV::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- C.addCommand(std::make_unique<Command>(JA, *this, Args.MakeArgString(Linker),
- CmdArgs, Inputs));
+ C.addCommand(
+ std::make_unique<Command>(JA, *this, ResponseFileSupport::AtFileCurCP(),
+ Args.MakeArgString(Linker), CmdArgs, Inputs));
}
// RISCV tools end.
diff --git a/clang/lib/Driver/ToolChains/RISCVToolchain.h b/clang/lib/Driver/ToolChains/RISCVToolchain.h
index bb7f64849bcb..4734aee5f1ab 100644
--- a/clang/lib/Driver/ToolChains/RISCVToolchain.h
+++ b/clang/lib/Driver/ToolChains/RISCVToolchain.h
@@ -39,16 +39,16 @@ protected:
Tool *buildLinker() const override;
private:
- std::string computeSysRoot() const;
+ std::string computeSysRoot() const override;
};
} // end namespace toolchains
namespace tools {
namespace RISCV {
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const ToolChain &TC) : GnuTool("RISCV::Linker", "ld", TC) {}
+ Linker(const ToolChain &TC) : Tool("RISCV::Linker", "ld", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
void ConstructJob(Compilation &C, const JobAction &JA,
diff --git a/clang/lib/Driver/ToolChains/ROCm.h b/clang/lib/Driver/ToolChains/ROCm.h
new file mode 100644
index 000000000000..962c72fedfe0
--- /dev/null
+++ b/clang/lib/Driver/ToolChains/ROCm.h
@@ -0,0 +1,228 @@
+//===--- ROCm.h - ROCm installation detector --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ROCM_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ROCM_H
+
+#include "clang/Basic/Cuda.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Options.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Support/VersionTuple.h"
+
+namespace clang {
+namespace driver {
+
+/// A class to find a viable ROCM installation
+/// TODO: Generalize to handle libclc.
+class RocmInstallationDetector {
+private:
+ struct ConditionalLibrary {
+ SmallString<0> On;
+ SmallString<0> Off;
+
+ bool isValid() const { return !On.empty() && !Off.empty(); }
+
+ StringRef get(bool Enabled) const {
+ assert(isValid());
+ return Enabled ? On : Off;
+ }
+ };
+
+ // Installation path candidate.
+ struct Candidate {
+ llvm::SmallString<0> Path;
+ bool StrictChecking;
+
+ Candidate(std::string Path, bool StrictChecking = false)
+ : Path(Path), StrictChecking(StrictChecking) {}
+ };
+
+ const Driver &D;
+ bool HasHIPRuntime = false;
+ bool HasDeviceLibrary = false;
+
+ // Default version if not detected or specified.
+ const unsigned DefaultVersionMajor = 3;
+ const unsigned DefaultVersionMinor = 5;
+ const char *DefaultVersionPatch = "0";
+
+ // The version string in Major.Minor.Patch format.
+ std::string DetectedVersion;
+ // Version containing major and minor.
+ llvm::VersionTuple VersionMajorMinor;
+ // Version containing patch.
+ std::string VersionPatch;
+
+ // ROCm path specified by --rocm-path.
+ StringRef RocmPathArg;
+ // ROCm device library paths specified by --rocm-device-lib-path.
+ std::vector<std::string> RocmDeviceLibPathArg;
+ // HIP version specified by --hip-version.
+ StringRef HIPVersionArg;
+ // Whether -nogpulib is specified.
+ bool NoBuiltinLibs = false;
+
+ // Paths
+ SmallString<0> InstallPath;
+ SmallString<0> BinPath;
+ SmallString<0> LibPath;
+ SmallString<0> LibDevicePath;
+ SmallString<0> IncludePath;
+ llvm::StringMap<std::string> LibDeviceMap;
+
+ // Libraries that are always linked.
+ SmallString<0> OCML;
+ SmallString<0> OCKL;
+
+ // Libraries that are always linked depending on the language
+ SmallString<0> OpenCL;
+ SmallString<0> HIP;
+
+ // Libraries swapped based on compile flags.
+ ConditionalLibrary WavefrontSize64;
+ ConditionalLibrary FiniteOnly;
+ ConditionalLibrary UnsafeMath;
+ ConditionalLibrary DenormalsAreZero;
+ ConditionalLibrary CorrectlyRoundedSqrt;
+
+ bool allGenericLibsValid() const {
+ return !OCML.empty() && !OCKL.empty() && !OpenCL.empty() && !HIP.empty() &&
+ WavefrontSize64.isValid() && FiniteOnly.isValid() &&
+ UnsafeMath.isValid() && DenormalsAreZero.isValid() &&
+ CorrectlyRoundedSqrt.isValid();
+ }
+
+ // GPU architectures for which we have raised an error in
+ // CheckRocmVersionSupportsArch.
+ mutable llvm::SmallSet<CudaArch, 4> ArchsWithBadVersion;
+
+ void scanLibDevicePath(llvm::StringRef Path);
+ void ParseHIPVersionFile(llvm::StringRef V);
+ SmallVector<Candidate, 4> getInstallationPathCandidates();
+
+public:
+ RocmInstallationDetector(const Driver &D, const llvm::Triple &HostTriple,
+ const llvm::opt::ArgList &Args,
+ bool DetectHIPRuntime = true,
+ bool DetectDeviceLib = false);
+
+ /// Add arguments needed to link default bitcode libraries.
+ void addCommonBitcodeLibCC1Args(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ StringRef LibDeviceFile, bool Wave64,
+ bool DAZ, bool FiniteOnly, bool UnsafeMathOpt,
+ bool FastRelaxedMath, bool CorrectSqrt) const;
+
+ /// Emit an error if Version does not support the given Arch.
+ ///
+ /// If either Version or Arch is unknown, does not emit an error. Emits at
+ /// most one error per Arch.
+ void CheckRocmVersionSupportsArch(CudaArch Arch) const;
+
+ /// Check whether we detected a valid HIP runtime.
+ bool hasHIPRuntime() const { return HasHIPRuntime; }
+
+ /// Check whether we detected a valid ROCm device library.
+ bool hasDeviceLibrary() const { return HasDeviceLibrary; }
+
+ /// Print information about the detected ROCm installation.
+ void print(raw_ostream &OS) const;
+
+ /// Get the detected Rocm install's version.
+ // RocmVersion version() const { return Version; }
+
+ /// Get the detected Rocm installation path.
+ StringRef getInstallPath() const { return InstallPath; }
+
+ /// Get the detected path to Rocm's bin directory.
+ // StringRef getBinPath() const { return BinPath; }
+
+ /// Get the detected Rocm Include path.
+ StringRef getIncludePath() const { return IncludePath; }
+
+ /// Get the detected Rocm library path.
+ StringRef getLibPath() const { return LibPath; }
+
+ /// Get the detected Rocm device library path.
+ StringRef getLibDevicePath() const { return LibDevicePath; }
+
+ StringRef getOCMLPath() const {
+ assert(!OCML.empty());
+ return OCML;
+ }
+
+ StringRef getOCKLPath() const {
+ assert(!OCKL.empty());
+ return OCKL;
+ }
+
+ StringRef getOpenCLPath() const {
+ assert(!OpenCL.empty());
+ return OpenCL;
+ }
+
+ StringRef getHIPPath() const {
+ assert(!HIP.empty());
+ return HIP;
+ }
+
+ StringRef getWavefrontSize64Path(bool Enabled) const {
+ return WavefrontSize64.get(Enabled);
+ }
+
+ StringRef getFiniteOnlyPath(bool Enabled) const {
+ return FiniteOnly.get(Enabled);
+ }
+
+ StringRef getUnsafeMathPath(bool Enabled) const {
+ return UnsafeMath.get(Enabled);
+ }
+
+ StringRef getDenormalsAreZeroPath(bool Enabled) const {
+ return DenormalsAreZero.get(Enabled);
+ }
+
+ StringRef getCorrectlyRoundedSqrtPath(bool Enabled) const {
+ return CorrectlyRoundedSqrt.get(Enabled);
+ }
+
+ /// Get libdevice file for given architecture
+ std::string getLibDeviceFile(StringRef Gpu) const {
+ return LibDeviceMap.lookup(Gpu);
+ }
+
+ void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const;
+
+ void detectDeviceLibrary();
+ void detectHIPRuntime();
+
+ /// Get the values for --rocm-device-lib-path arguments
+ std::vector<std::string> getRocmDeviceLibPathArg() const {
+ return RocmDeviceLibPathArg;
+ }
+
+ /// Get the value for --rocm-path argument
+ StringRef getRocmPathArg() const { return RocmPathArg; }
+
+ /// Get the value for --hip-version argument
+ StringRef getHIPVersionArg() const { return HIPVersionArg; }
+
+ std::string getHIPVersion() const { return DetectedVersion; }
+};
+
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ROCM_H
diff --git a/clang/lib/Driver/ToolChains/Solaris.cpp b/clang/lib/Driver/ToolChains/Solaris.cpp
index fc4e2cf151ef..b8fdc87478bc 100644
--- a/clang/lib/Driver/ToolChains/Solaris.cpp
+++ b/clang/lib/Driver/ToolChains/Solaris.cpp
@@ -41,7 +41,8 @@ void solaris::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
}
void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -150,7 +151,8 @@ void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
getToolChain().addProfileRTLibs(Args, CmdArgs);
const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
}
static StringRef getSolarisLibSuffix(const llvm::Triple &Triple) {
@@ -244,7 +246,7 @@ void Solaris::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
CIncludeDirs.split(dirs, ":");
for (StringRef dir : dirs) {
StringRef Prefix =
- llvm::sys::path::is_absolute(dir) ? StringRef(D.SysRoot) : "";
+ llvm::sys::path::is_absolute(dir) ? "" : StringRef(D.SysRoot);
addExternCSystemInclude(DriverArgs, CC1Args, Prefix + dir);
}
return;
diff --git a/clang/lib/Driver/ToolChains/VEToolchain.cpp b/clang/lib/Driver/ToolChains/VEToolchain.cpp
new file mode 100644
index 000000000000..6ea405c0269c
--- /dev/null
+++ b/clang/lib/Driver/ToolChains/VEToolchain.cpp
@@ -0,0 +1,119 @@
+//===--- VEToolchain.cpp - VE ToolChain Implementations ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "VEToolchain.h"
+#include "CommonArgs.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Options.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include <cstdlib> // ::getenv
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+using namespace clang;
+using namespace llvm::opt;
+
+/// VE tool chain
+VEToolChain::VEToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : Linux(D, Triple, Args) {
+ getProgramPaths().push_back("/opt/nec/ve/bin");
+ // ProgramPaths are found via 'PATH' environment variable.
+
+ // default file paths are:
+ // ${RESOURCEDIR}/lib/linux/ve (== getArchSpecificLibPath)
+ // /lib/../lib64
+ // /usr/lib/../lib64
+ // ${BINPATH}/../lib
+ // /lib
+ // /usr/lib
+ //
+ // These are OK for the host, but not for VE. So, define them all
+ // from scratch here.
+ getFilePaths().clear();
+ getFilePaths().push_back(getArchSpecificLibPath());
+ getFilePaths().push_back(computeSysRoot() + "/opt/nec/ve/lib");
+}
+
+Tool *VEToolChain::buildAssembler() const {
+ return new tools::gnutools::Assembler(*this);
+}
+
+Tool *VEToolChain::buildLinker() const {
+ return new tools::gnutools::Linker(*this);
+}
+
+bool VEToolChain::isPICDefault() const { return false; }
+
+bool VEToolChain::isPIEDefault() const { return false; }
+
+bool VEToolChain::isPICDefaultForced() const { return false; }
+
+bool VEToolChain::SupportsProfiling() const { return false; }
+
+bool VEToolChain::hasBlocksRuntime() const { return false; }
+
+void VEToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(clang::driver::options::OPT_nostdinc))
+ return;
+
+ if (DriverArgs.hasArg(options::OPT_nobuiltininc) &&
+ DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> P(getDriver().ResourceDir);
+ llvm::sys::path::append(P, "include");
+ addSystemInclude(DriverArgs, CC1Args, P);
+ }
+
+ if (!DriverArgs.hasArg(options::OPT_nostdlibinc)) {
+ if (const char *cl_include_dir = getenv("NCC_C_INCLUDE_PATH")) {
+ SmallVector<StringRef, 4> Dirs;
+ const char EnvPathSeparatorStr[] = {llvm::sys::EnvPathSeparator, '\0'};
+ StringRef(cl_include_dir).split(Dirs, StringRef(EnvPathSeparatorStr));
+ ArrayRef<StringRef> DirVec(Dirs);
+ addSystemIncludes(DriverArgs, CC1Args, DirVec);
+ } else {
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/opt/nec/ve/include");
+ }
+ }
+}
+
+void VEToolChain::addClangTargetOptions(const ArgList &DriverArgs,
+ ArgStringList &CC1Args,
+ Action::OffloadKind) const {
+ CC1Args.push_back("-nostdsysteminc");
+ bool UseInitArrayDefault = true;
+ if (!DriverArgs.hasFlag(options::OPT_fuse_init_array,
+ options::OPT_fno_use_init_array, UseInitArrayDefault))
+ CC1Args.push_back("-fno-use-init-array");
+}
+
+void VEToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ // TODO upstream VE libc++ patches
+ llvm_unreachable("The VE target has no C++ stdlib for Clang yet");
+}
+
+void VEToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ // TODO upstream VE libc++ patches
+ llvm_unreachable("The VE target has no C++ stdlib for Clang yet");
+}
+
+llvm::ExceptionHandling
+VEToolChain::GetExceptionModel(const ArgList &Args) const {
+ // VE uses SjLj exceptions.
+ return llvm::ExceptionHandling::SjLj;
+}
diff --git a/clang/lib/Driver/ToolChains/VEToolchain.h b/clang/lib/Driver/ToolChains/VEToolchain.h
new file mode 100644
index 000000000000..59069c0a7595
--- /dev/null
+++ b/clang/lib/Driver/ToolChains/VEToolchain.h
@@ -0,0 +1,66 @@
+//===--- VEToolchain.h - VE ToolChain Implementations -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_VE_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_VE_H
+
+#include "Linux.h"
+#include "clang/Driver/ToolChain.h"
+
+namespace clang {
+namespace driver {
+namespace toolchains {
+
+class LLVM_LIBRARY_VISIBILITY VEToolChain : public Linux {
+public:
+ VEToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+protected:
+ Tool *buildAssembler() const override;
+ Tool *buildLinker() const override;
+
+public:
+ bool isPICDefault() const override;
+ bool isPIEDefault() const override;
+ bool isPICDefaultForced() const override;
+ bool SupportsProfiling() const override;
+ bool hasBlocksRuntime() const override;
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void
+ addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const override;
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
+ llvm::ExceptionHandling
+ GetExceptionModel(const llvm::opt::ArgList &Args) const override;
+
+ CXXStdlibType
+ GetCXXStdlibType(const llvm::opt::ArgList &Args) const override {
+ return ToolChain::CST_Libcxx;
+ }
+
+ RuntimeLibType GetDefaultRuntimeLibType() const override {
+ return ToolChain::RLT_CompilerRT;
+ }
+
+ const char *getDefaultLinker() const override { return "nld"; }
+};
+
+} // end namespace toolchains
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_VE_H
diff --git a/clang/lib/Driver/ToolChains/WebAssembly.cpp b/clang/lib/Driver/ToolChains/WebAssembly.cpp
index 907f86b8233c..10168736400f 100644
--- a/clang/lib/Driver/ToolChains/WebAssembly.cpp
+++ b/clang/lib/Driver/ToolChains/WebAssembly.cpp
@@ -40,7 +40,7 @@ std::string wasm::Linker::getLinkerPath(const ArgList &Args) const {
if (!UseLinker.empty()) {
if (llvm::sys::path::is_absolute(UseLinker) &&
llvm::sys::fs::can_execute(UseLinker))
- return UseLinker;
+ return std::string(UseLinker);
// Accept 'lld', and 'ld' as aliases for the default linker
if (UseLinker != "lld" && UseLinker != "ld")
@@ -62,6 +62,12 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const char *Linker = Args.MakeArgString(getLinkerPath(Args));
ArgStringList CmdArgs;
+ CmdArgs.push_back("-m");
+ if (getToolChain().getTriple().isArch64Bit())
+ CmdArgs.push_back("wasm64");
+ else
+ CmdArgs.push_back("wasm32");
+
if (Args.hasArg(options::OPT_s))
CmdArgs.push_back("--strip-all");
@@ -69,8 +75,26 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_u);
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
+ const char *Crt1 = "crt1.o";
+ const char *Entry = NULL;
+ if (const Arg *A = Args.getLastArg(options::OPT_mexec_model_EQ)) {
+ StringRef CM = A->getValue();
+ if (CM == "command") {
+ // Use default values.
+ } else if (CM == "reactor") {
+ Crt1 = "crt1-reactor.o";
+ Entry = "_initialize";
+ } else {
+ ToolChain.getDriver().Diag(diag::err_drv_invalid_argument_to_option)
+ << CM << A->getOption().getName();
+ }
+ }
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles))
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt1.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(Crt1)));
+ if (Entry) {
+ CmdArgs.push_back(Args.MakeArgString("--entry"));
+ CmdArgs.push_back(Args.MakeArgString(Entry));
+ }
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
@@ -90,7 +114,8 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- C.addCommand(std::make_unique<Command>(JA, *this, Linker, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Linker, CmdArgs, Inputs));
// When optimizing, if wasm-opt is available, run it.
if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
@@ -112,7 +137,9 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(llvm::Twine("-O") + OOpt));
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- C.addCommand(std::make_unique<Command>(JA, *this, WasmOpt, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), WasmOpt, CmdArgs,
+ Inputs));
}
}
}
@@ -283,7 +310,7 @@ void WebAssembly::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
CIncludeDirs.split(dirs, ":");
for (StringRef dir : dirs) {
StringRef Prefix =
- llvm::sys::path::is_absolute(dir) ? StringRef(D.SysRoot) : "";
+ llvm::sys::path::is_absolute(dir) ? "" : StringRef(D.SysRoot);
addExternCSystemInclude(DriverArgs, CC1Args, Prefix + dir);
}
return;
diff --git a/clang/lib/Driver/ToolChains/WebAssembly.h b/clang/lib/Driver/ToolChains/WebAssembly.h
index 67d5fce84576..616bfb5d3d0c 100644
--- a/clang/lib/Driver/ToolChains/WebAssembly.h
+++ b/clang/lib/Driver/ToolChains/WebAssembly.h
@@ -18,10 +18,9 @@ namespace driver {
namespace tools {
namespace wasm {
-class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- explicit Linker(const ToolChain &TC)
- : GnuTool("wasm::Linker", "linker", TC) {}
+ explicit Linker(const ToolChain &TC) : Tool("wasm::Linker", "linker", TC) {}
bool isLinkJob() const override { return true; }
bool hasIntegratedCPP() const override { return false; }
std::string getLinkerPath(const llvm::opt::ArgList &Args) const;
diff --git a/clang/lib/Driver/ToolChains/XCore.cpp b/clang/lib/Driver/ToolChains/XCore.cpp
index ba3a6d44adda..5030c73c7d82 100644
--- a/clang/lib/Driver/ToolChains/XCore.cpp
+++ b/clang/lib/Driver/ToolChains/XCore.cpp
@@ -52,7 +52,8 @@ void tools::XCore::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("xcc"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
}
void tools::XCore::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -80,7 +81,8 @@ void tools::XCore::Linker::ConstructJob(Compilation &C, const JobAction &JA,
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("xcc"));
- C.addCommand(std::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
}
/// XCore tool chain
diff --git a/clang/lib/Driver/Types.cpp b/clang/lib/Driver/Types.cpp
index 7d83be2521e7..399e26d8d64a 100644
--- a/clang/lib/Driver/Types.cpp
+++ b/clang/lib/Driver/Types.cpp
@@ -24,10 +24,19 @@ struct TypeInfo {
const char *Name;
const char *TempSuffix;
ID PreprocessedType;
- const llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> Phases;
+ class PhasesBitSet {
+ unsigned Bits = 0;
+
+ public:
+ constexpr PhasesBitSet(std::initializer_list<phases::ID> Phases) {
+ for (auto Id : Phases)
+ Bits |= 1 << Id;
+ }
+ bool contains(phases::ID Id) const { return Bits & (1 << Id); }
+ } Phases;
};
-static const TypeInfo TypeInfos[] = {
+static constexpr TypeInfo TypeInfos[] = {
#define TYPE(NAME, ID, PP_TYPE, TEMP_SUFFIX, ...) \
{ NAME, TEMP_SUFFIX, TY_##PP_TYPE, { __VA_ARGS__ }, },
#include "clang/Driver/Types.def"
@@ -46,18 +55,18 @@ const char *types::getTypeName(ID Id) {
types::ID types::getPreprocessedType(ID Id) {
ID PPT = getInfo(Id).PreprocessedType;
- assert((llvm::is_contained(getInfo(Id).Phases, phases::Preprocess) !=
+ assert((getInfo(Id).Phases.contains(phases::Preprocess) !=
(PPT == TY_INVALID)) &&
"Unexpected Preprocess Type.");
return PPT;
}
-static bool isPrepeocessedModuleType(ID Id) {
+static bool isPreprocessedModuleType(ID Id) {
return Id == TY_CXXModule || Id == TY_PP_CXXModule;
}
types::ID types::getPrecompiledType(ID Id) {
- if (isPrepeocessedModuleType(Id))
+ if (isPreprocessedModuleType(Id))
return TY_ModuleFile;
if (onlyPrecompileType(Id))
return TY_PCH;
@@ -81,15 +90,9 @@ const char *types::getTypeTempSuffix(ID Id, bool CLMode) {
return getInfo(Id).TempSuffix;
}
-bool types::onlyAssembleType(ID Id) {
- return llvm::is_contained(getInfo(Id).Phases, phases::Assemble) &&
- !llvm::is_contained(getInfo(Id).Phases, phases::Compile) &&
- !llvm::is_contained(getInfo(Id).Phases, phases::Backend);
-}
-
bool types::onlyPrecompileType(ID Id) {
- return llvm::is_contained(getInfo(Id).Phases, phases::Precompile) &&
- !isPrepeocessedModuleType(Id);
+ return getInfo(Id).Phases.contains(phases::Precompile) &&
+ !isPreprocessedModuleType(Id);
}
bool types::canTypeBeUserSpecified(ID Id) {
@@ -275,6 +278,7 @@ types::ID types::lookupTypeForExtension(llvm::StringRef Ext) {
.Case("gch", TY_PCH)
.Case("hip", TY_HIP)
.Case("hpp", TY_CXXHeader)
+ .Case("hxx", TY_CXXHeader)
.Case("iim", TY_PP_CXXModule)
.Case("lib", TY_Object)
.Case("mii", TY_PP_ObjCXX)
@@ -295,24 +299,28 @@ types::ID types::lookupTypeForTypeSpecifier(const char *Name) {
strcmp(Name, getInfo(Id).Name) == 0)
return Id;
}
-
+ // Accept "cu" as an alias for "cuda" for NVCC compatibility
+ if (strcmp(Name, "cu") == 0) {
+ return types::TY_CUDA;
+ }
return TY_INVALID;
}
-// FIXME: Why don't we just put this list in the defs file, eh.
-// FIXME: The list is now in Types.def but for now this function will verify
-// the old behavior and a subsequent change will delete most of the body.
-void types::getCompilationPhases(ID Id, llvm::SmallVectorImpl<phases::ID> &P) {
- P = getInfo(Id).Phases;
- assert(0 < P.size() && "Not enough phases in list");
+llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases>
+types::getCompilationPhases(ID Id, phases::ID LastPhase) {
+ llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> P;
+ const auto &Info = getInfo(Id);
+ for (int I = 0; I <= LastPhase; ++I)
+ if (Info.Phases.contains(static_cast<phases::ID>(I)))
+ P.push_back(static_cast<phases::ID>(I));
assert(P.size() <= phases::MaxNumberOfPhases && "Too many phases in list");
+ return P;
}
-void types::getCompilationPhases(const clang::driver::Driver &Driver,
- llvm::opt::DerivedArgList &DAL, ID Id,
- llvm::SmallVectorImpl<phases::ID> &P) {
- llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> PhaseList;
- types::getCompilationPhases(Id, PhaseList);
+llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases>
+types::getCompilationPhases(const clang::driver::Driver &Driver,
+ llvm::opt::DerivedArgList &DAL, ID Id) {
+ phases::ID LastPhase;
// Filter to compiler mode. When the compiler is run as a preprocessor then
// compilation is not an option.
@@ -321,14 +329,12 @@ void types::getCompilationPhases(const clang::driver::Driver &Driver,
DAL.getLastArg(options::OPT__SLASH_EP) ||
DAL.getLastArg(options::OPT_M, options::OPT_MM) ||
DAL.getLastArg(options::OPT__SLASH_P))
- llvm::copy_if(PhaseList, std::back_inserter(P),
- [](phases::ID Phase) { return Phase <= phases::Preprocess; });
+ LastPhase = phases::Preprocess;
// --precompile only runs up to precompilation.
// This is a clang extension and is not compatible with GCC.
else if (DAL.getLastArg(options::OPT__precompile))
- llvm::copy_if(PhaseList, std::back_inserter(P),
- [](phases::ID Phase) { return Phase <= phases::Precompile; });
+ LastPhase = phases::Precompile;
// -{fsyntax-only,-analyze,emit-ast} only run up to the compiler.
else if (DAL.getLastArg(options::OPT_fsyntax_only) ||
@@ -340,21 +346,20 @@ void types::getCompilationPhases(const clang::driver::Driver &Driver,
DAL.getLastArg(options::OPT__migrate) ||
DAL.getLastArg(options::OPT__analyze) ||
DAL.getLastArg(options::OPT_emit_ast))
- llvm::copy_if(PhaseList, std::back_inserter(P),
- [](phases::ID Phase) { return Phase <= phases::Compile; });
+ LastPhase = phases::Compile;
else if (DAL.getLastArg(options::OPT_S) ||
DAL.getLastArg(options::OPT_emit_llvm))
- llvm::copy_if(PhaseList, std::back_inserter(P),
- [](phases::ID Phase) { return Phase <= phases::Backend; });
+ LastPhase = phases::Backend;
else if (DAL.getLastArg(options::OPT_c))
- llvm::copy_if(PhaseList, std::back_inserter(P),
- [](phases::ID Phase) { return Phase <= phases::Assemble; });
+ LastPhase = phases::Assemble;
// Generally means, do every phase until Link.
else
- P = PhaseList;
+ LastPhase = phases::LastPhase;
+
+ return types::getCompilationPhases(Id, LastPhase);
}
ID types::lookupCXXTypeForCType(ID Id) {
diff --git a/clang/lib/Driver/XRayArgs.cpp b/clang/lib/Driver/XRayArgs.cpp
index a2dd63f9eb77..f00c3906df97 100644
--- a/clang/lib/Driver/XRayArgs.cpp
+++ b/clang/lib/Driver/XRayArgs.cpp
@@ -13,10 +13,10 @@
#include "clang/Driver/ToolChain.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/SpecialCaseList.h"
+#include "llvm/Support/VirtualFileSystem.h"
using namespace clang;
using namespace clang::driver;
@@ -32,157 +32,163 @@ constexpr const char *const XRaySupportedModes[] = {"xray-fdr", "xray-basic"};
XRayArgs::XRayArgs(const ToolChain &TC, const ArgList &Args) {
const Driver &D = TC.getDriver();
const llvm::Triple &Triple = TC.getTriple();
- if (Args.hasFlag(options::OPT_fxray_instrument,
- options::OPT_fnoxray_instrument, false)) {
- if (Triple.getOS() == llvm::Triple::Linux) {
- switch (Triple.getArch()) {
- case llvm::Triple::x86_64:
- case llvm::Triple::arm:
- case llvm::Triple::aarch64:
- case llvm::Triple::ppc64le:
- case llvm::Triple::mips:
- case llvm::Triple::mipsel:
- case llvm::Triple::mips64:
- case llvm::Triple::mips64el:
- break;
- default:
- D.Diag(diag::err_drv_clang_unsupported)
- << (std::string(XRayInstrumentOption) + " on " + Triple.str());
- }
- } else if (Triple.isOSFreeBSD() ||
- Triple.isOSOpenBSD() ||
- Triple.isOSNetBSD() ||
- Triple.isMacOSX()) {
- if (Triple.getArch() != llvm::Triple::x86_64) {
- D.Diag(diag::err_drv_clang_unsupported)
- << (std::string(XRayInstrumentOption) + " on " + Triple.str());
- }
- } else if (Triple.getOS() == llvm::Triple::Fuchsia) {
- switch (Triple.getArch()) {
- case llvm::Triple::x86_64:
- case llvm::Triple::aarch64:
- break;
- default:
- D.Diag(diag::err_drv_clang_unsupported)
- << (std::string(XRayInstrumentOption) + " on " + Triple.str());
- }
- } else {
+ if (!Args.hasFlag(options::OPT_fxray_instrument,
+ options::OPT_fno_xray_instrument, false))
+ return;
+ if (Triple.getOS() == llvm::Triple::Linux) {
+ switch (Triple.getArch()) {
+ case llvm::Triple::x86_64:
+ case llvm::Triple::arm:
+ case llvm::Triple::aarch64:
+ case llvm::Triple::ppc64le:
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ break;
+ default:
D.Diag(diag::err_drv_clang_unsupported)
<< (std::string(XRayInstrumentOption) + " on " + Triple.str());
}
-
- // Both XRay and -fpatchable-function-entry use
- // TargetOpcode::PATCHABLE_FUNCTION_ENTER.
- if (Arg *A = Args.getLastArg(options::OPT_fpatchable_function_entry_EQ))
- D.Diag(diag::err_drv_argument_not_allowed_with)
- << "-fxray-instrument" << A->getSpelling();
-
- XRayInstrument = true;
- if (const Arg *A =
- Args.getLastArg(options::OPT_fxray_instruction_threshold_,
- options::OPT_fxray_instruction_threshold_EQ)) {
- StringRef S = A->getValue();
- if (S.getAsInteger(0, InstructionThreshold) || InstructionThreshold < 0)
- D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
+ } else if (Triple.isOSFreeBSD() || Triple.isOSOpenBSD() ||
+ Triple.isOSNetBSD() || Triple.isMacOSX()) {
+ if (Triple.getArch() != llvm::Triple::x86_64) {
+ D.Diag(diag::err_drv_clang_unsupported)
+ << (std::string(XRayInstrumentOption) + " on " + Triple.str());
+ }
+ } else if (Triple.getOS() == llvm::Triple::Fuchsia) {
+ switch (Triple.getArch()) {
+ case llvm::Triple::x86_64:
+ case llvm::Triple::aarch64:
+ break;
+ default:
+ D.Diag(diag::err_drv_clang_unsupported)
+ << (std::string(XRayInstrumentOption) + " on " + Triple.str());
}
+ } else {
+ D.Diag(diag::err_drv_clang_unsupported)
+ << (std::string(XRayInstrumentOption) + " on " + Triple.str());
+ }
+
+ // Both XRay and -fpatchable-function-entry use
+ // TargetOpcode::PATCHABLE_FUNCTION_ENTER.
+ if (Arg *A = Args.getLastArg(options::OPT_fpatchable_function_entry_EQ))
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << "-fxray-instrument" << A->getSpelling();
+
+ XRayInstrument = true;
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fxray_instruction_threshold_,
+ options::OPT_fxray_instruction_threshold_EQ)) {
+ StringRef S = A->getValue();
+ if (S.getAsInteger(0, InstructionThreshold) || InstructionThreshold < 0)
+ D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
+ }
+
+ // By default, the back-end will not emit the lowering for XRay customevent
+ // calls if the function is not instrumented. In the future we will change
+ // this default to be the reverse, but in the meantime we're going to
+ // introduce the new functionality behind a flag.
+ if (Args.hasFlag(options::OPT_fxray_always_emit_customevents,
+ options::OPT_fno_xray_always_emit_customevents, false))
+ XRayAlwaysEmitCustomEvents = true;
+
+ if (Args.hasFlag(options::OPT_fxray_always_emit_typedevents,
+ options::OPT_fno_xray_always_emit_typedevents, false))
+ XRayAlwaysEmitTypedEvents = true;
+
+ if (!Args.hasFlag(options::OPT_fxray_link_deps,
+ options::OPT_fnoxray_link_deps, true))
+ XRayRT = false;
- // By default, the back-end will not emit the lowering for XRay customevent
- // calls if the function is not instrumented. In the future we will change
- // this default to be the reverse, but in the meantime we're going to
- // introduce the new functionality behind a flag.
- if (Args.hasFlag(options::OPT_fxray_always_emit_customevents,
- options::OPT_fnoxray_always_emit_customevents, false))
- XRayAlwaysEmitCustomEvents = true;
-
- if (Args.hasFlag(options::OPT_fxray_always_emit_typedevents,
- options::OPT_fnoxray_always_emit_typedevents, false))
- XRayAlwaysEmitTypedEvents = true;
-
- if (!Args.hasFlag(options::OPT_fxray_link_deps,
- options::OPT_fnoxray_link_deps, true))
- XRayRT = false;
-
- auto Bundles =
- Args.getAllArgValues(options::OPT_fxray_instrumentation_bundle);
- if (Bundles.empty())
- InstrumentationBundle.Mask = XRayInstrKind::All;
- else
- for (const auto &B : Bundles) {
- llvm::SmallVector<StringRef, 2> BundleParts;
- llvm::SplitString(B, BundleParts, ",");
- for (const auto &P : BundleParts) {
- // TODO: Automate the generation of the string case table.
- auto Valid = llvm::StringSwitch<bool>(P)
- .Cases("none", "all", "function", "custom", true)
- .Default(false);
-
- if (!Valid) {
- D.Diag(clang::diag::err_drv_invalid_value)
- << "-fxray-instrumentation-bundle=" << P;
- continue;
- }
-
- auto Mask = parseXRayInstrValue(P);
- if (Mask == XRayInstrKind::None) {
- InstrumentationBundle.clear();
- break;
- }
-
- InstrumentationBundle.Mask |= Mask;
+ if (Args.hasFlag(options::OPT_fxray_ignore_loops,
+ options::OPT_fno_xray_ignore_loops, false))
+ XRayIgnoreLoops = true;
+
+ XRayFunctionIndex = Args.hasFlag(options::OPT_fxray_function_index,
+ options::OPT_fno_xray_function_index, true);
+
+ auto Bundles =
+ Args.getAllArgValues(options::OPT_fxray_instrumentation_bundle);
+ if (Bundles.empty())
+ InstrumentationBundle.Mask = XRayInstrKind::All;
+ else
+ for (const auto &B : Bundles) {
+ llvm::SmallVector<StringRef, 2> BundleParts;
+ llvm::SplitString(B, BundleParts, ",");
+ for (const auto &P : BundleParts) {
+ // TODO: Automate the generation of the string case table.
+ auto Valid = llvm::StringSwitch<bool>(P)
+ .Cases("none", "all", "function", "function-entry",
+ "function-exit", "custom", true)
+ .Default(false);
+
+ if (!Valid) {
+ D.Diag(clang::diag::err_drv_invalid_value)
+ << "-fxray-instrumentation-bundle=" << P;
+ continue;
}
- }
- // Validate the always/never attribute files. We also make sure that they
- // are treated as actual dependencies.
- for (const auto &Filename :
- Args.getAllArgValues(options::OPT_fxray_always_instrument)) {
- if (D.getVFS().exists(Filename)) {
- AlwaysInstrumentFiles.push_back(Filename);
- ExtraDeps.push_back(Filename);
- } else
- D.Diag(clang::diag::err_drv_no_such_file) << Filename;
- }
+ auto Mask = parseXRayInstrValue(P);
+ if (Mask == XRayInstrKind::None) {
+ InstrumentationBundle.clear();
+ break;
+ }
- for (const auto &Filename :
- Args.getAllArgValues(options::OPT_fxray_never_instrument)) {
- if (D.getVFS().exists(Filename)) {
- NeverInstrumentFiles.push_back(Filename);
- ExtraDeps.push_back(Filename);
- } else
- D.Diag(clang::diag::err_drv_no_such_file) << Filename;
+ InstrumentationBundle.Mask |= Mask;
+ }
}
- for (const auto &Filename :
- Args.getAllArgValues(options::OPT_fxray_attr_list)) {
- if (D.getVFS().exists(Filename)) {
- AttrListFiles.push_back(Filename);
- ExtraDeps.push_back(Filename);
- } else
- D.Diag(clang::diag::err_drv_no_such_file) << Filename;
- }
+ // Validate the always/never attribute files. We also make sure that they
+ // are treated as actual dependencies.
+ for (const auto &Filename :
+ Args.getAllArgValues(options::OPT_fxray_always_instrument)) {
+ if (D.getVFS().exists(Filename)) {
+ AlwaysInstrumentFiles.push_back(Filename);
+ ExtraDeps.push_back(Filename);
+ } else
+ D.Diag(clang::diag::err_drv_no_such_file) << Filename;
+ }
- // Get the list of modes we want to support.
- auto SpecifiedModes = Args.getAllArgValues(options::OPT_fxray_modes);
- if (SpecifiedModes.empty())
- llvm::copy(XRaySupportedModes, std::back_inserter(Modes));
- else
- for (const auto &Arg : SpecifiedModes) {
- // Parse CSV values for -fxray-modes=...
- llvm::SmallVector<StringRef, 2> ModeParts;
- llvm::SplitString(Arg, ModeParts, ",");
- for (const auto &M : ModeParts)
- if (M == "none")
- Modes.clear();
- else if (M == "all")
- llvm::copy(XRaySupportedModes, std::back_inserter(Modes));
- else
- Modes.push_back(M);
- }
+ for (const auto &Filename :
+ Args.getAllArgValues(options::OPT_fxray_never_instrument)) {
+ if (D.getVFS().exists(Filename)) {
+ NeverInstrumentFiles.push_back(Filename);
+ ExtraDeps.push_back(Filename);
+ } else
+ D.Diag(clang::diag::err_drv_no_such_file) << Filename;
+ }
- // Then we want to sort and unique the modes we've collected.
- llvm::sort(Modes);
- Modes.erase(std::unique(Modes.begin(), Modes.end()), Modes.end());
+ for (const auto &Filename :
+ Args.getAllArgValues(options::OPT_fxray_attr_list)) {
+ if (D.getVFS().exists(Filename)) {
+ AttrListFiles.push_back(Filename);
+ ExtraDeps.push_back(Filename);
+ } else
+ D.Diag(clang::diag::err_drv_no_such_file) << Filename;
}
+
+ // Get the list of modes we want to support.
+ auto SpecifiedModes = Args.getAllArgValues(options::OPT_fxray_modes);
+ if (SpecifiedModes.empty())
+ llvm::copy(XRaySupportedModes, std::back_inserter(Modes));
+ else
+ for (const auto &Arg : SpecifiedModes) {
+ // Parse CSV values for -fxray-modes=...
+ llvm::SmallVector<StringRef, 2> ModeParts;
+ llvm::SplitString(Arg, ModeParts, ",");
+ for (const auto &M : ModeParts)
+ if (M == "none")
+ Modes.clear();
+ else if (M == "all")
+ llvm::copy(XRaySupportedModes, std::back_inserter(Modes));
+ else
+ Modes.push_back(std::string(M));
+ }
+
+ // Then we want to sort and unique the modes we've collected.
+ llvm::sort(Modes);
+ Modes.erase(std::unique(Modes.begin(), Modes.end()), Modes.end());
}
void XRayArgs::addArgs(const ToolChain &TC, const ArgList &Args,
@@ -198,6 +204,12 @@ void XRayArgs::addArgs(const ToolChain &TC, const ArgList &Args,
if (XRayAlwaysEmitTypedEvents)
CmdArgs.push_back("-fxray-always-emit-typedevents");
+ if (XRayIgnoreLoops)
+ CmdArgs.push_back("-fxray-ignore-loops");
+
+ if (!XRayFunctionIndex)
+ CmdArgs.push_back("-fno-xray-function-index");
+
CmdArgs.push_back(Args.MakeArgString(Twine(XRayInstructionThresholdOption) +
Twine(InstructionThreshold)));
@@ -237,8 +249,14 @@ void XRayArgs::addArgs(const ToolChain &TC, const ArgList &Args,
} else if (InstrumentationBundle.empty()) {
Bundle += "none";
} else {
- if (InstrumentationBundle.has(XRayInstrKind::Function))
+ if (InstrumentationBundle.has(XRayInstrKind::FunctionEntry) &&
+ InstrumentationBundle.has(XRayInstrKind::FunctionExit))
Bundle += "function";
+ else if (InstrumentationBundle.has(XRayInstrKind::FunctionEntry))
+ Bundle += "function-entry";
+ else if (InstrumentationBundle.has(XRayInstrKind::FunctionExit))
+ Bundle += "function-exit";
+
if (InstrumentationBundle.has(XRayInstrKind::Custom))
Bundle += "custom";
if (InstrumentationBundle.has(XRayInstrKind::Typed))
diff --git a/clang/lib/Format/BreakableToken.cpp b/clang/lib/Format/BreakableToken.cpp
index cd0eb0b4324a..15fbe3b6515d 100644
--- a/clang/lib/Format/BreakableToken.cpp
+++ b/clang/lib/Format/BreakableToken.cpp
@@ -587,9 +587,8 @@ void BreakableBlockComment::insertBreak(unsigned LineIndex, unsigned TailOffset,
Text.data() - tokenAt(LineIndex).TokenText.data() + Split.first;
unsigned CharsToRemove = Split.second;
assert(LocalIndentAtLineBreak >= Prefix.size());
- std::string PrefixWithTrailingIndent = Prefix;
- for (unsigned I = 0; I < ContentIndent; ++I)
- PrefixWithTrailingIndent += " ";
+ std::string PrefixWithTrailingIndent = std::string(Prefix);
+ PrefixWithTrailingIndent.append(ContentIndent, ' ');
Whitespaces.replaceWhitespaceInToken(
tokenAt(LineIndex), BreakOffsetInToken, CharsToRemove, "",
PrefixWithTrailingIndent, InPPDirective, /*Newlines=*/1,
@@ -864,7 +863,8 @@ void BreakableLineCommentSection::reflow(unsigned LineIndex,
// tokens by the empty string.
Whitespaces.replaceWhitespace(
*Tokens[LineIndex], /*Newlines=*/0, /*Spaces=*/0,
- /*StartOfTokenColumn=*/StartColumn, /*InPPDirective=*/false);
+ /*StartOfTokenColumn=*/StartColumn, /*IsAligned=*/true,
+ /*InPPDirective=*/false);
} else if (LineIndex > 0) {
// In case we're reflowing after the '\' in:
//
@@ -932,6 +932,7 @@ void BreakableLineCommentSection::adaptStartOfLine(
/*Newlines=*/1,
/*Spaces=*/LineColumn,
/*StartOfTokenColumn=*/LineColumn,
+ /*IsAligned=*/true,
/*InPPDirective=*/false);
}
if (OriginalPrefix[LineIndex] != Prefix[LineIndex]) {
diff --git a/clang/lib/Format/ContinuationIndenter.cpp b/clang/lib/Format/ContinuationIndenter.cpp
index 2ff6e5ec2344..b1497651a8fe 100644
--- a/clang/lib/Format/ContinuationIndenter.cpp
+++ b/clang/lib/Format/ContinuationIndenter.cpp
@@ -329,6 +329,11 @@ bool ContinuationIndenter::canBreak(const LineState &State) {
bool ContinuationIndenter::mustBreak(const LineState &State) {
const FormatToken &Current = *State.NextToken;
const FormatToken &Previous = *Current.Previous;
+ if (Style.BraceWrapping.BeforeLambdaBody && Current.CanBreakBefore &&
+ Current.is(TT_LambdaLBrace) && Previous.isNot(TT_LineComment)) {
+ auto LambdaBodyLength = getLengthToMatchingParen(Current, State.Stack);
+ return (LambdaBodyLength > getColumnLimit(State));
+ }
if (Current.MustBreakBefore || Current.is(TT_InlineASMColon))
return true;
if (State.Stack.back().BreakBeforeClosingBrace &&
@@ -337,10 +342,16 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
if (Previous.is(tok::semi) && State.LineContainsContinuedForLoopSection)
return true;
if (Style.Language == FormatStyle::LK_ObjC &&
+ Style.ObjCBreakBeforeNestedBlockParam &&
Current.ObjCSelectorNameParts > 1 &&
Current.startsSequence(TT_SelectorName, tok::colon, tok::caret)) {
return true;
}
+ // Avoid producing inconsistent states by requiring breaks where they are not
+ // permitted for C# generic type constraints.
+ if (State.Stack.back().IsCSharpGenericTypeConstraint &&
+ Previous.isNot(TT_CSharpGenericTypeConstraintComma))
+ return false;
if ((startsNextParameter(Current, Style) || Previous.is(tok::semi) ||
(Previous.is(TT_TemplateCloser) && Current.is(TT_StartOfName) &&
Style.isCpp() &&
@@ -356,6 +367,12 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
State.Stack.back().BreakBeforeParameter && !Current.isTrailingComment() &&
!Current.isOneOf(tok::r_paren, tok::r_brace))
return true;
+ if (State.Stack.back().IsChainedConditional &&
+ ((Style.BreakBeforeTernaryOperators && Current.is(TT_ConditionalExpr) &&
+ Current.is(tok::colon)) ||
+ (!Style.BreakBeforeTernaryOperators && Previous.is(TT_ConditionalExpr) &&
+ Previous.is(tok::colon))))
+ return true;
if (((Previous.is(TT_DictLiteral) && Previous.is(tok::l_brace)) ||
(Previous.is(TT_ArrayInitializerLSquare) &&
Previous.ParameterCount > 1) ||
@@ -412,7 +429,7 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
State.Stack.back().BreakBeforeParameter && Current.CanBreakBefore)
return true;
- if (State.Column <= NewLineColumn)
+ if (!State.Line->First->is(tok::kw_enum) && State.Column <= NewLineColumn)
return false;
if (Style.AlwaysBreakBeforeMultilineStrings &&
@@ -629,9 +646,12 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
State.Stack.back().NoLineBreak = true;
if (Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign &&
+ !State.Stack.back().IsCSharpGenericTypeConstraint &&
Previous.opensScope() && Previous.isNot(TT_ObjCMethodExpr) &&
- (Current.isNot(TT_LineComment) || Previous.BlockKind == BK_BracedInit))
+ (Current.isNot(TT_LineComment) || Previous.BlockKind == BK_BracedInit)) {
State.Stack.back().Indent = State.Column + Spaces;
+ State.Stack.back().IsAligned = true;
+ }
if (State.Stack.back().AvoidBinPacking && startsNextParameter(Current, Style))
State.Stack.back().NoLineBreak = true;
if (startsSegmentOfBuilderTypeCall(Current) &&
@@ -673,7 +693,9 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
// does not help.
bool HasTwoOperands =
P->OperatorIndex == 0 && !P->NextOperator && !P->is(TT_ConditionalExpr);
- if ((!BreakBeforeOperator && !(HasTwoOperands && Style.AlignOperands)) ||
+ if ((!BreakBeforeOperator &&
+ !(HasTwoOperands &&
+ Style.AlignOperands != FormatStyle::OAS_DontAlign)) ||
(!State.Stack.back().LastOperatorWrapped && BreakBeforeOperator))
State.Stack.back().NoLineBreakInOperand = true;
}
@@ -710,6 +732,8 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
} else if (Previous.is(TT_InheritanceColon)) {
State.Stack.back().Indent = State.Column;
State.Stack.back().LastSpace = State.Column;
+ } else if (Current.is(TT_CSharpGenericTypeConstraintColon)) {
+ State.Stack.back().ColonPos = State.Column;
} else if (Previous.opensScope()) {
// If a function has a trailing call, indent all parameters from the
// opening parenthesis. This avoids confusing indents like:
@@ -844,6 +868,7 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
bool ContinuePPDirective =
State.Line->InPPDirective && State.Line->Type != LT_ImportStatement;
Whitespaces.replaceWhitespace(Current, Newlines, State.Column, State.Column,
+ State.Stack.back().IsAligned,
ContinuePPDirective);
}
@@ -861,8 +886,10 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
// Any break on this level means that the parent level has been broken
// and we need to avoid bin packing there.
bool NestedBlockSpecialCase =
- !Style.isCpp() && Current.is(tok::r_brace) && State.Stack.size() > 1 &&
- State.Stack[State.Stack.size() - 2].NestedBlockInlined;
+ (!Style.isCpp() && Current.is(tok::r_brace) && State.Stack.size() > 1 &&
+ State.Stack[State.Stack.size() - 2].NestedBlockInlined) ||
+ (Style.Language == FormatStyle::LK_ObjC && Current.is(tok::r_brace) &&
+ State.Stack.size() > 1 && !Style.ObjCBreakBeforeNestedBlockParam);
if (!NestedBlockSpecialCase)
for (unsigned i = 0, e = State.Stack.size() - 1; i != e; ++i)
State.Stack[i].BreakBeforeParameter = true;
@@ -917,7 +944,13 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
if (!State.NextToken || !State.NextToken->Previous)
return 0;
+
FormatToken &Current = *State.NextToken;
+
+ if (State.Stack.back().IsCSharpGenericTypeConstraint &&
+ Current.isNot(TT_CSharpGenericTypeConstraint))
+ return State.Stack.back().ColonPos + 2;
+
const FormatToken &Previous = *Current.Previous;
// If we are continuing an expression, we want to use the continuation indent.
unsigned ContinuationIndent =
@@ -997,8 +1030,28 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
if (State.Stack.back().QuestionColumn != 0 &&
((NextNonComment->is(tok::colon) &&
NextNonComment->is(TT_ConditionalExpr)) ||
- Previous.is(TT_ConditionalExpr)))
+ Previous.is(TT_ConditionalExpr))) {
+ if (((NextNonComment->is(tok::colon) && NextNonComment->Next &&
+ !NextNonComment->Next->FakeLParens.empty() &&
+ NextNonComment->Next->FakeLParens.back() == prec::Conditional) ||
+ (Previous.is(tok::colon) && !Current.FakeLParens.empty() &&
+ Current.FakeLParens.back() == prec::Conditional)) &&
+ !State.Stack.back().IsWrappedConditional) {
+ // NOTE: we may tweak this slightly:
+ // * not remove the 'lead' ContinuationIndentWidth
+ // * always un-indent by the operator when
+ // BreakBeforeTernaryOperators=true
+ unsigned Indent = State.Stack.back().Indent;
+ if (Style.AlignOperands != FormatStyle::OAS_DontAlign) {
+ Indent -= Style.ContinuationIndentWidth;
+ }
+ if (Style.BreakBeforeTernaryOperators &&
+ State.Stack.back().UnindentOperator)
+ Indent -= 2;
+ return Indent;
+ }
return State.Stack.back().QuestionColumn;
+ }
if (Previous.is(tok::comma) && State.Stack.back().VariablePos != 0)
return State.Stack.back().VariablePos;
if ((PreviousNonComment &&
@@ -1040,6 +1093,9 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
if (NextNonComment->is(TT_ArraySubscriptLSquare)) {
if (State.Stack.back().StartOfArraySubscripts != 0)
return State.Stack.back().StartOfArraySubscripts;
+ else if (Style.isCSharp()) // C# allows `["key"] = value` inside object
+ // initializers.
+ return State.Stack.back().Indent;
return ContinuationIndent;
}
@@ -1071,6 +1127,13 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
return ContinuationIndent;
if (Current.is(TT_ProtoExtensionLSquare))
return State.Stack.back().Indent;
+ if (Current.isBinaryOperator() && State.Stack.back().UnindentOperator)
+ return State.Stack.back().Indent - Current.Tok.getLength() -
+ Current.SpacesRequiredBefore;
+ if (Current.isOneOf(tok::comment, TT_BlockComment, TT_LineComment) &&
+ NextNonComment->isBinaryOperator() && State.Stack.back().UnindentOperator)
+ return State.Stack.back().Indent - NextNonComment->Tok.getLength() -
+ NextNonComment->SpacesRequiredBefore;
if (State.Stack.back().Indent == State.FirstIndent && PreviousNonComment &&
!PreviousNonComment->isOneOf(tok::r_brace, TT_CtorInitializerComma))
// Ensure that we fall back to the continuation indent width instead of
@@ -1079,14 +1142,28 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
return State.Stack.back().Indent;
}
+static bool hasNestedBlockInlined(const FormatToken *Previous,
+ const FormatToken &Current,
+ const FormatStyle &Style) {
+ if (Previous->isNot(tok::l_paren))
+ return true;
+ if (Previous->ParameterCount > 1)
+ return true;
+
+  // Also a nested block if it contains a lambda inside a function with 1 parameter
+ return (Style.BraceWrapping.BeforeLambdaBody && Current.is(TT_LambdaLSquare));
+}
+
unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
bool DryRun, bool Newline) {
assert(State.Stack.size());
const FormatToken &Current = *State.NextToken;
+ if (Current.is(TT_CSharpGenericTypeConstraint))
+ State.Stack.back().IsCSharpGenericTypeConstraint = true;
if (Current.isOneOf(tok::comma, TT_BinaryOperator))
State.Stack.back().NoLineBreakInOperand = false;
- if (Current.is(TT_InheritanceColon))
+ if (Current.isOneOf(TT_InheritanceColon, TT_CSharpGenericTypeConstraintColon))
State.Stack.back().AvoidBinPacking = true;
if (Current.is(tok::lessless) && Current.isNot(TT_OverloadedOperator)) {
if (State.Stack.back().FirstLessLess == 0)
@@ -1102,6 +1179,11 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
if (Current.is(TT_ArraySubscriptLSquare) &&
State.Stack.back().StartOfArraySubscripts == 0)
State.Stack.back().StartOfArraySubscripts = State.Column;
+ if (Current.is(TT_ConditionalExpr) && Current.is(tok::question) &&
+ ((Current.MustBreakBefore) ||
+ (Current.getNextNonComment() &&
+ Current.getNextNonComment()->MustBreakBefore)))
+ State.Stack.back().IsWrappedConditional = true;
if (Style.BreakBeforeTernaryOperators && Current.is(tok::question))
State.Stack.back().QuestionColumn = State.Column;
if (!Style.BreakBeforeTernaryOperators && Current.isNot(tok::colon)) {
@@ -1181,8 +1263,7 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
Previous->isOneOf(TT_BinaryOperator, TT_ConditionalExpr)) &&
!Previous->isOneOf(TT_DictLiteral, TT_ObjCMethodExpr)) {
State.Stack.back().NestedBlockInlined =
- !Newline &&
- (Previous->isNot(tok::l_paren) || Previous->ParameterCount > 1);
+ !Newline && hasNestedBlockInlined(Previous, Current, Style);
}
moveStatePastFakeLParens(State, Newline);
@@ -1233,7 +1314,7 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
(Previous && (Previous->opensScope() ||
Previous->isOneOf(tok::semi, tok::kw_return) ||
(Previous->getPrecedence() == prec::Assignment &&
- Style.AlignOperands) ||
+ Style.AlignOperands != FormatStyle::OAS_DontAlign) ||
Previous->is(TT_ObjCMethodExpr)));
for (SmallVectorImpl<prec::Level>::const_reverse_iterator
I = Current.FakeLParens.rbegin(),
@@ -1243,6 +1324,9 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
NewParenState.Tok = nullptr;
NewParenState.ContainsLineBreak = false;
NewParenState.LastOperatorWrapped = true;
+ NewParenState.IsChainedConditional = false;
+ NewParenState.IsWrappedConditional = false;
+ NewParenState.UnindentOperator = false;
NewParenState.NoLineBreak =
NewParenState.NoLineBreak || State.Stack.back().NoLineBreakInOperand;
@@ -1254,14 +1338,27 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
// a builder type call after 'return' or, if the alignment after opening
// brackets is disabled.
if (!Current.isTrailingComment() &&
- (Style.AlignOperands || *I < prec::Assignment) &&
+ (Style.AlignOperands != FormatStyle::OAS_DontAlign ||
+ *I < prec::Assignment) &&
(!Previous || Previous->isNot(tok::kw_return) ||
(Style.Language != FormatStyle::LK_Java && *I > 0)) &&
(Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign ||
- *I != prec::Comma || Current.NestingLevel == 0))
+ *I != prec::Comma || Current.NestingLevel == 0)) {
NewParenState.Indent =
std::max(std::max(State.Column, NewParenState.Indent),
State.Stack.back().LastSpace);
+ }
+
+ // If BreakBeforeBinaryOperators is set, un-indent a bit to account for
+ // the operator and keep the operands aligned
+ if (Style.AlignOperands == FormatStyle::OAS_AlignAfterOperator &&
+ Previous &&
+ (Previous->getPrecedence() == prec::Assignment ||
+ Previous->is(tok::kw_return) ||
+ (*I == prec::Conditional && Previous->is(tok::question) &&
+ Previous->is(TT_ConditionalExpr))) &&
+ !Newline)
+ NewParenState.UnindentOperator = true;
// Do not indent relative to the fake parentheses inserted for "." or "->".
// This is a special case to make the following to statements consistent:
@@ -1275,14 +1372,21 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign)
NewParenState.StartOfFunctionCall = State.Column;
- // Always indent conditional expressions. Never indent expression where
- // the 'operator' is ',', ';' or an assignment (i.e. *I <=
- // prec::Assignment) as those have different indentation rules. Indent
- // other expression, unless the indentation needs to be skipped.
- if (*I == prec::Conditional ||
- (!SkipFirstExtraIndent && *I > prec::Assignment &&
- !Current.isTrailingComment()))
+ // Indent conditional expressions, unless they are chained "else-if"
+ // conditionals. Never indent expression where the 'operator' is ',', ';' or
+ // an assignment (i.e. *I <= prec::Assignment) as those have different
+ // indentation rules. Indent other expression, unless the indentation needs
+ // to be skipped.
+ if (*I == prec::Conditional && Previous && Previous->is(tok::colon) &&
+ Previous->is(TT_ConditionalExpr) && I == Current.FakeLParens.rbegin() &&
+ !State.Stack.back().IsWrappedConditional) {
+ NewParenState.IsChainedConditional = true;
+ NewParenState.UnindentOperator = State.Stack.back().UnindentOperator;
+ } else if (*I == prec::Conditional ||
+ (!SkipFirstExtraIndent && *I > prec::Assignment &&
+ !Current.isTrailingComment())) {
NewParenState.Indent += Style.ContinuationIndentWidth;
+ }
if ((Previous && !Previous->opensScope()) || *I != prec::Comma)
NewParenState.BreakBeforeParameter = false;
State.Stack.push_back(NewParenState);
@@ -1308,6 +1412,11 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
if (!Current.opensScope())
return;
+ // Don't allow '<' or '(' in C# generic type constraints to start new scopes.
+ if (Current.isOneOf(tok::less, tok::l_paren) &&
+ State.Stack.back().IsCSharpGenericTypeConstraint)
+ return;
+
if (Current.MatchingParen && Current.BlockKind == BK_Block) {
moveStateToNewBlock(State);
return;
@@ -1372,6 +1481,7 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
(State.Line->Type == LT_ObjCDecl && ObjCBinPackProtocolList);
AvoidBinPacking =
+ (State.Stack.back().IsCSharpGenericTypeConstraint) ||
(Style.Language == FormatStyle::LK_JavaScript && EndsInComma) ||
(State.Line->MustBeDeclaration && !BinPackDeclaration) ||
(!State.Line->MustBeDeclaration && !Style.BinPackArguments) ||
@@ -1380,7 +1490,8 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
(!BinPackInconclusiveFunctions &&
Current.PackingKind == PPK_Inconclusive)));
- if (Current.is(TT_ObjCMethodExpr) && Current.MatchingParen) {
+ if (Current.is(TT_ObjCMethodExpr) && Current.MatchingParen &&
+ Style.ObjCBreakBeforeNestedBlockParam) {
if (Style.ColumnLimit) {
// If this '[' opens an ObjC call, determine whether all parameters fit
// into one line and put one per line if they don't.
@@ -1418,7 +1529,22 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
ParenState(&Current, NewIndent, LastSpace, AvoidBinPacking, NoLineBreak));
State.Stack.back().NestedBlockIndent = NestedBlockIndent;
State.Stack.back().BreakBeforeParameter = BreakBeforeParameter;
- State.Stack.back().HasMultipleNestedBlocks = Current.BlockParameterCount > 1;
+ State.Stack.back().HasMultipleNestedBlocks =
+ (Current.BlockParameterCount > 1);
+
+ if (Style.BraceWrapping.BeforeLambdaBody && Current.Next != nullptr &&
+ Current.Tok.is(tok::l_paren)) {
+ // Search for any parameter that is a lambda
+ FormatToken const *next = Current.Next;
+ while (next != nullptr) {
+ if (next->is(TT_LambdaLSquare)) {
+ State.Stack.back().HasMultipleNestedBlocks = true;
+ break;
+ }
+ next = next->Next;
+ }
+ }
+
State.Stack.back().IsInsideObjCArrayLiteral =
Current.is(TT_ArrayInitializerLSquare) && Current.Previous &&
Current.Previous->is(tok::at);
@@ -1513,8 +1639,8 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
unsigned OldSuffixSize = 2 + OldDelimiter.size();
// We create a virtual text environment which expects a null-terminated
// string, so we cannot use StringRef.
- std::string RawText =
- Current.TokenText.substr(OldPrefixSize).drop_back(OldSuffixSize);
+ std::string RawText = std::string(
+ Current.TokenText.substr(OldPrefixSize).drop_back(OldSuffixSize));
if (NewDelimiter != OldDelimiter) {
// Don't update to the canonical delimiter 'deli' if ')deli"' occurs in the
// raw string.
@@ -1760,7 +1886,7 @@ ContinuationIndenter::createBreakableToken(const FormatToken &Current,
LineState &State, bool AllowBreak) {
unsigned StartColumn = State.Column - Current.ColumnWidth;
if (Current.isStringLiteral()) {
- // FIXME: String literal breaking is currently disabled for C#,Java and
+ // FIXME: String literal breaking is currently disabled for C#, Java and
// JavaScript, as it requires strings to be merged using "+" which we
// don't support.
if (Style.Language == FormatStyle::LK_Java ||
diff --git a/clang/lib/Format/ContinuationIndenter.h b/clang/lib/Format/ContinuationIndenter.h
index 11df619e0f40..b1b2611263a9 100644
--- a/clang/lib/Format/ContinuationIndenter.h
+++ b/clang/lib/Format/ContinuationIndenter.h
@@ -202,13 +202,16 @@ struct ParenState {
ParenState(const FormatToken *Tok, unsigned Indent, unsigned LastSpace,
bool AvoidBinPacking, bool NoLineBreak)
: Tok(Tok), Indent(Indent), LastSpace(LastSpace),
- NestedBlockIndent(Indent), BreakBeforeClosingBrace(false),
- AvoidBinPacking(AvoidBinPacking), BreakBeforeParameter(false),
- NoLineBreak(NoLineBreak), NoLineBreakInOperand(false),
- LastOperatorWrapped(true), ContainsLineBreak(false),
- ContainsUnwrappedBuilder(false), AlignColons(true),
- ObjCSelectorNameFound(false), HasMultipleNestedBlocks(false),
- NestedBlockInlined(false), IsInsideObjCArrayLiteral(false) {}
+ NestedBlockIndent(Indent), IsAligned(false),
+ BreakBeforeClosingBrace(false), AvoidBinPacking(AvoidBinPacking),
+ BreakBeforeParameter(false), NoLineBreak(NoLineBreak),
+ NoLineBreakInOperand(false), LastOperatorWrapped(true),
+ ContainsLineBreak(false), ContainsUnwrappedBuilder(false),
+ AlignColons(true), ObjCSelectorNameFound(false),
+ HasMultipleNestedBlocks(false), NestedBlockInlined(false),
+ IsInsideObjCArrayLiteral(false), IsCSharpGenericTypeConstraint(false),
+ IsChainedConditional(false), IsWrappedConditional(false),
+ UnindentOperator(false) {}
/// \brief The token opening this parenthesis level, or nullptr if this level
/// is opened by fake parenthesis.
@@ -264,6 +267,9 @@ struct ParenState {
/// Used to align further variables if necessary.
unsigned VariablePos = 0;
+ /// Whether this block's indentation is used for alignment.
+ bool IsAligned : 1;
+
/// Whether a newline needs to be inserted before the block's closing
/// brace.
///
@@ -329,6 +335,20 @@ struct ParenState {
/// array literal.
bool IsInsideObjCArrayLiteral : 1;
+ bool IsCSharpGenericTypeConstraint : 1;
+
+ /// \brief true if the current \c ParenState represents the false branch of
+ /// a chained conditional expression (e.g. else-if)
+ bool IsChainedConditional : 1;
+
+ /// \brief true if the conditional was wrapped on the first operator (the
+ /// question mark)
+ bool IsWrappedConditional : 1;
+
+ /// \brief Indicates the indent should be reduced by the length of the
+ /// operator.
+ bool UnindentOperator : 1;
+
bool operator<(const ParenState &Other) const {
if (Indent != Other.Indent)
return Indent < Other.Indent;
@@ -338,6 +358,8 @@ struct ParenState {
return NestedBlockIndent < Other.NestedBlockIndent;
if (FirstLessLess != Other.FirstLessLess)
return FirstLessLess < Other.FirstLessLess;
+ if (IsAligned != Other.IsAligned)
+ return IsAligned;
if (BreakBeforeClosingBrace != Other.BreakBeforeClosingBrace)
return BreakBeforeClosingBrace;
if (QuestionColumn != Other.QuestionColumn)
@@ -366,6 +388,14 @@ struct ParenState {
return ContainsUnwrappedBuilder;
if (NestedBlockInlined != Other.NestedBlockInlined)
return NestedBlockInlined;
+ if (IsCSharpGenericTypeConstraint != Other.IsCSharpGenericTypeConstraint)
+ return IsCSharpGenericTypeConstraint;
+ if (IsChainedConditional != Other.IsChainedConditional)
+ return IsChainedConditional;
+ if (IsWrappedConditional != Other.IsWrappedConditional)
+ return IsWrappedConditional;
+ if (UnindentOperator != Other.UnindentOperator)
+ return UnindentOperator;
return false;
}
};
diff --git a/clang/lib/Format/Format.cpp b/clang/lib/Format/Format.cpp
index f12bca48c630..0d277a6464af 100644
--- a/clang/lib/Format/Format.cpp
+++ b/clang/lib/Format/Format.cpp
@@ -14,6 +14,7 @@
#include "clang/Format/Format.h"
#include "AffectedRangeManager.h"
+#include "BreakableToken.h"
#include "ContinuationIndenter.h"
#include "FormatInternal.h"
#include "FormatTokenLexer.h"
@@ -93,6 +94,7 @@ template <> struct ScalarEnumerationTraits<FormatStyle::UseTabStyle> {
IO.enumCase(Value, "ForIndentation", FormatStyle::UT_ForIndentation);
IO.enumCase(Value, "ForContinuationAndIndentation",
FormatStyle::UT_ForContinuationAndIndentation);
+ IO.enumCase(Value, "AlignWithSpaces", FormatStyle::UT_AlignWithSpaces);
}
};
@@ -157,6 +159,13 @@ template <> struct ScalarEnumerationTraits<FormatStyle::BinPackStyle> {
}
};
+template <> struct ScalarEnumerationTraits<FormatStyle::TrailingCommaStyle> {
+ static void enumeration(IO &IO, FormatStyle::TrailingCommaStyle &Value) {
+ IO.enumCase(Value, "None", FormatStyle::TCS_None);
+ IO.enumCase(Value, "Wrapped", FormatStyle::TCS_Wrapped);
+ }
+};
+
template <> struct ScalarEnumerationTraits<FormatStyle::BinaryOperatorStyle> {
static void enumeration(IO &IO, FormatStyle::BinaryOperatorStyle &Value) {
IO.enumCase(Value, "All", FormatStyle::BOS_All);
@@ -187,11 +196,13 @@ struct ScalarEnumerationTraits<
static void
enumeration(IO &IO,
FormatStyle::BraceWrappingAfterControlStatementStyle &Value) {
- IO.enumCase(Value, "false", FormatStyle::BWACS_Never);
- IO.enumCase(Value, "true", FormatStyle::BWACS_Always);
IO.enumCase(Value, "Never", FormatStyle::BWACS_Never);
IO.enumCase(Value, "MultiLine", FormatStyle::BWACS_MultiLine);
IO.enumCase(Value, "Always", FormatStyle::BWACS_Always);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "false", FormatStyle::BWACS_Never);
+ IO.enumCase(Value, "true", FormatStyle::BWACS_Always);
}
};
@@ -225,6 +236,17 @@ struct ScalarEnumerationTraits<FormatStyle::PPDirectiveIndentStyle> {
};
template <>
+struct ScalarEnumerationTraits<FormatStyle::IndentExternBlockStyle> {
+ static void enumeration(IO &IO, FormatStyle::IndentExternBlockStyle &Value) {
+ IO.enumCase(Value, "AfterExternBlock", FormatStyle::IEBS_AfterExternBlock);
+ IO.enumCase(Value, "Indent", FormatStyle::IEBS_Indent);
+ IO.enumCase(Value, "NoIndent", FormatStyle::IEBS_NoIndent);
+ IO.enumCase(Value, "true", FormatStyle::IEBS_Indent);
+ IO.enumCase(Value, "false", FormatStyle::IEBS_NoIndent);
+ }
+};
+
+template <>
struct ScalarEnumerationTraits<FormatStyle::ReturnTypeBreakingStyle> {
static void enumeration(IO &IO, FormatStyle::ReturnTypeBreakingStyle &Value) {
IO.enumCase(Value, "None", FormatStyle::RTBS_None);
@@ -300,6 +322,19 @@ struct ScalarEnumerationTraits<FormatStyle::EscapedNewlineAlignmentStyle> {
}
};
+template <> struct ScalarEnumerationTraits<FormatStyle::OperandAlignmentStyle> {
+ static void enumeration(IO &IO, FormatStyle::OperandAlignmentStyle &Value) {
+ IO.enumCase(Value, "DontAlign", FormatStyle::OAS_DontAlign);
+ IO.enumCase(Value, "Align", FormatStyle::OAS_Align);
+ IO.enumCase(Value, "AlignAfterOperator",
+ FormatStyle::OAS_AlignAfterOperator);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "true", FormatStyle::OAS_Align);
+ IO.enumCase(Value, "false", FormatStyle::OAS_DontAlign);
+ }
+};
+
template <> struct ScalarEnumerationTraits<FormatStyle::PointerAlignmentStyle> {
static void enumeration(IO &IO, FormatStyle::PointerAlignmentStyle &Value) {
IO.enumCase(Value, "Middle", FormatStyle::PAS_Middle);
@@ -319,6 +354,8 @@ struct ScalarEnumerationTraits<FormatStyle::SpaceBeforeParensOptions> {
IO.enumCase(Value, "Never", FormatStyle::SBPO_Never);
IO.enumCase(Value, "ControlStatements",
FormatStyle::SBPO_ControlStatements);
+ IO.enumCase(Value, "ControlStatementsExceptForEachMacros",
+ FormatStyle::SBPO_ControlStatementsExceptForEachMacros);
IO.enumCase(Value, "NonEmptyParentheses",
FormatStyle::SBPO_NonEmptyParentheses);
IO.enumCase(Value, "Always", FormatStyle::SBPO_Always);
@@ -378,6 +415,8 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("AlignConsecutiveMacros", Style.AlignConsecutiveMacros);
IO.mapOptional("AlignConsecutiveAssignments",
Style.AlignConsecutiveAssignments);
+ IO.mapOptional("AlignConsecutiveBitFields",
+ Style.AlignConsecutiveBitFields);
IO.mapOptional("AlignConsecutiveDeclarations",
Style.AlignConsecutiveDeclarations);
IO.mapOptional("AlignEscapedNewlines", Style.AlignEscapedNewlines);
@@ -389,6 +428,8 @@ template <> struct MappingTraits<FormatStyle> {
Style.AllowAllConstructorInitializersOnNextLine);
IO.mapOptional("AllowAllParametersOfDeclarationOnNextLine",
Style.AllowAllParametersOfDeclarationOnNextLine);
+ IO.mapOptional("AllowShortEnumsOnASingleLine",
+ Style.AllowShortEnumsOnASingleLine);
IO.mapOptional("AllowShortBlocksOnASingleLine",
Style.AllowShortBlocksOnASingleLine);
IO.mapOptional("AllowShortCaseLabelsOnASingleLine",
@@ -480,11 +521,14 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("IncludeIsMainSourceRegex",
Style.IncludeStyle.IncludeIsMainSourceRegex);
IO.mapOptional("IndentCaseLabels", Style.IndentCaseLabels);
+ IO.mapOptional("IndentCaseBlocks", Style.IndentCaseBlocks);
IO.mapOptional("IndentGotoLabels", Style.IndentGotoLabels);
IO.mapOptional("IndentPPDirectives", Style.IndentPPDirectives);
+ IO.mapOptional("IndentExternBlock", Style.IndentExternBlock);
IO.mapOptional("IndentWidth", Style.IndentWidth);
IO.mapOptional("IndentWrappedFunctionNames",
Style.IndentWrappedFunctionNames);
+ IO.mapOptional("InsertTrailingCommas", Style.InsertTrailingCommas);
IO.mapOptional("JavaImportGroups", Style.JavaImportGroups);
IO.mapOptional("JavaScriptQuotes", Style.JavaScriptQuotes);
IO.mapOptional("JavaScriptWrapImports", Style.JavaScriptWrapImports);
@@ -497,6 +541,8 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("NamespaceMacros", Style.NamespaceMacros);
IO.mapOptional("ObjCBinPackProtocolList", Style.ObjCBinPackProtocolList);
IO.mapOptional("ObjCBlockIndentWidth", Style.ObjCBlockIndentWidth);
+ IO.mapOptional("ObjCBreakBeforeNestedBlockParam",
+ Style.ObjCBreakBeforeNestedBlockParam);
IO.mapOptional("ObjCSpaceAfterProperty", Style.ObjCSpaceAfterProperty);
IO.mapOptional("ObjCSpaceBeforeProtocolList",
Style.ObjCSpaceBeforeProtocolList);
@@ -553,6 +599,8 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("TypenameMacros", Style.TypenameMacros);
IO.mapOptional("UseCRLF", Style.UseCRLF);
IO.mapOptional("UseTab", Style.UseTab);
+ IO.mapOptional("WhitespaceSensitiveMacros",
+ Style.WhitespaceSensitiveMacros);
}
};
@@ -570,6 +618,8 @@ template <> struct MappingTraits<FormatStyle::BraceWrappingFlags> {
IO.mapOptional("AfterExternBlock", Wrapping.AfterExternBlock);
IO.mapOptional("BeforeCatch", Wrapping.BeforeCatch);
IO.mapOptional("BeforeElse", Wrapping.BeforeElse);
+ IO.mapOptional("BeforeLambdaBody", Wrapping.BeforeLambdaBody);
+ IO.mapOptional("BeforeWhile", Wrapping.BeforeWhile);
IO.mapOptional("IndentBraces", Wrapping.IndentBraces);
IO.mapOptional("SplitEmptyFunction", Wrapping.SplitEmptyFunction);
IO.mapOptional("SplitEmptyRecord", Wrapping.SplitEmptyRecord);
@@ -643,6 +693,8 @@ std::string ParseErrorCategory::message(int EV) const {
return "Invalid argument";
case ParseError::Unsuitable:
return "Unsuitable";
+ case ParseError::BinPackTrailingCommaConflict:
+ return "trailing comma insertion cannot be used with bin packing";
}
llvm_unreachable("unexpected parse error");
}
@@ -651,12 +703,24 @@ static FormatStyle expandPresets(const FormatStyle &Style) {
if (Style.BreakBeforeBraces == FormatStyle::BS_Custom)
return Style;
FormatStyle Expanded = Style;
- Expanded.BraceWrapping = {false, false, FormatStyle::BWACS_Never,
- false, false, false,
- false, false, false,
- false, false, false,
- false, true, true,
- true};
+ Expanded.BraceWrapping = {/*AfterCaseLabel=*/false,
+ /*AfterClass=*/false,
+ /*AfterControlStatement=*/FormatStyle::BWACS_Never,
+ /*AfterEnum=*/false,
+ /*AfterFunction=*/false,
+ /*AfterNamespace=*/false,
+ /*AfterObjCDeclaration=*/false,
+ /*AfterStruct=*/false,
+ /*AfterUnion=*/false,
+ /*AfterExternBlock=*/false,
+ /*BeforeCatch=*/false,
+ /*BeforeElse=*/false,
+ /*BeforeLambdaBody=*/false,
+ /*BeforeWhile=*/false,
+ /*IndentBraces=*/false,
+ /*SplitEmptyFunction=*/true,
+ /*SplitEmptyRecord=*/true,
+ /*SplitEmptyNamespace=*/true};
switch (Style.BreakBeforeBraces) {
case FormatStyle::BS_Linux:
Expanded.BraceWrapping.AfterClass = true;
@@ -670,6 +734,7 @@ static FormatStyle expandPresets(const FormatStyle &Style) {
Expanded.BraceWrapping.AfterStruct = true;
Expanded.BraceWrapping.AfterUnion = true;
Expanded.BraceWrapping.AfterExternBlock = true;
+ Expanded.IndentExternBlock = FormatStyle::IEBS_AfterExternBlock;
Expanded.BraceWrapping.SplitEmptyFunction = true;
Expanded.BraceWrapping.SplitEmptyRecord = false;
break;
@@ -689,6 +754,7 @@ static FormatStyle expandPresets(const FormatStyle &Style) {
Expanded.BraceWrapping.AfterStruct = true;
Expanded.BraceWrapping.AfterUnion = true;
Expanded.BraceWrapping.AfterExternBlock = true;
+ Expanded.IndentExternBlock = FormatStyle::IEBS_AfterExternBlock;
Expanded.BraceWrapping.BeforeCatch = true;
Expanded.BraceWrapping.BeforeElse = true;
break;
@@ -702,16 +768,32 @@ static FormatStyle expandPresets(const FormatStyle &Style) {
Expanded.BraceWrapping.AfterObjCDeclaration = true;
Expanded.BraceWrapping.AfterStruct = true;
Expanded.BraceWrapping.AfterExternBlock = true;
+ Expanded.IndentExternBlock = FormatStyle::IEBS_AfterExternBlock;
Expanded.BraceWrapping.BeforeCatch = true;
Expanded.BraceWrapping.BeforeElse = true;
+ Expanded.BraceWrapping.BeforeLambdaBody = true;
break;
case FormatStyle::BS_GNU:
- Expanded.BraceWrapping = {true, true, FormatStyle::BWACS_Always,
- true, true, true,
- true, true, true,
- true, true, true,
- true, true, true,
- true};
+ Expanded.BraceWrapping = {
+ /*AfterCaseLabel=*/true,
+ /*AfterClass=*/true,
+ /*AfterControlStatement=*/FormatStyle::BWACS_Always,
+ /*AfterEnum=*/true,
+ /*AfterFunction=*/true,
+ /*AfterNamespace=*/true,
+ /*AfterObjCDeclaration=*/true,
+ /*AfterStruct=*/true,
+ /*AfterUnion=*/true,
+ /*AfterExternBlock=*/true,
+ /*BeforeCatch=*/true,
+ /*BeforeElse=*/true,
+ /*BeforeLambdaBody=*/false,
+ /*BeforeWhile=*/true,
+ /*IndentBraces=*/true,
+ /*SplitEmptyFunction=*/true,
+ /*SplitEmptyRecord=*/true,
+ /*SplitEmptyNamespace=*/true};
+ Expanded.IndentExternBlock = FormatStyle::IEBS_AfterExternBlock;
break;
case FormatStyle::BS_WebKit:
Expanded.BraceWrapping.AfterFunction = true;
@@ -728,14 +810,16 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.AccessModifierOffset = -2;
LLVMStyle.AlignEscapedNewlines = FormatStyle::ENAS_Right;
LLVMStyle.AlignAfterOpenBracket = FormatStyle::BAS_Align;
- LLVMStyle.AlignOperands = true;
+ LLVMStyle.AlignOperands = FormatStyle::OAS_Align;
LLVMStyle.AlignTrailingComments = true;
LLVMStyle.AlignConsecutiveAssignments = false;
+ LLVMStyle.AlignConsecutiveBitFields = false;
LLVMStyle.AlignConsecutiveDeclarations = false;
LLVMStyle.AlignConsecutiveMacros = false;
LLVMStyle.AllowAllArgumentsOnNextLine = true;
LLVMStyle.AllowAllConstructorInitializersOnNextLine = true;
LLVMStyle.AllowAllParametersOfDeclarationOnNextLine = true;
+ LLVMStyle.AllowShortEnumsOnASingleLine = true;
LLVMStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_All;
LLVMStyle.AllowShortBlocksOnASingleLine = FormatStyle::SBS_Never;
LLVMStyle.AllowShortCaseLabelsOnASingleLine = false;
@@ -751,12 +835,25 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.BreakBeforeBinaryOperators = FormatStyle::BOS_None;
LLVMStyle.BreakBeforeTernaryOperators = true;
LLVMStyle.BreakBeforeBraces = FormatStyle::BS_Attach;
- LLVMStyle.BraceWrapping = {false, false, FormatStyle::BWACS_Never,
- false, false, false,
- false, false, false,
- false, false, false,
- false, true, true,
- true};
+ LLVMStyle.BraceWrapping = {/*AfterCaseLabel=*/false,
+ /*AfterClass=*/false,
+ /*AfterControlStatement=*/FormatStyle::BWACS_Never,
+ /*AfterEnum=*/false,
+ /*AfterFunction=*/false,
+ /*AfterNamespace=*/false,
+ /*AfterObjCDeclaration=*/false,
+ /*AfterStruct=*/false,
+ /*AfterUnion=*/false,
+ /*AfterExternBlock=*/false,
+ /*BeforeCatch=*/false,
+ /*BeforeElse=*/false,
+ /*BeforeLambdaBody=*/false,
+ /*BeforeWhile=*/false,
+ /*IndentBraces=*/false,
+ /*SplitEmptyFunction=*/true,
+ /*SplitEmptyRecord=*/true,
+ /*SplitEmptyNamespace=*/true};
+ LLVMStyle.IndentExternBlock = FormatStyle::IEBS_AfterExternBlock;
LLVMStyle.BreakAfterJavaFieldAnnotations = false;
LLVMStyle.BreakConstructorInitializers = FormatStyle::BCIS_BeforeColon;
LLVMStyle.BreakInheritanceList = FormatStyle::BILS_BeforeColon;
@@ -782,10 +879,12 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.IncludeStyle.IncludeIsMainRegex = "(Test)?$";
LLVMStyle.IncludeStyle.IncludeBlocks = tooling::IncludeStyle::IBS_Preserve;
LLVMStyle.IndentCaseLabels = false;
+ LLVMStyle.IndentCaseBlocks = false;
LLVMStyle.IndentGotoLabels = true;
LLVMStyle.IndentPPDirectives = FormatStyle::PPDIS_None;
LLVMStyle.IndentWrappedFunctionNames = false;
LLVMStyle.IndentWidth = 2;
+ LLVMStyle.InsertTrailingCommas = FormatStyle::TCS_None;
LLVMStyle.JavaScriptQuotes = FormatStyle::JSQS_Leave;
LLVMStyle.JavaScriptWrapImports = true;
LLVMStyle.TabWidth = 8;
@@ -794,6 +893,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.NamespaceIndentation = FormatStyle::NI_None;
LLVMStyle.ObjCBinPackProtocolList = FormatStyle::BPS_Auto;
LLVMStyle.ObjCBlockIndentWidth = 2;
+ LLVMStyle.ObjCBreakBeforeNestedBlockParam = true;
LLVMStyle.ObjCSpaceAfterProperty = false;
LLVMStyle.ObjCSpaceBeforeProtocolList = true;
LLVMStyle.PointerAlignment = FormatStyle::PAS_Right;
@@ -835,6 +935,9 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.SortUsingDeclarations = true;
LLVMStyle.StatementMacros.push_back("Q_UNUSED");
LLVMStyle.StatementMacros.push_back("QT_REQUIRE_VERSION");
+ LLVMStyle.WhitespaceSensitiveMacros.push_back("STRINGIZE");
+ LLVMStyle.WhitespaceSensitiveMacros.push_back("PP_STRINGIZE");
+ LLVMStyle.WhitespaceSensitiveMacros.push_back("BOOST_PP_STRINGIZE");
// Defaults that differ when not C++.
if (Language == FormatStyle::LK_TableGen) {
@@ -911,6 +1014,8 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
"PARSE_TEXT_PROTO",
"ParseTextOrDie",
"ParseTextProtoOrDie",
+ "ParseTestProto",
+ "ParsePartialTestProto",
},
/*CanonicalDelimiter=*/"",
/*BasedOnStyle=*/"google",
@@ -924,7 +1029,7 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
if (Language == FormatStyle::LK_Java) {
GoogleStyle.AlignAfterOpenBracket = FormatStyle::BAS_DontAlign;
- GoogleStyle.AlignOperands = false;
+ GoogleStyle.AlignOperands = FormatStyle::OAS_DontAlign;
GoogleStyle.AlignTrailingComments = false;
GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Empty;
GoogleStyle.AllowShortIfStatementsOnASingleLine = FormatStyle::SIS_Never;
@@ -935,13 +1040,18 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
GoogleStyle.SpacesBeforeTrailingComments = 1;
} else if (Language == FormatStyle::LK_JavaScript) {
GoogleStyle.AlignAfterOpenBracket = FormatStyle::BAS_AlwaysBreak;
- GoogleStyle.AlignOperands = false;
+ GoogleStyle.AlignOperands = FormatStyle::OAS_DontAlign;
GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Empty;
+ // TODO: still under discussion whether to switch to SLS_All.
+ GoogleStyle.AllowShortLambdasOnASingleLine = FormatStyle::SLS_Empty;
GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
GoogleStyle.BreakBeforeTernaryOperators = false;
- // taze:, triple slash directives (`/// <...`), @see, which is commonly
- // followed by overlong URLs.
- GoogleStyle.CommentPragmas = "(taze:|^/[ \t]*<|@see)";
+ // taze:, triple slash directives (`/// <...`), tslint:, and @see, which is
+ // commonly followed by overlong URLs.
+ GoogleStyle.CommentPragmas = "(taze:|^/[ \t]*<|tslint:|@see)";
+ // TODO: enable once decided, in particular re disabling bin packing.
+ // https://google.github.io/styleguide/jsguide.html#features-arrays-trailing-comma
+ // GoogleStyle.InsertTrailingCommas = FormatStyle::TCS_Wrapped;
GoogleStyle.MaxEmptyLinesToKeep = 3;
GoogleStyle.NamespaceIndentation = FormatStyle::NI_All;
GoogleStyle.SpacesInContainerLiterals = false;
@@ -966,6 +1076,12 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
// #imports, etc.)
GoogleStyle.IncludeStyle.IncludeBlocks =
tooling::IncludeStyle::IBS_Preserve;
+ } else if (Language == FormatStyle::LK_CSharp) {
+ GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Empty;
+ GoogleStyle.AllowShortIfStatementsOnASingleLine = FormatStyle::SIS_Never;
+ GoogleStyle.BreakStringLiterals = false;
+ GoogleStyle.ColumnLimit = 100;
+ GoogleStyle.NamespaceIndentation = FormatStyle::NI_All;
}
return GoogleStyle;
@@ -1061,7 +1177,7 @@ FormatStyle getWebKitStyle() {
FormatStyle Style = getLLVMStyle();
Style.AccessModifierOffset = -4;
Style.AlignAfterOpenBracket = FormatStyle::BAS_DontAlign;
- Style.AlignOperands = false;
+ Style.AlignOperands = FormatStyle::OAS_DontAlign;
Style.AlignTrailingComments = false;
Style.AllowShortBlocksOnASingleLine = FormatStyle::SBS_Empty;
Style.BreakBeforeBinaryOperators = FormatStyle::BOS_All;
@@ -1110,9 +1226,12 @@ FormatStyle getMicrosoftStyle(FormatStyle::LanguageKind Language) {
Style.BraceWrapping.AfterObjCDeclaration = true;
Style.BraceWrapping.AfterStruct = true;
Style.BraceWrapping.AfterExternBlock = true;
+ Style.IndentExternBlock = FormatStyle::IEBS_AfterExternBlock;
Style.BraceWrapping.BeforeCatch = true;
Style.BraceWrapping.BeforeElse = true;
+ Style.BraceWrapping.BeforeWhile = false;
Style.PenaltyReturnTypeOnItsOwnLine = 1000;
+ Style.AllowShortEnumsOnASingleLine = false;
Style.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_None;
Style.AllowShortCaseLabelsOnASingleLine = false;
Style.AllowShortIfStatementsOnASingleLine = FormatStyle::SIS_Never;
@@ -1207,6 +1326,11 @@ std::error_code parseConfiguration(StringRef Text, FormatStyle *Style) {
StyleSet.Add(std::move(DefaultStyle));
}
*Style = *StyleSet.Get(Language);
+ if (Style->InsertTrailingCommas != FormatStyle::TCS_None &&
+ Style->BinPackArguments) {
+ // See comment on FormatStyle::TCS_Wrapped.
+ return make_error_code(ParseError::BinPackTrailingCommaConflict);
+ }
return make_error_code(ParseError::Success);
}
@@ -1462,6 +1586,75 @@ private:
FormattingAttemptStatus *Status;
};
+/// TrailingCommaInserter inserts trailing commas into container literals.
+/// E.g.:
+/// const x = [
+/// 1,
+/// ];
+/// TrailingCommaInserter runs after formatting. To avoid causing a required
+/// reformatting (and thus reflow), it never inserts a comma that'd exceed the
+/// ColumnLimit.
+///
+/// Because trailing commas disable binpacking of arrays, TrailingCommaInserter
+/// is conceptually incompatible with bin packing.
+class TrailingCommaInserter : public TokenAnalyzer {
+public:
+ TrailingCommaInserter(const Environment &Env, const FormatStyle &Style)
+ : TokenAnalyzer(Env, Style) {}
+
+ std::pair<tooling::Replacements, unsigned>
+ analyze(TokenAnnotator &Annotator,
+ SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ FormatTokenLexer &Tokens) override {
+ AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
+ tooling::Replacements Result;
+ insertTrailingCommas(AnnotatedLines, Result);
+ return {Result, 0};
+ }
+
+private:
+ /// Inserts trailing commas in [] and {} initializers if they wrap over
+ /// multiple lines.
+ void insertTrailingCommas(SmallVectorImpl<AnnotatedLine *> &Lines,
+ tooling::Replacements &Result) {
+ for (AnnotatedLine *Line : Lines) {
+ insertTrailingCommas(Line->Children, Result);
+ if (!Line->Affected)
+ continue;
+ for (FormatToken *FormatTok = Line->First; FormatTok;
+ FormatTok = FormatTok->Next) {
+ if (FormatTok->NewlinesBefore == 0)
+ continue;
+ FormatToken *Matching = FormatTok->MatchingParen;
+ if (!Matching || !FormatTok->getPreviousNonComment())
+ continue;
+ if (!(FormatTok->is(tok::r_square) &&
+ Matching->is(TT_ArrayInitializerLSquare)) &&
+ !(FormatTok->is(tok::r_brace) && Matching->is(TT_DictLiteral)))
+ continue;
+ FormatToken *Prev = FormatTok->getPreviousNonComment();
+ if (Prev->is(tok::comma) || Prev->is(tok::semi))
+ continue;
+ // getEndLoc is not reliably set during re-lexing, use text length
+ // instead.
+ SourceLocation Start =
+ Prev->Tok.getLocation().getLocWithOffset(Prev->TokenText.size());
+ // If inserting a comma would push the code over the column limit, skip
+ // this location - it'd introduce an unstable formatting due to the
+ // required reflow.
+ unsigned ColumnNumber =
+ Env.getSourceManager().getSpellingColumnNumber(Start);
+ if (ColumnNumber > Style.ColumnLimit)
+ continue;
+ // Comma insertions cannot conflict with each other, and this pass has a
+ // clean set of Replacements, so the operation below cannot fail.
+ cantFail(Result.add(
+ tooling::Replacement(Env.getSourceManager(), Start, 0, ",")));
+ }
+ }
+ }
+};
+
// This class clean up the erroneous/redundant code around the given ranges in
// file.
class Cleaner : public TokenAnalyzer {
@@ -1808,7 +2001,7 @@ private:
<< FormatTok->Tok.getLocation().printToString(
SourceManager)
<< " token: " << FormatTok->TokenText << " token type: "
- << getTokenTypeName(FormatTok->Type) << "\n");
+ << getTokenTypeName(FormatTok->getType()) << "\n");
return true;
}
if (guessIsObjC(SourceManager, Line->Children, Keywords))
@@ -1951,8 +2144,7 @@ static void sortCppIncludes(const FormatStyle &Style,
// enough as additional newlines might be added or removed across #include
// blocks. This we handle below by generating the updated #imclude blocks and
// comparing it to the original.
- if (Indices.size() == Includes.size() &&
- std::is_sorted(Indices.begin(), Indices.end()) &&
+ if (Indices.size() == Includes.size() && llvm::is_sorted(Indices) &&
Style.IncludeStyle.IncludeBlocks == tooling::IncludeStyle::IBS_Preserve)
return;
@@ -1973,8 +2165,8 @@ static void sortCppIncludes(const FormatStyle &Style,
// If the #includes are out of order, we generate a single replacement fixing
// the entire range of blocks. Otherwise, no replacement is generated.
- if (replaceCRLF(result) ==
- replaceCRLF(Code.substr(IncludesBeginOffset, IncludesBlockSize)))
+ if (replaceCRLF(result) == replaceCRLF(std::string(Code.substr(
+ IncludesBeginOffset, IncludesBlockSize))))
return;
auto Err = Replaces.add(tooling::Replacement(
@@ -2142,8 +2334,8 @@ static void sortJavaImports(const FormatStyle &Style,
// If the imports are out of order, we generate a single replacement fixing
// the entire block. Otherwise, no replacement is generated.
- if (replaceCRLF(result) ==
- replaceCRLF(Code.substr(Imports.front().Offset, ImportsBlockSize)))
+ if (replaceCRLF(result) == replaceCRLF(std::string(Code.substr(
+ Imports.front().Offset, ImportsBlockSize))))
return;
auto Err = Replaces.add(tooling::Replacement(FileName, Imports.front().Offset,
@@ -2431,6 +2623,12 @@ reformat(const FormatStyle &Style, StringRef Code,
return Formatter(Env, Expanded, Status).process();
});
+ if (Style.Language == FormatStyle::LK_JavaScript &&
+ Style.InsertTrailingCommas == FormatStyle::TCS_Wrapped)
+ Passes.emplace_back([&](const Environment &Env) {
+ return TrailingCommaInserter(Env, Expanded).process();
+ });
+
auto Env =
std::make_unique<Environment>(Code, FileName, Ranges, FirstStartColumn,
NextStartColumn, LastStartColumn);
@@ -2518,7 +2716,8 @@ LangOptions getFormattingLangOpts(const FormatStyle &Style) {
LangOpts.CPlusPlus11 = LexingStd >= FormatStyle::LS_Cpp11;
LangOpts.CPlusPlus14 = LexingStd >= FormatStyle::LS_Cpp14;
LangOpts.CPlusPlus17 = LexingStd >= FormatStyle::LS_Cpp17;
- LangOpts.CPlusPlus2a = LexingStd >= FormatStyle::LS_Cpp20;
+ LangOpts.CPlusPlus20 = LexingStd >= FormatStyle::LS_Cpp20;
+ LangOpts.Char8 = LexingStd >= FormatStyle::LS_Cpp20;
LangOpts.LineComment = 1;
bool AlternativeOperators = Style.isCpp();
@@ -2532,7 +2731,7 @@ LangOptions getFormattingLangOpts(const FormatStyle &Style) {
const char *StyleOptionHelpDescription =
"Coding style, currently supports:\n"
- " LLVM, Google, Chromium, Mozilla, WebKit.\n"
+ " LLVM, GNU, Google, Chromium, Microsoft, Mozilla, WebKit.\n"
"Use -style=file to load style configuration from\n"
".clang-format file located in one of the parent\n"
"directories of the source file (or current\n"
diff --git a/clang/lib/Format/FormatToken.cpp b/clang/lib/Format/FormatToken.cpp
index 90d09064bb43..7d792974cd57 100644
--- a/clang/lib/Format/FormatToken.cpp
+++ b/clang/lib/Format/FormatToken.cpp
@@ -50,6 +50,7 @@ bool FormatToken::isSimpleTypeSpecifier() const {
case tok::kw_half:
case tok::kw_float:
case tok::kw_double:
+ case tok::kw___bf16:
case tok::kw__Float16:
case tok::kw___float128:
case tok::kw_wchar_t:
@@ -84,8 +85,8 @@ unsigned CommaSeparatedList::formatAfterToken(LineState &State,
const FormatToken *LBrace =
State.NextToken->Previous->getPreviousNonComment();
if (!LBrace || !LBrace->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) ||
- LBrace->BlockKind == BK_Block || LBrace->Type == TT_DictLiteral ||
- LBrace->Next->Type == TT_DesignatedInitializerPeriod)
+ LBrace->BlockKind == BK_Block || LBrace->getType() == TT_DictLiteral ||
+ LBrace->Next->getType() == TT_DesignatedInitializerPeriod)
return 0;
// Calculate the number of code points we have to format this list. As the
diff --git a/clang/lib/Format/FormatToken.h b/clang/lib/Format/FormatToken.h
index e9cd327754ef..d4287f53fde3 100644
--- a/clang/lib/Format/FormatToken.h
+++ b/clang/lib/Format/FormatToken.h
@@ -54,6 +54,7 @@ namespace format {
TYPE(InheritanceComma) \
TYPE(InlineASMBrace) \
TYPE(InlineASMColon) \
+ TYPE(InlineASMSymbolicNameLSquare) \
TYPE(JavaAnnotation) \
TYPE(JsComputedPropertyName) \
TYPE(JsExponentiation) \
@@ -101,10 +102,20 @@ namespace format {
TYPE(TrailingUnaryOperator) \
TYPE(TypenameMacro) \
TYPE(UnaryOperator) \
+ TYPE(UntouchableMacroFunc) \
TYPE(CSharpStringLiteral) \
+ TYPE(CSharpNamedArgumentColon) \
+ TYPE(CSharpNullable) \
TYPE(CSharpNullCoalescing) \
+ TYPE(CSharpNullConditional) \
+ TYPE(CSharpNullConditionalLSquare) \
+ TYPE(CSharpGenericTypeConstraint) \
+ TYPE(CSharpGenericTypeConstraintColon) \
+ TYPE(CSharpGenericTypeConstraintComma) \
TYPE(Unknown)
+/// Determines the semantic type of a syntactic token, e.g. whether "<" is a
+/// template opener or binary operator.
enum TokenType {
#define TYPE(X) TT_##X,
LIST_TOKEN_TYPES
@@ -172,6 +183,12 @@ struct FormatToken {
/// before the token.
bool MustBreakBefore = false;
+ /// Whether to not align across this token
+ ///
+ /// This happens for example when a preprocessor directive ended directly
+ /// before the token, but very rarely otherwise.
+ bool MustBreakAlignBefore = false;
+
/// The raw text of the token.
///
/// Contains the raw token text without leading whitespace and without leading
@@ -184,7 +201,10 @@ struct FormatToken {
/// Contains the kind of block if this token is a brace.
BraceBlockKind BlockKind = BK_Unknown;
- TokenType Type = TT_Unknown;
+ /// Returns the token's type, e.g. whether "<" is a template opener or
+ /// binary operator.
+ TokenType getType() const { return Type; }
+ void setType(TokenType T) { Type = T; }
/// The number of spaces that should be inserted before this token.
unsigned SpacesRequiredBefore = 0;
@@ -504,6 +524,9 @@ struct FormatToken {
/// Returns \c true if this tokens starts a block-type list, i.e. a
/// list that should be indented with a block indent.
bool opensBlockOrBlockTypeList(const FormatStyle &Style) const {
+ // C# Does not indent object initialisers as continuations.
+ if (is(tok::l_brace) && BlockKind == BK_BracedInit && Style.isCSharp())
+ return true;
if (is(TT_TemplateString) && opensScope())
return true;
return is(TT_ArrayInitializerLSquare) || is(TT_ProtoExtensionLSquare) ||
@@ -579,6 +602,8 @@ private:
return Previous->endsSequenceInternal(K1, Tokens...);
return is(K1) && Previous && Previous->endsSequenceInternal(Tokens...);
}
+
+ TokenType Type = TT_Unknown;
};
class ContinuationIndenter;
@@ -770,6 +795,8 @@ struct AdditionalKeywords {
kw_unchecked = &IdentTable.get("unchecked");
kw_unsafe = &IdentTable.get("unsafe");
kw_ushort = &IdentTable.get("ushort");
+ kw_when = &IdentTable.get("when");
+ kw_where = &IdentTable.get("where");
// Keep this at the end of the constructor to make sure everything here
// is
@@ -786,7 +813,8 @@ struct AdditionalKeywords {
kw_fixed, kw_foreach, kw_implicit, kw_in, kw_interface, kw_internal,
kw_is, kw_lock, kw_null, kw_object, kw_out, kw_override, kw_params,
kw_readonly, kw_ref, kw_string, kw_stackalloc, kw_sbyte, kw_sealed,
- kw_uint, kw_ulong, kw_unchecked, kw_unsafe, kw_ushort,
+ kw_uint, kw_ulong, kw_unchecked, kw_unsafe, kw_ushort, kw_when,
+ kw_where,
// Keywords from the JavaScript section.
kw_as, kw_async, kw_await, kw_declare, kw_finally, kw_from,
kw_function, kw_get, kw_import, kw_is, kw_let, kw_module, kw_readonly,
@@ -890,13 +918,77 @@ struct AdditionalKeywords {
IdentifierInfo *kw_unchecked;
IdentifierInfo *kw_unsafe;
IdentifierInfo *kw_ushort;
+ IdentifierInfo *kw_when;
+ IdentifierInfo *kw_where;
/// Returns \c true if \p Tok is a true JavaScript identifier, returns
/// \c false if it is a keyword or a pseudo keyword.
- bool IsJavaScriptIdentifier(const FormatToken &Tok) const {
- return Tok.is(tok::identifier) &&
- JsExtraKeywords.find(Tok.Tok.getIdentifierInfo()) ==
- JsExtraKeywords.end();
+  /// If \c AcceptIdentifierName is true, returns true not only for real
+  /// identifiers, but also for IdentifierName tokens (aka pseudo-keywords),
+  /// such as ``yield``.
+ bool IsJavaScriptIdentifier(const FormatToken &Tok,
+ bool AcceptIdentifierName = true) const {
+ // Based on the list of JavaScript & TypeScript keywords here:
+ // https://github.com/microsoft/TypeScript/blob/master/src/compiler/scanner.ts#L74
+ switch (Tok.Tok.getKind()) {
+ case tok::kw_break:
+ case tok::kw_case:
+ case tok::kw_catch:
+ case tok::kw_class:
+ case tok::kw_continue:
+ case tok::kw_const:
+ case tok::kw_default:
+ case tok::kw_delete:
+ case tok::kw_do:
+ case tok::kw_else:
+ case tok::kw_enum:
+ case tok::kw_export:
+ case tok::kw_false:
+ case tok::kw_for:
+ case tok::kw_if:
+ case tok::kw_import:
+ case tok::kw_module:
+ case tok::kw_new:
+ case tok::kw_private:
+ case tok::kw_protected:
+ case tok::kw_public:
+ case tok::kw_return:
+ case tok::kw_static:
+ case tok::kw_switch:
+ case tok::kw_this:
+ case tok::kw_throw:
+ case tok::kw_true:
+ case tok::kw_try:
+ case tok::kw_typeof:
+ case tok::kw_void:
+ case tok::kw_while:
+ // These are JS keywords that are lexed by LLVM/clang as keywords.
+ return false;
+ case tok::identifier: {
+ // For identifiers, make sure they are true identifiers, excluding the
+ // JavaScript pseudo-keywords (not lexed by LLVM/clang as keywords).
+ bool IsPseudoKeyword =
+ JsExtraKeywords.find(Tok.Tok.getIdentifierInfo()) !=
+ JsExtraKeywords.end();
+ return AcceptIdentifierName || !IsPseudoKeyword;
+ }
+ default:
+ // Other keywords are handled in the switch below, to avoid problems due
+ // to duplicate case labels when using the #include trick.
+ break;
+ }
+
+ switch (Tok.Tok.getKind()) {
+ // Handle C++ keywords not included above: these are all JS identifiers.
+#define KEYWORD(X, Y) case tok::kw_##X:
+#include "clang/Basic/TokenKinds.def"
+ // #undef KEYWORD is not needed -- it's #undef-ed at the end of
+ // TokenKinds.def
+ return true;
+ default:
+ // All other tokens (punctuation etc) are not JS identifiers.
+ return false;
+ }
}
/// Returns \c true if \p Tok is a C# keyword, returns
diff --git a/clang/lib/Format/FormatTokenLexer.cpp b/clang/lib/Format/FormatTokenLexer.cpp
index ef20ba884fb3..1fd153d1112e 100644
--- a/clang/lib/Format/FormatTokenLexer.cpp
+++ b/clang/lib/Format/FormatTokenLexer.cpp
@@ -22,13 +22,15 @@
namespace clang {
namespace format {
-FormatTokenLexer::FormatTokenLexer(const SourceManager &SourceMgr, FileID ID,
- unsigned Column, const FormatStyle &Style,
- encoding::Encoding Encoding)
+FormatTokenLexer::FormatTokenLexer(
+ const SourceManager &SourceMgr, FileID ID, unsigned Column,
+ const FormatStyle &Style, encoding::Encoding Encoding,
+ llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator,
+ IdentifierTable &IdentTable)
: FormatTok(nullptr), IsFirstToken(true), StateStack({LexerState::NORMAL}),
Column(Column), TrailingWhitespace(0), SourceMgr(SourceMgr), ID(ID),
- Style(Style), IdentTable(getFormattingLangOpts(Style)),
- Keywords(IdentTable), Encoding(Encoding), FirstInLineIndex(0),
+ Style(Style), IdentTable(IdentTable), Keywords(IdentTable),
+ Encoding(Encoding), Allocator(Allocator), FirstInLineIndex(0),
FormattingDisabled(false), MacroBlockBeginRegex(Style.MacroBlockBegin),
MacroBlockEndRegex(Style.MacroBlockEnd) {
Lex.reset(new Lexer(ID, SourceMgr.getBuffer(ID), SourceMgr,
@@ -43,6 +45,11 @@ FormatTokenLexer::FormatTokenLexer(const SourceManager &SourceMgr, FileID ID,
Macros.insert({&IdentTable.get(TypenameMacro), TT_TypenameMacro});
for (const std::string &NamespaceMacro : Style.NamespaceMacros)
Macros.insert({&IdentTable.get(NamespaceMacro), TT_NamespaceMacro});
+ for (const std::string &WhitespaceSensitiveMacro :
+ Style.WhitespaceSensitiveMacros) {
+ Macros.insert(
+ {&IdentTable.get(WhitespaceSensitiveMacro), TT_UntouchableMacroFunc});
+ }
}
ArrayRef<FormatToken *> FormatTokenLexer::lex() {
@@ -57,6 +64,10 @@ ArrayRef<FormatToken *> FormatTokenLexer::lex() {
if (Style.Language == FormatStyle::LK_TextProto)
tryParsePythonComment();
tryMergePreviousTokens();
+ if (Style.isCSharp())
+ // This needs to come after tokens have been merged so that C#
+ // string literals are correctly identified.
+ handleCSharpVerbatimAndInterpolatedStrings();
if (Tokens.back()->NewlinesBefore > 0 || Tokens.back()->IsMultiline)
FirstInLineIndex = Tokens.size() - 1;
} while (Tokens.back()->Tok.isNot(tok::eof));
@@ -70,15 +81,19 @@ void FormatTokenLexer::tryMergePreviousTokens() {
return;
if (tryMergeLessLess())
return;
+ if (tryMergeForEach())
+ return;
+ if (Style.isCpp() && tryTransformTryUsageForC())
+ return;
if (Style.isCSharp()) {
if (tryMergeCSharpKeywordVariables())
return;
- if (tryMergeCSharpVerbatimStringLiteral())
+ if (tryMergeCSharpStringLiteral())
return;
if (tryMergeCSharpDoubleQuestion())
return;
- if (tryMergeCSharpNullConditionals())
+ if (tryMergeCSharpNullConditional())
return;
if (tryTransformCSharpForEach())
return;
@@ -120,8 +135,11 @@ void FormatTokenLexer::tryMergePreviousTokens() {
Tokens.back()->Tok.setKind(tok::starequal);
return;
}
- if (tryMergeTokens(JSNullishOperator, TT_JsNullishCoalescingOperator))
+ if (tryMergeTokens(JSNullishOperator, TT_JsNullishCoalescingOperator)) {
+ // Treat like the "||" operator (as opposed to the ternary ?).
+ Tokens.back()->Tok.setKind(tok::pipepipe);
return;
+ }
if (tryMergeTokens(JSNullPropagatingOperator,
TT_JsNullPropagatingOperator)) {
// Treat like a regular "." access.
@@ -151,7 +169,7 @@ bool FormatTokenLexer::tryMergeNSStringLiteral() {
At->TokenText = StringRef(At->TokenText.begin(),
String->TokenText.end() - At->TokenText.begin());
At->ColumnWidth += String->ColumnWidth;
- At->Type = TT_ObjCStringLiteral;
+ At->setType(TT_ObjCStringLiteral);
Tokens.erase(Tokens.end() - 1);
return true;
}
@@ -170,7 +188,7 @@ bool FormatTokenLexer::tryMergeJSPrivateIdentifier() {
StringRef(Hash->TokenText.begin(),
Identifier->TokenText.end() - Hash->TokenText.begin());
Hash->ColumnWidth += Identifier->ColumnWidth;
- Hash->Type = TT_JsPrivateIdentifier;
+ Hash->setType(TT_JsPrivateIdentifier);
Tokens.erase(Tokens.end() - 1);
return true;
}
@@ -178,18 +196,71 @@ bool FormatTokenLexer::tryMergeJSPrivateIdentifier() {
// Search for verbatim or interpolated string literals @"ABC" or
// $"aaaaa{abc}aaaaa" i and mark the token as TT_CSharpStringLiteral, and to
// prevent splitting of @, $ and ".
-bool FormatTokenLexer::tryMergeCSharpVerbatimStringLiteral() {
+// Merging of multiline verbatim strings with embedded '"' is handled in
+// handleCSharpVerbatimAndInterpolatedStrings with lower-level lexing.
+bool FormatTokenLexer::tryMergeCSharpStringLiteral() {
if (Tokens.size() < 2)
return false;
- auto &At = *(Tokens.end() - 2);
+
+ // Interpolated strings could contain { } with " characters inside.
+ // $"{x ?? "null"}"
+  // should not be split into $"{x ?? ", null, "}" but should be treated as a
+ // single string-literal.
+ //
+ // We opt not to try and format expressions inside {} within a C#
+ // interpolated string. Formatting expressions within an interpolated string
+ // would require similar work as that done for JavaScript template strings
+ // in `handleTemplateStrings()`.
+ auto &CSharpInterpolatedString = *(Tokens.end() - 2);
+ if (CSharpInterpolatedString->getType() == TT_CSharpStringLiteral &&
+ (CSharpInterpolatedString->TokenText.startswith(R"($")") ||
+ CSharpInterpolatedString->TokenText.startswith(R"($@")"))) {
+ int UnmatchedOpeningBraceCount = 0;
+
+ auto TokenTextSize = CSharpInterpolatedString->TokenText.size();
+ for (size_t Index = 0; Index < TokenTextSize; ++Index) {
+ char C = CSharpInterpolatedString->TokenText[Index];
+ if (C == '{') {
+ // "{{" inside an interpolated string is an escaped '{' so skip it.
+ if (Index + 1 < TokenTextSize &&
+ CSharpInterpolatedString->TokenText[Index + 1] == '{') {
+ ++Index;
+ continue;
+ }
+ ++UnmatchedOpeningBraceCount;
+ } else if (C == '}') {
+ // "}}" inside an interpolated string is an escaped '}' so skip it.
+ if (Index + 1 < TokenTextSize &&
+ CSharpInterpolatedString->TokenText[Index + 1] == '}') {
+ ++Index;
+ continue;
+ }
+ --UnmatchedOpeningBraceCount;
+ }
+ }
+
+ if (UnmatchedOpeningBraceCount > 0) {
+ auto &NextToken = *(Tokens.end() - 1);
+ CSharpInterpolatedString->TokenText =
+ StringRef(CSharpInterpolatedString->TokenText.begin(),
+ NextToken->TokenText.end() -
+ CSharpInterpolatedString->TokenText.begin());
+ CSharpInterpolatedString->ColumnWidth += NextToken->ColumnWidth;
+ Tokens.erase(Tokens.end() - 1);
+ return true;
+ }
+ }
+
+ // Look for @"aaaaaa" or $"aaaaaa".
auto &String = *(Tokens.end() - 1);
+ if (!String->is(tok::string_literal))
+ return false;
- // Look for $"aaaaaa" @"aaaaaa".
- if (!(At->is(tok::at) || At->TokenText == "$") ||
- !String->is(tok::string_literal))
+ auto &At = *(Tokens.end() - 2);
+ if (!(At->is(tok::at) || At->TokenText == "$"))
return false;
- if (Tokens.size() >= 2 && At->is(tok::at)) {
+ if (Tokens.size() > 2 && At->is(tok::at)) {
auto &Dollar = *(Tokens.end() - 3);
if (Dollar->TokenText == "$") {
// This looks like $@"aaaaa" so we need to combine all 3 tokens.
@@ -198,7 +269,7 @@ bool FormatTokenLexer::tryMergeCSharpVerbatimStringLiteral() {
StringRef(Dollar->TokenText.begin(),
String->TokenText.end() - Dollar->TokenText.begin());
Dollar->ColumnWidth += (At->ColumnWidth + String->ColumnWidth);
- Dollar->Type = TT_CSharpStringLiteral;
+ Dollar->setType(TT_CSharpStringLiteral);
Tokens.erase(Tokens.end() - 2);
Tokens.erase(Tokens.end() - 1);
return true;
@@ -210,11 +281,18 @@ bool FormatTokenLexer::tryMergeCSharpVerbatimStringLiteral() {
At->TokenText = StringRef(At->TokenText.begin(),
String->TokenText.end() - At->TokenText.begin());
At->ColumnWidth += String->ColumnWidth;
- At->Type = TT_CSharpStringLiteral;
+ At->setType(TT_CSharpStringLiteral);
Tokens.erase(Tokens.end() - 1);
return true;
}
+// Valid C# attribute targets:
+// https://docs.microsoft.com/en-us/dotnet/csharp/programming-guide/concepts/attributes/#attribute-targets
+const llvm::StringSet<> FormatTokenLexer::CSharpAttributeTargets = {
+ "assembly", "module", "field", "event", "method",
+ "param", "property", "return", "type",
+};
+
bool FormatTokenLexer::tryMergeCSharpDoubleQuestion() {
if (Tokens.size() < 2)
return false;
@@ -222,12 +300,38 @@ bool FormatTokenLexer::tryMergeCSharpDoubleQuestion() {
auto &SecondQuestion = *(Tokens.end() - 1);
if (!FirstQuestion->is(tok::question) || !SecondQuestion->is(tok::question))
return false;
- FirstQuestion->Tok.setKind(tok::question);
+ FirstQuestion->Tok.setKind(tok::question); // no '??' in clang tokens.
FirstQuestion->TokenText = StringRef(FirstQuestion->TokenText.begin(),
SecondQuestion->TokenText.end() -
FirstQuestion->TokenText.begin());
FirstQuestion->ColumnWidth += SecondQuestion->ColumnWidth;
- FirstQuestion->Type = TT_CSharpNullCoalescing;
+ FirstQuestion->setType(TT_CSharpNullCoalescing);
+ Tokens.erase(Tokens.end() - 1);
+ return true;
+}
+
+// Merge '?[' and '?.' pairs into single tokens.
+bool FormatTokenLexer::tryMergeCSharpNullConditional() {
+ if (Tokens.size() < 2)
+ return false;
+ auto &Question = *(Tokens.end() - 2);
+ auto &PeriodOrLSquare = *(Tokens.end() - 1);
+ if (!Question->is(tok::question) ||
+ !PeriodOrLSquare->isOneOf(tok::l_square, tok::period))
+ return false;
+ Question->TokenText =
+ StringRef(Question->TokenText.begin(),
+ PeriodOrLSquare->TokenText.end() - Question->TokenText.begin());
+ Question->ColumnWidth += PeriodOrLSquare->ColumnWidth;
+
+ if (PeriodOrLSquare->is(tok::l_square)) {
+ Question->Tok.setKind(tok::question); // no '?[' in clang tokens.
+ Question->setType(TT_CSharpNullConditionalLSquare);
+ } else {
+ Question->Tok.setKind(tok::question); // no '?.' in clang tokens.
+ Question->setType(TT_CSharpNullConditional);
+ }
+
Tokens.erase(Tokens.end() - 1);
return true;
}
@@ -246,24 +350,7 @@ bool FormatTokenLexer::tryMergeCSharpKeywordVariables() {
At->TokenText = StringRef(At->TokenText.begin(),
Keyword->TokenText.end() - At->TokenText.begin());
At->ColumnWidth += Keyword->ColumnWidth;
- At->Type = Keyword->Type;
- Tokens.erase(Tokens.end() - 1);
- return true;
-}
-
-// In C# merge the Identifier and the ? together e.g. arg?.
-bool FormatTokenLexer::tryMergeCSharpNullConditionals() {
- if (Tokens.size() < 2)
- return false;
- auto &Identifier = *(Tokens.end() - 2);
- auto &Question = *(Tokens.end() - 1);
- if (!Identifier->isOneOf(tok::r_square, tok::identifier) ||
- !Question->is(tok::question))
- return false;
- Identifier->TokenText =
- StringRef(Identifier->TokenText.begin(),
- Question->TokenText.end() - Identifier->TokenText.begin());
- Identifier->ColumnWidth += Question->ColumnWidth;
+ At->setType(Keyword->getType());
Tokens.erase(Tokens.end() - 1);
return true;
}
@@ -278,11 +365,53 @@ bool FormatTokenLexer::tryTransformCSharpForEach() {
if (Identifier->TokenText != "foreach")
return false;
- Identifier->Type = TT_ForEachMacro;
+ Identifier->setType(TT_ForEachMacro);
Identifier->Tok.setKind(tok::kw_for);
return true;
}
+bool FormatTokenLexer::tryMergeForEach() {
+ if (Tokens.size() < 2)
+ return false;
+ auto &For = *(Tokens.end() - 2);
+ auto &Each = *(Tokens.end() - 1);
+ if (!For->is(tok::kw_for))
+ return false;
+ if (!Each->is(tok::identifier))
+ return false;
+ if (Each->TokenText != "each")
+ return false;
+
+ For->setType(TT_ForEachMacro);
+ For->Tok.setKind(tok::kw_for);
+
+ For->TokenText = StringRef(For->TokenText.begin(),
+ Each->TokenText.end() - For->TokenText.begin());
+ For->ColumnWidth += Each->ColumnWidth;
+ Tokens.erase(Tokens.end() - 1);
+ return true;
+}
+
+bool FormatTokenLexer::tryTransformTryUsageForC() {
+ if (Tokens.size() < 2)
+ return false;
+ auto &Try = *(Tokens.end() - 2);
+ if (!Try->is(tok::kw_try))
+ return false;
+ auto &Next = *(Tokens.end() - 1);
+ if (Next->isOneOf(tok::l_brace, tok::colon))
+ return false;
+
+ if (Tokens.size() > 2) {
+ auto &At = *(Tokens.end() - 3);
+ if (At->is(tok::at))
+ return false;
+ }
+
+ Try->Tok.setKind(tok::identifier);
+ return true;
+}
+
bool FormatTokenLexer::tryMergeLessLess() {
// Merge X,less,less,Y into X,lessless,Y unless X or Y is less.
if (Tokens.size() < 3)
@@ -329,7 +458,7 @@ bool FormatTokenLexer::tryMergeTokens(ArrayRef<tok::TokenKind> Kinds,
First[0]->TokenText = StringRef(First[0]->TokenText.data(),
First[0]->TokenText.size() + AddLength);
First[0]->ColumnWidth += AddLength;
- First[0]->Type = NewType;
+ First[0]->setType(NewType);
return true;
}
@@ -418,7 +547,7 @@ void FormatTokenLexer::tryParseJSRegexLiteral() {
}
}
- RegexToken->Type = TT_RegexLiteral;
+ RegexToken->setType(TT_RegexLiteral);
// Treat regex literals like other string_literals.
RegexToken->Tok.setKind(tok::string_literal);
RegexToken->TokenText = StringRef(RegexBegin, Offset - RegexBegin);
@@ -427,6 +556,68 @@ void FormatTokenLexer::tryParseJSRegexLiteral() {
resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(Offset)));
}
+void FormatTokenLexer::handleCSharpVerbatimAndInterpolatedStrings() {
+ FormatToken *CSharpStringLiteral = Tokens.back();
+
+ if (CSharpStringLiteral->getType() != TT_CSharpStringLiteral)
+ return;
+
+ // Deal with multiline strings.
+ if (!(CSharpStringLiteral->TokenText.startswith(R"(@")") ||
+ CSharpStringLiteral->TokenText.startswith(R"($@")")))
+ return;
+
+ const char *StrBegin =
+ Lex->getBufferLocation() - CSharpStringLiteral->TokenText.size();
+ const char *Offset = StrBegin;
+ if (CSharpStringLiteral->TokenText.startswith(R"(@")"))
+ Offset += 2;
+ else // CSharpStringLiteral->TokenText.startswith(R"($@")")
+ Offset += 3;
+
+ // Look for a terminating '"' in the current file buffer.
+ // Make no effort to format code within an interpolated or verbatim string.
+ for (; Offset != Lex->getBuffer().end(); ++Offset) {
+ if (Offset[0] == '"') {
+ // "" within a verbatim string is an escaped double quote: skip it.
+ if (Offset + 1 < Lex->getBuffer().end() && Offset[1] == '"')
+ ++Offset;
+ else
+ break;
+ }
+ }
+
+ // Make no attempt to format code properly if a verbatim string is
+ // unterminated.
+ if (Offset == Lex->getBuffer().end())
+ return;
+
+ StringRef LiteralText(StrBegin, Offset - StrBegin + 1);
+ CSharpStringLiteral->TokenText = LiteralText;
+
+ // Adjust width for potentially multiline string literals.
+ size_t FirstBreak = LiteralText.find('\n');
+ StringRef FirstLineText = FirstBreak == StringRef::npos
+ ? LiteralText
+ : LiteralText.substr(0, FirstBreak);
+ CSharpStringLiteral->ColumnWidth = encoding::columnWidthWithTabs(
+ FirstLineText, CSharpStringLiteral->OriginalColumn, Style.TabWidth,
+ Encoding);
+ size_t LastBreak = LiteralText.rfind('\n');
+ if (LastBreak != StringRef::npos) {
+ CSharpStringLiteral->IsMultiline = true;
+ unsigned StartColumn = 0;
+ CSharpStringLiteral->LastLineColumnWidth = encoding::columnWidthWithTabs(
+ LiteralText.substr(LastBreak + 1, LiteralText.size()), StartColumn,
+ Style.TabWidth, Encoding);
+ }
+
+ SourceLocation loc = Offset < Lex->getBuffer().end()
+ ? Lex->getSourceLocation(Offset + 1)
+ : SourceMgr.getLocForEndOfFile(ID);
+ resetLexer(SourceMgr.getFileOffset(loc));
+}
+
void FormatTokenLexer::handleTemplateStrings() {
FormatToken *BacktickToken = Tokens.back();
@@ -468,7 +659,7 @@ void FormatTokenLexer::handleTemplateStrings() {
}
StringRef LiteralText(TmplBegin, Offset - TmplBegin + 1);
- BacktickToken->Type = TT_TemplateString;
+ BacktickToken->setType(TT_TemplateString);
BacktickToken->Tok.setKind(tok::string_literal);
BacktickToken->TokenText = LiteralText;
@@ -506,7 +697,7 @@ void FormatTokenLexer::tryParsePythonComment() {
if (To == StringRef::npos)
To = Lex->getBuffer().size();
size_t Len = To - From;
- HashToken->Type = TT_LineComment;
+ HashToken->setType(TT_LineComment);
HashToken->Tok.setKind(tok::comment);
HashToken->TokenText = Lex->getBuffer().substr(From, Len);
SourceLocation Loc = To < Lex->getBuffer().size()
@@ -604,7 +795,7 @@ bool FormatTokenLexer::tryMergeConflictMarkers() {
// We do not need to build a complete token here, as we will skip it
// during parsing anyway (as we must not touch whitespace around conflict
// markers).
- Tokens.back()->Type = Type;
+ Tokens.back()->setType(Type);
Tokens.back()->Tok.setKind(tok::kw___unknown_anytype);
Tokens.push_back(Next);
@@ -691,13 +882,13 @@ FormatToken *FormatTokenLexer::getNextToken() {
break;
case '\\':
if (i + 1 == e || (Text[i + 1] != '\r' && Text[i + 1] != '\n'))
- FormatTok->Type = TT_ImplicitStringLiteral;
+ FormatTok->setType(TT_ImplicitStringLiteral);
break;
default:
- FormatTok->Type = TT_ImplicitStringLiteral;
+ FormatTok->setType(TT_ImplicitStringLiteral);
break;
}
- if (FormatTok->Type == TT_ImplicitStringLiteral)
+ if (FormatTok->getType() == TT_ImplicitStringLiteral)
break;
}
@@ -825,12 +1016,12 @@ FormatToken *FormatTokenLexer::getNextToken() {
Tokens.back()->Tok.getIdentifierInfo()->getPPKeywordID() ==
tok::pp_define) &&
it != Macros.end()) {
- FormatTok->Type = it->second;
+ FormatTok->setType(it->second);
} else if (FormatTok->is(tok::identifier)) {
if (MacroBlockBeginRegex.match(Text)) {
- FormatTok->Type = TT_MacroBlockBegin;
+ FormatTok->setType(TT_MacroBlockBegin);
} else if (MacroBlockEndRegex.match(Text)) {
- FormatTok->Type = TT_MacroBlockEnd;
+ FormatTok->setType(TT_MacroBlockEnd);
}
}
}
diff --git a/clang/lib/Format/FormatTokenLexer.h b/clang/lib/Format/FormatTokenLexer.h
index 611211be055a..6b08677e3369 100644
--- a/clang/lib/Format/FormatTokenLexer.h
+++ b/clang/lib/Format/FormatTokenLexer.h
@@ -21,6 +21,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Regex.h"
#include <stack>
@@ -37,7 +38,9 @@ enum LexerState {
class FormatTokenLexer {
public:
FormatTokenLexer(const SourceManager &SourceMgr, FileID ID, unsigned Column,
- const FormatStyle &Style, encoding::Encoding Encoding);
+ const FormatStyle &Style, encoding::Encoding Encoding,
+ llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator,
+ IdentifierTable &IdentTable);
ArrayRef<FormatToken *> lex();
@@ -49,11 +52,13 @@ private:
bool tryMergeLessLess();
bool tryMergeNSStringLiteral();
bool tryMergeJSPrivateIdentifier();
- bool tryMergeCSharpVerbatimStringLiteral();
+ bool tryMergeCSharpStringLiteral();
bool tryMergeCSharpKeywordVariables();
- bool tryMergeCSharpNullConditionals();
bool tryMergeCSharpDoubleQuestion();
+ bool tryMergeCSharpNullConditional();
bool tryTransformCSharpForEach();
+ bool tryMergeForEach();
+ bool tryTransformTryUsageForC();
bool tryMergeTokens(ArrayRef<tok::TokenKind> Kinds, TokenType NewType);
@@ -79,6 +84,8 @@ private:
// nested template parts by balancing curly braces.
void handleTemplateStrings();
+ void handleCSharpVerbatimAndInterpolatedStrings();
+
void tryParsePythonComment();
bool tryMerge_TMacro();
@@ -98,10 +105,10 @@ private:
const SourceManager &SourceMgr;
FileID ID;
const FormatStyle &Style;
- IdentifierTable IdentTable;
+ IdentifierTable &IdentTable;
AdditionalKeywords Keywords;
encoding::Encoding Encoding;
- llvm::SpecificBumpPtrAllocator<FormatToken> Allocator;
+ llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator;
// Index (in 'Tokens') of the last token that starts a new line.
unsigned FirstInLineIndex;
SmallVector<FormatToken *, 16> Tokens;
@@ -113,6 +120,9 @@ private:
llvm::Regex MacroBlockBeginRegex;
llvm::Regex MacroBlockEndRegex;
+ // Targets that may appear inside a C# attribute.
+ static const llvm::StringSet<> CSharpAttributeTargets;
+
void readRawToken(FormatToken &Tok);
void resetLexer(unsigned Offset);
diff --git a/clang/lib/Format/NamespaceEndCommentsFixer.cpp b/clang/lib/Format/NamespaceEndCommentsFixer.cpp
index 20b424f86077..97de45bd1965 100644
--- a/clang/lib/Format/NamespaceEndCommentsFixer.cpp
+++ b/clang/lib/Format/NamespaceEndCommentsFixer.cpp
@@ -121,7 +121,25 @@ bool validEndComment(const FormatToken *RBraceTok, StringRef NamespaceName,
// Named namespace comments must not mention anonymous namespace.
if (!NamespaceName.empty() && !AnonymousInComment.empty())
return false;
- return NamespaceNameInComment == NamespaceName;
+ if (NamespaceNameInComment == NamespaceName)
+ return true;
+
+ // Has namespace comment flowed onto the next line.
+ // } // namespace
+ // // verylongnamespacenamethatdidnotfitonthepreviouscommentline
+ if (!(Comment->Next && Comment->Next->is(TT_LineComment)))
+ return false;
+
+ static const llvm::Regex CommentPattern = llvm::Regex(
+ "^/[/*] *( +([a-zA-Z0-9:_]+))?\\.? *(\\*/)?$", llvm::Regex::IgnoreCase);
+
+ // Pull out just the comment text.
+ if (!CommentPattern.match(Comment->Next->TokenText, &Groups)) {
+ return false;
+ }
+ NamespaceNameInComment = Groups.size() > 2 ? Groups[2] : "";
+
+ return (NamespaceNameInComment == NamespaceName);
}
void addEndComment(const FormatToken *RBraceTok, StringRef EndCommentText,
@@ -187,6 +205,23 @@ std::pair<tooling::Replacements, unsigned> NamespaceEndCommentsFixer::analyze(
const SourceManager &SourceMgr = Env.getSourceManager();
AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
tooling::Replacements Fixes;
+
+ // Spin through the lines and ensure we have balanced braces.
+ int Braces = 0;
+ for (size_t I = 0, E = AnnotatedLines.size(); I != E; ++I) {
+ FormatToken *Tok = AnnotatedLines[I]->First;
+ while (Tok) {
+ Braces += Tok->is(tok::l_brace) ? 1 : Tok->is(tok::r_brace) ? -1 : 0;
+ Tok = Tok->Next;
+ }
+ }
+ // Don't attempt to comment unbalanced braces or this can
+ // lead to comments being placed on the closing brace which isn't
+ // the matching brace of the namespace. (occurs during incomplete editing).
+ if (Braces != 0) {
+ return {Fixes, 0};
+ }
+
std::string AllNamespaceNames = "";
size_t StartLineIndex = SIZE_MAX;
StringRef NamespaceTokenText;
diff --git a/clang/lib/Format/SortJavaScriptImports.cpp b/clang/lib/Format/SortJavaScriptImports.cpp
index 5be243f4c07a..db2b65b08898 100644
--- a/clang/lib/Format/SortJavaScriptImports.cpp
+++ b/clang/lib/Format/SortJavaScriptImports.cpp
@@ -144,7 +144,7 @@ public:
llvm::stable_sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
return References[LHSI] < References[RHSI];
});
- bool ReferencesInOrder = std::is_sorted(Indices.begin(), Indices.end());
+ bool ReferencesInOrder = llvm::is_sorted(Indices);
std::string ReferencesText;
bool SymbolsInOrder = true;
diff --git a/clang/lib/Format/TokenAnalyzer.cpp b/clang/lib/Format/TokenAnalyzer.cpp
index eb98a205d526..f1459a808ff8 100644
--- a/clang/lib/Format/TokenAnalyzer.cpp
+++ b/clang/lib/Format/TokenAnalyzer.cpp
@@ -64,11 +64,16 @@ TokenAnalyzer::TokenAnalyzer(const Environment &Env, const FormatStyle &Style)
std::pair<tooling::Replacements, unsigned> TokenAnalyzer::process() {
tooling::Replacements Result;
- FormatTokenLexer Tokens(Env.getSourceManager(), Env.getFileID(),
- Env.getFirstStartColumn(), Style, Encoding);
+ llvm::SpecificBumpPtrAllocator<FormatToken> Allocator;
+ IdentifierTable IdentTable(getFormattingLangOpts(Style));
+ FormatTokenLexer Lex(Env.getSourceManager(), Env.getFileID(),
+ Env.getFirstStartColumn(), Style, Encoding, Allocator,
- UnwrappedLineParser Parser(Style, Tokens.getKeywords(),
- Env.getFirstStartColumn(), Tokens.lex(), *this);
+ IdentTable);
+ ArrayRef<FormatToken *> Toks(Lex.lex());
+ SmallVector<FormatToken *, 10> Tokens(Toks.begin(), Toks.end());
+ UnwrappedLineParser Parser(Style, Lex.getKeywords(),
+ Env.getFirstStartColumn(), Tokens, *this);
Parser.parse();
assert(UnwrappedLines.rbegin()->empty());
unsigned Penalty = 0;
@@ -76,14 +81,14 @@ std::pair<tooling::Replacements, unsigned> TokenAnalyzer::process() {
LLVM_DEBUG(llvm::dbgs() << "Run " << Run << "...\n");
SmallVector<AnnotatedLine *, 16> AnnotatedLines;
- TokenAnnotator Annotator(Style, Tokens.getKeywords());
+ TokenAnnotator Annotator(Style, Lex.getKeywords());
for (unsigned i = 0, e = UnwrappedLines[Run].size(); i != e; ++i) {
AnnotatedLines.push_back(new AnnotatedLine(UnwrappedLines[Run][i]));
Annotator.annotate(*AnnotatedLines.back());
}
std::pair<tooling::Replacements, unsigned> RunResult =
- analyze(Annotator, AnnotatedLines, Tokens);
+ analyze(Annotator, AnnotatedLines, Lex);
LLVM_DEBUG({
llvm::dbgs() << "Replacements for run " << Run << ":\n";
diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp
index d5d394e61926..7f8e35126512 100644
--- a/clang/lib/Format/TokenAnnotator.cpp
+++ b/clang/lib/Format/TokenAnnotator.cpp
@@ -118,9 +118,9 @@ private:
if (Style.Language == FormatStyle::LK_TextProto ||
(Style.Language == FormatStyle::LK_Proto && Left->Previous &&
Left->Previous->isOneOf(TT_SelectorName, TT_DictLiteral)))
- CurrentToken->Type = TT_DictLiteral;
+ CurrentToken->setType(TT_DictLiteral);
else
- CurrentToken->Type = TT_TemplateCloser;
+ CurrentToken->setType(TT_TemplateCloser);
next();
return true;
}
@@ -131,7 +131,7 @@ private:
}
if (CurrentToken->isOneOf(tok::r_paren, tok::r_square, tok::r_brace) ||
(CurrentToken->isOneOf(tok::colon, tok::question) && InExprContext &&
- Style.Language != FormatStyle::LK_Proto &&
+ !Style.isCSharp() && Style.Language != FormatStyle::LK_Proto &&
Style.Language != FormatStyle::LK_TextProto))
return false;
// If a && or || is found and interpreted as a binary operator, this set
@@ -151,7 +151,7 @@ private:
if (CurrentToken->is(tok::colon) ||
(CurrentToken->isOneOf(tok::l_brace, tok::less) &&
Previous->isNot(tok::colon)))
- Previous->Type = TT_SelectorName;
+ Previous->setType(TT_SelectorName);
}
}
if (!consumeToken())
@@ -160,6 +160,27 @@ private:
return false;
}
+ bool parseUntouchableParens() {
+ while (CurrentToken) {
+ CurrentToken->Finalized = true;
+ switch (CurrentToken->Tok.getKind()) {
+ case tok::l_paren:
+ next();
+ if (!parseUntouchableParens())
+ return false;
+ continue;
+ case tok::r_paren:
+ next();
+ return true;
+ default:
+ // no-op
+ break;
+ }
+ next();
+ }
+ return false;
+ }
+
bool parseParens(bool LookForDecls = false) {
if (!CurrentToken)
return false;
@@ -171,6 +192,11 @@ private:
Contexts.back().ColonIsForRangeExpr =
Contexts.size() == 2 && Contexts[0].ColonIsForRangeExpr;
+ if (Left->Previous && Left->Previous->is(TT_UntouchableMacroFunc)) {
+ Left->Finalized = true;
+ return parseUntouchableParens();
+ }
+
bool StartsObjCMethodExpr = false;
if (FormatToken *MaybeSel = Left->Previous) {
// @selector( starts a selector.
@@ -217,7 +243,7 @@ private:
// This is the parameter list of an ObjC block.
Contexts.back().IsExpression = false;
} else if (Left->Previous && Left->Previous->is(tok::kw___attribute)) {
- Left->Type = TT_AttributeParen;
+ Left->setType(TT_AttributeParen);
} else if (Left->Previous && Left->Previous->is(TT_ForEachMacro)) {
// The first argument to a foreach macro is a declaration.
Contexts.back().IsForEachMacro = true;
@@ -233,7 +259,7 @@ private:
if (StartsObjCMethodExpr) {
Contexts.back().ColonIsObjCMethodExpr = true;
- Left->Type = TT_ObjCMethodExpr;
+ Left->setType(TT_ObjCMethodExpr);
}
// MightBeFunctionType and ProbablyFunctionType are used for
@@ -264,7 +290,7 @@ private:
if (PrevPrev && PrevPrev->is(tok::identifier) &&
Prev->isOneOf(tok::star, tok::amp, tok::ampamp) &&
CurrentToken->is(tok::identifier) && Next->isNot(tok::equal)) {
- Prev->Type = TT_BinaryOperator;
+ Prev->setType(TT_BinaryOperator);
LookForDecls = false;
}
}
@@ -282,8 +308,8 @@ private:
if (MightBeFunctionType && ProbablyFunctionType && CurrentToken->Next &&
(CurrentToken->Next->is(tok::l_paren) ||
(CurrentToken->Next->is(tok::l_square) && Line.MustBeDeclaration)))
- Left->Type = Left->Next->is(tok::caret) ? TT_ObjCBlockLParen
- : TT_FunctionTypeLParen;
+ Left->setType(Left->Next->is(tok::caret) ? TT_ObjCBlockLParen
+ : TT_FunctionTypeLParen);
Left->MatchingParen = CurrentToken;
CurrentToken->MatchingParen = Left;
@@ -295,12 +321,12 @@ private:
for (FormatToken *Tok = Left; Tok != CurrentToken; Tok = Tok->Next) {
if (Tok->is(TT_BinaryOperator) &&
Tok->isOneOf(tok::star, tok::amp, tok::ampamp))
- Tok->Type = TT_PointerOrReference;
+ Tok->setType(TT_PointerOrReference);
}
}
if (StartsObjCMethodExpr) {
- CurrentToken->Type = TT_ObjCMethodExpr;
+ CurrentToken->setType(TT_ObjCMethodExpr);
if (Contexts.back().FirstObjCSelectorName) {
Contexts.back().FirstObjCSelectorName->LongestObjCSelectorName =
Contexts.back().LongestObjCSelectorName;
@@ -308,13 +334,13 @@ private:
}
if (Left->is(TT_AttributeParen))
- CurrentToken->Type = TT_AttributeParen;
+ CurrentToken->setType(TT_AttributeParen);
if (Left->Previous && Left->Previous->is(TT_JavaAnnotation))
- CurrentToken->Type = TT_JavaAnnotation;
+ CurrentToken->setType(TT_JavaAnnotation);
if (Left->Previous && Left->Previous->is(TT_LeadingJavaAnnotation))
- CurrentToken->Type = TT_LeadingJavaAnnotation;
+ CurrentToken->setType(TT_LeadingJavaAnnotation);
if (Left->Previous && Left->Previous->is(TT_AttributeSquare))
- CurrentToken->Type = TT_AttributeSquare;
+ CurrentToken->setType(TT_AttributeSquare);
if (!HasMultipleLines)
Left->PackingKind = PPK_Inconclusive;
@@ -330,7 +356,7 @@ private:
return false;
if (CurrentToken->is(tok::l_brace))
- Left->Type = TT_Unknown; // Not TT_ObjCBlockLParen
+ Left->setType(TT_Unknown); // Not TT_ObjCBlockLParen
if (CurrentToken->is(tok::comma) && CurrentToken->Next &&
!CurrentToken->Next->HasUnescapedNewline &&
!CurrentToken->Next->isTrailingComment())
@@ -342,13 +368,13 @@ private:
if (CurrentToken->isOneOf(tok::semi, tok::colon)) {
MightBeObjCForRangeLoop = false;
if (PossibleObjCForInToken) {
- PossibleObjCForInToken->Type = TT_Unknown;
+ PossibleObjCForInToken->setType(TT_Unknown);
PossibleObjCForInToken = nullptr;
}
}
if (MightBeObjCForRangeLoop && CurrentToken->is(Keywords.kw_in)) {
PossibleObjCForInToken = CurrentToken;
- PossibleObjCForInToken->Type = TT_ObjCForIn;
+ PossibleObjCForInToken->setType(TT_ObjCForIn);
}
// When we discover a 'new', we set CanBeExpression to 'false' in order to
// parse the type correctly. Reset that after a comma.
@@ -369,6 +395,17 @@ private:
if (!Style.isCSharp())
return false;
+ // `identifier[i]` is not an attribute.
+ if (Tok.Previous && Tok.Previous->is(tok::identifier))
+ return false;
+
+ // Chains of [] in `identifier[i][j][k]` are not attributes.
+ if (Tok.Previous && Tok.Previous->is(tok::r_square)) {
+ auto *MatchingParen = Tok.Previous->MatchingParen;
+ if (!MatchingParen || MatchingParen->is(TT_ArraySubscriptLSquare))
+ return false;
+ }
+
const FormatToken *AttrTok = Tok.Next;
if (!AttrTok)
return false;
@@ -385,15 +422,15 @@ private:
if (!AttrTok)
return false;
- // Move past the end of ']'.
+ // Allow an attribute to be the only content of a file.
AttrTok = AttrTok->Next;
if (!AttrTok)
- return false;
+ return true;
// Limit this to being an access modifier that follows.
if (AttrTok->isOneOf(tok::kw_public, tok::kw_private, tok::kw_protected,
- tok::kw_class, tok::kw_static, tok::l_square,
- Keywords.kw_internal)) {
+ tok::comment, tok::kw_class, tok::kw_static,
+ tok::l_square, Keywords.kw_internal)) {
return true;
}
@@ -460,7 +497,7 @@ private:
Contexts.back().InCpp11AttributeSpecifier;
// Treat C# Attributes [STAThread] much like C++ attributes [[...]].
- bool IsCSharp11AttributeSpecifier =
+ bool IsCSharpAttributeSpecifier =
isCSharpAttributeSpecifier(*Left) ||
Contexts.back().InCSharpAttributeSpecifier;
@@ -469,7 +506,8 @@ private:
bool StartsObjCMethodExpr =
!IsCppStructuredBinding && !InsideInlineASM && !CppArrayTemplates &&
Style.isCpp() && !IsCpp11AttributeSpecifier &&
- Contexts.back().CanBeExpression && Left->isNot(TT_LambdaLSquare) &&
+ !IsCSharpAttributeSpecifier && Contexts.back().CanBeExpression &&
+ Left->isNot(TT_LambdaLSquare) &&
!CurrentToken->isOneOf(tok::l_brace, tok::r_square) &&
(!Parent ||
Parent->isOneOf(tok::colon, tok::l_square, tok::l_paren,
@@ -483,24 +521,26 @@ private:
unsigned BindingIncrease = 1;
if (IsCppStructuredBinding) {
- Left->Type = TT_StructuredBindingLSquare;
+ Left->setType(TT_StructuredBindingLSquare);
} else if (Left->is(TT_Unknown)) {
if (StartsObjCMethodExpr) {
- Left->Type = TT_ObjCMethodExpr;
+ Left->setType(TT_ObjCMethodExpr);
+ } else if (InsideInlineASM) {
+ Left->setType(TT_InlineASMSymbolicNameLSquare);
} else if (IsCpp11AttributeSpecifier) {
- Left->Type = TT_AttributeSquare;
+ Left->setType(TT_AttributeSquare);
} else if (Style.Language == FormatStyle::LK_JavaScript && Parent &&
Contexts.back().ContextKind == tok::l_brace &&
Parent->isOneOf(tok::l_brace, tok::comma)) {
- Left->Type = TT_JsComputedPropertyName;
+ Left->setType(TT_JsComputedPropertyName);
} else if (Style.isCpp() && Contexts.back().ContextKind == tok::l_brace &&
Parent && Parent->isOneOf(tok::l_brace, tok::comma)) {
- Left->Type = TT_DesignatedInitializerLSquare;
- } else if (IsCSharp11AttributeSpecifier) {
- Left->Type = TT_AttributeSquare;
+ Left->setType(TT_DesignatedInitializerLSquare);
+ } else if (IsCSharpAttributeSpecifier) {
+ Left->setType(TT_AttributeSquare);
} else if (CurrentToken->is(tok::r_square) && Parent &&
Parent->is(TT_TemplateCloser)) {
- Left->Type = TT_ArraySubscriptLSquare;
+ Left->setType(TT_ArraySubscriptLSquare);
} else if (Style.Language == FormatStyle::LK_Proto ||
Style.Language == FormatStyle::LK_TextProto) {
// Square braces in LK_Proto can either be message field attributes:
@@ -529,13 +569,13 @@ private:
//
// In the first and the third case we want to spread the contents inside
// the square braces; in the second we want to keep them inline.
- Left->Type = TT_ArrayInitializerLSquare;
+ Left->setType(TT_ArrayInitializerLSquare);
if (!Left->endsSequence(tok::l_square, tok::numeric_constant,
tok::equal) &&
!Left->endsSequence(tok::l_square, tok::numeric_constant,
tok::identifier) &&
!Left->endsSequence(tok::l_square, tok::colon, TT_SelectorName)) {
- Left->Type = TT_ProtoExtensionLSquare;
+ Left->setType(TT_ProtoExtensionLSquare);
BindingIncrease = 10;
}
} else if (!CppArrayTemplates && Parent &&
@@ -544,10 +584,10 @@ private:
tok::question, tok::colon, tok::kw_return,
// Should only be relevant to JavaScript:
tok::kw_default)) {
- Left->Type = TT_ArrayInitializerLSquare;
+ Left->setType(TT_ArrayInitializerLSquare);
} else {
BindingIncrease = 10;
- Left->Type = TT_ArraySubscriptLSquare;
+ Left->setType(TT_ArraySubscriptLSquare);
}
}
@@ -559,14 +599,14 @@ private:
Contexts.back().ColonIsObjCMethodExpr = StartsObjCMethodExpr;
Contexts.back().InCpp11AttributeSpecifier = IsCpp11AttributeSpecifier;
- Contexts.back().InCSharpAttributeSpecifier = IsCSharp11AttributeSpecifier;
+ Contexts.back().InCSharpAttributeSpecifier = IsCSharpAttributeSpecifier;
while (CurrentToken) {
if (CurrentToken->is(tok::r_square)) {
if (IsCpp11AttributeSpecifier)
- CurrentToken->Type = TT_AttributeSquare;
- if (IsCSharp11AttributeSpecifier)
- CurrentToken->Type = TT_AttributeSquare;
+ CurrentToken->setType(TT_AttributeSquare);
+ if (IsCSharpAttributeSpecifier)
+ CurrentToken->setType(TT_AttributeSquare);
else if (((CurrentToken->Next &&
CurrentToken->Next->is(tok::l_paren)) ||
(CurrentToken->Previous &&
@@ -577,26 +617,26 @@ private:
// will be expanded to more tokens.
// FIXME: Do we incorrectly label ":" with this?
StartsObjCMethodExpr = false;
- Left->Type = TT_Unknown;
+ Left->setType(TT_Unknown);
}
if (StartsObjCMethodExpr && CurrentToken->Previous != Left) {
- CurrentToken->Type = TT_ObjCMethodExpr;
+ CurrentToken->setType(TT_ObjCMethodExpr);
// If we haven't seen a colon yet, make sure the last identifier
// before the r_square is tagged as a selector name component.
if (!ColonFound && CurrentToken->Previous &&
CurrentToken->Previous->is(TT_Unknown) &&
canBeObjCSelectorComponent(*CurrentToken->Previous))
- CurrentToken->Previous->Type = TT_SelectorName;
+ CurrentToken->Previous->setType(TT_SelectorName);
// determineStarAmpUsage() thinks that '*' '[' is allocating an
// array of pointers, but if '[' starts a selector then '*' is a
// binary operator.
if (Parent && Parent->is(TT_PointerOrReference))
- Parent->Type = TT_BinaryOperator;
+ Parent->setType(TT_BinaryOperator);
}
// An arrow after an ObjC method expression is not a lambda arrow.
- if (CurrentToken->Type == TT_ObjCMethodExpr && CurrentToken->Next &&
- CurrentToken->Next->is(TT_LambdaArrow))
- CurrentToken->Next->Type = TT_Unknown;
+ if (CurrentToken->getType() == TT_ObjCMethodExpr &&
+ CurrentToken->Next && CurrentToken->Next->is(TT_LambdaArrow))
+ CurrentToken->Next->setType(TT_Unknown);
Left->MatchingParen = CurrentToken;
CurrentToken->MatchingParen = Left;
// FirstObjCSelectorName is set when a colon is found. This does
@@ -630,21 +670,21 @@ private:
tok::kw_using)) {
// Remember that this is a [[using ns: foo]] C++ attribute, so we
// don't add a space before the colon (unlike other colons).
- CurrentToken->Type = TT_AttributeColon;
+ CurrentToken->setType(TT_AttributeColon);
} else if (Left->isOneOf(TT_ArraySubscriptLSquare,
TT_DesignatedInitializerLSquare)) {
- Left->Type = TT_ObjCMethodExpr;
+ Left->setType(TT_ObjCMethodExpr);
StartsObjCMethodExpr = true;
Contexts.back().ColonIsObjCMethodExpr = true;
if (Parent && Parent->is(tok::r_paren))
// FIXME(bug 36976): ObjC return types shouldn't use TT_CastRParen.
- Parent->Type = TT_CastRParen;
+ Parent->setType(TT_CastRParen);
}
ColonFound = true;
}
if (CurrentToken->is(tok::comma) && Left->is(TT_ObjCMethodExpr) &&
!ColonFound)
- Left->Type = TT_ArrayInitializerLSquare;
+ Left->setType(TT_ArrayInitializerLSquare);
FormatToken *Tok = CurrentToken;
if (!consumeToken())
return false;
@@ -659,7 +699,7 @@ private:
Left->ParentBracket = Contexts.back().ContextKind;
if (Contexts.back().CaretFound)
- Left->Type = TT_ObjCBlockLBrace;
+ Left->setType(TT_ObjCBlockLBrace);
Contexts.back().CaretFound = false;
ScopedContextCreator ContextCreator(*this, tok::l_brace, 1);
@@ -688,18 +728,18 @@ private:
(!Contexts.back().ColonIsDictLiteral || !Style.isCpp())) ||
Style.Language == FormatStyle::LK_Proto ||
Style.Language == FormatStyle::LK_TextProto) {
- Left->Type = TT_DictLiteral;
+ Left->setType(TT_DictLiteral);
if (Previous->Tok.getIdentifierInfo() ||
Previous->is(tok::string_literal))
- Previous->Type = TT_SelectorName;
+ Previous->setType(TT_SelectorName);
}
if (CurrentToken->is(tok::colon) ||
Style.Language == FormatStyle::LK_JavaScript)
- Left->Type = TT_DictLiteral;
+ Left->setType(TT_DictLiteral);
}
if (CurrentToken->is(tok::comma) &&
Style.Language == FormatStyle::LK_JavaScript)
- Left->Type = TT_DictLiteral;
+ Left->setType(TT_DictLiteral);
if (!consumeToken())
return false;
}
@@ -726,7 +766,7 @@ private:
bool parseConditional() {
while (CurrentToken) {
if (CurrentToken->is(tok::colon)) {
- CurrentToken->Type = TT_ConditionalExpr;
+ CurrentToken->setType(TT_ConditionalExpr);
next();
return true;
}
@@ -738,7 +778,7 @@ private:
bool parseTemplateDeclaration() {
if (CurrentToken && CurrentToken->is(tok::less)) {
- CurrentToken->Type = TT_TemplateOpener;
+ CurrentToken->setType(TT_TemplateOpener);
next();
if (!parseAngle())
return false;
@@ -756,7 +796,7 @@ private:
case tok::plus:
case tok::minus:
if (!Tok->Previous && Line.MustBeDeclaration)
- Tok->Type = TT_ObjCMethodSpecifier;
+ Tok->setType(TT_ObjCMethodSpecifier);
break;
case tok::colon:
if (!Tok->Previous)
@@ -773,21 +813,30 @@ private:
(Contexts.size() == 1 &&
Line.MustBeDeclaration)) { // method/property declaration
Contexts.back().IsExpression = false;
- Tok->Type = TT_JsTypeColon;
+ Tok->setType(TT_JsTypeColon);
+ break;
+ }
+ } else if (Style.isCSharp()) {
+ if (Contexts.back().InCSharpAttributeSpecifier) {
+ Tok->setType(TT_AttributeColon);
+ break;
+ }
+ if (Contexts.back().ContextKind == tok::l_paren) {
+ Tok->setType(TT_CSharpNamedArgumentColon);
break;
}
}
if (Contexts.back().ColonIsDictLiteral ||
Style.Language == FormatStyle::LK_Proto ||
Style.Language == FormatStyle::LK_TextProto) {
- Tok->Type = TT_DictLiteral;
+ Tok->setType(TT_DictLiteral);
if (Style.Language == FormatStyle::LK_TextProto) {
if (FormatToken *Previous = Tok->getPreviousNonComment())
- Previous->Type = TT_SelectorName;
+ Previous->setType(TT_SelectorName);
}
} else if (Contexts.back().ColonIsObjCMethodExpr ||
Line.startsWith(TT_ObjCMethodSpecifier)) {
- Tok->Type = TT_ObjCMethodExpr;
+ Tok->setType(TT_ObjCMethodExpr);
const FormatToken *BeforePrevious = Tok->Previous->Previous;
// Ensure we tag all identifiers in method declarations as
// TT_SelectorName.
@@ -802,7 +851,7 @@ private:
BeforePrevious->is(tok::r_square) ||
Contexts.back().LongestObjCSelectorName == 0 ||
UnknownIdentifierInMethodDeclaration) {
- Tok->Previous->Type = TT_SelectorName;
+ Tok->Previous->setType(TT_SelectorName);
if (!Contexts.back().FirstObjCSelectorName)
Contexts.back().FirstObjCSelectorName = Tok->Previous;
else if (Tok->Previous->ColumnWidth >
@@ -814,25 +863,30 @@ private:
++Contexts.back().FirstObjCSelectorName->ObjCSelectorNameParts;
}
} else if (Contexts.back().ColonIsForRangeExpr) {
- Tok->Type = TT_RangeBasedForLoopColon;
+ Tok->setType(TT_RangeBasedForLoopColon);
} else if (CurrentToken && CurrentToken->is(tok::numeric_constant)) {
- Tok->Type = TT_BitFieldColon;
+ Tok->setType(TT_BitFieldColon);
} else if (Contexts.size() == 1 &&
!Line.First->isOneOf(tok::kw_enum, tok::kw_case)) {
- if (Tok->getPreviousNonComment()->isOneOf(tok::r_paren,
- tok::kw_noexcept))
- Tok->Type = TT_CtorInitializerColon;
- else
- Tok->Type = TT_InheritanceColon;
+ FormatToken *Prev = Tok->getPreviousNonComment();
+ if (Prev->isOneOf(tok::r_paren, tok::kw_noexcept))
+ Tok->setType(TT_CtorInitializerColon);
+ else if (Prev->is(tok::kw_try)) {
+ // Member initializer list within function try block.
+ FormatToken *PrevPrev = Prev->getPreviousNonComment();
+ if (PrevPrev && PrevPrev->isOneOf(tok::r_paren, tok::kw_noexcept))
+ Tok->setType(TT_CtorInitializerColon);
+ } else
+ Tok->setType(TT_InheritanceColon);
} else if (canBeObjCSelectorComponent(*Tok->Previous) && Tok->Next &&
(Tok->Next->isOneOf(tok::r_paren, tok::comma) ||
(canBeObjCSelectorComponent(*Tok->Next) && Tok->Next->Next &&
Tok->Next->Next->is(tok::colon)))) {
// This handles a special macro in ObjC code where selectors including
// the colon are passed as macro arguments.
- Tok->Type = TT_ObjCMethodExpr;
+ Tok->setType(TT_ObjCMethodExpr);
} else if (Contexts.back().ContextKind == tok::l_paren) {
- Tok->Type = TT_InlineASMColon;
+ Tok->setType(TT_InlineASMColon);
}
break;
case tok::pipe:
@@ -841,7 +895,7 @@ private:
// intersection types, respectively.
if (Style.Language == FormatStyle::LK_JavaScript &&
!Contexts.back().IsExpression)
- Tok->Type = TT_JsTypeOperator;
+ Tok->setType(TT_JsTypeOperator);
break;
case tok::kw_if:
case tok::kw_while:
@@ -877,9 +931,9 @@ private:
if (Tok->Previous && Tok->Previous->is(tok::r_paren) &&
Tok->Previous->MatchingParen &&
Tok->Previous->MatchingParen->is(TT_OverloadedOperatorLParen)) {
- Tok->Previous->Type = TT_OverloadedOperator;
- Tok->Previous->MatchingParen->Type = TT_OverloadedOperator;
- Tok->Type = TT_OverloadedOperatorLParen;
+ Tok->Previous->setType(TT_OverloadedOperator);
+ Tok->Previous->MatchingParen->setType(TT_OverloadedOperator);
+ Tok->setType(TT_OverloadedOperatorLParen);
}
if (!parseParens())
@@ -898,15 +952,15 @@ private:
case tok::l_brace:
if (Style.Language == FormatStyle::LK_TextProto) {
FormatToken *Previous = Tok->getPreviousNonComment();
- if (Previous && Previous->Type != TT_DictLiteral)
- Previous->Type = TT_SelectorName;
+ if (Previous && Previous->getType() != TT_DictLiteral)
+ Previous->setType(TT_SelectorName);
}
if (!parseBrace())
return false;
break;
case tok::less:
if (parseAngle()) {
- Tok->Type = TT_TemplateOpener;
+ Tok->setType(TT_TemplateOpener);
// In TT_Proto, we must distinguish between:
// map<key, value>
// msg < item: data >
@@ -915,13 +969,13 @@ private:
if (Style.Language == FormatStyle::LK_TextProto ||
(Style.Language == FormatStyle::LK_Proto && Tok->Previous &&
Tok->Previous->isOneOf(TT_SelectorName, TT_DictLiteral))) {
- Tok->Type = TT_DictLiteral;
+ Tok->setType(TT_DictLiteral);
FormatToken *Previous = Tok->getPreviousNonComment();
- if (Previous && Previous->Type != TT_DictLiteral)
- Previous->Type = TT_SelectorName;
+ if (Previous && Previous->getType() != TT_DictLiteral)
+ Previous->setType(TT_SelectorName);
}
} else {
- Tok->Type = TT_BinaryOperator;
+ Tok->setType(TT_BinaryOperator);
NonTemplateLess.insert(Tok);
CurrentToken = Tok;
next();
@@ -937,7 +991,7 @@ private:
break;
case tok::greater:
if (Style.Language != FormatStyle::LK_TextProto)
- Tok->Type = TT_BinaryOperator;
+ Tok->setType(TT_BinaryOperator);
if (Tok->Previous && Tok->Previous->is(TT_TemplateCloser))
Tok->SpacesRequiredBefore = 1;
break;
@@ -948,20 +1002,29 @@ private:
while (CurrentToken &&
!CurrentToken->isOneOf(tok::l_paren, tok::semi, tok::r_paren)) {
if (CurrentToken->isOneOf(tok::star, tok::amp))
- CurrentToken->Type = TT_PointerOrReference;
+ CurrentToken->setType(TT_PointerOrReference);
consumeToken();
+ if (CurrentToken && CurrentToken->is(tok::comma) &&
+ CurrentToken->Previous->isNot(tok::kw_operator))
+ break;
if (CurrentToken && CurrentToken->Previous->isOneOf(
TT_BinaryOperator, TT_UnaryOperator, tok::comma,
tok::star, tok::arrow, tok::amp, tok::ampamp))
- CurrentToken->Previous->Type = TT_OverloadedOperator;
- }
- if (CurrentToken) {
- CurrentToken->Type = TT_OverloadedOperatorLParen;
- if (CurrentToken->Previous->is(TT_BinaryOperator))
- CurrentToken->Previous->Type = TT_OverloadedOperator;
+ CurrentToken->Previous->setType(TT_OverloadedOperator);
}
+ if (CurrentToken && CurrentToken->is(tok::l_paren))
+ CurrentToken->setType(TT_OverloadedOperatorLParen);
+ if (CurrentToken && CurrentToken->Previous->is(TT_BinaryOperator))
+ CurrentToken->Previous->setType(TT_OverloadedOperator);
break;
case tok::question:
+ if (Tok->is(TT_CSharpNullConditionalLSquare)) {
+ if (!parseSquare())
+ return false;
+ break;
+ }
+ if (Tok->isOneOf(TT_CSharpNullConditional, TT_CSharpNullCoalescing))
+ break;
if (Style.Language == FormatStyle::LK_JavaScript && Tok->Next &&
Tok->Next->isOneOf(tok::semi, tok::comma, tok::colon, tok::r_paren,
tok::r_brace)) {
@@ -969,7 +1032,7 @@ private:
// types (fields, parameters), e.g.
// function(x?: string, y?) {...}
// class X { y?; }
- Tok->Type = TT_JsTypeOptionalQuestion;
+ Tok->setType(TT_JsTypeOptionalQuestion);
break;
}
// Declarations cannot be conditional expressions, this can only be part
@@ -977,6 +1040,18 @@ private:
if (Line.MustBeDeclaration && !Contexts.back().IsExpression &&
Style.Language == FormatStyle::LK_JavaScript)
break;
+ if (Style.isCSharp()) {
+ // `Type?)`, `Type?>`, `Type? name;` and `Type? name =` can only be
+ // nullable types.
+ // Line.MustBeDeclaration will be true for `Type? name;`.
+ if ((!Contexts.back().IsExpression && Line.MustBeDeclaration) ||
+ (Tok->Next && Tok->Next->isOneOf(tok::r_paren, tok::greater)) ||
+ (Tok->Next && Tok->Next->is(tok::identifier) && Tok->Next->Next &&
+ Tok->Next->Next->is(tok::equal))) {
+ Tok->setType(TT_CSharpNullable);
+ break;
+ }
+ }
parseConditional();
break;
case tok::kw_template:
@@ -984,9 +1059,9 @@ private:
break;
case tok::comma:
if (Contexts.back().InCtorInitializer)
- Tok->Type = TT_CtorInitializerComma;
+ Tok->setType(TT_CtorInitializerComma);
else if (Contexts.back().InInheritanceList)
- Tok->Type = TT_InheritanceComma;
+ Tok->setType(TT_InheritanceComma);
else if (Contexts.back().FirstStartOfName &&
(Contexts.size() == 1 || Line.startsWith(tok::kw_for))) {
Contexts.back().FirstStartOfName->PartOfMultiVariableDeclStmt = true;
@@ -1000,6 +1075,11 @@ private:
Keywords.kw___has_include_next)) {
parseHasInclude();
}
+ if (Style.isCSharp() && Tok->is(Keywords.kw_where) && Tok->Next &&
+ Tok->Next->isNot(tok::l_paren)) {
+ Tok->setType(TT_CSharpGenericTypeConstraint);
+ parseCSharpGenericTypeConstraint();
+ }
break;
default:
break;
@@ -1007,6 +1087,35 @@ private:
return true;
}
+ void parseCSharpGenericTypeConstraint() {
+ int OpenAngleBracketsCount = 0;
+ while (CurrentToken) {
+ if (CurrentToken->is(tok::less)) {
+ // parseAngle is too greedy and will consume the whole line.
+ CurrentToken->setType(TT_TemplateOpener);
+ ++OpenAngleBracketsCount;
+ next();
+ } else if (CurrentToken->is(tok::greater)) {
+ CurrentToken->setType(TT_TemplateCloser);
+ --OpenAngleBracketsCount;
+ next();
+ } else if (CurrentToken->is(tok::comma) && OpenAngleBracketsCount == 0) {
+ // We allow line breaks after GenericTypeConstraintCommas
+ // so do not flag commas in Generics as GenericTypeConstraintCommas.
+ CurrentToken->setType(TT_CSharpGenericTypeConstraintComma);
+ next();
+ } else if (CurrentToken->is(Keywords.kw_where)) {
+ CurrentToken->setType(TT_CSharpGenericTypeConstraint);
+ next();
+ } else if (CurrentToken->is(tok::colon)) {
+ CurrentToken->setType(TT_CSharpGenericTypeConstraintColon);
+ next();
+ } else {
+ next();
+ }
+ }
+ }
+
void parseIncludeDirective() {
if (CurrentToken && CurrentToken->is(tok::less)) {
next();
@@ -1015,7 +1124,7 @@ private:
// literals.
if (CurrentToken->isNot(tok::comment) &&
!CurrentToken->TokenText.startswith("//"))
- CurrentToken->Type = TT_ImplicitStringLiteral;
+ CurrentToken->setType(TT_ImplicitStringLiteral);
next();
}
}
@@ -1027,7 +1136,7 @@ private:
// warning or error.
next();
while (CurrentToken) {
- CurrentToken->Type = TT_ImplicitStringLiteral;
+ CurrentToken->setType(TT_ImplicitStringLiteral);
next();
}
}
@@ -1041,7 +1150,7 @@ private:
next(); // Consume first token (so we fix leading whitespace).
while (CurrentToken) {
if (IsMark || CurrentToken->Previous->is(TT_BinaryOperator))
- CurrentToken->Type = TT_ImplicitStringLiteral;
+ CurrentToken->setType(TT_ImplicitStringLiteral);
next();
}
}
@@ -1068,7 +1177,7 @@ private:
// Treat these like C++ #include directives.
while (CurrentToken) {
// Tokens cannot be comments here.
- CurrentToken->Type = TT_ImplicitStringLiteral;
+ CurrentToken->setType(TT_ImplicitStringLiteral);
next();
}
return LT_ImportStatement;
@@ -1228,8 +1337,8 @@ private:
TT_TypenameMacro, TT_FunctionLBrace, TT_ImplicitStringLiteral,
TT_InlineASMBrace, TT_JsFatArrow, TT_LambdaArrow, TT_NamespaceMacro,
TT_OverloadedOperator, TT_RegexLiteral, TT_TemplateString,
- TT_ObjCStringLiteral))
- CurrentToken->Type = TT_Unknown;
+ TT_ObjCStringLiteral, TT_UntouchableMacroFunc))
+ CurrentToken->setType(TT_Unknown);
CurrentToken->Role.reset();
CurrentToken->MatchingParen = nullptr;
CurrentToken->FakeLParens.clear();
@@ -1317,7 +1426,7 @@ private:
if (Previous->isOneOf(TT_BinaryOperator, TT_UnaryOperator) &&
Previous->isOneOf(tok::star, tok::amp, tok::ampamp) &&
Previous->Previous && Previous->Previous->isNot(tok::equal))
- Previous->Type = TT_PointerOrReference;
+ Previous->setType(TT_PointerOrReference);
}
}
} else if (Current.is(tok::lessless) &&
@@ -1339,7 +1448,7 @@ private:
for (FormatToken *Previous = Current.Previous;
Previous && Previous->isOneOf(tok::star, tok::amp);
Previous = Previous->Previous)
- Previous->Type = TT_PointerOrReference;
+ Previous->setType(TT_PointerOrReference);
if (Line.MustBeDeclaration && !Contexts.front().InCtorInitializer)
Contexts.back().IsExpression = false;
} else if (Current.is(tok::kw_new)) {
@@ -1423,19 +1532,36 @@ private:
// The token type is already known.
return;
+ if (Style.isCSharp() && CurrentToken->is(tok::question)) {
+ if (CurrentToken->TokenText == "??") {
+ Current.setType(TT_CSharpNullCoalescing);
+ return;
+ }
+ if (CurrentToken->TokenText == "?.") {
+ Current.setType(TT_CSharpNullConditional);
+ return;
+ }
+ if (CurrentToken->TokenText == "?[") {
+ Current.setType(TT_CSharpNullConditionalLSquare);
+ return;
+ }
+ }
+
if (Style.Language == FormatStyle::LK_JavaScript) {
if (Current.is(tok::exclaim)) {
if (Current.Previous &&
- (Current.Previous->isOneOf(tok::identifier, tok::kw_namespace,
- tok::r_paren, tok::r_square,
- tok::r_brace) ||
+ (Keywords.IsJavaScriptIdentifier(
+ *Current.Previous, /* AcceptIdentifierName= */ true) ||
+ Current.Previous->isOneOf(
+ tok::kw_namespace, tok::r_paren, tok::r_square, tok::r_brace,
+ Keywords.kw_type, Keywords.kw_get, Keywords.kw_set) ||
Current.Previous->Tok.isLiteral())) {
- Current.Type = TT_JsNonNullAssertion;
+ Current.setType(TT_JsNonNullAssertion);
return;
}
if (Current.Next &&
Current.Next->isOneOf(TT_BinaryOperator, Keywords.kw_as)) {
- Current.Type = TT_JsNonNullAssertion;
+ Current.setType(TT_JsNonNullAssertion);
return;
}
}
@@ -1445,11 +1571,11 @@ private:
// function declaration have been found. In this case, 'Current' is a
// trailing token of this declaration and thus cannot be a name.
if (Current.is(Keywords.kw_instanceof)) {
- Current.Type = TT_BinaryOperator;
+ Current.setType(TT_BinaryOperator);
} else if (isStartOfName(Current) &&
(!Line.MightBeFunctionDecl || Current.NestingLevel != 0)) {
Contexts.back().FirstStartOfName = &Current;
- Current.Type = TT_StartOfName;
+ Current.setType(TT_StartOfName);
} else if (Current.is(tok::semi)) {
// Reset FirstStartOfName after finding a semicolon so that a for loop
// with multiple increment statements is not confused with a for loop
@@ -1459,57 +1585,57 @@ private:
AutoFound = true;
} else if (Current.is(tok::arrow) &&
Style.Language == FormatStyle::LK_Java) {
- Current.Type = TT_LambdaArrow;
+ Current.setType(TT_LambdaArrow);
} else if (Current.is(tok::arrow) && AutoFound && Line.MustBeDeclaration &&
Current.NestingLevel == 0 &&
!Current.Previous->is(tok::kw_operator)) {
// not auto operator->() -> xxx;
- Current.Type = TT_TrailingReturnArrow;
+ Current.setType(TT_TrailingReturnArrow);
} else if (isDeductionGuide(Current)) {
// Deduction guides trailing arrow " A(...) -> A<T>;".
- Current.Type = TT_TrailingReturnArrow;
+ Current.setType(TT_TrailingReturnArrow);
} else if (Current.isOneOf(tok::star, tok::amp, tok::ampamp)) {
- Current.Type = determineStarAmpUsage(Current,
- Contexts.back().CanBeExpression &&
- Contexts.back().IsExpression,
- Contexts.back().InTemplateArgument);
+ Current.setType(determineStarAmpUsage(
+ Current,
+ Contexts.back().CanBeExpression && Contexts.back().IsExpression,
+ Contexts.back().InTemplateArgument));
} else if (Current.isOneOf(tok::minus, tok::plus, tok::caret)) {
- Current.Type = determinePlusMinusCaretUsage(Current);
+ Current.setType(determinePlusMinusCaretUsage(Current));
if (Current.is(TT_UnaryOperator) && Current.is(tok::caret))
Contexts.back().CaretFound = true;
} else if (Current.isOneOf(tok::minusminus, tok::plusplus)) {
- Current.Type = determineIncrementUsage(Current);
+ Current.setType(determineIncrementUsage(Current));
} else if (Current.isOneOf(tok::exclaim, tok::tilde)) {
- Current.Type = TT_UnaryOperator;
+ Current.setType(TT_UnaryOperator);
} else if (Current.is(tok::question)) {
if (Style.Language == FormatStyle::LK_JavaScript &&
Line.MustBeDeclaration && !Contexts.back().IsExpression) {
// In JavaScript, `interface X { foo?(): bar; }` is an optional method
// on the interface, not a ternary expression.
- Current.Type = TT_JsTypeOptionalQuestion;
+ Current.setType(TT_JsTypeOptionalQuestion);
} else {
- Current.Type = TT_ConditionalExpr;
+ Current.setType(TT_ConditionalExpr);
}
} else if (Current.isBinaryOperator() &&
(!Current.Previous || Current.Previous->isNot(tok::l_square)) &&
(!Current.is(tok::greater) &&
Style.Language != FormatStyle::LK_TextProto)) {
- Current.Type = TT_BinaryOperator;
+ Current.setType(TT_BinaryOperator);
} else if (Current.is(tok::comment)) {
if (Current.TokenText.startswith("/*")) {
if (Current.TokenText.endswith("*/"))
- Current.Type = TT_BlockComment;
+ Current.setType(TT_BlockComment);
else
// The lexer has for some reason determined a comment here. But we
// cannot really handle it, if it isn't properly terminated.
Current.Tok.setKind(tok::unknown);
} else {
- Current.Type = TT_LineComment;
+ Current.setType(TT_LineComment);
}
} else if (Current.is(tok::r_paren)) {
if (rParenEndsCast(Current))
- Current.Type = TT_CastRParen;
+ Current.setType(TT_CastRParen);
if (Current.MatchingParen && Current.Next &&
!Current.Next->isBinaryOperator() &&
!Current.Next->isOneOf(tok::semi, tok::colon, tok::l_brace,
@@ -1524,7 +1650,7 @@ private:
BeforeParen->TokenText == BeforeParen->TokenText.upper() &&
(!BeforeParen->Previous ||
BeforeParen->Previous->ClosesTemplateDeclaration))
- Current.Type = TT_FunctionAnnotationRParen;
+ Current.setType(TT_FunctionAnnotationRParen);
}
}
} else if (Current.is(tok::at) && Current.Next &&
@@ -1536,10 +1662,10 @@ private:
case tok::objc_interface:
case tok::objc_implementation:
case tok::objc_protocol:
- Current.Type = TT_ObjCDecl;
+ Current.setType(TT_ObjCDecl);
break;
case tok::objc_property:
- Current.Type = TT_ObjCProperty;
+ Current.setType(TT_ObjCProperty);
break;
default:
break;
@@ -1548,11 +1674,11 @@ private:
FormatToken *PreviousNoComment = Current.getPreviousNonComment();
if (PreviousNoComment &&
PreviousNoComment->isOneOf(tok::comma, tok::l_brace))
- Current.Type = TT_DesignatedInitializerPeriod;
+ Current.setType(TT_DesignatedInitializerPeriod);
else if (Style.Language == FormatStyle::LK_Java && Current.Previous &&
Current.Previous->isOneOf(TT_JavaAnnotation,
TT_LeadingJavaAnnotation)) {
- Current.Type = Current.Previous->Type;
+ Current.setType(Current.Previous->getType());
}
} else if (canBeObjCSelectorComponent(Current) &&
// FIXME(bug 36976): ObjC return types shouldn't use
@@ -1565,7 +1691,7 @@ private:
// This is the first part of an Objective-C selector name. (If there's no
// colon after this, this is the only place which annotates the identifier
// as a selector.)
- Current.Type = TT_SelectorName;
+ Current.setType(TT_SelectorName);
} else if (Current.isOneOf(tok::identifier, tok::kw_const,
tok::kw_noexcept) &&
Current.Previous &&
@@ -1573,7 +1699,7 @@ private:
Line.MightBeFunctionDecl && Contexts.size() == 1) {
// Line.MightBeFunctionDecl can only be true after the parentheses of a
// function declaration have been found.
- Current.Type = TT_TrailingAnnotation;
+ Current.setType(TT_TrailingAnnotation);
} else if ((Style.Language == FormatStyle::LK_Java ||
Style.Language == FormatStyle::LK_JavaScript) &&
Current.Previous) {
@@ -1582,13 +1708,13 @@ private:
const FormatToken &AtToken = *Current.Previous;
const FormatToken *Previous = AtToken.getPreviousNonComment();
if (!Previous || Previous->is(TT_LeadingJavaAnnotation))
- Current.Type = TT_LeadingJavaAnnotation;
+ Current.setType(TT_LeadingJavaAnnotation);
else
- Current.Type = TT_JavaAnnotation;
+ Current.setType(TT_JavaAnnotation);
} else if (Current.Previous->is(tok::period) &&
Current.Previous->isOneOf(TT_JavaAnnotation,
TT_LeadingJavaAnnotation)) {
- Current.Type = Current.Previous->Type;
+ Current.setType(Current.Previous->getType());
}
}
}
@@ -1640,8 +1766,9 @@ private:
/// Determine whether ')' is ending a cast.
bool rParenEndsCast(const FormatToken &Tok) {
- // C-style casts are only used in C++ and Java.
- if (!Style.isCpp() && Style.Language != FormatStyle::LK_Java)
+ // C-style casts are only used in C++, C# and Java.
+ if (!Style.isCSharp() && !Style.isCpp() &&
+ Style.Language != FormatStyle::LK_Java)
return false;
// Empty parens aren't casts and there are no casts at the end of the line.
@@ -1676,6 +1803,10 @@ private:
if (Tok.Next->is(tok::question))
return false;
+ // `foreach((A a, B b) in someList)` should not be seen as a cast.
+ if (Tok.Next->is(Keywords.kw_in) && Style.isCSharp())
+ return false;
+
// Functions which end with decorations like volatile, noexcept are unlikely
// to be casts.
if (Tok.Next->isOneOf(tok::kw_noexcept, tok::kw_volatile, tok::kw_const,
@@ -1749,6 +1880,10 @@ private:
if (Style.Language == FormatStyle::LK_JavaScript)
return TT_BinaryOperator;
+ // && in C# must be a binary operator.
+ if (Style.isCSharp() && Tok.is(tok::ampamp))
+ return TT_BinaryOperator;
+
const FormatToken *PrevToken = Tok.getPreviousNonComment();
if (!PrevToken)
return TT_UnaryOperator;
@@ -1800,14 +1935,16 @@ private:
return TT_BinaryOperator;
// "&&(" is quite unlikely to be two successive unary "&".
- if (Tok.is(tok::ampamp) && NextToken && NextToken->is(tok::l_paren))
+ if (Tok.is(tok::ampamp) && NextToken->is(tok::l_paren))
return TT_BinaryOperator;
// This catches some cases where evaluation order is used as control flow:
// aaa && aaa->f();
- const FormatToken *NextNextToken = NextToken->getNextNonComment();
- if (NextNextToken && NextNextToken->is(tok::arrow))
- return TT_BinaryOperator;
+ if (NextToken->Tok.isAnyIdentifier()) {
+ const FormatToken *NextNextToken = NextToken->getNextNonComment();
+ if (NextNextToken && NextNextToken->is(tok::arrow))
+ return TT_BinaryOperator;
+ }
// It is very unlikely that we are going to find a pointer or reference type
// definition on the RHS of an assignment.
@@ -2176,6 +2313,10 @@ static bool isFunctionDeclarationName(const FormatToken &Current,
Next = Next->Next;
continue;
}
+ if (Next->is(TT_TemplateOpener) && Next->MatchingParen) {
+ Next = Next->MatchingParen;
+ continue;
+ }
break;
}
@@ -2277,7 +2418,7 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
bool InFunctionDecl = Line.MightBeFunctionDecl;
while (Current) {
if (isFunctionDeclarationName(*Current, Line))
- Current->Type = TT_FunctionDeclarationName;
+ Current->setType(TT_FunctionDeclarationName);
if (Current->is(TT_LineComment)) {
if (Current->Previous->BlockKind == BK_BracedInit &&
Current->Previous->opensScope())
@@ -2596,7 +2737,7 @@ bool TokenAnnotator::spaceRequiredBeforeParens(const FormatToken &Right) const {
/// otherwise.
static bool isKeywordWithCondition(const FormatToken &Tok) {
return Tok.isOneOf(tok::kw_if, tok::kw_for, tok::kw_while, tok::kw_switch,
- tok::kw_constexpr);
+ tok::kw_constexpr, tok::kw_catch);
}
bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
@@ -2703,15 +2844,48 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
Left.Previous &&
!Left.Previous->isOneOf(tok::l_paren, tok::coloncolon,
tok::l_square));
+ // Ensure right pointer alignment with ellipsis e.g. int *...P
+ if (Left.is(tok::ellipsis) && Left.Previous &&
+ Left.Previous->isOneOf(tok::star, tok::amp, tok::ampamp))
+ return Style.PointerAlignment != FormatStyle::PAS_Right;
+
if (Right.is(tok::star) && Left.is(tok::l_paren))
return false;
- if (Right.isOneOf(tok::star, tok::amp, tok::ampamp) &&
- (Left.is(tok::identifier) || Left.isSimpleTypeSpecifier()) &&
- Left.Previous && Left.Previous->is(tok::kw_operator))
- // Space between the type and the *
- // operator void*(), operator char*(), operator Foo*() dependant
- // on PointerAlignment style.
- return (Style.PointerAlignment != FormatStyle::PAS_Left);
+ if (Left.is(tok::star) && Right.isOneOf(tok::star, tok::amp, tok::ampamp))
+ return false;
+ if (Right.isOneOf(tok::star, tok::amp, tok::ampamp)) {
+ const FormatToken *Previous = &Left;
+ while (Previous && !Previous->is(tok::kw_operator)) {
+ if (Previous->is(tok::identifier) || Previous->isSimpleTypeSpecifier()) {
+ Previous = Previous->getPreviousNonComment();
+ continue;
+ }
+ if (Previous->is(TT_TemplateCloser) && Previous->MatchingParen) {
+ Previous = Previous->MatchingParen->getPreviousNonComment();
+ continue;
+ }
+ if (Previous->is(tok::coloncolon)) {
+ Previous = Previous->getPreviousNonComment();
+ continue;
+ }
+ break;
+ }
+ // Space between the type and the * in:
+ // operator void*()
+ // operator char*()
+ // operator /*comment*/ const char*()
+ // operator volatile /*comment*/ char*()
+ // operator Foo*()
+ // operator C<T>*()
+ // operator std::Foo*()
+ // operator C<T>::D<U>*()
+ // dependent on PointerAlignment style.
+ if (Previous &&
+ (Previous->endsSequence(tok::kw_operator) ||
+ Previous->endsSequence(tok::kw_const, tok::kw_operator) ||
+ Previous->endsSequence(tok::kw_volatile, tok::kw_operator)))
+ return (Style.PointerAlignment != FormatStyle::PAS_Left);
+ }
const auto SpaceRequiredForArrayInitializerLSquare =
[](const FormatToken &LSquareTok, const FormatStyle &Style) {
return Style.SpacesInContainerLiterals ||
@@ -2755,10 +2929,19 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
// No whitespace in x(/*foo=*/1), except for JavaScript.
return Style.Language == FormatStyle::LK_JavaScript ||
!Left.TokenText.endswith("=*/");
+
+ // Space between template and attribute.
+ // e.g. template <typename T> [[nodiscard]] ...
+ if (Left.is(TT_TemplateCloser) && Right.is(TT_AttributeSquare))
+ return true;
if (Right.is(tok::l_paren)) {
if ((Left.is(tok::r_paren) && Left.is(TT_AttributeParen)) ||
(Left.is(tok::r_square) && Left.is(TT_AttributeSquare)))
return true;
+ if (Style.SpaceBeforeParens ==
+ FormatStyle::SBPO_ControlStatementsExceptForEachMacros &&
+ Left.is(TT_ForEachMacro))
+ return false;
return Line.Type == LT_ObjCDecl || Left.is(tok::semi) ||
(Style.SpaceBeforeParens != FormatStyle::SBPO_Never &&
(Left.isOneOf(tok::pp_elif, tok::kw_for, tok::kw_while,
@@ -2807,7 +2990,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
Right.MatchingParen->endsSequence(TT_DictLiteral, tok::at))
// Objective-C dictionary literal -> no space before closing brace.
return false;
- if (Right.Type == TT_TrailingAnnotation &&
+ if (Right.getType() == TT_TrailingAnnotation &&
Right.isOneOf(tok::amp, tok::ampamp) &&
Left.isOneOf(tok::kw_const, tok::kw_volatile) &&
(!Right.Next || Right.Next->is(tok::semi)))
@@ -2855,13 +3038,83 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Left.is(tok::numeric_constant) && Right.is(tok::percent))
return Right.WhitespaceRange.getEnd() != Right.WhitespaceRange.getBegin();
} else if (Style.isCSharp()) {
+ // Require spaces around '{' and before '}' unless they appear in
+ // interpolated strings. Interpolated strings are merged into a single token
+ // so cannot have spaces inserted by this function.
+
+ // No space between 'this' and '['
+ if (Left.is(tok::kw_this) && Right.is(tok::l_square))
+ return false;
+
+ // No space between 'new' and '('
+ if (Left.is(tok::kw_new) && Right.is(tok::l_paren))
+ return false;
+
+ // Space before { (including space within '{ {').
+ if (Right.is(tok::l_brace))
+ return true;
+
+ // Spaces inside braces.
+ if (Left.is(tok::l_brace) && Right.isNot(tok::r_brace))
+ return true;
+
+ if (Left.isNot(tok::l_brace) && Right.is(tok::r_brace))
+ return true;
+
+ // Spaces around '=>'.
+ if (Left.is(TT_JsFatArrow) || Right.is(TT_JsFatArrow))
+ return true;
+
+ // No spaces around attribute target colons
+ if (Left.is(TT_AttributeColon) || Right.is(TT_AttributeColon))
+ return false;
+
// space between type and variable e.g. Dictionary<string,string> foo;
if (Left.is(TT_TemplateCloser) && Right.is(TT_StartOfName))
return true;
+
+ // spaces inside square brackets.
+ if (Left.is(tok::l_square) || Right.is(tok::r_square))
+ return Style.SpacesInSquareBrackets;
+
+ // No space before ? in nullable types.
+ if (Right.is(TT_CSharpNullable))
+ return false;
+
+ // Require space after ? in nullable types except in generics and casts.
+ if (Left.is(TT_CSharpNullable))
+ return !Right.isOneOf(TT_TemplateCloser, tok::r_paren);
+
+ // No space before or after '?.'.
+ if (Left.is(TT_CSharpNullConditional) || Right.is(TT_CSharpNullConditional))
+ return false;
+
+ // Space before and after '??'.
+ if (Left.is(TT_CSharpNullCoalescing) || Right.is(TT_CSharpNullCoalescing))
+ return true;
+
+ // No space before '?['.
+ if (Right.is(TT_CSharpNullConditionalLSquare))
+ return false;
+
+ // No space between consecutive commas '[,,]'.
+ if (Left.is(tok::comma) && Right.is(tok::comma))
+ return false;
+
+ // Possible space inside `?[ 0 ]`.
+ if (Left.is(TT_CSharpNullConditionalLSquare))
+ return Style.SpacesInSquareBrackets;
+
+ // space after var in `var (key, value)`
+ if (Left.is(Keywords.kw_var) && Right.is(tok::l_paren))
+ return true;
+
// space between keywords and paren e.g. "using ("
if (Right.is(tok::l_paren))
- if (Left.is(tok::kw_using))
- return spaceRequiredBeforeParens(Left);
+ if (Left.isOneOf(tok::kw_using, Keywords.kw_async, Keywords.kw_when,
+ Keywords.kw_lock))
+ return Style.SpaceBeforeParens == FormatStyle::SBPO_ControlStatements ||
+ spaceRequiredBeforeParens(Right);
} else if (Style.Language == FormatStyle::LK_JavaScript) {
if (Left.is(TT_JsFatArrow))
return true;
@@ -2881,9 +3134,9 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
(Right.is(TT_TemplateString) && Right.TokenText.startswith("}")))
return false;
// In tagged template literals ("html`bar baz`"), there is no space between
- // the tag identifier and the template string. getIdentifierInfo makes sure
- // that the identifier is not a pseudo keyword like `yield`, either.
- if (Left.is(tok::identifier) && Keywords.IsJavaScriptIdentifier(Left) &&
+ // the tag identifier and the template string.
+ if (Keywords.IsJavaScriptIdentifier(Left,
+ /* AcceptIdentifierName= */ false) &&
Right.is(TT_TemplateString))
return false;
if (Right.is(tok::star) &&
@@ -3012,6 +3265,8 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return Style.SpacesInContainerLiterals;
if (Right.is(TT_AttributeColon))
return false;
+ if (Right.is(TT_CSharpNamedArgumentColon))
+ return false;
return true;
}
if (Left.is(TT_UnaryOperator)) {
@@ -3062,12 +3317,13 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return Right.WhitespaceRange.getBegin() != Right.WhitespaceRange.getEnd();
if (Right.is(tok::coloncolon) &&
!Left.isOneOf(tok::l_brace, tok::comment, tok::l_paren))
+ // Put a space between < and :: in vector< ::std::string >
return (Left.is(TT_TemplateOpener) &&
- Style.Standard < FormatStyle::LS_Cpp11) ||
+ (Style.Standard < FormatStyle::LS_Cpp11 || Style.SpacesInAngles)) ||
!(Left.isOneOf(tok::l_paren, tok::r_paren, tok::l_square,
- tok::kw___super, TT_TemplateCloser,
- TT_TemplateOpener)) ||
- (Left.is(tok ::l_paren) && Style.SpacesInParentheses);
+ tok::kw___super, TT_TemplateOpener,
+ TT_TemplateCloser)) ||
+ (Left.is(tok::l_paren) && Style.SpacesInParentheses);
if ((Left.is(TT_TemplateOpener)) != (Right.is(TT_TemplateCloser)))
return Style.SpacesInAngles;
// Space before TT_StructuredBindingLSquare.
@@ -3104,13 +3360,67 @@ static bool isAllmanBrace(const FormatToken &Tok) {
!Tok.isOneOf(TT_ObjCBlockLBrace, TT_LambdaLBrace, TT_DictLiteral);
}
+// Returns 'true' if 'Tok' is a function argument.
+static bool IsFunctionArgument(const FormatToken &Tok) {
+ return Tok.MatchingParen && Tok.MatchingParen->Next &&
+ Tok.MatchingParen->Next->isOneOf(tok::comma, tok::r_paren);
+}
+
+static bool
+isItAnEmptyLambdaAllowed(const FormatToken &Tok,
+ FormatStyle::ShortLambdaStyle ShortLambdaOption) {
+ return Tok.Children.empty() && ShortLambdaOption != FormatStyle::SLS_None;
+}
+
+static bool
+isItAInlineLambdaAllowed(const FormatToken &Tok,
+ FormatStyle::ShortLambdaStyle ShortLambdaOption) {
+ return (ShortLambdaOption == FormatStyle::SLS_Inline &&
+ IsFunctionArgument(Tok)) ||
+ (ShortLambdaOption == FormatStyle::SLS_All);
+}
+
+static bool isOneChildWithoutMustBreakBefore(const FormatToken &Tok) {
+ if (Tok.Children.size() != 1)
+ return false;
+ FormatToken *curElt = Tok.Children[0]->First;
+ while (curElt) {
+ if (curElt->MustBreakBefore)
+ return false;
+ curElt = curElt->Next;
+ }
+ return true;
+}
+static bool isAllmanLambdaBrace(const FormatToken &Tok) {
+ return (Tok.is(tok::l_brace) && Tok.BlockKind == BK_Block &&
+ !Tok.isOneOf(TT_ObjCBlockLBrace, TT_DictLiteral));
+}
+
+static bool isAllmanBraceIncludedBreakableLambda(
+ const FormatToken &Tok, FormatStyle::ShortLambdaStyle ShortLambdaOption) {
+ if (!isAllmanLambdaBrace(Tok))
+ return false;
+
+ if (isItAnEmptyLambdaAllowed(Tok, ShortLambdaOption))
+ return false;
+
+ return !isItAInlineLambdaAllowed(Tok, ShortLambdaOption) ||
+ !isOneChildWithoutMustBreakBefore(Tok);
+}
+
bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
const FormatToken &Right) {
const FormatToken &Left = *Right.Previous;
if (Right.NewlinesBefore > 1 && Style.MaxEmptyLinesToKeep > 0)
return true;
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isCSharp()) {
+ if (Right.is(TT_CSharpNamedArgumentColon) ||
+ Left.is(TT_CSharpNamedArgumentColon))
+ return false;
+ if (Right.is(TT_CSharpGenericTypeConstraint))
+ return true;
+ } else if (Style.Language == FormatStyle::LK_JavaScript) {
// FIXME: This might apply to other languages and token kinds.
if (Right.is(tok::string_literal) && Left.is(tok::plus) && Left.Previous &&
Left.Previous->is(tok::string_literal))
@@ -3133,6 +3443,25 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
// JavaScript top-level enum key/value pairs are put on separate lines
// instead of bin-packing.
return true;
+ if (Right.is(tok::r_brace) && Left.is(tok::l_brace) && Left.Previous &&
+ Left.Previous->is(TT_JsFatArrow)) {
+ // JS arrow function (=> {...}).
+ switch (Style.AllowShortLambdasOnASingleLine) {
+ case FormatStyle::SLS_All:
+ return false;
+ case FormatStyle::SLS_None:
+ return true;
+ case FormatStyle::SLS_Empty:
+ return !Left.Children.empty();
+ case FormatStyle::SLS_Inline:
+ // allow one-lining inline (e.g. in function call args) and empty arrow
+ // functions.
+ return (Left.NestingLevel == 0 && Line.Level == 0) &&
+ !Left.Children.empty();
+ }
+ llvm_unreachable("Unknown FormatStyle::ShortLambdaStyle enum");
+ }
+
if (Right.is(tok::r_brace) && Left.is(tok::l_brace) &&
!Left.Children.empty())
// Support AllowShortFunctionsOnASingleLine for JavaScript.
@@ -3220,6 +3549,14 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
}
if (Right.is(TT_InlineASMBrace))
return Right.HasUnescapedNewline;
+
+ auto ShortLambdaOption = Style.AllowShortLambdasOnASingleLine;
+ if (Style.BraceWrapping.BeforeLambdaBody &&
+ (isAllmanBraceIncludedBreakableLambda(Left, ShortLambdaOption) ||
+ isAllmanBraceIncludedBreakableLambda(Right, ShortLambdaOption))) {
+ return true;
+ }
+
if (isAllmanBrace(Left) || isAllmanBrace(Right))
return (Line.startsWith(tok::kw_enum) && Style.BraceWrapping.AfterEnum) ||
(Line.startsWith(tok::kw_typedef, tok::kw_enum) &&
@@ -3231,8 +3568,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
return true;
if (Left.is(TT_LambdaLBrace)) {
- if (Left.MatchingParen && Left.MatchingParen->Next &&
- Left.MatchingParen->Next->isOneOf(tok::comma, tok::r_paren) &&
+ if (IsFunctionArgument(Left) &&
Style.AllowShortLambdasOnASingleLine == FormatStyle::SLS_Inline)
return false;
@@ -3243,13 +3579,6 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
return true;
}
- // Put multiple C# attributes on a new line.
- if (Style.isCSharp() &&
- ((Left.is(TT_AttributeSquare) && Left.is(tok::r_square)) ||
- (Left.is(tok::r_square) && Right.is(TT_AttributeSquare) &&
- Right.is(tok::l_square))))
- return true;
-
// Put multiple Java annotation on a new line.
if ((Style.Language == FormatStyle::LK_Java ||
Style.Language == FormatStyle::LK_JavaScript) &&
@@ -3376,9 +3705,15 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
const FormatToken &Right) {
const FormatToken &Left = *Right.Previous;
-
// Language-specific stuff.
- if (Style.Language == FormatStyle::LK_Java) {
+ if (Style.isCSharp()) {
+ if (Left.isOneOf(TT_CSharpNamedArgumentColon, TT_AttributeColon) ||
+ Right.isOneOf(TT_CSharpNamedArgumentColon, TT_AttributeColon))
+ return false;
+ // Only break after commas for generic type constraints.
+ if (Line.First->is(TT_CSharpGenericTypeConstraint))
+ return Left.is(TT_CSharpGenericTypeConstraintComma);
+ } else if (Style.Language == FormatStyle::LK_Java) {
if (Left.isOneOf(Keywords.kw_throws, Keywords.kw_extends,
Keywords.kw_implements))
return false;
@@ -3592,7 +3927,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
if (Right.is(tok::kw___attribute) ||
(Right.is(tok::l_square) && Right.is(TT_AttributeSquare)))
- return true;
+ return !Left.is(TT_AttributeSquare);
if (Left.is(tok::identifier) && Right.is(tok::string_literal))
return true;
@@ -3637,11 +3972,21 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
if ((Left.is(TT_AttributeSquare) && Right.is(tok::l_square)) ||
(Left.is(tok::r_square) && Right.is(TT_AttributeSquare)))
return false;
+
+ auto ShortLambdaOption = Style.AllowShortLambdasOnASingleLine;
+ if (Style.BraceWrapping.BeforeLambdaBody) {
+ if (isAllmanLambdaBrace(Left))
+ return !isItAnEmptyLambdaAllowed(Left, ShortLambdaOption);
+ if (isAllmanLambdaBrace(Right))
+ return !isItAnEmptyLambdaAllowed(Right, ShortLambdaOption);
+ }
+
return Left.isOneOf(tok::comma, tok::coloncolon, tok::semi, tok::l_brace,
tok::kw_class, tok::kw_struct, tok::comment) ||
Right.isMemberAccess() ||
Right.isOneOf(TT_TrailingReturnArrow, TT_LambdaArrow, tok::lessless,
tok::colon, tok::l_square, tok::at) ||
+ (Style.BraceWrapping.BeforeLambdaBody && Right.is(TT_LambdaLBrace)) ||
(Left.is(tok::r_paren) &&
Right.isOneOf(tok::identifier, tok::kw_const)) ||
(Left.is(tok::l_paren) && !Right.is(tok::r_paren)) ||
@@ -3654,9 +3999,9 @@ void TokenAnnotator::printDebugInfo(const AnnotatedLine &Line) {
while (Tok) {
llvm::errs() << " M=" << Tok->MustBreakBefore
<< " C=" << Tok->CanBreakBefore
- << " T=" << getTokenTypeName(Tok->Type)
+ << " T=" << getTokenTypeName(Tok->getType())
<< " S=" << Tok->SpacesRequiredBefore
- << " B=" << Tok->BlockParameterCount
+ << " F=" << Tok->Finalized << " B=" << Tok->BlockParameterCount
<< " BK=" << Tok->BlockKind << " P=" << Tok->SplitPenalty
<< " Name=" << Tok->Tok.getName() << " L=" << Tok->TotalLength
<< " PPK=" << Tok->PackingKind << " FakeLParens=";
diff --git a/clang/lib/Format/UnwrappedLineFormatter.cpp b/clang/lib/Format/UnwrappedLineFormatter.cpp
index fec85f1174da..22f27a668dcc 100644
--- a/clang/lib/Format/UnwrappedLineFormatter.cpp
+++ b/clang/lib/Format/UnwrappedLineFormatter.cpp
@@ -64,6 +64,8 @@ public:
}
if (static_cast<int>(Indent) + Offset >= 0)
Indent += Offset;
+ if (Line.First->is(TT_CSharpGenericTypeConstraint))
+ Indent = Line.Level * Style.IndentWidth + Style.ContinuationIndentWidth;
}
/// Update the indent state given that \p Line indent should be
@@ -340,21 +342,6 @@ private:
? 1
: 0;
}
- // Try to merge either empty or one-line block if is precedeed by control
- // statement token
- if (TheLine->First->is(tok::l_brace) && TheLine->First == TheLine->Last &&
- I != AnnotatedLines.begin() &&
- I[-1]->First->isOneOf(tok::kw_if, tok::kw_while, tok::kw_for)) {
- unsigned MergedLines = 0;
- if (Style.AllowShortBlocksOnASingleLine != FormatStyle::SBS_Never) {
- MergedLines = tryMergeSimpleBlock(I - 1, E, Limit);
- // If we managed to merge the block, discard the first merged line
- // since we are merging starting from I.
- if (MergedLines > 0)
- --MergedLines;
- }
- return MergedLines;
- }
// Don't merge block with left brace wrapped after ObjC special blocks
if (TheLine->First->is(tok::l_brace) && I != AnnotatedLines.begin() &&
I[-1]->First->is(tok::at) && I[-1]->First->Next) {
@@ -404,7 +391,7 @@ private:
? tryMergeSimpleControlStatement(I, E, Limit)
: 0;
}
- if (TheLine->First->isOneOf(tok::kw_for, tok::kw_while)) {
+ if (TheLine->First->isOneOf(tok::kw_for, tok::kw_while, tok::kw_do)) {
return Style.AllowShortLoopsOnASingleLine
? tryMergeSimpleControlStatement(I, E, Limit)
: 0;
@@ -449,7 +436,10 @@ private:
return 0;
Limit = limitConsideringMacros(I + 1, E, Limit);
AnnotatedLine &Line = **I;
- if (Line.Last->isNot(tok::r_paren))
+ if (!Line.First->is(tok::kw_do) && Line.Last->isNot(tok::r_paren))
+ return 0;
+ // Only merge do while if do is the only statement on the line.
+ if (Line.First->is(tok::kw_do) && !Line.Last->is(tok::kw_do))
return 0;
if (1 + I[1]->Last->TotalLength > Limit)
return 0;
@@ -593,9 +583,10 @@ private:
FormatToken *RecordTok = Line.First;
// Skip record modifiers.
while (RecordTok->Next &&
- RecordTok->isOneOf(tok::kw_typedef, tok::kw_export,
- Keywords.kw_declare, Keywords.kw_abstract,
- tok::kw_default))
+ RecordTok->isOneOf(
+ tok::kw_typedef, tok::kw_export, Keywords.kw_declare,
+ Keywords.kw_abstract, tok::kw_default, tok::kw_public,
+ tok::kw_private, tok::kw_protected, Keywords.kw_internal))
RecordTok = RecordTok->Next;
if (RecordTok &&
RecordTok->isOneOf(tok::kw_class, tok::kw_union, tok::kw_struct,
@@ -817,7 +808,8 @@ protected:
if (!DryRun) {
Whitespaces->replaceWhitespace(
*Child->First, /*Newlines=*/0, /*Spaces=*/1,
- /*StartOfTokenColumn=*/State.Column, State.Line->InPPDirective);
+ /*StartOfTokenColumn=*/State.Column, /*IsAligned=*/false,
+ State.Line->InPPDirective);
}
Penalty +=
formatLine(*Child, State.Column + 1, /*FirstStartColumn=*/0, DryRun);
@@ -1238,7 +1230,8 @@ void UnwrappedLineFormatter::formatFirstToken(
// If in Whitesmiths mode, indent start and end of blocks
if (Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths) {
- if (RootToken.isOneOf(tok::l_brace, tok::r_brace, tok::kw_case))
+ if (RootToken.isOneOf(tok::l_brace, tok::r_brace, tok::kw_case,
+ tok::kw_default))
Indent += Style.IndentWidth;
}
@@ -1249,6 +1242,7 @@ void UnwrappedLineFormatter::formatFirstToken(
Indent = 0;
Whitespaces->replaceWhitespace(RootToken, Newlines, Indent, Indent,
+ /*IsAligned=*/false,
Line.InPPDirective &&
!RootToken.HasUnescapedNewline);
}
diff --git a/clang/lib/Format/UnwrappedLineParser.cpp b/clang/lib/Format/UnwrappedLineParser.cpp
index ead6b4743207..a37386425aae 100644
--- a/clang/lib/Format/UnwrappedLineParser.cpp
+++ b/clang/lib/Format/UnwrappedLineParser.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "UnwrappedLineParser.h"
+#include "FormatToken.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -323,13 +324,54 @@ void UnwrappedLineParser::parseFile() {
addUnwrappedLine();
}
+void UnwrappedLineParser::parseCSharpGenericTypeConstraint() {
+ do {
+ switch (FormatTok->Tok.getKind()) {
+ case tok::l_brace:
+ return;
+ default:
+ if (FormatTok->is(Keywords.kw_where)) {
+ addUnwrappedLine();
+ nextToken();
+ parseCSharpGenericTypeConstraint();
+ break;
+ }
+ nextToken();
+ break;
+ }
+ } while (!eof());
+}
+
+void UnwrappedLineParser::parseCSharpAttribute() {
+ int UnpairedSquareBrackets = 1;
+ do {
+ switch (FormatTok->Tok.getKind()) {
+ case tok::r_square:
+ nextToken();
+ --UnpairedSquareBrackets;
+ if (UnpairedSquareBrackets == 0) {
+ addUnwrappedLine();
+ return;
+ }
+ break;
+ case tok::l_square:
+ ++UnpairedSquareBrackets;
+ nextToken();
+ break;
+ default:
+ nextToken();
+ break;
+ }
+ } while (!eof());
+}
+
void UnwrappedLineParser::parseLevel(bool HasOpeningBrace) {
bool SwitchLabelEncountered = false;
do {
tok::TokenKind kind = FormatTok->Tok.getKind();
- if (FormatTok->Type == TT_MacroBlockBegin) {
+ if (FormatTok->getType() == TT_MacroBlockBegin) {
kind = tok::l_brace;
- } else if (FormatTok->Type == TT_MacroBlockEnd) {
+ } else if (FormatTok->getType() == TT_MacroBlockEnd) {
kind = tok::r_brace;
}
@@ -381,6 +423,13 @@ void UnwrappedLineParser::parseLevel(bool HasOpeningBrace) {
SwitchLabelEncountered = true;
parseStructuralElement();
break;
+ case tok::l_square:
+ if (Style.isCSharp()) {
+ nextToken();
+ parseCSharpAttribute();
+ break;
+ }
+ LLVM_FALLTHROUGH;
default:
parseStructuralElement();
break;
@@ -851,14 +900,14 @@ void UnwrappedLineParser::parsePPUnknown() {
addUnwrappedLine();
}
-// Here we blacklist certain tokens that are not usually the first token in an
+// Here we exclude certain tokens that are not usually the first token in an
// unwrapped line. This is used in attempt to distinguish macro calls without
// trailing semicolons from other constructs split to several lines.
-static bool tokenCanStartNewLine(const clang::Token &Tok) {
+static bool tokenCanStartNewLine(const FormatToken &Tok) {
// Semicolon can be a null-statement, l_square can be a start of a macro or
// a C++11 attribute, but this doesn't seem to be common.
return Tok.isNot(tok::semi) && Tok.isNot(tok::l_brace) &&
- Tok.isNot(tok::l_square) &&
+ Tok.isNot(TT_AttributeSquare) &&
// Tokens that can only be used as binary operators and a part of
// overloaded operator names.
Tok.isNot(tok::period) && Tok.isNot(tok::periodstar) &&
@@ -984,11 +1033,11 @@ void UnwrappedLineParser::parseStructuralElement() {
case tok::kw_asm:
nextToken();
if (FormatTok->is(tok::l_brace)) {
- FormatTok->Type = TT_InlineASMBrace;
+ FormatTok->setType(TT_InlineASMBrace);
nextToken();
while (FormatTok && FormatTok->isNot(tok::eof)) {
if (FormatTok->is(tok::r_brace)) {
- FormatTok->Type = TT_InlineASMBrace;
+ FormatTok->setType(TT_InlineASMBrace);
nextToken();
addUnwrappedLine();
break;
@@ -1011,13 +1060,22 @@ void UnwrappedLineParser::parseStructuralElement() {
parseAccessSpecifier();
return;
case tok::kw_if:
+ if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration)
+ // field/method declaration.
+ break;
parseIfThenElse();
return;
case tok::kw_for:
case tok::kw_while:
+ if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration)
+ // field/method declaration.
+ break;
parseForOrWhileLoop();
return;
case tok::kw_do:
+ if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration)
+ // field/method declaration.
+ break;
parseDoWhile();
return;
case tok::kw_switch:
@@ -1045,6 +1103,9 @@ void UnwrappedLineParser::parseStructuralElement() {
return;
case tok::kw_try:
case tok::kw___try:
+ if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration)
+ // field/method declaration.
+ break;
parseTryCatch();
return;
case tok::kw_extern:
@@ -1052,11 +1113,16 @@ void UnwrappedLineParser::parseStructuralElement() {
if (FormatTok->Tok.is(tok::string_literal)) {
nextToken();
if (FormatTok->Tok.is(tok::l_brace)) {
- if (Style.BraceWrapping.AfterExternBlock) {
- addUnwrappedLine();
- parseBlock(/*MustBeDeclaration=*/true);
+ if (!Style.IndentExternBlock) {
+ if (Style.BraceWrapping.AfterExternBlock) {
+ addUnwrappedLine();
+ }
+ parseBlock(/*MustBeDeclaration=*/true,
+ /*AddLevel=*/Style.BraceWrapping.AfterExternBlock);
} else {
- parseBlock(/*MustBeDeclaration=*/true, /*AddLevel=*/false);
+ parseBlock(/*MustBeDeclaration=*/true,
+ /*AddLevel=*/Style.IndentExternBlock ==
+ FormatStyle::IEBS_Indent);
}
addUnwrappedLine();
return;
@@ -1274,14 +1340,14 @@ void UnwrappedLineParser::parseStructuralElement() {
parseChildBlock();
break;
case tok::l_brace:
- if (!tryToParseBracedList()) {
+ if (!tryToParsePropertyAccessor() && !tryToParseBracedList()) {
// A block outside of parentheses must be the last part of a
// structural element.
// FIXME: Figure out cases where this is not true, and add projections
// for them (the one we know is missing are lambdas).
if (Style.BraceWrapping.AfterFunction)
addUnwrappedLine();
- FormatTok->Type = TT_FunctionLBrace;
+ FormatTok->setType(TT_FunctionLBrace);
parseBlock(/*MustBeDeclaration=*/false);
addUnwrappedLine();
return;
@@ -1290,12 +1356,24 @@ void UnwrappedLineParser::parseStructuralElement() {
// element continues.
break;
case tok::kw_try:
+ if (Style.Language == FormatStyle::LK_JavaScript &&
+ Line->MustBeDeclaration) {
+ // field/method declaration.
+ nextToken();
+ break;
+ }
// We arrive here when parsing function-try blocks.
if (Style.BraceWrapping.AfterFunction)
addUnwrappedLine();
parseTryCatch();
return;
case tok::identifier: {
+ if (Style.isCSharp() && FormatTok->is(Keywords.kw_where) &&
+ Line->MustBeDeclaration) {
+ addUnwrappedLine();
+ parseCSharpGenericTypeConstraint();
+ break;
+ }
if (FormatTok->is(TT_MacroBlockEnd)) {
addUnwrappedLine();
return;
@@ -1368,7 +1446,7 @@ void UnwrappedLineParser::parseStructuralElement() {
: CommentsBeforeNextToken.front()->NewlinesBefore > 0;
if (FollowedByNewline && (Text.size() >= 5 || FunctionLike) &&
- tokenCanStartNewLine(FormatTok->Tok) && Text == Text.upper()) {
+ tokenCanStartNewLine(*FormatTok) && Text == Text.upper()) {
addUnwrappedLine();
return;
}
@@ -1381,19 +1459,30 @@ void UnwrappedLineParser::parseStructuralElement() {
// followed by a curly.
if (FormatTok->is(TT_JsFatArrow)) {
nextToken();
- if (FormatTok->is(tok::l_brace))
+ if (FormatTok->is(tok::l_brace)) {
+ // C# may break after => if the next character is a newline.
+ if (Style.isCSharp() && Style.BraceWrapping.AfterFunction == true) {
+ // calling `addUnwrappedLine()` here causes odd parsing errors.
+ FormatTok->MustBreakBefore = true;
+ }
parseChildBlock();
+ }
break;
}
nextToken();
if (FormatTok->Tok.is(tok::l_brace)) {
+ // Block kind should probably be set to BK_BracedInit for any language.
+ // C# needs this change to ensure that array initialisers and object
+ // initialisers are indented the same way.
+ if (Style.isCSharp())
+ FormatTok->BlockKind = BK_BracedInit;
nextToken();
parseBracedList();
} else if (Style.Language == FormatStyle::LK_Proto &&
FormatTok->Tok.is(tok::less)) {
nextToken();
- parseBracedList(/*ContinueOnSemicolons=*/false,
+ parseBracedList(/*ContinueOnSemicolons=*/false, /*IsEnum=*/false,
/*ClosingBraceKind=*/tok::greater);
}
break;
@@ -1410,6 +1499,96 @@ void UnwrappedLineParser::parseStructuralElement() {
} while (!eof());
}
+bool UnwrappedLineParser::tryToParsePropertyAccessor() {
+ assert(FormatTok->is(tok::l_brace));
+ if (!Style.isCSharp())
+ return false;
+ // See if it's a property accessor.
+ if (FormatTok->Previous->isNot(tok::identifier))
+ return false;
+
+ // See if we are inside a property accessor.
+ //
+ // Record the current token position so that we can advance and
+ // reset the current token. `Next` is not set yet so we need
+ // another way to advance along the token stream.
+ unsigned int StoredPosition = Tokens->getPosition();
+ FormatToken *Tok = Tokens->getNextToken();
+
+ // A trivial property accessor is of the form:
+ // { [ACCESS_SPECIFIER] [get]; [ACCESS_SPECIFIER] [set] }
+ // Track these as they do not require line breaks to be introduced.
+ bool HasGetOrSet = false;
+ bool IsTrivialPropertyAccessor = true;
+ while (!eof()) {
+ if (Tok->isOneOf(tok::semi, tok::kw_public, tok::kw_private,
+ tok::kw_protected, Keywords.kw_internal, Keywords.kw_get,
+ Keywords.kw_set)) {
+ if (Tok->isOneOf(Keywords.kw_get, Keywords.kw_set))
+ HasGetOrSet = true;
+ Tok = Tokens->getNextToken();
+ continue;
+ }
+ if (Tok->isNot(tok::r_brace))
+ IsTrivialPropertyAccessor = false;
+ break;
+ }
+
+ if (!HasGetOrSet) {
+ Tokens->setPosition(StoredPosition);
+ return false;
+ }
+
+ // Try to parse the property accessor:
+ // https://docs.microsoft.com/en-us/dotnet/csharp/programming-guide/classes-and-structs/properties
+ Tokens->setPosition(StoredPosition);
+ if (!IsTrivialPropertyAccessor && Style.BraceWrapping.AfterFunction == true)
+ addUnwrappedLine();
+ nextToken();
+ do {
+ switch (FormatTok->Tok.getKind()) {
+ case tok::r_brace:
+ nextToken();
+ if (FormatTok->is(tok::equal)) {
+ while (!eof() && FormatTok->isNot(tok::semi))
+ nextToken();
+ nextToken();
+ }
+ addUnwrappedLine();
+ return true;
+ case tok::l_brace:
+ ++Line->Level;
+ parseBlock(/*MustBeDeclaration=*/true);
+ addUnwrappedLine();
+ --Line->Level;
+ break;
+ case tok::equal:
+ if (FormatTok->is(TT_JsFatArrow)) {
+ ++Line->Level;
+ do {
+ nextToken();
+ } while (!eof() && FormatTok->isNot(tok::semi));
+ nextToken();
+ addUnwrappedLine();
+ --Line->Level;
+ break;
+ }
+ nextToken();
+ break;
+ default:
+ if (FormatTok->isOneOf(Keywords.kw_get, Keywords.kw_set) &&
+ !IsTrivialPropertyAccessor) {
+ // Non-trivial get/set needs to be on its own line.
+ addUnwrappedLine();
+ }
+ nextToken();
+ }
+ } while (!eof());
+
+ // Unreachable for well-formed code (paired '{' and '}').
+ return true;
+}
+
bool UnwrappedLineParser::tryToParseLambda() {
if (!Style.isCpp()) {
nextToken();
@@ -1480,6 +1659,7 @@ bool UnwrappedLineParser::tryToParseLambda() {
case tok::lessequal:
case tok::question:
case tok::colon:
+ case tok::ellipsis:
case tok::kw_true:
case tok::kw_false:
if (SeenArrow) {
@@ -1491,7 +1671,7 @@ bool UnwrappedLineParser::tryToParseLambda() {
// This might or might not actually be a lambda arrow (this could be an
// ObjC method invocation followed by a dereferencing arrow). We might
// reset this back to TT_Unknown in TokenAnnotator.
- FormatTok->Type = TT_LambdaArrow;
+ FormatTok->setType(TT_LambdaArrow);
SeenArrow = true;
nextToken();
break;
@@ -1499,8 +1679,8 @@ bool UnwrappedLineParser::tryToParseLambda() {
return true;
}
}
- FormatTok->Type = TT_LambdaLBrace;
- LSquare.Type = TT_LambdaLSquare;
+ FormatTok->setType(TT_LambdaLBrace);
+ LSquare.setType(TT_LambdaLSquare);
parseChildBlock();
return true;
}
@@ -1533,7 +1713,7 @@ void UnwrappedLineParser::tryToParseJSFunction() {
// Consume * (generator function). Treat it like C++'s overloaded operators.
if (FormatTok->is(tok::star)) {
- FormatTok->Type = TT_OverloadedOperator;
+ FormatTok->setType(TT_OverloadedOperator);
nextToken();
}
@@ -1578,12 +1758,24 @@ bool UnwrappedLineParser::tryToParseBracedList() {
}
bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
+ bool IsEnum,
tok::TokenKind ClosingBraceKind) {
bool HasError = false;
// FIXME: Once we have an expression parser in the UnwrappedLineParser,
// replace this by using parseAssigmentExpression() inside.
do {
+ if (Style.isCSharp()) {
+ if (FormatTok->is(TT_JsFatArrow)) {
+ nextToken();
+ // Fat arrows can be followed by simple expressions or by child blocks
+ // in curly braces.
+ if (FormatTok->is(tok::l_brace)) {
+ parseChildBlock();
+ continue;
+ }
+ }
+ }
if (Style.Language == FormatStyle::LK_JavaScript) {
if (FormatTok->is(Keywords.kw_function) ||
FormatTok->startsSequence(Keywords.kw_async, Keywords.kw_function)) {
@@ -1607,6 +1799,8 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
}
}
if (FormatTok->Tok.getKind() == ClosingBraceKind) {
+ if (IsEnum && !Style.AllowShortEnumsOnASingleLine)
+ addUnwrappedLine();
nextToken();
return !HasError;
}
@@ -1618,7 +1812,10 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
}
break;
case tok::l_square:
- tryToParseLambda();
+ if (Style.isCSharp())
+ parseSquare();
+ else
+ tryToParseLambda();
break;
case tok::l_paren:
parseParens();
@@ -1640,7 +1837,7 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
case tok::less:
if (Style.Language == FormatStyle::LK_Proto) {
nextToken();
- parseBracedList(/*ContinueOnSemicolons=*/false,
+ parseBracedList(/*ContinueOnSemicolons=*/false, /*IsEnum=*/false,
/*ClosingBraceKind=*/tok::greater);
} else {
nextToken();
@@ -1662,6 +1859,8 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
break;
case tok::comma:
nextToken();
+ if (IsEnum && !Style.AllowShortEnumsOnASingleLine)
+ addUnwrappedLine();
break;
default:
nextToken();
@@ -1768,6 +1967,9 @@ void UnwrappedLineParser::parseIfThenElse() {
nextToken();
if (FormatTok->Tok.is(tok::l_paren))
parseParens();
+ // handle [[likely]] / [[unlikely]]
+ if (FormatTok->is(tok::l_square) && tryToParseSimpleAttribute())
+ parseSquare();
bool NeedsUnwrappedLine = false;
if (FormatTok->Tok.is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Style, Line->Level);
@@ -1784,6 +1986,9 @@ void UnwrappedLineParser::parseIfThenElse() {
}
if (FormatTok->Tok.is(tok::kw_else)) {
nextToken();
+ // handle [[likely]] / [[unlikely]]
+ if (FormatTok->Tok.is(tok::l_square) && tryToParseSimpleAttribute())
+ parseSquare();
if (FormatTok->Tok.is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Style, Line->Level);
parseBlock(/*MustBeDeclaration=*/false);
@@ -1810,11 +2015,20 @@ void UnwrappedLineParser::parseTryCatch() {
if (FormatTok->is(tok::colon)) {
// We are in a function try block, what comes is an initializer list.
nextToken();
+
+ // In case identifiers were removed by clang-tidy, what might follow is
+ // multiple commas in sequence - before the first identifier.
+ while (FormatTok->is(tok::comma))
+ nextToken();
+
while (FormatTok->is(tok::identifier)) {
nextToken();
if (FormatTok->is(tok::l_paren))
parseParens();
- if (FormatTok->is(tok::comma))
+
+ // In case identifiers were removed by clang-tidy, what might follow is
+ // multiple commas in sequence - after the first identifier.
+ while (FormatTok->is(tok::comma))
nextToken();
}
}
@@ -1898,7 +2112,7 @@ void UnwrappedLineParser::parseNamespace() {
DeclarationScopeStack.size() > 1);
parseBlock(/*MustBeDeclaration=*/true, AddLevel);
// Munch the semicolon after a namespace. This is more common than one would
- // think. Puttin the semicolon into its own line is very ugly.
+ // think. Putting the semicolon into its own line is very ugly.
if (FormatTok->Tok.is(tok::semi))
nextToken();
addUnwrappedLine();
@@ -1909,6 +2123,19 @@ void UnwrappedLineParser::parseNamespace() {
void UnwrappedLineParser::parseNew() {
assert(FormatTok->is(tok::kw_new) && "'new' expected");
nextToken();
+
+ if (Style.isCSharp()) {
+ do {
+ if (FormatTok->is(tok::l_brace))
+ parseBracedList();
+
+ if (FormatTok->isOneOf(tok::semi, tok::comma))
+ return;
+
+ nextToken();
+ } while (!eof());
+ }
+
if (Style.Language != FormatStyle::LK_Java)
return;
@@ -1959,7 +2186,7 @@ void UnwrappedLineParser::parseDoWhile() {
if (FormatTok->Tok.is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Style, Line->Level);
parseBlock(/*MustBeDeclaration=*/false);
- if (Style.BraceWrapping.IndentBraces)
+ if (Style.BraceWrapping.BeforeWhile)
addUnwrappedLine();
} else {
addUnwrappedLine();
@@ -1985,15 +2212,21 @@ void UnwrappedLineParser::parseLabel(bool LeftAlignLabel) {
--Line->Level;
if (LeftAlignLabel)
Line->Level = 0;
- if (CommentsBeforeNextToken.empty() && FormatTok->Tok.is(tok::l_brace)) {
+ if (!Style.IndentCaseBlocks && CommentsBeforeNextToken.empty() &&
+ FormatTok->Tok.is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Line->Level,
Style.BraceWrapping.AfterCaseLabel,
Style.BraceWrapping.IndentBraces);
parseBlock(/*MustBeDeclaration=*/false);
if (FormatTok->Tok.is(tok::kw_break)) {
if (Style.BraceWrapping.AfterControlStatement ==
- FormatStyle::BWACS_Always)
+ FormatStyle::BWACS_Always) {
addUnwrappedLine();
+ if (!Style.IndentCaseBlocks &&
+ Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths) {
+ Line->Level++;
+ }
+ }
parseStructuralElement();
}
addUnwrappedLine();
@@ -2097,9 +2330,18 @@ bool UnwrappedLineParser::parseEnum() {
return true;
}
+ if (!Style.AllowShortEnumsOnASingleLine)
+ addUnwrappedLine();
// Parse enum body.
nextToken();
- bool HasError = !parseBracedList(/*ContinueOnSemicolons=*/true);
+ if (!Style.AllowShortEnumsOnASingleLine) {
+ addUnwrappedLine();
+ Line->Level += 1;
+ }
+ bool HasError = !parseBracedList(/*ContinueOnSemicolons=*/true,
+ /*IsEnum=*/true);
+ if (!Style.AllowShortEnumsOnASingleLine)
+ Line->Level -= 1;
if (HasError) {
if (FormatTok->is(tok::semi))
nextToken();
@@ -2112,6 +2354,51 @@ bool UnwrappedLineParser::parseEnum() {
// "} n, m;" will end up in one unwrapped line.
}
+namespace {
+// A class used to set and restore the Token position when peeking
+// ahead in the token source.
+class ScopedTokenPosition {
+ unsigned StoredPosition;
+ FormatTokenSource *Tokens;
+
+public:
+ ScopedTokenPosition(FormatTokenSource *Tokens) : Tokens(Tokens) {
+ assert(Tokens && "Tokens expected to not be null");
+ StoredPosition = Tokens->getPosition();
+ }
+
+ ~ScopedTokenPosition() { Tokens->setPosition(StoredPosition); }
+};
+} // namespace
+
+// Look to see if we have [[ by looking ahead; if
+// it's not there, rewind to the original position.
+bool UnwrappedLineParser::tryToParseSimpleAttribute() {
+ ScopedTokenPosition AutoPosition(Tokens);
+ FormatToken *Tok = Tokens->getNextToken();
+ // We already read the first [, check for the second.
+ if (Tok && !Tok->is(tok::l_square)) {
+ return false;
+ }
+ // Double check that the attribute is just something
+ // fairly simple.
+ while (Tok) {
+ if (Tok->is(tok::r_square)) {
+ break;
+ }
+ Tok = Tokens->getNextToken();
+ }
+ Tok = Tokens->getNextToken();
+ if (Tok && !Tok->is(tok::r_square)) {
+ return false;
+ }
+ Tok = Tokens->getNextToken();
+ if (Tok && Tok->is(tok::semi)) {
+ return false;
+ }
+ return true;
+}
+
void UnwrappedLineParser::parseJavaEnumBody() {
// Determine whether the enum is simple, i.e. does not have a semicolon or
// constants with class bodies. Simple enums can be formatted like braced
@@ -2181,9 +2468,10 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
// The actual identifier can be a nested name specifier, and in macros
// it is often token-pasted.
+ // An [[attribute]] can be before the identifier.
while (FormatTok->isOneOf(tok::identifier, tok::coloncolon, tok::hashhash,
tok::kw___attribute, tok::kw___declspec,
- tok::kw_alignas) ||
+ tok::kw_alignas, tok::l_square, tok::r_square) ||
((Style.Language == FormatStyle::LK_Java ||
Style.Language == FormatStyle::LK_JavaScript) &&
FormatTok->isOneOf(tok::period, tok::comma))) {
@@ -2203,8 +2491,16 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
FormatTok->TokenText != FormatTok->TokenText.upper();
nextToken();
// We can have macros or attributes in between 'class' and the class name.
- if (!IsNonMacroIdentifier && FormatTok->Tok.is(tok::l_paren))
- parseParens();
+ if (!IsNonMacroIdentifier) {
+ if (FormatTok->Tok.is(tok::l_paren)) {
+ parseParens();
+ } else if (FormatTok->is(TT_AttributeSquare)) {
+ parseSquare();
+ // Consume the closing TT_AttributeSquare.
+ if (FormatTok->Next && FormatTok->is(TT_AttributeSquare))
+ nextToken();
+ }
+ }
}
// Note that parsing away template declarations here leads to incorrectly
@@ -2226,6 +2522,12 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
}
if (FormatTok->Tok.is(tok::semi))
return;
+ if (Style.isCSharp() && FormatTok->is(Keywords.kw_where)) {
+ addUnwrappedLine();
+ nextToken();
+ parseCSharpGenericTypeConstraint();
+ break;
+ }
nextToken();
}
}
@@ -2451,8 +2753,8 @@ LLVM_ATTRIBUTE_UNUSED static void printDebugInfo(const UnwrappedLine &Line,
E = Line.Tokens.end();
I != E; ++I) {
llvm::dbgs() << I->Tok->Tok.getName() << "["
- << "T=" << I->Tok->Type << ", OC=" << I->Tok->OriginalColumn
- << "] ";
+ << "T=" << I->Tok->getType()
+ << ", OC=" << I->Tok->OriginalColumn << "] ";
}
for (std::list<UnwrappedLineNode>::const_iterator I = Line.Tokens.begin(),
E = Line.Tokens.end();
@@ -2723,18 +3025,19 @@ void UnwrappedLineParser::readToken(int LevelDifference) {
flushComments(isOnNewLine(*FormatTok));
parsePPDirective();
}
- while (FormatTok->Type == TT_ConflictStart ||
- FormatTok->Type == TT_ConflictEnd ||
- FormatTok->Type == TT_ConflictAlternative) {
- if (FormatTok->Type == TT_ConflictStart) {
+ while (FormatTok->getType() == TT_ConflictStart ||
+ FormatTok->getType() == TT_ConflictEnd ||
+ FormatTok->getType() == TT_ConflictAlternative) {
+ if (FormatTok->getType() == TT_ConflictStart) {
conditionalCompilationStart(/*Unreachable=*/false);
- } else if (FormatTok->Type == TT_ConflictAlternative) {
+ } else if (FormatTok->getType() == TT_ConflictAlternative) {
conditionalCompilationAlternative();
- } else if (FormatTok->Type == TT_ConflictEnd) {
+ } else if (FormatTok->getType() == TT_ConflictEnd) {
conditionalCompilationEnd();
}
FormatTok = Tokens->getNextToken();
FormatTok->MustBreakBefore = true;
+ FormatTok->MustBreakAlignBefore = true;
}
if (!PPStack.empty() && (PPStack.back().Kind == PP_Unreachable) &&
@@ -2759,6 +3062,7 @@ void UnwrappedLineParser::pushToken(FormatToken *Tok) {
Line->Tokens.push_back(UnwrappedLineNode(Tok));
if (MustBreakBeforeNextToken) {
Line->Tokens.back().Tok->MustBreakBefore = true;
+ Line->Tokens.back().Tok->MustBreakAlignBefore = true;
MustBreakBeforeNextToken = false;
}
}
diff --git a/clang/lib/Format/UnwrappedLineParser.h b/clang/lib/Format/UnwrappedLineParser.h
index 5d9bafc429a7..8b3aa4c84edb 100644
--- a/clang/lib/Format/UnwrappedLineParser.h
+++ b/clang/lib/Format/UnwrappedLineParser.h
@@ -98,7 +98,7 @@ private:
void readTokenWithJavaScriptASI();
void parseStructuralElement();
bool tryToParseBracedList();
- bool parseBracedList(bool ContinueOnSemicolons = false,
+ bool parseBracedList(bool ContinueOnSemicolons = false, bool IsEnum = false,
tok::TokenKind ClosingBraceKind = tok::r_brace);
void parseParens();
void parseSquare(bool LambdaIntroducer = false);
@@ -125,9 +125,16 @@ private:
bool parseObjCProtocol();
void parseJavaScriptEs6ImportExport();
void parseStatementMacro();
+ void parseCSharpAttribute();
+ // Parse a C# generic type constraint: `where T : IComparable<T>`.
+ // See:
+ // https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/keywords/where-generic-type-constraint
+ void parseCSharpGenericTypeConstraint();
bool tryToParseLambda();
bool tryToParseLambdaIntroducer();
+ bool tryToParsePropertyAccessor();
void tryToParseJSFunction();
+ bool tryToParseSimpleAttribute();
void addUnwrappedLine();
bool eof() const;
// LevelDifference is the difference of levels after and before the current
diff --git a/clang/lib/Format/WhitespaceManager.cpp b/clang/lib/Format/WhitespaceManager.cpp
index 5a44500d355f..32e0b685ea0f 100644
--- a/clang/lib/Format/WhitespaceManager.cpp
+++ b/clang/lib/Format/WhitespaceManager.cpp
@@ -30,28 +30,29 @@ WhitespaceManager::Change::Change(const FormatToken &Tok,
int Spaces, unsigned StartOfTokenColumn,
unsigned NewlinesBefore,
StringRef PreviousLinePostfix,
- StringRef CurrentLinePrefix,
+ StringRef CurrentLinePrefix, bool IsAligned,
bool ContinuesPPDirective, bool IsInsideToken)
: Tok(&Tok), CreateReplacement(CreateReplacement),
OriginalWhitespaceRange(OriginalWhitespaceRange),
StartOfTokenColumn(StartOfTokenColumn), NewlinesBefore(NewlinesBefore),
PreviousLinePostfix(PreviousLinePostfix),
- CurrentLinePrefix(CurrentLinePrefix),
+ CurrentLinePrefix(CurrentLinePrefix), IsAligned(IsAligned),
ContinuesPPDirective(ContinuesPPDirective), Spaces(Spaces),
IsInsideToken(IsInsideToken), IsTrailingComment(false), TokenLength(0),
PreviousEndOfTokenColumn(0), EscapedNewlineColumn(0),
- StartOfBlockComment(nullptr), IndentationOffset(0) {}
+ StartOfBlockComment(nullptr), IndentationOffset(0), ConditionalsLevel(0) {
+}
void WhitespaceManager::replaceWhitespace(FormatToken &Tok, unsigned Newlines,
unsigned Spaces,
unsigned StartOfTokenColumn,
- bool InPPDirective) {
+ bool IsAligned, bool InPPDirective) {
if (Tok.Finalized)
return;
Tok.Decision = (Newlines > 0) ? FD_Break : FD_Continue;
Changes.push_back(Change(Tok, /*CreateReplacement=*/true, Tok.WhitespaceRange,
Spaces, StartOfTokenColumn, Newlines, "", "",
- InPPDirective && !Tok.IsFirst,
+ IsAligned, InPPDirective && !Tok.IsFirst,
/*IsInsideToken=*/false));
}
@@ -62,7 +63,7 @@ void WhitespaceManager::addUntouchableToken(const FormatToken &Tok,
Changes.push_back(Change(Tok, /*CreateReplacement=*/false,
Tok.WhitespaceRange, /*Spaces=*/0,
Tok.OriginalColumn, Tok.NewlinesBefore, "", "",
- InPPDirective && !Tok.IsFirst,
+ /*IsAligned=*/false, InPPDirective && !Tok.IsFirst,
/*IsInsideToken=*/false));
}
@@ -82,7 +83,8 @@ void WhitespaceManager::replaceWhitespaceInToken(
Change(Tok, /*CreateReplacement=*/true,
SourceRange(Start, Start.getLocWithOffset(ReplaceChars)), Spaces,
std::max(0, Spaces), Newlines, PreviousPostfix, CurrentPrefix,
- InPPDirective && !Tok.IsFirst, /*IsInsideToken=*/true));
+ /*IsAligned=*/true, InPPDirective && !Tok.IsFirst,
+ /*IsInsideToken=*/true));
}
const tooling::Replacements &WhitespaceManager::generateReplacements() {
@@ -93,7 +95,9 @@ const tooling::Replacements &WhitespaceManager::generateReplacements() {
calculateLineBreakInformation();
alignConsecutiveMacros();
alignConsecutiveDeclarations();
+ alignConsecutiveBitFields();
alignConsecutiveAssignments();
+ alignChainedConditionals();
alignTrailingComments();
alignEscapedNewlines();
generateChanges();
@@ -226,6 +230,33 @@ void WhitespaceManager::calculateLineBreakInformation() {
LastBlockComment = nullptr;
}
}
+
+ // Compute conditional nesting level
+ // Level is increased for each conditional, unless this conditional continues
+ // a chain of conditional, i.e. starts immediately after the colon of another
+ // conditional.
+ SmallVector<bool, 16> ScopeStack;
+ int ConditionalsLevel = 0;
+ for (auto &Change : Changes) {
+ for (unsigned i = 0, e = Change.Tok->FakeLParens.size(); i != e; ++i) {
+ bool isNestedConditional =
+ Change.Tok->FakeLParens[e - 1 - i] == prec::Conditional &&
+ !(i == 0 && Change.Tok->Previous &&
+ Change.Tok->Previous->is(TT_ConditionalExpr) &&
+ Change.Tok->Previous->is(tok::colon));
+ if (isNestedConditional)
+ ++ConditionalsLevel;
+ ScopeStack.push_back(isNestedConditional);
+ }
+
+ Change.ConditionalsLevel = ConditionalsLevel;
+
+ for (unsigned i = Change.Tok->FakeRParens; i > 0 && ScopeStack.size();
+ --i) {
+ if (ScopeStack.pop_back_val())
+ --ConditionalsLevel;
+ }
+ }
}
// Align a single sequence of tokens, see AlignTokens below.
@@ -247,6 +278,7 @@ AlignTokenSequence(unsigned Start, unsigned End, unsigned Column, F &&Matches,
// double z);
// In the above example, we need to take special care to ensure that
// 'double z' is indented along with its owning function 'b'.
+ // Special handling is required for 'nested' ternary operators.
SmallVector<unsigned, 16> ScopeStack;
for (unsigned i = Start; i != End; ++i) {
@@ -287,7 +319,10 @@ AlignTokenSequence(unsigned Start, unsigned End, unsigned Column, F &&Matches,
unsigned ScopeStart = ScopeStack.back();
if (Changes[ScopeStart - 1].Tok->is(TT_FunctionDeclarationName) ||
(ScopeStart > Start + 1 &&
- Changes[ScopeStart - 2].Tok->is(TT_FunctionDeclarationName)))
+ Changes[ScopeStart - 2].Tok->is(TT_FunctionDeclarationName)) ||
+ Changes[i].Tok->is(TT_ConditionalExpr) ||
+ (Changes[i].Tok->Previous &&
+ Changes[i].Tok->Previous->is(TT_ConditionalExpr)))
Changes[i].Spaces += Shift;
}
@@ -340,7 +375,7 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
// abort when we hit any token in a higher scope than the starting one.
auto IndentAndNestingLevel = StartAt < Changes.size()
? Changes[StartAt].indentAndNestingLevel()
- : std::pair<unsigned, unsigned>(0, 0);
+ : std::tuple<unsigned, unsigned, unsigned>();
// Keep track of the number of commas before the matching tokens, we will only
// align a sequence of matching tokens if they are preceded by the same number
@@ -376,9 +411,11 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
if (Changes[i].NewlinesBefore != 0) {
CommasBeforeMatch = 0;
EndOfSequence = i;
- // If there is a blank line, or if the last line didn't contain any
- // matching token, the sequence ends here.
- if (Changes[i].NewlinesBefore > 1 || !FoundMatchOnLine)
+ // If there is a blank line, there is a forced-align-break (eg,
+ // preprocessor), or if the last line didn't contain any matching token,
+ // the sequence ends here.
+ if (Changes[i].NewlinesBefore > 1 ||
+ Changes[i].Tok->MustBreakAlignBefore || !FoundMatchOnLine)
AlignCurrentSequence();
FoundMatchOnLine = false;
@@ -408,9 +445,17 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
StartOfSequence = i;
unsigned ChangeMinColumn = Changes[i].StartOfTokenColumn;
- int LineLengthAfter = -Changes[i].Spaces;
- for (unsigned j = i; j != e && Changes[j].NewlinesBefore == 0; ++j)
- LineLengthAfter += Changes[j].Spaces + Changes[j].TokenLength;
+ int LineLengthAfter = Changes[i].TokenLength;
+ for (unsigned j = i + 1; j != e && Changes[j].NewlinesBefore == 0; ++j) {
+ LineLengthAfter += Changes[j].Spaces;
+ // Changes are generally 1:1 with the tokens, but a change could also be
+ // inside of a token, in which case it's counted more than once: once for
+ // the whitespace surrounding the token (!IsInsideToken) and once for
+ // each whitespace change within it (IsInsideToken).
+ // Therefore, changes inside of a token should only count the space.
+ if (!Changes[j].IsInsideToken)
+ LineLengthAfter += Changes[j].TokenLength;
+ }
unsigned ChangeMaxColumn = Style.ColumnLimit - LineLengthAfter;
// If we are restricted by the maximum column width, end the sequence.
@@ -573,6 +618,26 @@ void WhitespaceManager::alignConsecutiveAssignments() {
Changes, /*StartAt=*/0);
}
+void WhitespaceManager::alignConsecutiveBitFields() {
+ if (!Style.AlignConsecutiveBitFields)
+ return;
+
+ AlignTokens(
+ Style,
+ [&](Change const &C) {
+ // Do not align on ':' that is first on a line.
+ if (C.NewlinesBefore > 0)
+ return false;
+
+ // Do not align on ':' that is last on a line.
+ if (&C != &Changes.back() && (&C + 1)->NewlinesBefore > 0)
+ return false;
+
+ return C.Tok->is(TT_BitFieldColon);
+ },
+ Changes, /*StartAt=*/0);
+}
+
void WhitespaceManager::alignConsecutiveDeclarations() {
if (!Style.AlignConsecutiveDeclarations)
return;
@@ -607,6 +672,50 @@ void WhitespaceManager::alignConsecutiveDeclarations() {
Changes, /*StartAt=*/0);
}
+void WhitespaceManager::alignChainedConditionals() {
+ if (Style.BreakBeforeTernaryOperators) {
+ AlignTokens(
+ Style,
+ [](Change const &C) {
+ // Align question operators and last colon
+ return C.Tok->is(TT_ConditionalExpr) &&
+ ((C.Tok->is(tok::question) && !C.NewlinesBefore) ||
+ (C.Tok->is(tok::colon) && C.Tok->Next &&
+ (C.Tok->Next->FakeLParens.size() == 0 ||
+ C.Tok->Next->FakeLParens.back() != prec::Conditional)));
+ },
+ Changes, /*StartAt=*/0);
+ } else {
+ static auto AlignWrappedOperand = [](Change const &C) {
+ auto Previous = C.Tok->getPreviousNonComment(); // Previous;
+ return C.NewlinesBefore && Previous && Previous->is(TT_ConditionalExpr) &&
+ (Previous->is(tok::question) ||
+ (Previous->is(tok::colon) &&
+ (C.Tok->FakeLParens.size() == 0 ||
+ C.Tok->FakeLParens.back() != prec::Conditional)));
+ };
+ // Ensure we keep alignment of wrapped operands with non-wrapped operands
+ // Since we actually align the operators, the wrapped operands need the
+ // extra offset to be properly aligned.
+ for (Change &C : Changes) {
+ if (AlignWrappedOperand(C))
+ C.StartOfTokenColumn -= 2;
+ }
+ AlignTokens(
+ Style,
+ [this](Change const &C) {
+ // Align question operators if next operand is not wrapped, as
+ // well as wrapped operands after question operator or last
+ // colon in conditional sequence
+ return (C.Tok->is(TT_ConditionalExpr) && C.Tok->is(tok::question) &&
+ &C != &Changes.back() && (&C + 1)->NewlinesBefore == 0 &&
+ !(&C + 1)->IsTrailingComment) ||
+ AlignWrappedOperand(C);
+ },
+ Changes, /*StartAt=*/0);
+ }
+}
+
void WhitespaceManager::alignTrailingComments() {
unsigned MinColumn = 0;
unsigned MaxColumn = UINT_MAX;
@@ -617,6 +726,8 @@ void WhitespaceManager::alignTrailingComments() {
if (Changes[i].StartOfBlockComment)
continue;
Newlines += Changes[i].NewlinesBefore;
+ if (Changes[i].Tok->MustBreakAlignBefore)
+ BreakBeforeNext = true;
if (!Changes[i].IsTrailingComment)
continue;
@@ -761,9 +872,9 @@ void WhitespaceManager::generateChanges() {
C.EscapedNewlineColumn);
else
appendNewlineText(ReplacementText, C.NewlinesBefore);
- appendIndentText(ReplacementText, C.Tok->IndentLevel,
- std::max(0, C.Spaces),
- C.StartOfTokenColumn - std::max(0, C.Spaces));
+ appendIndentText(
+ ReplacementText, C.Tok->IndentLevel, std::max(0, C.Spaces),
+ C.StartOfTokenColumn - std::max(0, C.Spaces), C.IsAligned);
ReplacementText.append(C.CurrentLinePrefix);
storeReplacement(C.OriginalWhitespaceRange, ReplacementText);
}
@@ -809,7 +920,8 @@ void WhitespaceManager::appendEscapedNewlineText(
void WhitespaceManager::appendIndentText(std::string &Text,
unsigned IndentLevel, unsigned Spaces,
- unsigned WhitespaceStartColumn) {
+ unsigned WhitespaceStartColumn,
+ bool IsAligned) {
switch (Style.UseTab) {
case FormatStyle::UT_Never:
Text.append(Spaces, ' ');
@@ -838,28 +950,39 @@ void WhitespaceManager::appendIndentText(std::string &Text,
case FormatStyle::UT_ForIndentation:
if (WhitespaceStartColumn == 0) {
unsigned Indentation = IndentLevel * Style.IndentWidth;
- // This happens, e.g. when a line in a block comment is indented less than
- // the first one.
- if (Indentation > Spaces)
- Indentation = Spaces;
- if (Style.TabWidth) {
- unsigned Tabs = Indentation / Style.TabWidth;
- Text.append(Tabs, '\t');
- Spaces -= Tabs * Style.TabWidth;
- }
+ Spaces = appendTabIndent(Text, Spaces, Indentation);
}
Text.append(Spaces, ' ');
break;
case FormatStyle::UT_ForContinuationAndIndentation:
- if (WhitespaceStartColumn == 0 && Style.TabWidth) {
- unsigned Tabs = Spaces / Style.TabWidth;
- Text.append(Tabs, '\t');
- Spaces -= Tabs * Style.TabWidth;
+ if (WhitespaceStartColumn == 0)
+ Spaces = appendTabIndent(Text, Spaces, Spaces);
+ Text.append(Spaces, ' ');
+ break;
+ case FormatStyle::UT_AlignWithSpaces:
+ if (WhitespaceStartColumn == 0) {
+ unsigned Indentation =
+ IsAligned ? IndentLevel * Style.IndentWidth : Spaces;
+ Spaces = appendTabIndent(Text, Spaces, Indentation);
}
Text.append(Spaces, ' ');
break;
}
}
+unsigned WhitespaceManager::appendTabIndent(std::string &Text, unsigned Spaces,
+ unsigned Indentation) {
+ // This happens, e.g. when a line in a block comment is indented less than the
+ // first one.
+ if (Indentation > Spaces)
+ Indentation = Spaces;
+ if (Style.TabWidth) {
+ unsigned Tabs = Indentation / Style.TabWidth;
+ Text.append(Tabs, '\t');
+ Spaces -= Tabs * Style.TabWidth;
+ }
+ return Spaces;
+}
+
} // namespace format
} // namespace clang
diff --git a/clang/lib/Format/WhitespaceManager.h b/clang/lib/Format/WhitespaceManager.h
index f47bf40204b3..1398a3aee2b8 100644
--- a/clang/lib/Format/WhitespaceManager.h
+++ b/clang/lib/Format/WhitespaceManager.h
@@ -19,6 +19,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
#include <string>
+#include <tuple>
namespace clang {
namespace format {
@@ -49,7 +50,7 @@ public:
/// this replacement. It is needed for determining how \p Spaces is turned
/// into tabs and spaces for some format styles.
void replaceWhitespace(FormatToken &Tok, unsigned Newlines, unsigned Spaces,
- unsigned StartOfTokenColumn,
+ unsigned StartOfTokenColumn, bool isAligned = false,
bool InPPDirective = false);
/// Adds information about an unchangeable token's whitespace.
@@ -109,7 +110,7 @@ public:
SourceRange OriginalWhitespaceRange, int Spaces,
unsigned StartOfTokenColumn, unsigned NewlinesBefore,
StringRef PreviousLinePostfix, StringRef CurrentLinePrefix,
- bool ContinuesPPDirective, bool IsInsideToken);
+ bool IsAligned, bool ContinuesPPDirective, bool IsInsideToken);
// The kind of the token whose whitespace this change replaces, or in which
// this change inserts whitespace.
@@ -125,6 +126,7 @@ public:
unsigned NewlinesBefore;
std::string PreviousLinePostfix;
std::string CurrentLinePrefix;
+ bool IsAligned;
bool ContinuesPPDirective;
// The number of spaces in front of the token or broken part of the token.
@@ -157,11 +159,16 @@ public:
const Change *StartOfBlockComment;
int IndentationOffset;
- // A combination of indent level and nesting level, which are used in
- // tandem to compute lexical scope, for the purposes of deciding
+ // Depth of conditionals. Computed from tracking fake parenthesis, except
+ // it does not increase the indent for "chained" conditionals.
+ int ConditionalsLevel;
+
+ // A combination of indent, nesting and conditionals levels, which are used
+ // in tandem to compute lexical scope, for the purposes of deciding
// when to stop consecutive alignment runs.
- std::pair<unsigned, unsigned> indentAndNestingLevel() const {
- return std::make_pair(Tok->IndentLevel, Tok->NestingLevel);
+ std::tuple<unsigned, unsigned, unsigned> indentAndNestingLevel() const {
+ return std::make_tuple(Tok->IndentLevel, Tok->NestingLevel,
+ ConditionalsLevel);
}
};
@@ -177,9 +184,15 @@ private:
/// Align consecutive assignments over all \c Changes.
void alignConsecutiveAssignments();
+ /// Align consecutive bitfields over all \c Changes.
+ void alignConsecutiveBitFields();
+
/// Align consecutive declarations over all \c Changes.
void alignConsecutiveDeclarations();
+ /// Align chained conditional operators over all \c Changes.
+ void alignChainedConditionals();
+
/// Align trailing comments over all \c Changes.
void alignTrailingComments();
@@ -204,7 +217,10 @@ private:
unsigned PreviousEndOfTokenColumn,
unsigned EscapedNewlineColumn);
void appendIndentText(std::string &Text, unsigned IndentLevel,
- unsigned Spaces, unsigned WhitespaceStartColumn);
+ unsigned Spaces, unsigned WhitespaceStartColumn,
+ bool IsAligned);
+ unsigned appendTabIndent(std::string &Text, unsigned Spaces,
+ unsigned Indentation);
SmallVector<Change, 16> Changes;
const SourceManager &SourceMgr;
diff --git a/clang/lib/Frontend/ASTConsumers.cpp b/clang/lib/Frontend/ASTConsumers.cpp
index 043b2541b8f8..a73cc8876d5d 100644
--- a/clang/lib/Frontend/ASTConsumers.cpp
+++ b/clang/lib/Frontend/ASTConsumers.cpp
@@ -36,10 +36,10 @@ namespace {
enum Kind { DumpFull, Dump, Print, None };
ASTPrinter(std::unique_ptr<raw_ostream> Out, Kind K,
ASTDumpOutputFormat Format, StringRef FilterString,
- bool DumpLookups = false)
+ bool DumpLookups = false, bool DumpDeclTypes = false)
: Out(Out ? *Out : llvm::outs()), OwnedOut(std::move(Out)),
OutputKind(K), OutputFormat(Format), FilterString(FilterString),
- DumpLookups(DumpLookups) {}
+ DumpLookups(DumpLookups), DumpDeclTypes(DumpDeclTypes) {}
void HandleTranslationUnit(ASTContext &Context) override {
TranslationUnitDecl *D = Context.getTranslationUnitDecl();
@@ -91,8 +91,22 @@ namespace {
} else if (OutputKind == Print) {
PrintingPolicy Policy(D->getASTContext().getLangOpts());
D->print(Out, Policy, /*Indentation=*/0, /*PrintInstantiation=*/true);
- } else if (OutputKind != None)
+ } else if (OutputKind != None) {
D->dump(Out, OutputKind == DumpFull, OutputFormat);
+ }
+
+ if (DumpDeclTypes) {
+ Decl *InnerD = D;
+ if (auto *TD = dyn_cast<TemplateDecl>(D))
+ InnerD = TD->getTemplatedDecl();
+
+ // FIXME: Support OutputFormat in type dumping.
+ // FIXME: Support combining -ast-dump-decl-types with -ast-dump-lookups.
+ if (auto *VD = dyn_cast<ValueDecl>(InnerD))
+ VD->getType().dump(Out, VD->getASTContext());
+ if (auto *TD = dyn_cast<TypeDecl>(InnerD))
+ TD->getTypeForDecl()->dump(Out, TD->getASTContext());
+ }
}
raw_ostream &Out;
@@ -111,6 +125,9 @@ namespace {
/// results will be output with a format determined by OutputKind. This is
/// incompatible with OutputKind == Print.
bool DumpLookups;
+
+ /// Whether to dump the type for each declaration dumped.
+ bool DumpDeclTypes;
};
class ASTDeclNodeLister : public ASTConsumer,
@@ -146,13 +163,13 @@ clang::CreateASTPrinter(std::unique_ptr<raw_ostream> Out,
std::unique_ptr<ASTConsumer>
clang::CreateASTDumper(std::unique_ptr<raw_ostream> Out, StringRef FilterString,
bool DumpDecls, bool Deserialize, bool DumpLookups,
- ASTDumpOutputFormat Format) {
+ bool DumpDeclTypes, ASTDumpOutputFormat Format) {
assert((DumpDecls || Deserialize || DumpLookups) && "nothing to dump");
- return std::make_unique<ASTPrinter>(std::move(Out),
- Deserialize ? ASTPrinter::DumpFull :
- DumpDecls ? ASTPrinter::Dump :
- ASTPrinter::None, Format,
- FilterString, DumpLookups);
+ return std::make_unique<ASTPrinter>(
+ std::move(Out),
+ Deserialize ? ASTPrinter::DumpFull
+ : DumpDecls ? ASTPrinter::Dump : ASTPrinter::None,
+ Format, FilterString, DumpLookups, DumpDeclTypes);
}
std::unique_ptr<ASTConsumer> clang::CreateASTDeclNodeLister() {
diff --git a/clang/lib/Frontend/ASTUnit.cpp b/clang/lib/Frontend/ASTUnit.cpp
index b3264952ff47..57d025b7c32e 100644
--- a/clang/lib/Frontend/ASTUnit.cpp
+++ b/clang/lib/Frontend/ASTUnit.cpp
@@ -224,7 +224,7 @@ struct ASTUnit::ASTWriterData {
};
void ASTUnit::clearFileLevelDecls() {
- llvm::DeleteContainerSeconds(FileDecls);
+ FileDecls.clear();
}
/// After failing to build a precompiled preamble (due to
@@ -784,7 +784,7 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
UserFilesAreVolatile);
AST->ModuleCache = new InMemoryModuleCache;
AST->HSOpts = std::make_shared<HeaderSearchOptions>();
- AST->HSOpts->ModuleFormat = PCHContainerRdr.getFormat();
+ AST->HSOpts->ModuleFormat = std::string(PCHContainerRdr.getFormat());
AST->HeaderInfo.reset(new HeaderSearch(AST->HSOpts,
AST->getSourceManager(),
AST->getDiagnostics(),
@@ -847,7 +847,7 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
return nullptr;
}
- AST->OriginalSourceFile = AST->Reader->getOriginalSourceFile();
+ AST->OriginalSourceFile = std::string(AST->Reader->getOriginalSourceFile());
PP.setCounterValue(Counter);
@@ -1131,7 +1131,8 @@ bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
CICleanup(Clang.get());
Clang->setInvocation(CCInvocation);
- OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].getFile();
+ OriginalSourceFile =
+ std::string(Clang->getFrontendOpts().Inputs[0].getFile());
// Set up diagnostics, capturing any diagnostics that would
// otherwise be dropped.
@@ -1260,13 +1261,13 @@ makeStandaloneDiagnostic(const LangOptions &LangOpts,
ASTUnit::StandaloneDiagnostic OutDiag;
OutDiag.ID = InDiag.getID();
OutDiag.Level = InDiag.getLevel();
- OutDiag.Message = InDiag.getMessage();
+ OutDiag.Message = std::string(InDiag.getMessage());
OutDiag.LocOffset = 0;
if (InDiag.getLocation().isInvalid())
return OutDiag;
const SourceManager &SM = InDiag.getLocation().getManager();
SourceLocation FileLoc = SM.getFileLoc(InDiag.getLocation());
- OutDiag.Filename = SM.getFilename(FileLoc);
+ OutDiag.Filename = std::string(SM.getFilename(FileLoc));
if (OutDiag.Filename.empty())
return OutDiag;
OutDiag.LocOffset = SM.getFileOffset(FileLoc);
@@ -1532,7 +1533,7 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
if (!ResourceFilesPath.empty()) {
// Override the resources path.
- CI->getHeaderSearchOpts().ResourceDir = ResourceFilesPath;
+ CI->getHeaderSearchOpts().ResourceDir = std::string(ResourceFilesPath);
}
AST->OnlyLocalDecls = OnlyLocalDecls;
AST->CaptureDiagnostics = CaptureDiagnostics;
@@ -1564,7 +1565,8 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
CICleanup(Clang.get());
Clang->setInvocation(std::move(CI));
- AST->OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].getFile();
+ AST->OriginalSourceFile =
+ std::string(Clang->getFrontendOpts().Inputs[0].getFile());
// Set up diagnostics, capturing any diagnostics that would
// otherwise be dropped.
@@ -1767,13 +1769,14 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
PPOpts.RetainExcludedConditionalBlocks = RetainExcludedConditionalBlocks;
// Override the resources path.
- CI->getHeaderSearchOpts().ResourceDir = ResourceFilesPath;
+ CI->getHeaderSearchOpts().ResourceDir = std::string(ResourceFilesPath);
CI->getFrontendOpts().SkipFunctionBodies =
SkipFunctionBodies == SkipFunctionBodiesScope::PreambleAndMainFile;
if (ModuleFormat)
- CI->getHeaderSearchOpts().ModuleFormat = ModuleFormat.getValue();
+ CI->getHeaderSearchOpts().ModuleFormat =
+ std::string(ModuleFormat.getValue());
// Create the AST unit.
std::unique_ptr<ASTUnit> AST;
@@ -2165,7 +2168,7 @@ void ASTUnit::CodeComplete(
assert(IncludeBriefComments == this->IncludeBriefCommentsInCodeCompletion);
- FrontendOpts.CodeCompletionAt.FileName = File;
+ FrontendOpts.CodeCompletionAt.FileName = std::string(File);
FrontendOpts.CodeCompletionAt.Line = Line;
FrontendOpts.CodeCompletionAt.Column = Column;
@@ -2185,7 +2188,8 @@ void ASTUnit::CodeComplete(
auto &Inv = *CCInvocation;
Clang->setInvocation(std::move(CCInvocation));
- OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].getFile();
+ OriginalSourceFile =
+ std::string(Clang->getFrontendOpts().Inputs[0].getFile());
// Set up diagnostics, capturing any diagnostics produced.
Clang->setDiagnostics(&Diag);
@@ -2432,9 +2436,9 @@ void ASTUnit::addFileLevelDecl(Decl *D) {
if (FID.isInvalid())
return;
- LocDeclsTy *&Decls = FileDecls[FID];
+ std::unique_ptr<LocDeclsTy> &Decls = FileDecls[FID];
if (!Decls)
- Decls = new LocDeclsTy();
+ Decls = std::make_unique<LocDeclsTy>();
std::pair<unsigned, Decl *> LocDecl(Offset, D);
diff --git a/clang/lib/Frontend/ChainedIncludesSource.cpp b/clang/lib/Frontend/ChainedIncludesSource.cpp
index dec281529b9e..1486adf70c3f 100644
--- a/clang/lib/Frontend/ChainedIncludesSource.cpp
+++ b/clang/lib/Frontend/ChainedIncludesSource.cpp
@@ -189,7 +189,7 @@ IntrusiveRefCntPtr<ExternalSemaSource> clang::createChainedIncludesSource(
Clang->getASTConsumer().GetASTDeserializationListener());
if (!Reader)
return nullptr;
- Clang->setModuleManager(Reader);
+ Clang->setASTReader(Reader);
Clang->getASTContext().setExternalSource(Reader);
}
diff --git a/clang/lib/Frontend/CompilerInstance.cpp b/clang/lib/Frontend/CompilerInstance.cpp
index 688f21dd0908..4613ed8d7f61 100644
--- a/clang/lib/Frontend/CompilerInstance.cpp
+++ b/clang/lib/Frontend/CompilerInstance.cpp
@@ -97,6 +97,10 @@ void CompilerInstance::setVerboseOutputStream(std::unique_ptr<raw_ostream> Value
void CompilerInstance::setTarget(TargetInfo *Value) { Target = Value; }
void CompilerInstance::setAuxTarget(TargetInfo *Value) { AuxTarget = Value; }
+llvm::vfs::FileSystem &CompilerInstance::getVirtualFileSystem() const {
+ return getFileManager().getVirtualFileSystem();
+}
+
void CompilerInstance::setFileManager(FileManager *Value) {
FileMgr = Value;
}
@@ -138,7 +142,7 @@ std::unique_ptr<Sema> CompilerInstance::takeSema() {
IntrusiveRefCntPtr<ASTReader> CompilerInstance::getASTReader() const {
return TheASTReader;
}
-void CompilerInstance::setModuleManager(IntrusiveRefCntPtr<ASTReader> Reader) {
+void CompilerInstance::setASTReader(IntrusiveRefCntPtr<ASTReader> Reader) {
assert(ModuleCache.get() == &Reader->getModuleManager().getModuleCache() &&
"Expected ASTReader to use the same PCM cache");
TheASTReader = std::move(Reader);
@@ -379,7 +383,7 @@ static void InitializeFileRemapping(DiagnosticsEngine &Diags,
void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
const PreprocessorOptions &PPOpts = getPreprocessorOpts();
- // The module manager holds a reference to the old preprocessor (if any).
+ // The AST reader holds a reference to the old preprocessor (if any).
TheASTReader.reset();
// Create the Preprocessor.
@@ -474,7 +478,7 @@ std::string CompilerInstance::getSpecificModuleCachePath() {
if (!SpecificModuleCache.empty() && !getHeaderSearchOpts().DisableModuleHash)
llvm::sys::path::append(SpecificModuleCache,
getInvocation().getModuleHash());
- return SpecificModuleCache.str();
+ return std::string(SpecificModuleCache.str());
}
// ASTContext
@@ -713,13 +717,13 @@ std::unique_ptr<llvm::raw_pwrite_stream> CompilerInstance::createOutputFile(
std::string OutFile, TempFile;
if (!OutputPath.empty()) {
- OutFile = OutputPath;
+ OutFile = std::string(OutputPath);
} else if (InFile == "-") {
OutFile = "-";
} else if (!Extension.empty()) {
SmallString<128> Path(InFile);
llvm::sys::path::replace_extension(Path, Extension);
- OutFile = Path.str();
+ OutFile = std::string(Path.str());
} else {
OutFile = "-";
}
@@ -774,7 +778,7 @@ std::unique_ptr<llvm::raw_pwrite_stream> CompilerInstance::createOutputFile(
if (!EC) {
OS.reset(new llvm::raw_fd_ostream(fd, /*shouldClose=*/true));
- OSFile = TempFile = TempPath.str();
+ OSFile = TempFile = std::string(TempPath.str());
}
// If we failed to create the temporary, fallback to writing to the file
// directly. This handles the corner case where we cannot write to the
@@ -811,17 +815,15 @@ std::unique_ptr<llvm::raw_pwrite_stream> CompilerInstance::createOutputFile(
// Initialization Utilities
bool CompilerInstance::InitializeSourceManager(const FrontendInputFile &Input){
- return InitializeSourceManager(
- Input, getDiagnostics(), getFileManager(), getSourceManager(),
- hasPreprocessor() ? &getPreprocessor().getHeaderSearchInfo() : nullptr,
- getDependencyOutputOpts(), getFrontendOpts());
+ return InitializeSourceManager(Input, getDiagnostics(), getFileManager(),
+ getSourceManager());
}
// static
-bool CompilerInstance::InitializeSourceManager(
- const FrontendInputFile &Input, DiagnosticsEngine &Diags,
- FileManager &FileMgr, SourceManager &SourceMgr, HeaderSearch *HS,
- DependencyOutputOptions &DepOpts, const FrontendOptions &Opts) {
+bool CompilerInstance::InitializeSourceManager(const FrontendInputFile &Input,
+ DiagnosticsEngine &Diags,
+ FileManager &FileMgr,
+ SourceManager &SourceMgr) {
SrcMgr::CharacteristicKind Kind =
Input.getKind().getFormat() == InputKind::ModuleMap
? Input.isSystem() ? SrcMgr::C_System_ModuleMap
@@ -923,10 +925,27 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
!getFrontendOpts().AuxTriple.empty()) {
auto TO = std::make_shared<TargetOptions>();
TO->Triple = llvm::Triple::normalize(getFrontendOpts().AuxTriple);
+ if (getFrontendOpts().AuxTargetCPU)
+ TO->CPU = getFrontendOpts().AuxTargetCPU.getValue();
+ if (getFrontendOpts().AuxTargetFeatures)
+ TO->FeaturesAsWritten = getFrontendOpts().AuxTargetFeatures.getValue();
TO->HostTriple = getTarget().getTriple().str();
setAuxTarget(TargetInfo::CreateTargetInfo(getDiagnostics(), TO));
}
+ if (!getTarget().hasStrictFP() && !getLangOpts().ExpStrictFP) {
+ if (getLangOpts().getFPRoundingMode() !=
+ llvm::RoundingMode::NearestTiesToEven) {
+ getDiagnostics().Report(diag::warn_fe_backend_unsupported_fp_rounding);
+ getLangOpts().setFPRoundingMode(llvm::RoundingMode::NearestTiesToEven);
+ }
+ if (getLangOpts().getFPExceptionMode() != LangOptions::FPE_Ignore) {
+ getDiagnostics().Report(diag::warn_fe_backend_unsupported_fp_exceptions);
+ getLangOpts().setFPExceptionMode(LangOptions::FPE_Ignore);
+ }
+ // FIXME: can we disable FEnvAccess?
+ }
+
// Inform the target of the language options.
//
// FIXME: We shouldn't need to do this, the target should be immutable once
@@ -1073,7 +1092,7 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
ImportingInstance.getInvocation().getLangOpts()->ModuleName;
// Note the name of the module we're building.
- Invocation->getLangOpts()->CurrentModule = ModuleName;
+ Invocation->getLangOpts()->CurrentModule = std::string(ModuleName);
// Make sure that the failed-module structure has been allocated in
// the importing instance, and propagate the pointer to the newly-created
@@ -1093,7 +1112,7 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
FrontendOpts.DisableFree = false;
FrontendOpts.GenerateGlobalModuleIndex = false;
FrontendOpts.BuildingImplicitModule = true;
- FrontendOpts.OriginalModuleMap = OriginalModuleMapFile;
+ FrontendOpts.OriginalModuleMap = std::string(OriginalModuleMapFile);
// Force implicitly-built modules to hash the content of the module file.
HSOpts.ModulesHashContent = true;
FrontendOpts.Inputs = {Input};
@@ -1568,7 +1587,7 @@ bool CompilerInstance::loadModuleFile(StringRef FileName) {
Stack.push_back(M);
while (!Stack.empty()) {
Module *Current = Stack.pop_back_val();
- if (Current->IsMissingRequirement) continue;
+ if (Current->IsUnimportable) continue;
Current->IsAvailable = true;
Stack.insert(Stack.end(),
Current->submodule_begin(), Current->submodule_end());
@@ -1630,10 +1649,10 @@ enum ModuleSource {
/// Select a source for loading the named module and compute the filename to
/// load it from.
-static ModuleSource
-selectModuleSource(Module *M, StringRef ModuleName, std::string &ModuleFilename,
- const std::map<std::string, std::string> &BuiltModules,
- HeaderSearch &HS) {
+static ModuleSource selectModuleSource(
+ Module *M, StringRef ModuleName, std::string &ModuleFilename,
+ const std::map<std::string, std::string, std::less<>> &BuiltModules,
+ HeaderSearch &HS) {
assert(ModuleFilename.empty() && "Already has a module source?");
// Check to see if the module has been built as part of this compilation
@@ -2077,7 +2096,7 @@ void CompilerInstance::createModuleFromSource(SourceLocation ImportLoc,
// Build the module, inheriting any modules that we've built locally.
if (compileModuleImpl(*this, ImportLoc, ModuleName, Input, StringRef(),
ModuleFileName, PreBuildStep, PostBuildStep)) {
- BuiltModules[ModuleName] = ModuleFileName.str();
+ BuiltModules[std::string(ModuleName)] = std::string(ModuleFileName.str());
llvm::sys::RemoveFileOnSignal(ModuleFileName);
}
}
diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp
index e1e59565083b..75d7cf5d26d3 100644
--- a/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/clang/lib/Frontend/CompilerInvocation.cpp
@@ -41,11 +41,13 @@
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Sema/CodeCompleteOptions.h"
+#include "clang/Serialization/ASTBitCodes.h"
#include "clang/Serialization/ModuleFileExtension.h"
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/CachedHashString.h"
+#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
@@ -116,6 +118,62 @@ CompilerInvocationBase::CompilerInvocationBase(const CompilerInvocationBase &X)
CompilerInvocationBase::~CompilerInvocationBase() = default;
//===----------------------------------------------------------------------===//
+// Normalizers
+//===----------------------------------------------------------------------===//
+
+#define SIMPLE_ENUM_VALUE_TABLE
+#include "clang/Driver/Options.inc"
+#undef SIMPLE_ENUM_VALUE_TABLE
+
+static llvm::Optional<unsigned> normalizeSimpleEnum(OptSpecifier Opt,
+ unsigned TableIndex,
+ const ArgList &Args,
+ DiagnosticsEngine &Diags) {
+ assert(TableIndex < SimpleEnumValueTablesSize);
+ const SimpleEnumValueTable &Table = SimpleEnumValueTables[TableIndex];
+
+ auto *Arg = Args.getLastArg(Opt);
+ if (!Arg)
+ return None;
+
+ StringRef ArgValue = Arg->getValue();
+ for (int I = 0, E = Table.Size; I != E; ++I)
+ if (ArgValue == Table.Table[I].Name)
+ return Table.Table[I].Value;
+
+ Diags.Report(diag::err_drv_invalid_value)
+ << Arg->getAsString(Args) << ArgValue;
+ return None;
+}
+
+static const char *denormalizeSimpleEnum(CompilerInvocation::StringAllocator SA,
+ unsigned TableIndex, unsigned Value) {
+ assert(TableIndex < SimpleEnumValueTablesSize);
+ const SimpleEnumValueTable &Table = SimpleEnumValueTables[TableIndex];
+ for (int I = 0, E = Table.Size; I != E; ++I)
+ if (Value == Table.Table[I].Value)
+ return Table.Table[I].Name;
+
+ llvm_unreachable("The simple enum value was not correctly defined in "
+ "the tablegen option description");
+}
+
+static const char *denormalizeString(CompilerInvocation::StringAllocator SA,
+ unsigned TableIndex,
+ const std::string &Value) {
+ return SA(Value);
+}
+
+static Optional<std::string> normalizeTriple(OptSpecifier Opt, int TableIndex,
+ const ArgList &Args,
+ DiagnosticsEngine &Diags) {
+ auto *Arg = Args.getLastArg(Opt);
+ if (!Arg)
+ return None;
+ return llvm::Triple::normalize(Arg->getValue());
+}
+
+//===----------------------------------------------------------------------===//
// Deserialization (from args)
//===----------------------------------------------------------------------===//
@@ -135,7 +193,7 @@ static unsigned getOptimizationLevel(ArgList &Args, InputKind IK,
assert(A->getOption().matches(options::OPT_O));
StringRef S(A->getValue());
- if (S == "s" || S == "z" || S.empty())
+ if (S == "s" || S == "z")
return llvm::CodeGenOpt::Default;
if (S == "g")
@@ -170,10 +228,12 @@ static void addDiagnosticArgs(ArgList &Args, OptSpecifier Group,
if (A->getOption().getKind() == Option::FlagClass) {
// The argument is a pure flag (such as OPT_Wall or OPT_Wdeprecated). Add
// its name (minus the "W" or "R" at the beginning) to the warning list.
- Diagnostics.push_back(A->getOption().getName().drop_front(1));
+ Diagnostics.push_back(
+ std::string(A->getOption().getName().drop_front(1)));
} else if (A->getOption().matches(GroupWithValue)) {
// This is -Wfoo= or -Rfoo=, where foo is the name of the diagnostic group.
- Diagnostics.push_back(A->getOption().getName().drop_front(1).rtrim("=-"));
+ Diagnostics.push_back(
+ std::string(A->getOption().getName().drop_front(1).rtrim("=-")));
} else {
// Otherwise, add its value (for OPT_W_Joined and similar).
for (const auto *Arg : A->getValues())
@@ -307,14 +367,16 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
Opts.visualizeExplodedGraphWithGraphViz =
Args.hasArg(OPT_analyzer_viz_egraph_graphviz);
- Opts.DumpExplodedGraphTo = Args.getLastArgValue(OPT_analyzer_dump_egraph);
+ Opts.DumpExplodedGraphTo =
+ std::string(Args.getLastArgValue(OPT_analyzer_dump_egraph));
Opts.NoRetryExhausted = Args.hasArg(OPT_analyzer_disable_retry_exhausted);
Opts.AnalyzerWerror = Args.hasArg(OPT_analyzer_werror);
Opts.AnalyzeAll = Args.hasArg(OPT_analyzer_opt_analyze_headers);
Opts.AnalyzerDisplayProgress = Args.hasArg(OPT_analyzer_display_progress);
Opts.AnalyzeNestedBlocks =
Args.hasArg(OPT_analyzer_opt_analyze_nested_blocks);
- Opts.AnalyzeSpecificFunction = Args.getLastArgValue(OPT_analyze_function);
+ Opts.AnalyzeSpecificFunction =
+ std::string(Args.getLastArgValue(OPT_analyze_function));
Opts.UnoptimizedCFG = Args.hasArg(OPT_analysis_UnoptimizedCFG);
Opts.TrimGraph = Args.hasArg(OPT_trim_egraph);
Opts.maxBlockVisitOnPath =
@@ -335,7 +397,8 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
SmallVector<StringRef, 16> CheckersAndPackages;
CheckerAndPackageList.split(CheckersAndPackages, ",");
for (const StringRef &CheckerOrPackage : CheckersAndPackages)
- Opts.CheckersAndPackages.emplace_back(CheckerOrPackage, IsEnabled);
+ Opts.CheckersAndPackages.emplace_back(std::string(CheckerOrPackage),
+ IsEnabled);
}
// Go through the analyzer configuration options.
@@ -372,7 +435,7 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
}
A->claim();
- Opts.Config[key] = val;
+ Opts.Config[key] = std::string(val);
}
}
@@ -394,7 +457,7 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
static StringRef getStringOption(AnalyzerOptions::ConfigTable &Config,
StringRef OptionName, StringRef DefaultVal) {
- return Config.insert({OptionName, DefaultVal}).first->second;
+ return Config.insert({OptionName, std::string(DefaultVal)}).first->second;
}
static void initOption(AnalyzerOptions::ConfigTable &Config,
@@ -521,36 +584,6 @@ static void ParseCommentArgs(CommentOptions &Opts, ArgList &Args) {
Opts.ParseAllComments = Args.hasArg(OPT_fparse_all_comments);
}
-static StringRef getCodeModel(ArgList &Args, DiagnosticsEngine &Diags) {
- if (Arg *A = Args.getLastArg(OPT_mcode_model)) {
- StringRef Value = A->getValue();
- if (Value == "small" || Value == "kernel" || Value == "medium" ||
- Value == "large" || Value == "tiny")
- return Value;
- Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Value;
- }
- return "default";
-}
-
-static llvm::Reloc::Model getRelocModel(ArgList &Args,
- DiagnosticsEngine &Diags) {
- if (Arg *A = Args.getLastArg(OPT_mrelocation_model)) {
- StringRef Value = A->getValue();
- auto RM = llvm::StringSwitch<llvm::Optional<llvm::Reloc::Model>>(Value)
- .Case("static", llvm::Reloc::Static)
- .Case("pic", llvm::Reloc::PIC_)
- .Case("ropi", llvm::Reloc::ROPI)
- .Case("rwpi", llvm::Reloc::RWPI)
- .Case("ropi-rwpi", llvm::Reloc::ROPI_RWPI)
- .Case("dynamic-no-pic", llvm::Reloc::DynamicNoPIC)
- .Default(None);
- if (RM.hasValue())
- return *RM;
- Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Value;
- }
- return llvm::Reloc::PIC_;
-}
-
/// Create a new Regex instance out of the string value in \p RpassArg.
/// It returns a pointer to the newly generated Regex instance.
static std::shared_ptr<llvm::Regex>
@@ -754,7 +787,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.setDebuggerTuning(static_cast<llvm::DebuggerKind>(Val));
}
Opts.DwarfVersion = getLastArgIntValue(Args, OPT_dwarf_version_EQ, 0, Diags);
- Opts.DebugColumnInfo = Args.hasArg(OPT_dwarf_column_info);
+ Opts.DebugColumnInfo = !Args.hasArg(OPT_gno_column_info);
Opts.EmitCodeView = Args.hasArg(OPT_gcodeview);
Opts.CodeViewGHash = Args.hasArg(OPT_gcodeview_ghash);
Opts.MacroDebugInfo = Args.hasArg(OPT_debug_info_macro);
@@ -762,19 +795,21 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.VirtualFunctionElimination =
Args.hasArg(OPT_fvirtual_function_elimination);
Opts.LTOVisibilityPublicStd = Args.hasArg(OPT_flto_visibility_public_std);
- Opts.SplitDwarfFile = Args.getLastArgValue(OPT_split_dwarf_file);
- Opts.SplitDwarfOutput = Args.getLastArgValue(OPT_split_dwarf_output);
+ Opts.SplitDwarfFile = std::string(Args.getLastArgValue(OPT_split_dwarf_file));
+ Opts.SplitDwarfOutput =
+ std::string(Args.getLastArgValue(OPT_split_dwarf_output));
Opts.SplitDwarfInlining = !Args.hasArg(OPT_fno_split_dwarf_inlining);
Opts.DebugTypeExtRefs = Args.hasArg(OPT_dwarf_ext_refs);
Opts.DebugExplicitImport = Args.hasArg(OPT_dwarf_explicit_import);
Opts.DebugFwdTemplateParams = Args.hasArg(OPT_debug_forward_template_params);
Opts.EmbedSource = Args.hasArg(OPT_gembed_source);
+ Opts.ForceDwarfFrameSection = Args.hasArg(OPT_fforce_dwarf_frame);
- Opts.ForceDwarfFrameSection =
- Args.hasFlag(OPT_fforce_dwarf_frame, OPT_fno_force_dwarf_frame, false);
-
- for (const auto &Arg : Args.getAllArgValues(OPT_fdebug_prefix_map_EQ))
- Opts.DebugPrefixMap.insert(StringRef(Arg).split('='));
+ for (const auto &Arg : Args.getAllArgValues(OPT_fdebug_prefix_map_EQ)) {
+ auto Split = StringRef(Arg).split('=');
+ Opts.DebugPrefixMap.insert(
+ {std::string(Split.first), std::string(Split.second)});
+ }
if (const Arg *A =
Args.getLastArg(OPT_emit_llvm_uselists, OPT_no_emit_llvm_uselists))
@@ -785,12 +820,13 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
const llvm::Triple::ArchType DebugEntryValueArchs[] = {
llvm::Triple::x86, llvm::Triple::x86_64, llvm::Triple::aarch64,
- llvm::Triple::arm, llvm::Triple::armeb};
+ llvm::Triple::arm, llvm::Triple::armeb, llvm::Triple::mips,
+ llvm::Triple::mipsel, llvm::Triple::mips64, llvm::Triple::mips64el};
llvm::Triple T(TargetOpts.Triple);
if (Opts.OptimizationLevel > 0 && Opts.hasReducedDebugInfo() &&
llvm::is_contained(DebugEntryValueArchs, T.getArch()))
- Opts.EnableDebugEntryValues = Args.hasArg(OPT_femit_debug_entry_values);
+ Opts.EmitCallSiteInfo = true;
Opts.DisableO0ImplyOptNone = Args.hasArg(OPT_disable_O0_optnone);
Opts.DisableRedZone = Args.hasArg(OPT_disable_red_zone);
@@ -805,10 +841,12 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.FineGrainedBitfieldAccesses =
Args.hasFlag(OPT_ffine_grained_bitfield_accesses,
OPT_fno_fine_grained_bitfield_accesses, false);
- Opts.DwarfDebugFlags = Args.getLastArgValue(OPT_dwarf_debug_flags);
- Opts.RecordCommandLine = Args.getLastArgValue(OPT_record_command_line);
+ Opts.DwarfDebugFlags =
+ std::string(Args.getLastArgValue(OPT_dwarf_debug_flags));
+ Opts.RecordCommandLine =
+ std::string(Args.getLastArgValue(OPT_record_command_line));
Opts.MergeAllConstants = Args.hasArg(OPT_fmerge_all_constants);
- Opts.NoCommon = Args.hasArg(OPT_fno_common);
+ Opts.NoCommon = !Args.hasArg(OPT_fcommon);
Opts.NoInlineLineTables = Args.hasArg(OPT_gno_inline_line_tables);
Opts.NoImplicitFloat = Args.hasArg(OPT_no_implicit_float);
Opts.OptimizeSize = getOptimizationLevelSize(Args);
@@ -823,7 +861,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.DisableIntegratedAS = Args.hasArg(OPT_fno_integrated_as);
Opts.Autolink = !Args.hasArg(OPT_fno_autolink);
- Opts.SampleProfileFile = Args.getLastArgValue(OPT_fprofile_sample_use_EQ);
+ Opts.SampleProfileFile =
+ std::string(Args.getLastArgValue(OPT_fprofile_sample_use_EQ));
Opts.DebugInfoForProfiling = Args.hasFlag(
OPT_fdebug_info_for_profiling, OPT_fno_debug_info_for_profiling, false);
Opts.DebugNameTable = static_cast<unsigned>(
@@ -836,13 +875,13 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
setPGOInstrumentor(Opts, Args, Diags);
Opts.InstrProfileOutput =
- Args.getLastArgValue(OPT_fprofile_instrument_path_EQ);
+ std::string(Args.getLastArgValue(OPT_fprofile_instrument_path_EQ));
Opts.ProfileInstrumentUsePath =
- Args.getLastArgValue(OPT_fprofile_instrument_use_path_EQ);
+ std::string(Args.getLastArgValue(OPT_fprofile_instrument_use_path_EQ));
if (!Opts.ProfileInstrumentUsePath.empty())
setPGOUseInstrumentor(Opts, Opts.ProfileInstrumentUsePath);
Opts.ProfileRemappingFile =
- Args.getLastArgValue(OPT_fprofile_remapping_file_EQ);
+ std::string(Args.getLastArgValue(OPT_fprofile_remapping_file_EQ));
if (!Opts.ProfileRemappingFile.empty() && !Opts.ExperimentalNewPassManager) {
Diags.Report(diag::err_drv_argument_only_allowed_with)
<< Args.getLastArg(OPT_fprofile_remapping_file_EQ)->getAsString(Args)
@@ -852,7 +891,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.CoverageMapping =
Args.hasFlag(OPT_fcoverage_mapping, OPT_fno_coverage_mapping, false);
Opts.DumpCoverageMapping = Args.hasArg(OPT_dump_coverage_mapping);
- Opts.AsmVerbose = Args.hasArg(OPT_masm_verbose);
+ Opts.AsmVerbose = !Args.hasArg(OPT_fno_verbose_asm);
Opts.PreserveAsmComments = !Args.hasArg(OPT_fno_preserve_as_comments);
Opts.AssumeSaneOperatorNew = !Args.hasArg(OPT_fno_assume_sane_operator_new);
Opts.ObjCAutoRefCountExceptions = Args.hasArg(OPT_fobjc_arc_exceptions);
@@ -861,7 +900,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Args.hasArg(OPT_fregister_global_dtors_with_atexit);
Opts.CXXCtorDtorAliases = Args.hasArg(OPT_mconstructor_aliases);
Opts.CodeModel = TargetOpts.CodeModel;
- Opts.DebugPass = Args.getLastArgValue(OPT_mdebug_pass);
+ Opts.DebugPass = std::string(Args.getLastArgValue(OPT_mdebug_pass));
// Handle -mframe-pointer option.
if (Arg *A = Args.getLastArg(OPT_mframe_pointer_EQ)) {
@@ -883,49 +922,30 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.setFramePointer(FP);
}
- // -pg may override -mframe-pointer
- // TODO: This should be merged into getFramePointerKind in Clang.cpp.
- if (Args.hasArg(OPT_pg))
- Opts.setFramePointer(CodeGenOptions::FramePointerKind::All);
-
Opts.DisableFree = Args.hasArg(OPT_disable_free);
Opts.DiscardValueNames = Args.hasArg(OPT_discard_value_names);
Opts.DisableTailCalls = Args.hasArg(OPT_mdisable_tail_calls);
Opts.NoEscapingBlockTailCalls =
Args.hasArg(OPT_fno_escaping_block_tail_calls);
- Opts.FloatABI = Args.getLastArgValue(OPT_mfloat_abi);
+ Opts.FloatABI = std::string(Args.getLastArgValue(OPT_mfloat_abi));
Opts.LessPreciseFPMAD = Args.hasArg(OPT_cl_mad_enable) ||
Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
Args.hasArg(OPT_cl_fast_relaxed_math);
- Opts.LimitFloatPrecision = Args.getLastArgValue(OPT_mlimit_float_precision);
- Opts.NoInfsFPMath = (Args.hasArg(OPT_menable_no_infinities) ||
- Args.hasArg(OPT_cl_finite_math_only) ||
- Args.hasArg(OPT_cl_fast_relaxed_math));
- Opts.NoNaNsFPMath = (Args.hasArg(OPT_menable_no_nans) ||
- Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
- Args.hasArg(OPT_cl_finite_math_only) ||
- Args.hasArg(OPT_cl_fast_relaxed_math));
- Opts.NoSignedZeros = (Args.hasArg(OPT_fno_signed_zeros) ||
- Args.hasArg(OPT_cl_no_signed_zeros) ||
- Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
- Args.hasArg(OPT_cl_fast_relaxed_math));
- Opts.Reassociate = Args.hasArg(OPT_mreassociate);
- Opts.FlushDenorm = Args.hasArg(OPT_cl_denorms_are_zero) ||
- (Args.hasArg(OPT_fcuda_is_device) &&
- Args.hasArg(OPT_fcuda_flush_denormals_to_zero));
+ Opts.LimitFloatPrecision =
+ std::string(Args.getLastArgValue(OPT_mlimit_float_precision));
Opts.CorrectlyRoundedDivSqrt =
Args.hasArg(OPT_cl_fp32_correctly_rounded_divide_sqrt);
Opts.UniformWGSize =
Args.hasArg(OPT_cl_uniform_work_group_size);
Opts.Reciprocals = Args.getAllArgValues(OPT_mrecip_EQ);
- Opts.ReciprocalMath = Args.hasArg(OPT_freciprocal_math);
- Opts.NoTrappingMath = Args.hasArg(OPT_fno_trapping_math);
Opts.StrictFloatCastOverflow =
!Args.hasArg(OPT_fno_strict_float_cast_overflow);
- Opts.NoZeroInitializedInBSS = Args.hasArg(OPT_mno_zero_initialized_in_bss);
+ Opts.NoZeroInitializedInBSS = Args.hasArg(OPT_fno_zero_initialized_in_bss);
Opts.NumRegisterParameters = getLastArgIntValue(Args, OPT_mregparm, 0, Diags);
Opts.NoExecStack = Args.hasArg(OPT_mno_exec_stack);
+ Opts.SmallDataLimit =
+ getLastArgIntValue(Args, OPT_msmall_data_limit, 0, Diags);
Opts.FatalWarnings = Args.hasArg(OPT_massembler_fatal_warnings);
Opts.NoWarn = Args.hasArg(OPT_massembler_no_warn);
Opts.EnableSegmentedStacks = Args.hasArg(OPT_split_stacks);
@@ -942,27 +962,31 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.StrictReturn = !Args.hasArg(OPT_fno_strict_return);
Opts.StrictVTablePointers = Args.hasArg(OPT_fstrict_vtable_pointers);
Opts.ForceEmitVTables = Args.hasArg(OPT_fforce_emit_vtables);
- Opts.UnsafeFPMath = Args.hasArg(OPT_menable_unsafe_fp_math) ||
- Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
- Args.hasArg(OPT_cl_fast_relaxed_math);
Opts.UnwindTables = Args.hasArg(OPT_munwind_tables);
- Opts.RelocationModel = getRelocModel(Args, Diags);
- Opts.ThreadModel = Args.getLastArgValue(OPT_mthread_model, "posix");
+ Opts.ThreadModel =
+ std::string(Args.getLastArgValue(OPT_mthread_model, "posix"));
if (Opts.ThreadModel != "posix" && Opts.ThreadModel != "single")
Diags.Report(diag::err_drv_invalid_value)
<< Args.getLastArg(OPT_mthread_model)->getAsString(Args)
<< Opts.ThreadModel;
- Opts.TrapFuncName = Args.getLastArgValue(OPT_ftrap_function_EQ);
+ Opts.TrapFuncName = std::string(Args.getLastArgValue(OPT_ftrap_function_EQ));
Opts.UseInitArray = !Args.hasArg(OPT_fno_use_init_array);
- Opts.FunctionSections = Args.hasFlag(OPT_ffunction_sections,
- OPT_fno_function_sections, false);
- Opts.DataSections = Args.hasFlag(OPT_fdata_sections,
- OPT_fno_data_sections, false);
- Opts.StackSizeSection =
- Args.hasFlag(OPT_fstack_size_section, OPT_fno_stack_size_section, false);
- Opts.UniqueSectionNames = Args.hasFlag(OPT_funique_section_names,
- OPT_fno_unique_section_names, true);
+ Opts.BBSections =
+ std::string(Args.getLastArgValue(OPT_fbasic_block_sections_EQ, "none"));
+
+ // Basic Block Sections implies Function Sections.
+ Opts.FunctionSections =
+ Args.hasArg(OPT_ffunction_sections) ||
+ (Opts.BBSections != "none" && Opts.BBSections != "labels");
+
+ Opts.DataSections = Args.hasArg(OPT_fdata_sections);
+ Opts.StackSizeSection = Args.hasArg(OPT_fstack_size_section);
+ Opts.UniqueSectionNames = !Args.hasArg(OPT_fno_unique_section_names);
+ Opts.UniqueBasicBlockSectionNames =
+ Args.hasArg(OPT_funique_basic_block_section_names);
+ Opts.UniqueInternalLinkageNames =
+ Args.hasArg(OPT_funique_internal_linkage_names);
Opts.MergeFunctions = Args.hasArg(OPT_fmerge_functions);
@@ -987,7 +1011,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
if (IK.getLanguage() != Language::LLVM_IR)
Diags.Report(diag::err_drv_argument_only_allowed_with)
<< A->getAsString(Args) << "-x ir";
- Opts.ThinLTOIndexFile = Args.getLastArgValue(OPT_fthinlto_index_EQ);
+ Opts.ThinLTOIndexFile =
+ std::string(Args.getLastArgValue(OPT_fthinlto_index_EQ));
}
if (Arg *A = Args.getLastArg(OPT_save_temps_EQ))
Opts.SaveTempsFilePrefix =
@@ -995,16 +1020,18 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
.Case("obj", FrontendOpts.OutputFile)
.Default(llvm::sys::path::filename(FrontendOpts.OutputFile).str());
- Opts.ThinLinkBitcodeFile = Args.getLastArgValue(OPT_fthin_link_bitcode_EQ);
+ Opts.ThinLinkBitcodeFile =
+ std::string(Args.getLastArgValue(OPT_fthin_link_bitcode_EQ));
Opts.MSVolatile = Args.hasArg(OPT_fms_volatile);
Opts.VectorizeLoop = Args.hasArg(OPT_vectorize_loops);
Opts.VectorizeSLP = Args.hasArg(OPT_vectorize_slp);
- Opts.PreferVectorWidth = Args.getLastArgValue(OPT_mprefer_vector_width_EQ);
+ Opts.PreferVectorWidth =
+ std::string(Args.getLastArgValue(OPT_mprefer_vector_width_EQ));
- Opts.MainFileName = Args.getLastArgValue(OPT_main_file_name);
+ Opts.MainFileName = std::string(Args.getLastArgValue(OPT_main_file_name));
Opts.VerifyModule = !Args.hasArg(OPT_disable_llvm_verifier);
Opts.ControlFlowGuardNoChecks = Args.hasArg(OPT_cfguard_no_checks);
@@ -1014,17 +1041,14 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.EmitGcovArcs = Args.hasArg(OPT_femit_coverage_data);
Opts.EmitGcovNotes = Args.hasArg(OPT_femit_coverage_notes);
if (Opts.EmitGcovArcs || Opts.EmitGcovNotes) {
- Opts.CoverageDataFile = Args.getLastArgValue(OPT_coverage_data_file);
- Opts.CoverageNotesFile = Args.getLastArgValue(OPT_coverage_notes_file);
- Opts.CoverageExtraChecksum = Args.hasArg(OPT_coverage_cfg_checksum);
- Opts.CoverageNoFunctionNamesInData =
- Args.hasArg(OPT_coverage_no_function_names_in_data);
+ Opts.CoverageDataFile =
+ std::string(Args.getLastArgValue(OPT_coverage_data_file));
+ Opts.CoverageNotesFile =
+ std::string(Args.getLastArgValue(OPT_coverage_notes_file));
Opts.ProfileFilterFiles =
- Args.getLastArgValue(OPT_fprofile_filter_files_EQ);
+ std::string(Args.getLastArgValue(OPT_fprofile_filter_files_EQ));
Opts.ProfileExcludeFiles =
- Args.getLastArgValue(OPT_fprofile_exclude_files_EQ);
- Opts.CoverageExitBlockBeforeBody =
- Args.hasArg(OPT_coverage_exit_block_before_body);
+ std::string(Args.getLastArgValue(OPT_fprofile_exclude_files_EQ));
if (Args.hasArg(OPT_coverage_version_EQ)) {
StringRef CoverageVersion = Args.getLastArgValue(OPT_coverage_version_EQ);
if (CoverageVersion.size() != 4) {
@@ -1062,8 +1086,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
A->getOption().getID() == options::OPT_INPUT ||
A->getOption().getID() == options::OPT_x ||
A->getOption().getID() == options::OPT_fembed_bitcode ||
- (A->getOption().getGroup().isValid() &&
- A->getOption().getGroup().getID() == options::OPT_W_Group))
+ A->getOption().matches(options::OPT_W_Group))
continue;
ArgStringList ASL;
A->render(Args, ASL);
@@ -1091,6 +1114,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Args.hasArg(OPT_fxray_always_emit_typedevents);
Opts.XRayInstructionThreshold =
getLastArgIntValue(Args, OPT_fxray_instruction_threshold_EQ, 200, Diags);
+ Opts.XRayIgnoreLoops = Args.hasArg(OPT_fxray_ignore_loops);
+ Opts.XRayOmitFunctionIndex = Args.hasArg(OPT_fno_xray_function_index);
auto XRayInstrBundles =
Args.getAllArgValues(OPT_fxray_instrumentation_bundle);
@@ -1103,6 +1128,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.PatchableFunctionEntryCount =
getLastArgIntValue(Args, OPT_fpatchable_function_entry_EQ, 0, Diags);
+ Opts.PatchableFunctionEntryOffset = getLastArgIntValue(
+ Args, OPT_fpatchable_function_entry_offset_EQ, 0, Diags);
Opts.InstrumentForProfiling = Args.hasArg(OPT_pg);
Opts.CallFEntry = Args.hasArg(OPT_mfentry);
Opts.MNopMCount = Args.hasArg(OPT_mnop_mcount);
@@ -1141,7 +1168,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
}
Opts.RelaxELFRelocations = Args.hasArg(OPT_mrelax_relocations);
- Opts.DebugCompilationDir = Args.getLastArgValue(OPT_fdebug_compilation_dir);
+ Opts.DebugCompilationDir =
+ std::string(Args.getLastArgValue(OPT_fdebug_compilation_dir));
for (auto *A :
Args.filtered(OPT_mlink_bitcode_file, OPT_mlink_builtin_bitcode)) {
CodeGenOptions::BitcodeFileToLink F;
@@ -1171,9 +1199,15 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.SanitizeCoverageNoPrune = Args.hasArg(OPT_fsanitize_coverage_no_prune);
Opts.SanitizeCoverageInline8bitCounters =
Args.hasArg(OPT_fsanitize_coverage_inline_8bit_counters);
+ Opts.SanitizeCoverageInlineBoolFlag =
+ Args.hasArg(OPT_fsanitize_coverage_inline_bool_flag);
Opts.SanitizeCoveragePCTable = Args.hasArg(OPT_fsanitize_coverage_pc_table);
Opts.SanitizeCoverageStackDepth =
Args.hasArg(OPT_fsanitize_coverage_stack_depth);
+ Opts.SanitizeCoverageAllowlistFiles =
+ Args.getAllArgValues(OPT_fsanitize_coverage_allowlist);
+ Opts.SanitizeCoverageBlocklistFiles =
+ Args.getAllArgValues(OPT_fsanitize_coverage_blocklist);
Opts.SanitizeMemoryTrackOrigins =
getLastArgIntValue(Args, OPT_fsanitize_memory_track_origins_EQ, 0, Diags);
Opts.SanitizeMemoryUseAfterDtor =
@@ -1225,6 +1259,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.NoStackArgProbe = Args.hasArg(OPT_mno_stack_arg_probe);
+ Opts.StackClashProtector = Args.hasArg(OPT_fstack_clash_protection);
+
if (Arg *A = Args.getLastArg(OPT_fobjc_dispatch_method_EQ)) {
StringRef Name = A->getValue();
unsigned Method = llvm::StringSwitch<unsigned>(Name)
@@ -1273,15 +1309,35 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
if (Arg *A = Args.getLastArg(OPT_fdenormal_fp_math_EQ)) {
StringRef Val = A->getValue();
Opts.FPDenormalMode = llvm::parseDenormalFPAttribute(Val);
- if (Opts.FPDenormalMode == llvm::DenormalMode::Invalid)
+ if (!Opts.FPDenormalMode.isValid())
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Val;
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_fdenormal_fp_math_f32_EQ)) {
+ StringRef Val = A->getValue();
+ Opts.FP32DenormalMode = llvm::parseDenormalFPAttribute(Val);
+ if (!Opts.FP32DenormalMode.isValid())
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Val;
}
- if (Arg *A = Args.getLastArg(OPT_fpcc_struct_return, OPT_freg_struct_return)) {
- if (A->getOption().matches(OPT_fpcc_struct_return)) {
+ // X86_32 has -fppc-struct-return and -freg-struct-return.
+ // PPC32 has -maix-struct-return and -msvr4-struct-return.
+ if (Arg *A =
+ Args.getLastArg(OPT_fpcc_struct_return, OPT_freg_struct_return,
+ OPT_maix_struct_return, OPT_msvr4_struct_return)) {
+ // TODO: We might want to consider enabling these options on AIX in the
+ // future.
+ if (T.isOSAIX())
+ Diags.Report(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << T.str();
+
+ const Option &O = A->getOption();
+ if (O.matches(OPT_fpcc_struct_return) ||
+ O.matches(OPT_maix_struct_return)) {
Opts.setStructReturnConvention(CodeGenOptions::SRCK_OnStack);
} else {
- assert(A->getOption().matches(OPT_freg_struct_return));
+ assert(O.matches(OPT_freg_struct_return) ||
+ O.matches(OPT_msvr4_struct_return));
Opts.setStructReturnConvention(CodeGenOptions::SRCK_InRegs);
}
}
@@ -1290,7 +1346,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.LinkerOptions = Args.getAllArgValues(OPT_linker_option);
bool NeedLocTracking = false;
- Opts.OptRecordFile = Args.getLastArgValue(OPT_opt_record_file);
+ Opts.OptRecordFile = std::string(Args.getLastArgValue(OPT_opt_record_file));
if (!Opts.OptRecordFile.empty())
NeedLocTracking = true;
@@ -1363,7 +1419,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.SanitizeTrap);
Opts.CudaGpuBinaryFileName =
- Args.getLastArgValue(OPT_fcuda_include_gpubinary);
+ std::string(Args.getLastArgValue(OPT_fcuda_include_gpubinary));
Opts.Backchain = Args.hasArg(OPT_mbackchain);
@@ -1374,38 +1430,6 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.Addrsig = Args.hasArg(OPT_faddrsig);
- if (Arg *A = Args.getLastArg(OPT_msign_return_address_EQ)) {
- StringRef SignScope = A->getValue();
-
- if (SignScope.equals_lower("none"))
- Opts.setSignReturnAddress(CodeGenOptions::SignReturnAddressScope::None);
- else if (SignScope.equals_lower("all"))
- Opts.setSignReturnAddress(CodeGenOptions::SignReturnAddressScope::All);
- else if (SignScope.equals_lower("non-leaf"))
- Opts.setSignReturnAddress(
- CodeGenOptions::SignReturnAddressScope::NonLeaf);
- else
- Diags.Report(diag::err_drv_invalid_value)
- << A->getAsString(Args) << SignScope;
-
- if (Arg *A = Args.getLastArg(OPT_msign_return_address_key_EQ)) {
- StringRef SignKey = A->getValue();
- if (!SignScope.empty() && !SignKey.empty()) {
- if (SignKey.equals_lower("a_key"))
- Opts.setSignReturnAddressKey(
- CodeGenOptions::SignReturnAddressKeyValue::AKey);
- else if (SignKey.equals_lower("b_key"))
- Opts.setSignReturnAddressKey(
- CodeGenOptions::SignReturnAddressKeyValue::BKey);
- else
- Diags.Report(diag::err_drv_invalid_value)
- << A->getAsString(Args) << SignKey;
- }
- }
- }
-
- Opts.BranchTargetEnforcement = Args.hasArg(OPT_mbranch_target_enforce);
-
Opts.KeepStaticConsts = Args.hasArg(OPT_fkeep_static_consts);
Opts.SpeculativeLoadHardening = Args.hasArg(OPT_mspeculative_load_hardening);
@@ -1414,20 +1438,23 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.PassPlugins = Args.getAllArgValues(OPT_fpass_plugin_EQ);
- Opts.SymbolPartition = Args.getLastArgValue(OPT_fsymbol_partition_EQ);
+ Opts.SymbolPartition =
+ std::string(Args.getLastArgValue(OPT_fsymbol_partition_EQ));
+ Opts.ForceAAPCSBitfieldLoad = Args.hasArg(OPT_ForceAAPCSBitfieldLoad);
return Success;
}
static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
ArgList &Args) {
- Opts.OutputFile = Args.getLastArgValue(OPT_dependency_file);
+ Opts.OutputFile = std::string(Args.getLastArgValue(OPT_dependency_file));
Opts.Targets = Args.getAllArgValues(OPT_MT);
Opts.IncludeSystemHeaders = Args.hasArg(OPT_sys_header_deps);
Opts.IncludeModuleFiles = Args.hasArg(OPT_module_file_deps);
Opts.UsePhonyTargets = Args.hasArg(OPT_MP);
Opts.ShowHeaderIncludes = Args.hasArg(OPT_H);
- Opts.HeaderIncludeOutputFile = Args.getLastArgValue(OPT_header_include_file);
+ Opts.HeaderIncludeOutputFile =
+ std::string(Args.getLastArgValue(OPT_header_include_file));
Opts.AddMissingHeaderDeps = Args.hasArg(OPT_MG);
if (Args.hasArg(OPT_show_includes)) {
// Writing both /showIncludes and preprocessor output to stdout
@@ -1440,9 +1467,9 @@ static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
} else {
Opts.ShowIncludesDest = ShowIncludesDestination::None;
}
- Opts.DOTOutputFile = Args.getLastArgValue(OPT_dependency_dot);
+ Opts.DOTOutputFile = std::string(Args.getLastArgValue(OPT_dependency_dot));
Opts.ModuleDependencyOutputDir =
- Args.getLastArgValue(OPT_module_dependency_dir);
+ std::string(Args.getLastArgValue(OPT_module_dependency_dir));
if (Args.hasArg(OPT_MV))
Opts.OutputFormat = DependencyOutputFormat::NMake;
// Add sanitizer blacklists as extra dependencies.
@@ -1452,13 +1479,13 @@ static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
for (const auto *A : Args.filtered(OPT_fsanitize_blacklist)) {
StringRef Val = A->getValue();
if (Val.find('=') == StringRef::npos)
- Opts.ExtraDeps.push_back(Val);
+ Opts.ExtraDeps.push_back(std::string(Val));
}
if (Opts.IncludeSystemHeaders) {
for (const auto *A : Args.filtered(OPT_fsanitize_system_blacklist)) {
StringRef Val = A->getValue();
if (Val.find('=') == StringRef::npos)
- Opts.ExtraDeps.push_back(Val);
+ Opts.ExtraDeps.push_back(std::string(Val));
}
}
}
@@ -1472,7 +1499,7 @@ static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
for (const auto *A : Args.filtered(OPT_fmodule_file)) {
StringRef Val = A->getValue();
if (Val.find('=') == StringRef::npos)
- Opts.ExtraDeps.push_back(Val);
+ Opts.ExtraDeps.push_back(std::string(Val));
}
}
@@ -1531,10 +1558,11 @@ static bool checkVerifyPrefixes(const std::vector<std::string> &VerifyPrefixes,
bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
DiagnosticsEngine *Diags,
- bool DefaultDiagColor, bool DefaultShowOpt) {
+ bool DefaultDiagColor) {
bool Success = true;
- Opts.DiagnosticLogFile = Args.getLastArgValue(OPT_diagnostic_log_file);
+ Opts.DiagnosticLogFile =
+ std::string(Args.getLastArgValue(OPT_diagnostic_log_file));
if (Arg *A =
Args.getLastArg(OPT_diagnostic_serialized_file, OPT__serialize_diags))
Opts.DiagnosticSerializationFile = A->getValue();
@@ -1544,17 +1572,11 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
Opts.PedanticErrors = Args.hasArg(OPT_pedantic_errors);
Opts.ShowCarets = !Args.hasArg(OPT_fno_caret_diagnostics);
Opts.ShowColors = parseShowColorsArgs(Args, DefaultDiagColor);
- Opts.ShowColumn = Args.hasFlag(OPT_fshow_column,
- OPT_fno_show_column,
- /*Default=*/true);
+ Opts.ShowColumn = !Args.hasArg(OPT_fno_show_column);
Opts.ShowFixits = !Args.hasArg(OPT_fno_diagnostics_fixit_info);
Opts.ShowLocation = !Args.hasArg(OPT_fno_show_source_location);
Opts.AbsolutePath = Args.hasArg(OPT_fdiagnostics_absolute_paths);
- Opts.ShowOptionNames =
- Args.hasFlag(OPT_fdiagnostics_show_option,
- OPT_fno_diagnostics_show_option, DefaultShowOpt);
-
- llvm::sys::Process::UseANSIEscapeCodes(Args.hasArg(OPT_fansi_escape_codes));
+ Opts.ShowOptionNames = !Args.hasArg(OPT_fno_diagnostics_show_option);
// Default behavior is to not to show note include stacks.
Opts.ShowNoteIncludeStack = false;
@@ -1660,7 +1682,11 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
Diags->Report(diag::warn_ignoring_ftabstop_value)
<< Opts.TabStop << DiagnosticOptions::DefaultTabStop;
}
- Opts.MessageLength = getLastArgIntValue(Args, OPT_fmessage_length, 0, Diags);
+ Opts.MessageLength =
+ getLastArgIntValue(Args, OPT_fmessage_length_EQ, 0, Diags);
+
+ Opts.UndefPrefixes = Args.getAllArgValues(OPT_Wundef_prefix_EQ);
+
addDiagnosticArgs(Args, OPT_W_Group, OPT_W_value_Group, Opts.Warnings);
addDiagnosticArgs(Args, OPT_R_Group, OPT_R_value_Group, Opts.Remarks);
@@ -1668,7 +1694,7 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
}
static void ParseFileSystemArgs(FileSystemOptions &Opts, ArgList &Args) {
- Opts.WorkingDir = Args.getLastArgValue(OPT_working_directory);
+ Opts.WorkingDir = std::string(Args.getLastArgValue(OPT_working_directory));
}
/// Parse the argument to the -ftest-module-file-extension
@@ -1686,12 +1712,12 @@ static bool parseTestModuleFileExtensionArg(StringRef Arg,
if (Args.size() < 5)
return true;
- BlockName = Args[0];
+ BlockName = std::string(Args[0]);
if (Args[1].getAsInteger(10, MajorVersion)) return true;
if (Args[2].getAsInteger(10, MinorVersion)) return true;
if (Args[3].getAsInteger(2, Hashed)) return true;
if (Args.size() > 4)
- UserInfo = Args[4];
+ UserInfo = std::string(Args[4]);
return false;
}
@@ -1724,6 +1750,7 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
case OPT_ast_dump:
case OPT_ast_dump_all:
case OPT_ast_dump_lookups:
+ case OPT_ast_dump_decl_types:
Opts.ProgramAction = frontend::ASTDump; break;
case OPT_ast_print:
Opts.ProgramAction = frontend::ASTPrint; break;
@@ -1766,25 +1793,26 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
StringRef ArgStr =
Args.hasArg(OPT_interface_stub_version_EQ)
? Args.getLastArgValue(OPT_interface_stub_version_EQ)
- : "experimental-ifs-v1";
+ : "experimental-ifs-v2";
if (ArgStr == "experimental-yaml-elf-v1" ||
+ ArgStr == "experimental-ifs-v1" ||
ArgStr == "experimental-tapi-elf-v1") {
std::string ErrorMessage =
"Invalid interface stub format: " + ArgStr.str() +
" is deprecated.";
Diags.Report(diag::err_drv_invalid_value)
<< "Must specify a valid interface stub format type, ie: "
- "-interface-stub-version=experimental-ifs-v1"
+ "-interface-stub-version=experimental-ifs-v2"
<< ErrorMessage;
- } else if (ArgStr != "experimental-ifs-v1") {
+ } else if (!ArgStr.startswith("experimental-ifs-")) {
std::string ErrorMessage =
"Invalid interface stub format: " + ArgStr.str() + ".";
Diags.Report(diag::err_drv_invalid_value)
<< "Must specify a valid interface stub format type, ie: "
- "-interface-stub-version=experimental-ifs-v1"
+ "-interface-stub-version=experimental-ifs-v2"
<< ErrorMessage;
} else {
- Opts.ProgramAction = frontend::GenerateInterfaceIfsExpV1;
+ Opts.ProgramAction = frontend::GenerateInterfaceStubs;
}
break;
}
@@ -1859,7 +1887,7 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
}
Opts.DisableFree = Args.hasArg(OPT_disable_free);
- Opts.OutputFile = Args.getLastArgValue(OPT_o);
+ Opts.OutputFile = std::string(Args.getLastArgValue(OPT_o));
Opts.Plugins = Args.getAllArgValues(OPT_load);
Opts.RelocatablePCH = Args.hasArg(OPT_relocatable_pch);
Opts.ShowHelp = Args.hasArg(OPT_help);
@@ -1878,8 +1906,9 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.FixToTemporaries = Args.hasArg(OPT_fixit_to_temp);
Opts.ASTDumpDecls = Args.hasArg(OPT_ast_dump, OPT_ast_dump_EQ);
Opts.ASTDumpAll = Args.hasArg(OPT_ast_dump_all, OPT_ast_dump_all_EQ);
- Opts.ASTDumpFilter = Args.getLastArgValue(OPT_ast_dump_filter);
+ Opts.ASTDumpFilter = std::string(Args.getLastArgValue(OPT_ast_dump_filter));
Opts.ASTDumpLookups = Args.hasArg(OPT_ast_dump_lookups);
+ Opts.ASTDumpDeclTypes = Args.hasArg(OPT_ast_dump_decl_types);
Opts.UseGlobalModuleIndex = !Args.hasArg(OPT_fno_modules_global_index);
Opts.GenerateGlobalModuleIndex = Opts.UseGlobalModuleIndex;
Opts.ModuleMapFiles = Args.getAllArgValues(OPT_fmodule_map_file);
@@ -1887,12 +1916,17 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
for (const auto *A : Args.filtered(OPT_fmodule_file)) {
StringRef Val = A->getValue();
if (Val.find('=') == StringRef::npos)
- Opts.ModuleFiles.push_back(Val);
+ Opts.ModuleFiles.push_back(std::string(Val));
}
Opts.ModulesEmbedFiles = Args.getAllArgValues(OPT_fmodules_embed_file_EQ);
Opts.ModulesEmbedAllFiles = Args.hasArg(OPT_fmodules_embed_all_files);
Opts.IncludeTimestamps = !Args.hasArg(OPT_fno_pch_timestamp);
Opts.UseTemporary = !Args.hasArg(OPT_fno_temp_file);
+ Opts.IsSystemModule = Args.hasArg(OPT_fsystem_module);
+
+ if (Opts.ProgramAction != frontend::GenerateModule && Opts.IsSystemModule)
+ Diags.Report(diag::err_drv_argument_only_allowed_with) << "-fsystem-module"
+ << "-emit-module";
Opts.CodeCompleteOpts.IncludeMacros
= Args.hasArg(OPT_code_completion_macros);
@@ -1907,10 +1941,14 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.CodeCompleteOpts.IncludeFixIts
= Args.hasArg(OPT_code_completion_with_fixits);
- Opts.OverrideRecordLayoutsFile
- = Args.getLastArgValue(OPT_foverride_record_layout_EQ);
- Opts.AuxTriple = Args.getLastArgValue(OPT_aux_triple);
- Opts.StatsFile = Args.getLastArgValue(OPT_stats_file);
+ Opts.OverrideRecordLayoutsFile =
+ std::string(Args.getLastArgValue(OPT_foverride_record_layout_EQ));
+ Opts.AuxTriple = std::string(Args.getLastArgValue(OPT_aux_triple));
+ if (Args.hasArg(OPT_aux_target_cpu))
+ Opts.AuxTargetCPU = std::string(Args.getLastArgValue(OPT_aux_target_cpu));
+ if (Args.hasArg(OPT_aux_target_feature))
+ Opts.AuxTargetFeatures = Args.getAllArgValues(OPT_aux_target_feature);
+ Opts.StatsFile = std::string(Args.getLastArgValue(OPT_stats_file));
if (const Arg *A = Args.getLastArg(OPT_arcmt_check,
OPT_arcmt_modify,
@@ -1929,9 +1967,10 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
break;
}
}
- Opts.MTMigrateDir = Args.getLastArgValue(OPT_mt_migrate_directory);
- Opts.ARCMTMigrateReportOut
- = Args.getLastArgValue(OPT_arcmt_migrate_report_output);
+ Opts.MTMigrateDir =
+ std::string(Args.getLastArgValue(OPT_mt_migrate_directory));
+ Opts.ARCMTMigrateReportOut =
+ std::string(Args.getLastArgValue(OPT_arcmt_migrate_report_output));
Opts.ARCMTMigrateEmitARCErrors
= Args.hasArg(OPT_arcmt_migrate_emit_arc_errors);
@@ -1966,7 +2005,8 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
if (Args.hasArg(OPT_objcmt_migrate_all))
Opts.ObjCMTAction |= FrontendOptions::ObjCMT_MigrateDecls;
- Opts.ObjCMTWhiteListPath = Args.getLastArgValue(OPT_objcmt_whitelist_dir_path);
+ Opts.ObjCMTWhiteListPath =
+ std::string(Args.getLastArgValue(OPT_objcmt_whitelist_dir_path));
if (Opts.ARCMTAction != FrontendOptions::ARCMT_None &&
Opts.ObjCMTAction != FrontendOptions::ObjCMT_None) {
@@ -2043,12 +2083,16 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
DashX = IK;
}
+ bool IsSystem = false;
+
// The -emit-module action implicitly takes a module map.
if (Opts.ProgramAction == frontend::GenerateModule &&
- IK.getFormat() == InputKind::Source)
+ IK.getFormat() == InputKind::Source) {
IK = IK.withFormat(InputKind::ModuleMap);
+ IsSystem = Opts.IsSystemModule;
+ }
- Opts.Inputs.emplace_back(std::move(Inputs[i]), IK);
+ Opts.Inputs.emplace_back(std::move(Inputs[i]), IK, IsSystem);
}
return DashX;
@@ -2063,14 +2107,14 @@ std::string CompilerInvocation::GetResourcesPath(const char *Argv0,
static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
const std::string &WorkingDir) {
- Opts.Sysroot = Args.getLastArgValue(OPT_isysroot, "/");
+ Opts.Sysroot = std::string(Args.getLastArgValue(OPT_isysroot, "/"));
Opts.Verbose = Args.hasArg(OPT_v);
Opts.UseBuiltinIncludes = !Args.hasArg(OPT_nobuiltininc);
Opts.UseStandardSystemIncludes = !Args.hasArg(OPT_nostdsysteminc);
Opts.UseStandardCXXIncludes = !Args.hasArg(OPT_nostdincxx);
if (const Arg *A = Args.getLastArg(OPT_stdlib_EQ))
Opts.UseLibcxx = (strcmp(A->getValue(), "libc++") == 0);
- Opts.ResourceDir = Args.getLastArgValue(OPT_resource_dir);
+ Opts.ResourceDir = std::string(Args.getLastArgValue(OPT_resource_dir));
// Canonicalize -fmodules-cache-path before storing it.
SmallString<128> P(Args.getLastArgValue(OPT_fmodules_cache_path));
@@ -2081,20 +2125,23 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
llvm::sys::fs::make_absolute(WorkingDir, P);
}
llvm::sys::path::remove_dots(P);
- Opts.ModuleCachePath = P.str();
+ Opts.ModuleCachePath = std::string(P.str());
- Opts.ModuleUserBuildPath = Args.getLastArgValue(OPT_fmodules_user_build_path);
+ Opts.ModuleUserBuildPath =
+ std::string(Args.getLastArgValue(OPT_fmodules_user_build_path));
// Only the -fmodule-file=<name>=<file> form.
for (const auto *A : Args.filtered(OPT_fmodule_file)) {
StringRef Val = A->getValue();
- if (Val.find('=') != StringRef::npos)
- Opts.PrebuiltModuleFiles.insert(Val.split('='));
+ if (Val.find('=') != StringRef::npos){
+ auto Split = Val.split('=');
+ Opts.PrebuiltModuleFiles.insert(
+ {std::string(Split.first), std::string(Split.second)});
+ }
}
for (const auto *A : Args.filtered(OPT_fprebuilt_module_path))
Opts.AddPrebuiltModulePath(A->getValue());
Opts.DisableModuleHash = Args.hasArg(OPT_fdisable_module_hash);
Opts.ModulesHashContent = Args.hasArg(OPT_fmodules_hash_content);
- Opts.ModulesStrictContextHash = Args.hasArg(OPT_fmodules_strict_context_hash);
Opts.ModulesValidateDiagnosticOptions =
!Args.hasArg(OPT_fmodules_disable_diagnostic_validation);
Opts.ImplicitModuleMaps = Args.hasArg(OPT_fimplicit_module_maps);
@@ -2141,7 +2188,7 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
SmallString<32> Buffer;
llvm::sys::path::append(Buffer, Opts.Sysroot,
llvm::StringRef(A->getValue()).substr(1));
- Path = Buffer.str();
+ Path = std::string(Buffer.str());
}
Opts.AddPath(Path, Group, IsFramework,
@@ -2241,7 +2288,7 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
if (T.isPS4())
LangStd = LangStandard::lang_gnu99;
else
- LangStd = LangStandard::lang_gnu11;
+ LangStd = LangStandard::lang_gnu17;
#endif
break;
case Language::ObjC:
@@ -2278,7 +2325,7 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
Opts.CPlusPlus11 = Std.isCPlusPlus11();
Opts.CPlusPlus14 = Std.isCPlusPlus14();
Opts.CPlusPlus17 = Std.isCPlusPlus17();
- Opts.CPlusPlus2a = Std.isCPlusPlus2a();
+ Opts.CPlusPlus20 = Std.isCPlusPlus20();
Opts.Digraphs = Std.hasDigraphs();
Opts.GNUMode = Std.isGNUMode();
Opts.GNUInline = !Opts.C99 && !Opts.CPlusPlus;
@@ -2304,7 +2351,7 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
Opts.AltiVec = 0;
Opts.ZVector = 0;
Opts.setLaxVectorConversions(LangOptions::LaxVectorConversionKind::None);
- Opts.setDefaultFPContractMode(LangOptions::FPC_On);
+ Opts.setDefaultFPContractMode(LangOptions::FPM_On);
Opts.NativeHalfType = 1;
Opts.NativeHalfArgsAndReturns = 1;
Opts.OpenCLCPlusPlus = Opts.CPlusPlus;
@@ -2324,7 +2371,7 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
Opts.CUDA = IK.getLanguage() == Language::CUDA || Opts.HIP;
if (Opts.CUDA)
// Set default FP_CONTRACT to FAST.
- Opts.setDefaultFPContractMode(LangOptions::FPC_Fast);
+ Opts.setDefaultFPContractMode(LangOptions::FPM_Fast);
Opts.RenderScript = IK.getLanguage() == Language::RenderScript;
if (Opts.RenderScript) {
@@ -2513,6 +2560,24 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
LangStd = OpenCLLangStd;
}
+ Opts.SYCL = Args.hasArg(options::OPT_fsycl);
+ Opts.SYCLIsDevice = Opts.SYCL && Args.hasArg(options::OPT_fsycl_is_device);
+ if (Opts.SYCL) {
+ // -sycl-std applies to any SYCL source, not only those containing kernels,
+ // but also those using the SYCL API
+ if (const Arg *A = Args.getLastArg(OPT_sycl_std_EQ)) {
+ Opts.SYCLVersion = llvm::StringSwitch<unsigned>(A->getValue())
+ .Cases("2017", "1.2.1", "121", "sycl-1.2.1", 2017)
+ .Default(0U);
+
+ if (Opts.SYCLVersion == 0U) {
+ // User has passed an invalid value to the flag, this is an error
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue();
+ }
+ }
+ }
+
Opts.IncludeDefaultHeader = Args.hasArg(OPT_finclude_default_header);
Opts.DeclareOpenCLBuiltins = Args.hasArg(OPT_fdeclare_opencl_builtins);
@@ -2699,7 +2764,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.setSignedOverflowBehavior(LangOptions::SOB_Trapping);
// Set the handler, if one is specified.
Opts.OverflowHandler =
- Args.getLastArgValue(OPT_ftrapv_handler);
+ std::string(Args.getLastArgValue(OPT_ftrapv_handler));
}
else if (Args.hasArg(OPT_fwrapv))
Opts.setSignedOverflowBehavior(LangOptions::SOB_Defined);
@@ -2750,6 +2815,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
if (Args.hasArg(OPT_fno_threadsafe_statics))
Opts.ThreadsafeStatics = 0;
Opts.Exceptions = Args.hasArg(OPT_fexceptions);
+ Opts.IgnoreExceptions = Args.hasArg(OPT_fignore_exceptions);
Opts.ObjCExceptions = Args.hasArg(OPT_fobjc_exceptions);
Opts.CXXExceptions = Args.hasArg(OPT_fcxx_exceptions);
@@ -2788,7 +2854,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.Blocks = Args.hasArg(OPT_fblocks) || (Opts.OpenCL
&& Opts.OpenCLVersion == 200);
Opts.BlocksRuntimeOptional = Args.hasArg(OPT_fblocks_runtime_optional);
- Opts.Coroutines = Opts.CPlusPlus2a || Args.hasArg(OPT_fcoroutines_ts);
+ Opts.Coroutines = Opts.CPlusPlus20 || Args.hasArg(OPT_fcoroutines_ts);
Opts.ConvergentFunctions = Opts.OpenCL || (Opts.CUDA && Opts.CUDAIsDevice) ||
Args.hasArg(OPT_fconvergent_functions);
@@ -2798,7 +2864,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
OPT_fno_double_square_bracket_attributes,
Opts.DoubleSquareBracketAttributes);
- Opts.CPlusPlusModules = Opts.CPlusPlus2a;
+ Opts.CPlusPlusModules = Opts.CPlusPlus20;
Opts.ModulesTS = Args.hasArg(OPT_fmodules_ts);
Opts.Modules =
Args.hasArg(OPT_fmodules) || Opts.ModulesTS || Opts.CPlusPlusModules;
@@ -2819,7 +2885,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.ImplicitModules = !Args.hasArg(OPT_fno_implicit_modules);
Opts.CharIsSigned = Opts.OpenCL || !Args.hasArg(OPT_fno_signed_char);
Opts.WChar = Opts.CPlusPlus && !Args.hasArg(OPT_fno_wchar);
- Opts.Char8 = Args.hasFlag(OPT_fchar8__t, OPT_fno_char8__t, Opts.CPlusPlus2a);
+ Opts.Char8 = Args.hasFlag(OPT_fchar8__t, OPT_fno_char8__t, Opts.CPlusPlus20);
if (const Arg *A = Args.getLastArg(OPT_fwchar_type_EQ)) {
Opts.WCharSize = llvm::StringSwitch<unsigned>(A->getValue())
.Case("char", 1)
@@ -2852,7 +2918,15 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
<< A->getValue();
Opts.NewAlignOverride = 0;
}
- Opts.ConceptsTS = Args.hasArg(OPT_fconcepts_ts);
+ Opts.ConceptSatisfactionCaching =
+ !Args.hasArg(OPT_fno_concept_satisfaction_caching);
+ if (Args.hasArg(OPT_fconcepts_ts))
+ Diags.Report(diag::warn_fe_concepts_ts_flag);
+ // Recovery AST still heavily relies on dependent-type machinery.
+ Opts.RecoveryAST =
+ Args.hasFlag(OPT_frecovery_ast, OPT_fno_recovery_ast, Opts.CPlusPlus);
+ Opts.RecoveryASTType =
+ Args.hasFlag(OPT_frecovery_ast_type, OPT_fno_recovery_ast_type, false);
Opts.HeinousExtensions = Args.hasArg(OPT_fheinous_gnu_extensions);
Opts.AccessControl = !Args.hasArg(OPT_fno_access_control);
Opts.ElideConstructors = !Args.hasArg(OPT_fno_elide_constructors);
@@ -2873,7 +2947,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
getLastArgIntValue(Args, OPT_Wlarge_by_value_copy_EQ, 0, Diags);
Opts.MSBitfields = Args.hasArg(OPT_mms_bitfields);
Opts.ObjCConstantStringClass =
- Args.getLastArgValue(OPT_fconstant_string_class);
+ std::string(Args.getLastArgValue(OPT_fconstant_string_class));
Opts.ObjCDefaultSynthProperties =
!Args.hasArg(OPT_disable_objc_default_synthesize_properties);
Opts.EncodeExtendedBlockSig =
@@ -2882,6 +2956,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.PackStruct = getLastArgIntValue(Args, OPT_fpack_struct_EQ, 0, Diags);
Opts.MaxTypeAlign = getLastArgIntValue(Args, OPT_fmax_type_align_EQ, 0, Diags);
Opts.AlignDouble = Args.hasArg(OPT_malign_double);
+ Opts.DoubleSize = getLastArgIntValue(Args, OPT_mdouble_EQ, 0, Diags);
Opts.LongDoubleSize = Args.hasArg(OPT_mlong_double_128)
? 128
: Args.hasArg(OPT_mlong_double_64) ? 64 : 0;
@@ -2899,6 +2974,8 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.NoBitFieldTypeAlign = Args.hasArg(OPT_fno_bitfield_type_align);
Opts.SinglePrecisionConstants = Args.hasArg(OPT_cl_single_precision_constant);
Opts.FastRelaxedMath = Args.hasArg(OPT_cl_fast_relaxed_math);
+ if (Opts.FastRelaxedMath)
+ Opts.setDefaultFPContractMode(LangOptions::FPM_Fast);
Opts.HexagonQdsp6Compat = Args.hasArg(OPT_mqdsp6_compat);
Opts.FakeAddressSpaceMap = Args.hasArg(OPT_ffake_address_space_map);
Opts.ParseUnknownAnytype = Args.hasArg(OPT_funknown_anytype);
@@ -2906,7 +2983,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.DebuggerCastResultToId = Args.hasArg(OPT_fdebugger_cast_result_to_id);
Opts.DebuggerObjCLiteral = Args.hasArg(OPT_fdebugger_objc_literal);
Opts.ApplePragmaPack = Args.hasArg(OPT_fapple_pragma_pack);
- Opts.ModuleName = Args.getLastArgValue(OPT_fmodule_name_EQ);
+ Opts.ModuleName = std::string(Args.getLastArgValue(OPT_fmodule_name_EQ));
Opts.CurrentModule = Opts.ModuleName;
Opts.AppExt = Args.hasArg(OPT_fapplication_extension);
Opts.ModuleFeatures = Args.getAllArgValues(OPT_fmodule_feature);
@@ -3001,6 +3078,11 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.setDefaultCallingConv(DefaultCC);
}
+ Opts.SemanticInterposition = Args.hasArg(OPT_fsemantic_interposition);
+ // An explicit -fno-semantic-interposition infers dso_local.
+ Opts.ExplicitNoSemanticInterposition =
+ Args.hasArg(OPT_fno_semantic_interposition);
+
// -mrtd option
if (Arg *A = Args.getLastArg(OPT_mrtd)) {
if (Opts.getDefaultCallingConv() != LangOptions::DCC_None)
@@ -3016,8 +3098,8 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
}
}
- // Check if -fopenmp is specified and set default version to 4.5.
- Opts.OpenMP = Args.hasArg(options::OPT_fopenmp) ? 45 : 0;
+ // Check if -fopenmp is specified and set default version to 5.0.
+ Opts.OpenMP = Args.hasArg(options::OPT_fopenmp) ? 50 : 0;
// Check if -fopenmp-simd is specified.
bool IsSimdSpecified =
Args.hasFlag(options::OPT_fopenmp_simd, options::OPT_fno_openmp_simd,
@@ -3035,10 +3117,8 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
if (Opts.OpenMP || Opts.OpenMPSimd) {
if (int Version = getLastArgIntValue(
Args, OPT_fopenmp_version_EQ,
- (IsSimdSpecified || IsTargetSpecified) ? 45 : Opts.OpenMP, Diags))
+ (IsSimdSpecified || IsTargetSpecified) ? 50 : Opts.OpenMP, Diags))
Opts.OpenMP = Version;
- else if (IsSimdSpecified || IsTargetSpecified)
- Opts.OpenMP = 45;
// Provide diagnostic when a given target is not expected to be an OpenMP
// device or host.
if (!Opts.OpenMPIsDevice) {
@@ -3057,7 +3137,8 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
// Set the flag to prevent the implementation from emitting device exception
// handling code for those requiring so.
- if ((Opts.OpenMPIsDevice && T.isNVPTX()) || Opts.OpenCLCPlusPlus) {
+ if ((Opts.OpenMPIsDevice && (T.isNVPTX() || T.isAMDGCN())) ||
+ Opts.OpenCLCPlusPlus) {
Opts.Exceptions = 0;
Opts.CXXExceptions = 0;
}
@@ -3091,6 +3172,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
TT.getArch() == llvm::Triple::ppc64le ||
TT.getArch() == llvm::Triple::nvptx ||
TT.getArch() == llvm::Triple::nvptx64 ||
+ TT.getArch() == llvm::Triple::amdgcn ||
TT.getArch() == llvm::Triple::x86 ||
TT.getArch() == llvm::Triple::x86_64))
Diags.Report(diag::err_drv_invalid_omp_target) << A->getValue(i);
@@ -3108,15 +3190,19 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
<< Opts.OMPHostIRFile;
}
- Opts.SYCLIsDevice = Args.hasArg(options::OPT_fsycl_is_device);
-
- // Set CUDA mode for OpenMP target NVPTX if specified in options
- Opts.OpenMPCUDAMode = Opts.OpenMPIsDevice && T.isNVPTX() &&
+ // Set CUDA mode for OpenMP target NVPTX/AMDGCN if specified in options
+ Opts.OpenMPCUDAMode = Opts.OpenMPIsDevice && (T.isNVPTX() || T.isAMDGCN()) &&
Args.hasArg(options::OPT_fopenmp_cuda_mode);
- // Set CUDA mode for OpenMP target NVPTX if specified in options
+ // Set CUDA support for parallel execution of target regions for OpenMP target
+ // NVPTX/AMDGCN if specified in options.
+ Opts.OpenMPCUDATargetParallel =
+ Opts.OpenMPIsDevice && (T.isNVPTX() || T.isAMDGCN()) &&
+ Args.hasArg(options::OPT_fopenmp_cuda_parallel_target_regions);
+
+ // Set CUDA mode for OpenMP target NVPTX/AMDGCN if specified in options
Opts.OpenMPCUDAForceFullRuntime =
- Opts.OpenMPIsDevice && T.isNVPTX() &&
+ Opts.OpenMPIsDevice && (T.isNVPTX() || T.isAMDGCN()) &&
Args.hasArg(options::OPT_fopenmp_cuda_force_full_runtime);
// Record whether the __DEPRECATED define was requested.
@@ -3140,30 +3226,65 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
if (InlineArg->getOption().matches(options::OPT_fno_inline))
Opts.NoInlineDefine = true;
- Opts.FastMath = Args.hasArg(OPT_ffast_math) ||
- Args.hasArg(OPT_cl_fast_relaxed_math);
+ Opts.FastMath =
+ Args.hasArg(OPT_ffast_math) || Args.hasArg(OPT_cl_fast_relaxed_math);
Opts.FiniteMathOnly = Args.hasArg(OPT_ffinite_math_only) ||
- Args.hasArg(OPT_cl_finite_math_only) ||
- Args.hasArg(OPT_cl_fast_relaxed_math);
+ Args.hasArg(OPT_ffast_math) ||
+ Args.hasArg(OPT_cl_finite_math_only) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math);
Opts.UnsafeFPMath = Args.hasArg(OPT_menable_unsafe_fp_math) ||
+ Args.hasArg(OPT_ffast_math) ||
+ Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math);
+ Opts.AllowFPReassoc = Args.hasArg(OPT_mreassociate) ||
+ Args.hasArg(OPT_menable_unsafe_fp_math) ||
+ Args.hasArg(OPT_ffast_math) ||
+ Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math);
+ Opts.NoHonorNaNs =
+ Args.hasArg(OPT_menable_no_nans) || Args.hasArg(OPT_ffinite_math_only) ||
+ Args.hasArg(OPT_ffast_math) || Args.hasArg(OPT_cl_finite_math_only) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math);
+ Opts.NoHonorInfs = Args.hasArg(OPT_menable_no_infinities) ||
+ Args.hasArg(OPT_ffinite_math_only) ||
+ Args.hasArg(OPT_ffast_math) ||
+ Args.hasArg(OPT_cl_finite_math_only) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math);
+ Opts.NoSignedZero = Args.hasArg(OPT_fno_signed_zeros) ||
+ Args.hasArg(OPT_menable_unsafe_fp_math) ||
+ Args.hasArg(OPT_ffast_math) ||
+ Args.hasArg(OPT_cl_no_signed_zeros) ||
Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
Args.hasArg(OPT_cl_fast_relaxed_math);
+ Opts.AllowRecip = Args.hasArg(OPT_freciprocal_math) ||
+ Args.hasArg(OPT_menable_unsafe_fp_math) ||
+ Args.hasArg(OPT_ffast_math) ||
+ Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math);
+ // Currently there's no clang option to enable this individually
+ Opts.ApproxFunc = Args.hasArg(OPT_menable_unsafe_fp_math) ||
+ Args.hasArg(OPT_ffast_math) ||
+ Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math);
if (Arg *A = Args.getLastArg(OPT_ffp_contract)) {
StringRef Val = A->getValue();
if (Val == "fast")
- Opts.setDefaultFPContractMode(LangOptions::FPC_Fast);
+ Opts.setDefaultFPContractMode(LangOptions::FPM_Fast);
else if (Val == "on")
- Opts.setDefaultFPContractMode(LangOptions::FPC_On);
+ Opts.setDefaultFPContractMode(LangOptions::FPM_On);
else if (Val == "off")
- Opts.setDefaultFPContractMode(LangOptions::FPC_Off);
+ Opts.setDefaultFPContractMode(LangOptions::FPM_Off);
else
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Val;
}
- LangOptions::FPRoundingModeKind FPRM = LangOptions::FPR_ToNearest;
+ if (Args.hasArg(OPT_fexperimental_strict_floating_point))
+ Opts.ExpStrictFP = true;
+
+ auto FPRM = llvm::RoundingMode::NearestTiesToEven;
if (Args.hasArg(OPT_frounding_math)) {
- FPRM = LangOptions::FPR_Dynamic;
+ FPRM = llvm::RoundingMode::Dynamic;
}
Opts.setFPRoundingMode(FPRM);
@@ -3217,6 +3338,11 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Val;
}
+ if (Arg *A = Args.getLastArg(OPT_ftrivial_auto_var_init_stop_after)) {
+ int Val = std::stoi(A->getValue());
+ Opts.TrivialAutoVarInitStopAfter = Val;
+ }
+
// Parse -fsanitize= arguments.
parseSanitizerKinds("-fsanitize=", Args.getAllArgValues(OPT_fsanitize_EQ),
Diags, Opts.Sanitize);
@@ -3231,18 +3357,11 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
systemBlacklists.end());
// -fxray-instrument
- Opts.XRayInstrument =
- Args.hasFlag(OPT_fxray_instrument, OPT_fnoxray_instrument, false);
-
- // -fxray-always-emit-customevents
+ Opts.XRayInstrument = Args.hasArg(OPT_fxray_instrument);
Opts.XRayAlwaysEmitCustomEvents =
- Args.hasFlag(OPT_fxray_always_emit_customevents,
- OPT_fnoxray_always_emit_customevents, false);
-
- // -fxray-always-emit-typedevents
+ Args.hasArg(OPT_fxray_always_emit_customevents);
Opts.XRayAlwaysEmitTypedEvents =
- Args.hasFlag(OPT_fxray_always_emit_typedevents,
- OPT_fnoxray_always_emit_customevents, false);
+ Args.hasArg(OPT_fxray_always_emit_typedevents);
// -fxray-{always,never}-instrument= filenames.
Opts.XRayAlwaysInstrumentFiles =
@@ -3294,6 +3413,54 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.CompleteMemberPointers = Args.hasArg(OPT_fcomplete_member_pointers);
Opts.BuildingPCHWithObjectFile = Args.hasArg(OPT_building_pch_with_obj);
+ Opts.PCHInstantiateTemplates = Args.hasArg(OPT_fpch_instantiate_templates);
+
+ Opts.MatrixTypes = Args.hasArg(OPT_fenable_matrix);
+
+ Opts.MaxTokens = getLastArgIntValue(Args, OPT_fmax_tokens_EQ, 0, Diags);
+
+ if (Arg *A = Args.getLastArg(OPT_msign_return_address_EQ)) {
+ StringRef SignScope = A->getValue();
+
+ if (SignScope.equals_lower("none"))
+ Opts.setSignReturnAddressScope(
+ LangOptions::SignReturnAddressScopeKind::None);
+ else if (SignScope.equals_lower("all"))
+ Opts.setSignReturnAddressScope(
+ LangOptions::SignReturnAddressScopeKind::All);
+ else if (SignScope.equals_lower("non-leaf"))
+ Opts.setSignReturnAddressScope(
+ LangOptions::SignReturnAddressScopeKind::NonLeaf);
+ else
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << SignScope;
+
+ if (Arg *A = Args.getLastArg(OPT_msign_return_address_key_EQ)) {
+ StringRef SignKey = A->getValue();
+ if (!SignScope.empty() && !SignKey.empty()) {
+ if (SignKey.equals_lower("a_key"))
+ Opts.setSignReturnAddressKey(
+ LangOptions::SignReturnAddressKeyKind::AKey);
+ else if (SignKey.equals_lower("b_key"))
+ Opts.setSignReturnAddressKey(
+ LangOptions::SignReturnAddressKeyKind::BKey);
+ else
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << SignKey;
+ }
+ }
+ }
+
+ Opts.BranchTargetEnforcement = Args.hasArg(OPT_mbranch_target_enforce);
+ Opts.SpeculativeLoadHardening = Args.hasArg(OPT_mspeculative_load_hardening);
+
+ Opts.CompatibilityQualifiedIdBlockParamTypeChecking =
+ Args.hasArg(OPT_fcompatibility_qualified_id_block_param_type_checking);
+
+ Opts.RelativeCXXABIVTables =
+ Args.hasFlag(OPT_fexperimental_relative_cxx_abi_vtables,
+ OPT_fno_experimental_relative_cxx_abi_vtables,
+ /*default=*/false);
}
static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) {
@@ -3314,7 +3481,7 @@ static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) {
case frontend::GenerateModuleInterface:
case frontend::GenerateHeaderModule:
case frontend::GeneratePCH:
- case frontend::GenerateInterfaceIfsExpV1:
+ case frontend::GenerateInterfaceStubs:
case frontend::ParseSyntaxOnly:
case frontend::ModuleFileInfo:
case frontend::VerifyPCH:
@@ -3343,11 +3510,12 @@ static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) {
static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
DiagnosticsEngine &Diags,
frontend::ActionKind Action) {
- Opts.ImplicitPCHInclude = Args.getLastArgValue(OPT_include_pch);
+ Opts.ImplicitPCHInclude = std::string(Args.getLastArgValue(OPT_include_pch));
Opts.PCHWithHdrStop = Args.hasArg(OPT_pch_through_hdrstop_create) ||
Args.hasArg(OPT_pch_through_hdrstop_use);
Opts.PCHWithHdrStopCreate = Args.hasArg(OPT_pch_through_hdrstop_create);
- Opts.PCHThroughHeader = Args.getLastArgValue(OPT_pch_through_header_EQ);
+ Opts.PCHThroughHeader =
+ std::string(Args.getLastArgValue(OPT_pch_through_header_EQ));
Opts.UsePredefines = !Args.hasArg(OPT_undef);
Opts.DetailedRecord = Args.hasArg(OPT_detailed_preprocessing_record);
Opts.DisablePCHValidation = Args.hasArg(OPT_fno_validate_pch);
@@ -3357,8 +3525,11 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
for (const auto *A : Args.filtered(OPT_error_on_deserialized_pch_decl))
Opts.DeserializedPCHDeclsToErrorOn.insert(A->getValue());
- for (const auto &A : Args.getAllArgValues(OPT_fmacro_prefix_map_EQ))
- Opts.MacroPrefixMap.insert(StringRef(A).split('='));
+ for (const auto &A : Args.getAllArgValues(OPT_fmacro_prefix_map_EQ)) {
+ auto Split = StringRef(A).split('=');
+ Opts.MacroPrefixMap.insert(
+ {std::string(Split.first), std::string(Split.second)});
+ }
if (const Arg *A = Args.getLastArg(OPT_preamble_bytes_EQ)) {
StringRef Value(A->getValue());
@@ -3435,6 +3606,7 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
Opts.LexEditorPlaceholders = false;
Opts.SetUpStaticAnalyzer = Args.hasArg(OPT_setup_static_analyzer);
+ Opts.DisablePragmaDebugCrash = Args.hasArg(OPT_disable_pragma_debug_crash);
}
static void ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts,
@@ -3457,8 +3629,8 @@ static void ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts,
static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args,
DiagnosticsEngine &Diags) {
- Opts.CodeModel = getCodeModel(Args, Diags);
- Opts.ABI = Args.getLastArgValue(OPT_target_abi);
+ Opts.CodeModel = std::string(Args.getLastArgValue(OPT_mcmodel_EQ, "default"));
+ Opts.ABI = std::string(Args.getLastArgValue(OPT_target_abi));
if (Arg *A = Args.getLastArg(OPT_meabi)) {
StringRef Value = A->getValue();
llvm::EABI EABIVersion = llvm::StringSwitch<llvm::EABI>(Value)
@@ -3473,15 +3645,11 @@ static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args,
else
Opts.EABIVersion = EABIVersion;
}
- Opts.CPU = Args.getLastArgValue(OPT_target_cpu);
- Opts.FPMath = Args.getLastArgValue(OPT_mfpmath);
+ Opts.CPU = std::string(Args.getLastArgValue(OPT_target_cpu));
+ Opts.FPMath = std::string(Args.getLastArgValue(OPT_mfpmath));
Opts.FeaturesAsWritten = Args.getAllArgValues(OPT_target_feature);
- Opts.LinkerVersion = Args.getLastArgValue(OPT_target_linker_version);
- Opts.Triple = Args.getLastArgValue(OPT_triple);
- // Use the default target triple if unspecified.
- if (Opts.Triple.empty())
- Opts.Triple = llvm::sys::getDefaultTargetTriple();
- Opts.Triple = llvm::Triple::normalize(Opts.Triple);
+ Opts.LinkerVersion =
+ std::string(Args.getLastArgValue(OPT_target_linker_version));
Opts.OpenCLExtensionsAsWritten = Args.getAllArgValues(OPT_cl_ext_EQ);
Opts.ForceEnableInt128 = Args.hasArg(OPT_fforce_enable_int128);
Opts.NVPTXUseShortPointers = Args.hasFlag(
@@ -3496,9 +3664,35 @@ static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args,
}
}
+bool CompilerInvocation::parseSimpleArgs(const ArgList &Args,
+ DiagnosticsEngine &Diags) {
+#define OPTION_WITH_MARSHALLING_FLAG(PREFIX_TYPE, NAME, ID, KIND, GROUP, \
+ ALIAS, ALIASARGS, FLAGS, PARAM, HELPTEXT, \
+ METAVAR, VALUES, SPELLING, ALWAYS_EMIT, \
+ KEYPATH, DEFAULT_VALUE, IS_POSITIVE) \
+ this->KEYPATH = Args.hasArg(OPT_##ID) && IS_POSITIVE;
+
+#define OPTION_WITH_MARSHALLING_STRING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
+ TYPE, NORMALIZER, DENORMALIZER, TABLE_INDEX) \
+ { \
+ if (auto MaybeValue = NORMALIZER(OPT_##ID, TABLE_INDEX, Args, Diags)) \
+ this->KEYPATH = static_cast<TYPE>(*MaybeValue); \
+ else \
+ this->KEYPATH = DEFAULT_VALUE; \
+ }
+
+#include "clang/Driver/Options.inc"
+#undef OPTION_WITH_MARSHALLING_STRING
+#undef OPTION_WITH_MARSHALLING_FLAG
+ return true;
+}
+
bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
ArrayRef<const char *> CommandLineArgs,
- DiagnosticsEngine &Diags) {
+ DiagnosticsEngine &Diags,
+ const char *Argv0) {
bool Success = true;
// Parse the arguments.
@@ -3528,6 +3722,11 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
Success = false;
}
+ Success &= Res.parseSimpleArgs(Args, Diags);
+
+ llvm::sys::Process::UseANSIEscapeCodes(
+ Res.DiagnosticOpts->UseANSIEscapeCodes);
+
Success &= ParseAnalyzerArgs(*Res.getAnalyzerOpts(), Args, Diags);
Success &= ParseMigratorArgs(Res.getMigratorOpts(), Args);
ParseDependencyOutputArgs(Res.getDependencyOutputOpts(), Args);
@@ -3536,9 +3735,8 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
Diags.Report(diag::err_fe_dependency_file_requires_MT);
Success = false;
}
- Success &=
- ParseDiagnosticArgs(Res.getDiagnosticOpts(), Args, &Diags,
- false /*DefaultDiagColor*/, false /*DefaultShowOpt*/);
+ Success &= ParseDiagnosticArgs(Res.getDiagnosticOpts(), Args, &Diags,
+ /*DefaultDiagColor=*/false);
ParseCommentArgs(LangOpts.CommentOpts, Args);
ParseFileSystemArgs(Res.getFileSystemOpts(), Args);
// FIXME: We shouldn't have to pass the DashX option around here
@@ -3619,6 +3817,11 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
Res.getCodeGenOpts().FineGrainedBitfieldAccesses = false;
Diags.Report(diag::warn_drv_fine_grained_bitfield_accesses_ignored);
}
+
+ // Store the command-line for using in the CodeView backend.
+ Res.getCodeGenOpts().Argv0 = Argv0;
+ Res.getCodeGenOpts().CommandLineArgs = CommandLineArgs;
+
return Success;
}
@@ -3635,6 +3838,11 @@ std::string CompilerInvocation::getModuleHash() const {
// CityHash, but this will do for now.
hash_code code = hash_value(getClangFullRepositoryVersion());
+ // Also include the serialization version, in case LLVM_APPEND_VC_REV is off
+ // and getClangFullRepositoryVersion() doesn't include git revision.
+ code = hash_combine(code, serialization::VERSION_MAJOR,
+ serialization::VERSION_MINOR);
+
// Extend the signature with the language options
#define LANGOPT(Name, Bits, Default, Description) \
code = hash_combine(code, LangOpts->Name);
@@ -3647,6 +3855,10 @@ std::string CompilerInvocation::getModuleHash() const {
for (StringRef Feature : LangOpts->ModuleFeatures)
code = hash_combine(code, Feature);
+ code = hash_combine(code, LangOpts->ObjCRuntime);
+ const auto &BCN = LangOpts->CommentOpts.BlockCommandNames;
+ code = hash_combine(code, hash_combine_range(BCN.begin(), BCN.end()));
+
// Extend the signature with the target options.
code = hash_combine(code, TargetOpts->Triple, TargetOpts->CPU,
TargetOpts->ABI);
@@ -3726,6 +3938,33 @@ std::string CompilerInvocation::getModuleHash() const {
return llvm::APInt(64, code).toString(36, /*Signed=*/false);
}
+void CompilerInvocation::generateCC1CommandLine(
+ SmallVectorImpl<const char *> &Args, StringAllocator SA) const {
+#define OPTION_WITH_MARSHALLING_FLAG(PREFIX_TYPE, NAME, ID, KIND, GROUP, \
+ ALIAS, ALIASARGS, FLAGS, PARAM, HELPTEXT, \
+ METAVAR, VALUES, SPELLING, ALWAYS_EMIT, \
+ KEYPATH, DEFAULT_VALUE, IS_POSITIVE) \
+ if ((FLAGS) & options::CC1Option && \
+ (ALWAYS_EMIT || this->KEYPATH != DEFAULT_VALUE)) \
+ Args.push_back(SPELLING);
+
+#define OPTION_WITH_MARSHALLING_STRING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
+ NORMALIZER_RET_TY, NORMALIZER, DENORMALIZER, TABLE_INDEX) \
+ if (((FLAGS) & options::CC1Option) && \
+ (ALWAYS_EMIT || this->KEYPATH != DEFAULT_VALUE)) { \
+ if (Option::KIND##Class == Option::SeparateClass) { \
+ Args.push_back(SPELLING); \
+ Args.push_back(DENORMALIZER(SA, TABLE_INDEX, this->KEYPATH)); \
+ } \
+ }
+
+#include "clang/Driver/Options.inc"
+#undef OPTION_WITH_MARSHALLING_STRING
+#undef OPTION_WITH_MARSHALLING_FLAG
+}
+
namespace clang {
IntrusiveRefCntPtr<llvm::vfs::FileSystem>
diff --git a/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp b/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
index 18c4814bbd5c..1d5a6c06b34f 100644
--- a/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
+++ b/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
@@ -93,7 +93,7 @@ std::unique_ptr<CompilerInvocation> clang::createInvocationFromCommandLine(
if (CC1Args)
*CC1Args = {CCArgs.begin(), CCArgs.end()};
auto CI = std::make_unique<CompilerInvocation>();
- if (!CompilerInvocation::CreateFromArgs(*CI, CCArgs, *Diags) &&
+ if (!CompilerInvocation::CreateFromArgs(*CI, CCArgs, *Diags, Args[0]) &&
!ShouldRecoverOnErorrs)
return nullptr;
return CI;
diff --git a/clang/lib/Frontend/DependencyFile.cpp b/clang/lib/Frontend/DependencyFile.cpp
index 4bb0167bd597..c9240f4122a7 100644
--- a/clang/lib/Frontend/DependencyFile.cpp
+++ b/clang/lib/Frontend/DependencyFile.cpp
@@ -137,16 +137,17 @@ struct DepCollectorASTListener : public ASTReaderListener {
};
} // end anonymous namespace
-void DependencyCollector::maybeAddDependency(StringRef Filename, bool FromModule,
- bool IsSystem, bool IsModuleFile,
- bool IsMissing) {
+void DependencyCollector::maybeAddDependency(StringRef Filename,
+ bool FromModule, bool IsSystem,
+ bool IsModuleFile,
+ bool IsMissing) {
if (sawDependency(Filename, FromModule, IsSystem, IsModuleFile, IsMissing))
addDependency(Filename);
}
bool DependencyCollector::addDependency(StringRef Filename) {
if (Seen.insert(Filename).second) {
- Dependencies.push_back(Filename);
+ Dependencies.push_back(std::string(Filename));
return true;
}
return false;
@@ -160,8 +161,8 @@ static bool isSpecialFilename(StringRef Filename) {
}
bool DependencyCollector::sawDependency(StringRef Filename, bool FromModule,
- bool IsSystem, bool IsModuleFile,
- bool IsMissing) {
+ bool IsSystem, bool IsModuleFile,
+ bool IsMissing) {
return !isSpecialFilename(Filename) &&
(needSystemDependencies() || !IsSystem);
}
diff --git a/clang/lib/Frontend/DependencyGraph.cpp b/clang/lib/Frontend/DependencyGraph.cpp
index ccf7a2785510..8a6e491def45 100644
--- a/clang/lib/Frontend/DependencyGraph.cpp
+++ b/clang/lib/Frontend/DependencyGraph.cpp
@@ -119,8 +119,7 @@ void DependencyGraphCallback::OutputGraphFile() {
if (FileName.startswith(SysRoot))
FileName = FileName.substr(SysRoot.size());
- OS << DOT::EscapeString(FileName)
- << "\"];\n";
+ OS << DOT::EscapeString(std::string(FileName)) << "\"];\n";
}
// Write the edges
diff --git a/clang/lib/Frontend/FrontendAction.cpp b/clang/lib/Frontend/FrontendAction.cpp
index 934d17b3c925..59a968b5c709 100644
--- a/clang/lib/Frontend/FrontendAction.cpp
+++ b/clang/lib/Frontend/FrontendAction.cpp
@@ -157,10 +157,9 @@ FrontendAction::CreateWrappedASTConsumer(CompilerInstance &CI,
bool FoundAllPlugins = true;
for (const std::string &Arg : CI.getFrontendOpts().AddPluginActions) {
bool Found = false;
- for (FrontendPluginRegistry::iterator it = FrontendPluginRegistry::begin(),
- ie = FrontendPluginRegistry::end();
- it != ie; ++it) {
- if (it->getName() == Arg)
+ for (const FrontendPluginRegistry::entry &Plugin :
+ FrontendPluginRegistry::entries()) {
+ if (Plugin.getName() == Arg)
Found = true;
}
if (!Found) {
@@ -183,25 +182,24 @@ FrontendAction::CreateWrappedASTConsumer(CompilerInstance &CI,
// or after it (in AfterConsumers)
std::vector<std::unique_ptr<ASTConsumer>> Consumers;
std::vector<std::unique_ptr<ASTConsumer>> AfterConsumers;
- for (FrontendPluginRegistry::iterator it = FrontendPluginRegistry::begin(),
- ie = FrontendPluginRegistry::end();
- it != ie; ++it) {
- std::unique_ptr<PluginASTAction> P = it->instantiate();
+ for (const FrontendPluginRegistry::entry &Plugin :
+ FrontendPluginRegistry::entries()) {
+ std::unique_ptr<PluginASTAction> P = Plugin.instantiate();
PluginASTAction::ActionType ActionType = P->getActionType();
if (ActionType == PluginASTAction::Cmdline) {
// This is O(|plugins| * |add_plugins|), but since both numbers are
// way below 50 in practice, that's ok.
- for (size_t i = 0, e = CI.getFrontendOpts().AddPluginActions.size();
- i != e; ++i) {
- if (it->getName() == CI.getFrontendOpts().AddPluginActions[i]) {
- ActionType = PluginASTAction::AddAfterMainAction;
- break;
- }
- }
+ if (llvm::any_of(CI.getFrontendOpts().AddPluginActions,
+ [&](const std::string &PluginAction) {
+ return PluginAction == Plugin.getName();
+ }))
+ ActionType = PluginASTAction::AddAfterMainAction;
}
if ((ActionType == PluginASTAction::AddBeforeMainAction ||
ActionType == PluginASTAction::AddAfterMainAction) &&
- P->ParseArgs(CI, CI.getFrontendOpts().PluginArgs[it->getName()])) {
+ P->ParseArgs(
+ CI,
+ CI.getFrontendOpts().PluginArgs[std::string(Plugin.getName())])) {
std::unique_ptr<ASTConsumer> PluginConsumer = P->CreateASTConsumer(CI, InFile);
if (ActionType == PluginASTAction::AddBeforeMainAction) {
Consumers.push_back(std::move(PluginConsumer));
@@ -363,6 +361,7 @@ static std::error_code collectModuleHeaderIncludes(
llvm::sys::path::native(UmbrellaDir.Entry->getName(), DirNative);
llvm::vfs::FileSystem &FS = FileMgr.getVirtualFileSystem();
+ SmallVector<std::pair<std::string, const FileEntry *>, 8> Headers;
for (llvm::vfs::recursive_directory_iterator Dir(FS, DirNative, EC), End;
Dir != End && !EC; Dir.increment(EC)) {
// Check whether this entry has an extension typically associated with
@@ -393,13 +392,25 @@ static std::error_code collectModuleHeaderIncludes(
++It)
llvm::sys::path::append(RelativeHeader, *It);
- // Include this header as part of the umbrella directory.
- Module->addTopHeader(*Header);
- addHeaderInclude(RelativeHeader, Includes, LangOpts, Module->IsExternC);
+ std::string RelName = RelativeHeader.c_str();
+ Headers.push_back(std::make_pair(RelName, *Header));
}
if (EC)
return EC;
+
+ // Sort header paths and make the header inclusion order deterministic
+ // across different OSs and filesystems.
+ llvm::sort(Headers.begin(), Headers.end(), [](
+ const std::pair<std::string, const FileEntry *> &LHS,
+ const std::pair<std::string, const FileEntry *> &RHS) {
+ return LHS.first < RHS.first;
+ });
+ for (auto &H : Headers) {
+ // Include this header as part of the umbrella directory.
+ Module->addTopHeader(H.second);
+ addHeaderInclude(H.first, Includes, LangOpts, Module->IsExternC);
+ }
}
// Recurse into submodules.
@@ -564,8 +575,9 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
StringRef InputFile = Input.getFile();
std::unique_ptr<ASTUnit> AST = ASTUnit::LoadFromASTFile(
- InputFile, CI.getPCHContainerReader(), ASTUnit::LoadPreprocessorOnly,
- ASTDiags, CI.getFileSystemOpts(), CI.getCodeGenOpts().DebugTypeExtRefs);
+ std::string(InputFile), CI.getPCHContainerReader(),
+ ASTUnit::LoadPreprocessorOnly, ASTDiags, CI.getFileSystemOpts(),
+ CI.getCodeGenOpts().DebugTypeExtRefs);
if (!AST)
goto failure;
@@ -592,10 +604,11 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
if (&MF != &PrimaryModule)
CI.getFrontendOpts().ModuleFiles.push_back(MF.FileName);
- ASTReader->visitTopLevelModuleMaps(PrimaryModule,
- [&](const FileEntry *FE) {
- CI.getFrontendOpts().ModuleMapFiles.push_back(FE->getName());
- });
+ ASTReader->visitTopLevelModuleMaps(
+ PrimaryModule, [&](const FileEntry *FE) {
+ CI.getFrontendOpts().ModuleMapFiles.push_back(
+ std::string(FE->getName()));
+ });
}
// Set up the input file for replay purposes.
@@ -630,8 +643,9 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
StringRef InputFile = Input.getFile();
std::unique_ptr<ASTUnit> AST = ASTUnit::LoadFromASTFile(
- InputFile, CI.getPCHContainerReader(), ASTUnit::LoadEverything, Diags,
- CI.getFileSystemOpts(), CI.getCodeGenOpts().DebugTypeExtRefs);
+ std::string(InputFile), CI.getPCHContainerReader(),
+ ASTUnit::LoadEverything, Diags, CI.getFileSystemOpts(),
+ CI.getCodeGenOpts().DebugTypeExtRefs);
if (!AST)
goto failure;
@@ -725,7 +739,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
Dir->path(), FileMgr, CI.getPCHContainerReader(),
CI.getLangOpts(), CI.getTargetOpts(), CI.getPreprocessorOpts(),
SpecificModuleCachePath)) {
- PPOpts.ImplicitPCHInclude = Dir->path();
+ PPOpts.ImplicitPCHInclude = std::string(Dir->path());
Found = true;
break;
}
@@ -783,7 +797,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
auto Kind = CurrentModule->IsSystem ? SrcMgr::C_System : SrcMgr::C_User;
auto &SourceMgr = CI.getSourceManager();
auto BufferID = SourceMgr.createFileID(std::move(Buffer), Kind);
- assert(BufferID.isValid() && "couldn't creaate module buffer ID");
+ assert(BufferID.isValid() && "couldn't create module buffer ID");
SourceMgr.setMainFileID(BufferID);
}
}
@@ -817,7 +831,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
// For preprocessed files, check if the first line specifies the original
// source file name with a linemarker.
- std::string PresumedInputFile = getCurrentFileOrBufferName();
+ std::string PresumedInputFile = std::string(getCurrentFileOrBufferName());
if (Input.isPreprocessed())
ReadOriginalFileName(CI, PresumedInputFile);
@@ -836,7 +850,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
source = createChainedIncludesSource(CI, FinalReader);
if (!source)
goto failure;
- CI.setModuleManager(static_cast<ASTReader *>(FinalReader.get()));
+ CI.setASTReader(static_cast<ASTReader *>(FinalReader.get()));
CI.getASTContext().setExternalSource(source);
} else if (CI.getLangOpts().Modules ||
!CI.getPreprocessorOpts().ImplicitPCHInclude.empty()) {
@@ -866,7 +880,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
if (!CI.getASTContext().getExternalSource())
goto failure;
}
- // If modules are enabled, create the module manager before creating
+ // If modules are enabled, create the AST reader before creating
// any builtins, so that all declarations know that they might be
// extended by an external source.
if (CI.getLangOpts().Modules || !CI.hasASTContext() ||
@@ -1077,6 +1091,9 @@ void WrapperFrontendAction::ExecuteAction() {
void WrapperFrontendAction::EndSourceFileAction() {
WrappedAction->EndSourceFileAction();
}
+bool WrapperFrontendAction::shouldEraseOutputFiles() {
+ return WrappedAction->shouldEraseOutputFiles();
+}
bool WrapperFrontendAction::usesPreprocessorOnly() const {
return WrappedAction->usesPreprocessorOnly();
diff --git a/clang/lib/Frontend/FrontendActions.cpp b/clang/lib/Frontend/FrontendActions.cpp
index 8574d0a7e813..711e7336c820 100644
--- a/clang/lib/Frontend/FrontendActions.cpp
+++ b/clang/lib/Frontend/FrontendActions.cpp
@@ -9,6 +9,7 @@
#include "clang/Frontend/FrontendActions.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/LangStandard.h"
#include "clang/Frontend/ASTConsumers.h"
#include "clang/Frontend/CompilerInstance.h"
@@ -78,7 +79,8 @@ ASTDumpAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
const FrontendOptions &Opts = CI.getFrontendOpts();
return CreateASTDumper(nullptr /*Dump to stdout.*/, Opts.ASTDumpFilter,
Opts.ASTDumpDecls, Opts.ASTDumpAll,
- Opts.ASTDumpLookups, Opts.ASTDumpFormat);
+ Opts.ASTDumpLookups, Opts.ASTDumpDeclTypes,
+ Opts.ASTDumpFormat);
}
std::unique_ptr<ASTConsumer>
@@ -115,7 +117,7 @@ GeneratePCHAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
CI.getPreprocessorOpts().AllowPCHWithCompilerErrors,
FrontendOpts.IncludeTimestamps, +CI.getLangOpts().CacheGeneratedPCH));
Consumers.push_back(CI.getPCHContainerWriter().CreatePCHContainerGenerator(
- CI, InFile, OutputFile, std::move(OS), Buffer));
+ CI, std::string(InFile), OutputFile, std::move(OS), Buffer));
return std::make_unique<MultiplexConsumer>(std::move(Consumers));
}
@@ -181,7 +183,7 @@ GenerateModuleAction::CreateASTConsumer(CompilerInstance &CI,
/*ShouldCacheASTInMemory=*/
+CI.getFrontendOpts().BuildingImplicitModule));
Consumers.push_back(CI.getPCHContainerWriter().CreatePCHContainerGenerator(
- CI, InFile, OutputFile, std::move(OS), Buffer));
+ CI, std::string(InFile), OutputFile, std::move(OS), Buffer));
return std::make_unique<MultiplexConsumer>(std::move(Consumers));
}
@@ -266,7 +268,7 @@ bool GenerateHeaderModuleAction::PrepareToExecuteAction(
HeaderContents += "#include \"";
HeaderContents += FIF.getFile();
HeaderContents += "\"\n";
- ModuleHeaders.push_back(FIF.getFile());
+ ModuleHeaders.push_back(std::string(FIF.getFile()));
}
Buffer = llvm::MemoryBuffer::getMemBufferCopy(
HeaderContents, Module::getModuleInputBufferName());
@@ -295,7 +297,7 @@ bool GenerateHeaderModuleAction::BeginSourceFileAction(
<< Name;
continue;
}
- Headers.push_back({Name, &FE->getFileEntry()});
+ Headers.push_back({std::string(Name), &FE->getFileEntry()});
}
HS.getModuleMap().createHeaderModule(CI.getLangOpts().CurrentModule, Headers);
@@ -429,6 +431,14 @@ private:
return "ConstraintNormalization";
case CodeSynthesisContext::ParameterMappingSubstitution:
return "ParameterMappingSubstitution";
+ case CodeSynthesisContext::RequirementInstantiation:
+ return "RequirementInstantiation";
+ case CodeSynthesisContext::NestedRequirementConstraintsCheck:
+ return "NestedRequirementConstraintsCheck";
+ case CodeSynthesisContext::InitializingStructuredBinding:
+ return "InitializingStructuredBinding";
+ case CodeSynthesisContext::MarkingClassDllexported:
+ return "MarkingClassDllexported";
}
return "";
}
diff --git a/clang/lib/Frontend/FrontendOptions.cpp b/clang/lib/Frontend/FrontendOptions.cpp
index 5c1fbf889c23..9f080db733f1 100644
--- a/clang/lib/Frontend/FrontendOptions.cpp
+++ b/clang/lib/Frontend/FrontendOptions.cpp
@@ -25,11 +25,12 @@ InputKind FrontendOptions::getInputKindForExtension(StringRef Extension) {
.Cases("mm", "M", Language::ObjCXX)
.Case("mii", InputKind(Language::ObjCXX).getPreprocessed())
.Cases("C", "cc", "cp", Language::CXX)
- .Cases("cpp", "CPP", "c++", "cxx", "hpp", Language::CXX)
+ .Cases("cpp", "CPP", "c++", "cxx", "hpp", "hxx", Language::CXX)
.Case("cppm", Language::CXX)
.Case("iim", InputKind(Language::CXX).getPreprocessed())
.Case("cl", Language::OpenCL)
.Case("cu", Language::CUDA)
+ .Case("hip", Language::HIP)
.Cases("ll", "bc", Language::LLVM_IR)
.Default(Language::Unknown);
}
diff --git a/clang/lib/Frontend/HeaderIncludeGen.cpp b/clang/lib/Frontend/HeaderIncludeGen.cpp
index 5f91157816b0..97fac8a26fae 100644
--- a/clang/lib/Frontend/HeaderIncludeGen.cpp
+++ b/clang/lib/Frontend/HeaderIncludeGen.cpp
@@ -127,8 +127,8 @@ void clang::AttachHeaderIncludeGen(Preprocessor &PP,
void HeaderIncludesCallback::FileChanged(SourceLocation Loc,
FileChangeReason Reason,
- SrcMgr::CharacteristicKind NewFileType,
- FileID PrevFID) {
+ SrcMgr::CharacteristicKind NewFileType,
+ FileID PrevFID) {
// Unless we are exiting a #include, make sure to skip ahead to the line the
// #include directive was at.
PresumedLoc UserLoc = SM.getPresumedLoc(Loc);
@@ -167,6 +167,9 @@ void HeaderIncludesCallback::FileChanged(SourceLocation Loc,
else if (!DepOpts.ShowIncludesPretendHeader.empty())
++IncludeDepth; // Pretend inclusion by ShowIncludesPretendHeader.
+ if (!DepOpts.IncludeSystemHeaders && isSystem(NewFileType))
+ ShowHeader = false;
+
// Dump the header include information we are past the predefines buffer or
// are showing all headers and this isn't the magic implicit <command line>
// header.
diff --git a/clang/lib/Frontend/InitHeaderSearch.cpp b/clang/lib/Frontend/InitHeaderSearch.cpp
index 5d877ee9c0d7..16f1f1670e8d 100644
--- a/clang/lib/Frontend/InitHeaderSearch.cpp
+++ b/clang/lib/Frontend/InitHeaderSearch.cpp
@@ -47,11 +47,9 @@ class InitHeaderSearch {
bool HasSysroot;
public:
-
InitHeaderSearch(HeaderSearch &HS, bool verbose, StringRef sysroot)
- : Headers(HS), Verbose(verbose), IncludeSysroot(sysroot),
- HasSysroot(!(sysroot.empty() || sysroot == "/")) {
- }
+ : Headers(HS), Verbose(verbose), IncludeSysroot(std::string(sysroot)),
+ HasSysroot(!(sysroot.empty() || sysroot == "/")) {}
/// AddPath - Add the specified path to the specified group list, prefixing
/// the sysroot if used.
@@ -67,7 +65,7 @@ public:
/// AddSystemHeaderPrefix - Add the specified prefix to the system header
/// prefix list.
void AddSystemHeaderPrefix(StringRef Prefix, bool IsSystemHeader) {
- SystemHeaderPrefixes.emplace_back(Prefix, IsSystemHeader);
+ SystemHeaderPrefixes.emplace_back(std::string(Prefix), IsSystemHeader);
}
/// AddGnuCPlusPlusIncludePaths - Add the necessary paths to support a gnu
@@ -355,7 +353,7 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
// files is <SDK_DIR>/host_tools/lib/clang
SmallString<128> P = StringRef(HSOpts.ResourceDir);
llvm::sys::path::append(P, "../../..");
- BaseSDKPath = P.str();
+ BaseSDKPath = std::string(P.str());
}
}
AddPath(BaseSDKPath + "/target/include", System, false);
@@ -383,6 +381,7 @@ void InitHeaderSearch::AddDefaultCPlusPlusIncludePaths(
case llvm::Triple::Linux:
case llvm::Triple::Hurd:
case llvm::Triple::Solaris:
+ case llvm::Triple::AIX:
llvm_unreachable("Include management is handled in the driver.");
break;
case llvm::Triple::Win32:
@@ -426,6 +425,7 @@ void InitHeaderSearch::AddDefaultIncludePaths(const LangOptions &Lang,
case llvm::Triple::Hurd:
case llvm::Triple::Solaris:
case llvm::Triple::WASI:
+ case llvm::Triple::AIX:
return;
case llvm::Triple::Win32:
@@ -435,8 +435,7 @@ void InitHeaderSearch::AddDefaultIncludePaths(const LangOptions &Lang,
break;
case llvm::Triple::UnknownOS:
- if (triple.getArch() == llvm::Triple::wasm32 ||
- triple.getArch() == llvm::Triple::wasm64)
+ if (triple.isWasm())
return;
break;
}
diff --git a/clang/lib/Frontend/InitPreprocessor.cpp b/clang/lib/Frontend/InitPreprocessor.cpp
index 2c7e3a56c043..6eef1e2376f6 100644
--- a/clang/lib/Frontend/InitPreprocessor.cpp
+++ b/clang/lib/Frontend/InitPreprocessor.cpp
@@ -80,9 +80,9 @@ static void AddImplicitIncludeMacros(MacroBuilder &Builder, StringRef File) {
static void AddImplicitIncludePCH(MacroBuilder &Builder, Preprocessor &PP,
const PCHContainerReader &PCHContainerRdr,
StringRef ImplicitIncludePCH) {
- std::string OriginalFile =
- ASTReader::getOriginalSourceFile(ImplicitIncludePCH, PP.getFileManager(),
- PCHContainerRdr, PP.getDiagnostics());
+ std::string OriginalFile = ASTReader::getOriginalSourceFile(
+ std::string(ImplicitIncludePCH), PP.getFileManager(), PCHContainerRdr,
+ PP.getDiagnostics());
if (OriginalFile.empty())
return;
@@ -344,13 +344,27 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
const LangOptions &LangOpts,
const FrontendOptions &FEOpts,
MacroBuilder &Builder) {
+ // C++ [cpp.predefined]p1:
+ // The following macro names shall be defined by the implementation:
+
+ // -- __STDC__
+ // [C++] Whether __STDC__ is predefined and if so, what its value is,
+ // are implementation-defined.
+ // (Removed in C++20.)
if (!LangOpts.MSVCCompat && !LangOpts.TraditionalCPP)
Builder.defineMacro("__STDC__");
+ // -- __STDC_HOSTED__
+ // The integer literal 1 if the implementation is a hosted
+ // implementation or the integer literal 0 if it is not.
if (LangOpts.Freestanding)
Builder.defineMacro("__STDC_HOSTED__", "0");
else
Builder.defineMacro("__STDC_HOSTED__");
+ // -- __STDC_VERSION__
+ // [C++] Whether __STDC_VERSION__ is predefined and if so, what its
+ // value is, are implementation-defined.
+ // (Removed in C++20.)
if (!LangOpts.CPlusPlus) {
if (LangOpts.C17)
Builder.defineMacro("__STDC_VERSION__", "201710L");
@@ -361,33 +375,29 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
else if (!LangOpts.GNUMode && LangOpts.Digraphs)
Builder.defineMacro("__STDC_VERSION__", "199409L");
} else {
- // FIXME: Use correct value for C++20.
- if (LangOpts.CPlusPlus2a)
- Builder.defineMacro("__cplusplus", "201707L");
- // C++17 [cpp.predefined]p1:
- // The name __cplusplus is defined to the value 201703L when compiling a
- // C++ translation unit.
+ // -- __cplusplus
+ // [C++20] The integer literal 202002L.
+ if (LangOpts.CPlusPlus20)
+ Builder.defineMacro("__cplusplus", "202002L");
+ // [C++17] The integer literal 201703L.
else if (LangOpts.CPlusPlus17)
Builder.defineMacro("__cplusplus", "201703L");
- // C++1y [cpp.predefined]p1:
- // The name __cplusplus is defined to the value 201402L when compiling a
- // C++ translation unit.
+ // [C++14] The name __cplusplus is defined to the value 201402L when
+ // compiling a C++ translation unit.
else if (LangOpts.CPlusPlus14)
Builder.defineMacro("__cplusplus", "201402L");
- // C++11 [cpp.predefined]p1:
- // The name __cplusplus is defined to the value 201103L when compiling a
- // C++ translation unit.
+ // [C++11] The name __cplusplus is defined to the value 201103L when
+ // compiling a C++ translation unit.
else if (LangOpts.CPlusPlus11)
Builder.defineMacro("__cplusplus", "201103L");
- // C++03 [cpp.predefined]p1:
- // The name __cplusplus is defined to the value 199711L when compiling a
- // C++ translation unit.
+ // [C++03] The name __cplusplus is defined to the value 199711L when
+ // compiling a C++ translation unit.
else
Builder.defineMacro("__cplusplus", "199711L");
- // C++1z [cpp.predefined]p1:
- // An integer literal of type std::size_t whose value is the alignment
- // guaranteed by a call to operator new(std::size_t)
+ // -- __STDCPP_DEFAULT_NEW_ALIGNMENT__
+ // [C++17] An integer literal of type std::size_t whose value is the
+ // alignment guaranteed by a call to operator new(std::size_t)
//
// We provide this in all language modes, since it seems generally useful.
Builder.defineMacro("__STDCPP_DEFAULT_NEW_ALIGNMENT__",
@@ -450,6 +460,13 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
if (LangOpts.FastRelaxedMath)
Builder.defineMacro("__FAST_RELAXED_MATH__");
}
+
+ if (LangOpts.SYCL) {
+ // SYCL Version is set to a value when building SYCL applications
+ if (LangOpts.SYCLVersion == 2017)
+ Builder.defineMacro("CL_SYCL_LANGUAGE_VERSION", "121");
+ }
+
// Not "standard" per se, but available even with the -undef flag.
if (LangOpts.AsmPreprocessor)
Builder.defineMacro("__ASSEMBLER__");
@@ -481,7 +498,7 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_user_defined_literals", "200809L");
Builder.defineMacro("__cpp_lambdas", "200907L");
Builder.defineMacro("__cpp_constexpr",
- LangOpts.CPlusPlus2a ? "201907L" :
+ LangOpts.CPlusPlus20 ? "201907L" :
LangOpts.CPlusPlus17 ? "201603L" :
LangOpts.CPlusPlus14 ? "201304L" : "200704");
Builder.defineMacro("__cpp_constexpr_in_decltype", "201711L");
@@ -508,9 +525,9 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_binary_literals", "201304L");
Builder.defineMacro("__cpp_digit_separators", "201309L");
Builder.defineMacro("__cpp_init_captures",
- LangOpts.CPlusPlus2a ? "201803L" : "201304L");
+ LangOpts.CPlusPlus20 ? "201803L" : "201304L");
Builder.defineMacro("__cpp_generic_lambdas",
- LangOpts.CPlusPlus2a ? "201707L" : "201304L");
+ LangOpts.CPlusPlus20 ? "201707L" : "201304L");
Builder.defineMacro("__cpp_decltype_auto", "201304L");
Builder.defineMacro("__cpp_return_type_deduction", "201304L");
Builder.defineMacro("__cpp_aggregate_nsdmi", "201304L");
@@ -546,9 +563,9 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_template_template_args", "201611L");
// C++20 features.
- if (LangOpts.CPlusPlus2a) {
+ if (LangOpts.CPlusPlus20) {
//Builder.defineMacro("__cpp_aggregate_paren_init", "201902L");
- //Builder.defineMacro("__cpp_concepts", "201907L");
+ Builder.defineMacro("__cpp_concepts", "201907L");
Builder.defineMacro("__cpp_conditional_explicit", "201806L");
//Builder.defineMacro("__cpp_consteval", "201811L");
Builder.defineMacro("__cpp_constexpr_dynamic_alloc", "201907L");
@@ -564,8 +581,6 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_impl_destroying_delete", "201806L");
// TS features.
- if (LangOpts.ConceptsTS)
- Builder.defineMacro("__cpp_experimental_concepts", "1L");
if (LangOpts.Coroutines)
Builder.defineMacro("__cpp_coroutines", "201703L");
}
@@ -1061,12 +1076,12 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
case 40:
Builder.defineMacro("_OPENMP", "201307");
break;
- case 50:
- Builder.defineMacro("_OPENMP", "201811");
+ case 45:
+ Builder.defineMacro("_OPENMP", "201511");
break;
default:
- // Default version is OpenMP 4.5
- Builder.defineMacro("_OPENMP", "201511");
+ // Default version is OpenMP 5.0
+ Builder.defineMacro("_OPENMP", "201811");
break;
}
}
diff --git a/clang/lib/Frontend/InterfaceStubFunctionsConsumer.cpp b/clang/lib/Frontend/InterfaceStubFunctionsConsumer.cpp
index 7241081d6cc0..b7c1e693413b 100644
--- a/clang/lib/Frontend/InterfaceStubFunctionsConsumer.cpp
+++ b/clang/lib/Frontend/InterfaceStubFunctionsConsumer.cpp
@@ -8,6 +8,7 @@
#include "clang/AST/Mangle.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendActions.h"
#include "clang/Sema/TemplateInstCallback.h"
@@ -289,7 +290,7 @@ public:
const ASTContext &context, StringRef Format,
raw_ostream &OS) -> void {
OS << "--- !" << Format << "\n";
- OS << "IfsVersion: 1.0\n";
+ OS << "IfsVersion: 2.0\n";
OS << "Triple: " << T.str() << "\n";
OS << "ObjectFileFormat: "
<< "ELF"
@@ -298,11 +299,11 @@ public:
for (const auto &E : Symbols) {
const MangledSymbol &Symbol = E.second;
for (auto Name : Symbol.Names) {
- OS << " \""
+ OS << " - { Name: \""
<< (Symbol.ParentName.empty() || Instance.getLangOpts().CPlusPlus
? ""
: (Symbol.ParentName + "."))
- << Name << "\" : { Type: ";
+ << Name << "\", Type: ";
switch (Symbol.Type) {
default:
llvm_unreachable(
@@ -329,15 +330,15 @@ public:
OS.flush();
};
- assert(Format == "experimental-ifs-v1" && "Unexpected IFS Format.");
+ assert(Format == "experimental-ifs-v2" && "Unexpected IFS Format.");
writeIfsV1(Instance.getTarget().getTriple(), Symbols, context, Format, *OS);
}
};
} // namespace
std::unique_ptr<ASTConsumer>
-GenerateInterfaceIfsExpV1Action::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
+GenerateInterfaceStubsAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
return std::make_unique<InterfaceStubFunctionsConsumer>(
- CI, InFile, "experimental-ifs-v1");
+ CI, InFile, "experimental-ifs-v2");
}
diff --git a/clang/lib/Frontend/LogDiagnosticPrinter.cpp b/clang/lib/Frontend/LogDiagnosticPrinter.cpp
index 4bac17553999..df8b23691a7d 100644
--- a/clang/lib/Frontend/LogDiagnosticPrinter.cpp
+++ b/clang/lib/Frontend/LogDiagnosticPrinter.cpp
@@ -120,7 +120,7 @@ void LogDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
if (FID.isValid()) {
const FileEntry *FE = SM.getFileEntryForID(FID);
if (FE && FE->isValid())
- MainFilename = FE->getName();
+ MainFilename = std::string(FE->getName());
}
}
@@ -129,12 +129,13 @@ void LogDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
DE.DiagnosticID = Info.getID();
DE.DiagnosticLevel = Level;
- DE.WarningOption = DiagnosticIDs::getWarningOptionForDiag(DE.DiagnosticID);
+ DE.WarningOption =
+ std::string(DiagnosticIDs::getWarningOptionForDiag(DE.DiagnosticID));
// Format the message.
SmallString<100> MessageStr;
Info.FormatDiagnostic(MessageStr);
- DE.Message = MessageStr.str();
+ DE.Message = std::string(MessageStr.str());
// Set the location information.
DE.Filename = "";
@@ -149,7 +150,7 @@ void LogDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
if (FID.isValid()) {
const FileEntry *FE = SM.getFileEntryForID(FID);
if (FE && FE->isValid())
- DE.Filename = FE->getName();
+ DE.Filename = std::string(FE->getName());
}
} else {
DE.Filename = PLoc.getFilename();
diff --git a/clang/lib/Frontend/ModuleDependencyCollector.cpp b/clang/lib/Frontend/ModuleDependencyCollector.cpp
index fd22433d31bd..b54eb97d6c47 100644
--- a/clang/lib/Frontend/ModuleDependencyCollector.cpp
+++ b/clang/lib/Frontend/ModuleDependencyCollector.cpp
@@ -170,7 +170,7 @@ bool ModuleDependencyCollector::getRealPath(StringRef SrcPath,
if (DirWithSymLink == SymLinkMap.end()) {
if (llvm::sys::fs::real_path(Dir, RealPath))
return false;
- SymLinkMap[Dir] = RealPath.str();
+ SymLinkMap[Dir] = std::string(RealPath.str());
} else {
RealPath = DirWithSymLink->second;
}
diff --git a/clang/lib/Frontend/PrecompiledPreamble.cpp b/clang/lib/Frontend/PrecompiledPreamble.cpp
index 0e5a8e504dc5..6cdfc595dcae 100644
--- a/clang/lib/Frontend/PrecompiledPreamble.cpp
+++ b/clang/lib/Frontend/PrecompiledPreamble.cpp
@@ -12,21 +12,26 @@
#include "clang/Frontend/PrecompiledPreamble.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/LangStandard.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/FrontendActions.h"
#include "clang/Frontend/FrontendOptions.h"
+#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Serialization/ASTWriter.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/VirtualFileSystem.h"
#include <limits>
@@ -73,6 +78,68 @@ public:
bool needSystemDependencies() override { return true; }
};
+// Collects files whose existence would invalidate the preamble.
+// Collecting *all* of these would make validating it too slow though, so we
+// just find all the candidates for 'file not found' diagnostics.
+//
+// A caveat that may be significant for generated files: we'll omit files under
+// search path entries whose roots don't exist when the preamble is built.
+// These are pruned by InitHeaderSearch and so we don't see the search path.
+// It would be nice to include them but we don't want to duplicate all the rest
+// of the InitHeaderSearch logic to reconstruct them.
+class MissingFileCollector : public PPCallbacks {
+ llvm::StringSet<> &Out;
+ const HeaderSearch &Search;
+ const SourceManager &SM;
+
+public:
+ MissingFileCollector(llvm::StringSet<> &Out, const HeaderSearch &Search,
+ const SourceManager &SM)
+ : Out(Out), Search(Search), SM(SM) {}
+
+ void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
+ StringRef FileName, bool IsAngled,
+ CharSourceRange FilenameRange, const FileEntry *File,
+ StringRef SearchPath, StringRef RelativePath,
+ const Module *Imported,
+ SrcMgr::CharacteristicKind FileType) override {
+ // File is null if it wasn't found.
+ // (We have some false negatives if PP recovered e.g. <foo> -> "foo")
+ if (File != nullptr)
+ return;
+
+ // If it's a rare absolute include, we know the full path already.
+ if (llvm::sys::path::is_absolute(FileName)) {
+ Out.insert(FileName);
+ return;
+ }
+
+ // Reconstruct the filenames that would satisfy this directive...
+ llvm::SmallString<256> Buf;
+ auto NotFoundRelativeTo = [&](const DirectoryEntry *DE) {
+ Buf = DE->getName();
+ llvm::sys::path::append(Buf, FileName);
+ llvm::sys::path::remove_dots(Buf, /*remove_dot_dot=*/true);
+ Out.insert(Buf);
+ };
+ // ...relative to the including file.
+ if (!IsAngled) {
+ if (const FileEntry *IncludingFile =
+ SM.getFileEntryForID(SM.getFileID(IncludeTok.getLocation())))
+ if (IncludingFile->getDir())
+ NotFoundRelativeTo(IncludingFile->getDir());
+ }
+ // ...relative to the search paths.
+ for (const auto &Dir : llvm::make_range(
+ IsAngled ? Search.angled_dir_begin() : Search.search_dir_begin(),
+ Search.search_dir_end())) {
+ // No support for frameworks or header maps yet.
+ if (Dir.isNormalDir())
+ NotFoundRelativeTo(Dir.getDir());
+ }
+ }
+};
+
/// Keeps a track of files to be deleted in destructor.
class TemporaryFiles {
public:
@@ -188,6 +255,10 @@ public:
Action.setEmittedPreamblePCH(getWriter());
}
+ bool shouldSkipFunctionBody(Decl *D) override {
+ return Action.Callbacks.shouldSkipFunctionBody(D);
+ }
+
private:
PrecompilePreambleAction &Action;
std::unique_ptr<raw_ostream> Out;
@@ -227,7 +298,7 @@ template <class T> bool moveOnNoError(llvm::ErrorOr<T> Val, T &Output) {
} // namespace
PreambleBounds clang::ComputePreambleBounds(const LangOptions &LangOpts,
- llvm::MemoryBuffer *Buffer,
+ const llvm::MemoryBuffer *Buffer,
unsigned MaxLines) {
return Lexer::ComputePreamble(Buffer->getBuffer(), LangOpts, MaxLines);
}
@@ -269,8 +340,9 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
// Tell the compiler invocation to generate a temporary precompiled header.
FrontendOpts.ProgramAction = frontend::GeneratePCH;
- FrontendOpts.OutputFile = StoreInMemory ? getInMemoryPreamblePath()
- : Storage.asFile().getFilePath();
+ FrontendOpts.OutputFile =
+ std::string(StoreInMemory ? getInMemoryPreamblePath()
+ : Storage.asFile().getFilePath());
PreprocessorOpts.PrecompiledPreambleBytes.first = 0;
PreprocessorOpts.PrecompiledPreambleBytes.second = false;
// Inform preprocessor to record conditional stack when building the preamble.
@@ -351,6 +423,11 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
Clang->getPreprocessor().addPPCallbacks(std::move(DelegatedPPCallbacks));
if (auto CommentHandler = Callbacks.getCommentHandler())
Clang->getPreprocessor().addCommentHandler(CommentHandler);
+ llvm::StringSet<> MissingFiles;
+ Clang->getPreprocessor().addPPCallbacks(
+ std::make_unique<MissingFileCollector>(
+ MissingFiles, Clang->getPreprocessor().getHeaderSearchInfo(),
+ Clang->getSourceManager()));
if (llvm::Error Err = Act->Execute())
return errorToErrorCode(std::move(Err));
@@ -385,9 +462,9 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
}
}
- return PrecompiledPreamble(std::move(Storage), std::move(PreambleBytes),
- PreambleEndsAtStartOfLine,
- std::move(FilesInPreamble));
+ return PrecompiledPreamble(
+ std::move(Storage), std::move(PreambleBytes), PreambleEndsAtStartOfLine,
+ std::move(FilesInPreamble), std::move(MissingFiles));
}
PreambleBounds PrecompiledPreamble::getBounds() const {
@@ -444,6 +521,7 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
// First, make a record of those files that have been overridden via
// remapping or unsaved_files.
std::map<llvm::sys::fs::UniqueID, PreambleFileHash> OverriddenFiles;
+ llvm::StringSet<> OverriddenAbsPaths; // Either by buffers or files.
for (const auto &R : PreprocessorOpts.RemappedFiles) {
llvm::vfs::Status Status;
if (!moveOnNoError(VFS->status(R.second), Status)) {
@@ -451,6 +529,10 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
// horrible happened.
return false;
}
+ // If a mapped file was previously missing, then it has changed.
+ llvm::SmallString<128> MappedPath(R.first);
+ if (!VFS->makeAbsolute(MappedPath))
+ OverriddenAbsPaths.insert(MappedPath);
OverriddenFiles[Status.getUniqueID()] = PreambleFileHash::createForFile(
Status.getSize(), llvm::sys::toTimeT(Status.getLastModificationTime()));
@@ -466,6 +548,10 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
OverriddenFiles[Status.getUniqueID()] = PreambleHash;
else
OverridenFileBuffers[RB.first] = PreambleHash;
+
+ llvm::SmallString<128> MappedPath(RB.first);
+ if (!VFS->makeAbsolute(MappedPath))
+ OverriddenAbsPaths.insert(MappedPath);
}
// Check whether anything has changed.
@@ -503,6 +589,17 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
F.second.ModTime)
return false;
}
+ for (const auto &F : MissingFiles) {
+ // A missing file may be "provided" by an override buffer or file.
+ if (OverriddenAbsPaths.count(F.getKey()))
+ return false;
+ // If a file previously recorded as missing exists as a regular file, then
+ // consider the preamble out-of-date.
+ if (auto Status = VFS->status(F.getKey())) {
+ if (Status->isRegularFile())
+ return false;
+ }
+ }
return true;
}
@@ -523,8 +620,10 @@ void PrecompiledPreamble::OverridePreamble(
PrecompiledPreamble::PrecompiledPreamble(
PCHStorage Storage, std::vector<char> PreambleBytes,
bool PreambleEndsAtStartOfLine,
- llvm::StringMap<PreambleFileHash> FilesInPreamble)
+ llvm::StringMap<PreambleFileHash> FilesInPreamble,
+ llvm::StringSet<> MissingFiles)
: Storage(std::move(Storage)), FilesInPreamble(std::move(FilesInPreamble)),
+ MissingFiles(std::move(MissingFiles)),
PreambleBytes(std::move(PreambleBytes)),
PreambleEndsAtStartOfLine(PreambleEndsAtStartOfLine) {
assert(this->Storage.getKind() != PCHStorage::Kind::Empty);
@@ -548,7 +647,7 @@ PrecompiledPreamble::TempPCHFile::CreateNewPreamblePCHFile() {
return EC;
// We only needed to make sure the file exists, close the file right away.
llvm::sys::Process::SafelyCloseFileDescriptor(FD);
- return TempPCHFile(std::move(File).str());
+ return TempPCHFile(std::string(std::move(File).str()));
}
PrecompiledPreamble::TempPCHFile::TempPCHFile(std::string FilePath)
@@ -715,7 +814,7 @@ void PrecompiledPreamble::setupPreambleStorage(
IntrusiveRefCntPtr<llvm::vfs::FileSystem> &VFS) {
if (Storage.getKind() == PCHStorage::Kind::TempFile) {
const TempPCHFile &PCHFile = Storage.asFile();
- PreprocessorOpts.ImplicitPCHInclude = PCHFile.getFilePath();
+ PreprocessorOpts.ImplicitPCHInclude = std::string(PCHFile.getFilePath());
// Make sure we can access the PCH file even if we're using a VFS
IntrusiveRefCntPtr<llvm::vfs::FileSystem> RealFS =
@@ -739,7 +838,7 @@ void PrecompiledPreamble::setupPreambleStorage(
// For in-memory preamble, we have to provide a VFS overlay that makes it
// accessible.
StringRef PCHPath = getInMemoryPreamblePath();
- PreprocessorOpts.ImplicitPCHInclude = PCHPath;
+ PreprocessorOpts.ImplicitPCHInclude = std::string(PCHPath);
auto Buf = llvm::MemoryBuffer::getMemBuffer(Storage.asMemory().Data);
VFS = createVFSOverlayForPreamblePCH(PCHPath, std::move(Buf), VFS);
diff --git a/clang/lib/Frontend/Rewrite/FixItRewriter.cpp b/clang/lib/Frontend/Rewrite/FixItRewriter.cpp
index 0217b3385a51..4fe64b96cb15 100644
--- a/clang/lib/Frontend/Rewrite/FixItRewriter.cpp
+++ b/clang/lib/Frontend/Rewrite/FixItRewriter.cpp
@@ -95,7 +95,8 @@ bool FixItRewriter::WriteFixedFiles(
for (iterator I = buffer_begin(), E = buffer_end(); I != E; ++I) {
const FileEntry *Entry = Rewrite.getSourceMgr().getFileEntryForID(I->first);
int fd;
- std::string Filename = FixItOpts->RewriteFilename(Entry->getName(), fd);
+ std::string Filename =
+ FixItOpts->RewriteFilename(std::string(Entry->getName()), fd);
std::error_code EC;
std::unique_ptr<llvm::raw_fd_ostream> OS;
if (fd != -1) {
@@ -113,7 +114,8 @@ bool FixItRewriter::WriteFixedFiles(
OS->flush();
if (RewrittenFiles)
- RewrittenFiles->push_back(std::make_pair(Entry->getName(), Filename));
+ RewrittenFiles->push_back(
+ std::make_pair(std::string(Entry->getName()), Filename));
}
return false;
diff --git a/clang/lib/Frontend/Rewrite/FrontendActions.cpp b/clang/lib/Frontend/Rewrite/FrontendActions.cpp
index aaffbde3309b..5351ff0593ed 100644
--- a/clang/lib/Frontend/Rewrite/FrontendActions.cpp
+++ b/clang/lib/Frontend/Rewrite/FrontendActions.cpp
@@ -77,7 +77,7 @@ public:
SmallString<128> Path(Filename);
llvm::sys::path::replace_extension(Path,
NewSuffix + llvm::sys::path::extension(Path));
- return Path.str();
+ return std::string(Path.str());
}
};
@@ -88,7 +88,7 @@ public:
llvm::sys::fs::createTemporaryFile(llvm::sys::path::filename(Filename),
llvm::sys::path::extension(Filename).drop_front(), fd,
Path);
- return Path.str();
+ return std::string(Path.str());
}
};
} // end anonymous namespace
@@ -166,11 +166,11 @@ RewriteObjCAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
CI.createDefaultOutputFile(false, InFile, "cpp")) {
if (CI.getLangOpts().ObjCRuntime.isNonFragile())
return CreateModernObjCRewriter(
- InFile, std::move(OS), CI.getDiagnostics(), CI.getLangOpts(),
- CI.getDiagnosticOpts().NoRewriteMacros,
+ std::string(InFile), std::move(OS), CI.getDiagnostics(),
+ CI.getLangOpts(), CI.getDiagnosticOpts().NoRewriteMacros,
(CI.getCodeGenOpts().getDebugInfo() != codegenoptions::NoDebugInfo));
- return CreateObjCRewriter(InFile, std::move(OS), CI.getDiagnostics(),
- CI.getLangOpts(),
+ return CreateObjCRewriter(std::string(InFile), std::move(OS),
+ CI.getDiagnostics(), CI.getLangOpts(),
CI.getDiagnosticOpts().NoRewriteMacros);
}
return nullptr;
diff --git a/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp b/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
index 831f95e8c6be..e122b10e76d3 100644
--- a/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
+++ b/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
@@ -860,7 +860,7 @@ RewriteModernObjC::getIvarAccessString(ObjCIvarDecl *D) {
// ivar in class extensions requires special treatment.
if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(CDecl))
CDecl = CatDecl->getClassInterface();
- std::string RecName = CDecl->getName();
+ std::string RecName = std::string(CDecl->getName());
RecName += "_IMPL";
RecordDecl *RD =
RecordDecl::Create(*Context, TTK_Struct, TUDecl, SourceLocation(),
@@ -941,9 +941,10 @@ void RewriteModernObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
unsigned Attributes = PD->getPropertyAttributes();
if (mustSynthesizeSetterGetterMethod(IMD, PD, true /*getter*/)) {
- bool GenGetProperty = !(Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) &&
- (Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
- ObjCPropertyDecl::OBJC_PR_copy));
+ bool GenGetProperty =
+ !(Attributes & ObjCPropertyAttribute::kind_nonatomic) &&
+ (Attributes & (ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_copy));
std::string Getr;
if (GenGetProperty && !objcGetPropertyDefined) {
objcGetPropertyDefined = true;
@@ -1002,8 +1003,8 @@ void RewriteModernObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
// Generate the 'setter' function.
std::string Setr;
- bool GenSetProperty = Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
- ObjCPropertyDecl::OBJC_PR_copy);
+ bool GenSetProperty = Attributes & (ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_copy);
if (GenSetProperty && !objcSetPropertyDefined) {
objcSetPropertyDefined = true;
// FIXME. Is this attribute correct in all cases?
@@ -1022,11 +1023,11 @@ void RewriteModernObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
Setr += ", (id)";
Setr += PD->getName();
Setr += ", ";
- if (Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ if (Attributes & ObjCPropertyAttribute::kind_nonatomic)
Setr += "0, ";
else
Setr += "1, ";
- if (Attributes & ObjCPropertyDecl::OBJC_PR_copy)
+ if (Attributes & ObjCPropertyAttribute::kind_copy)
Setr += "1)";
else
Setr += "0)";
@@ -2586,9 +2587,10 @@ Stmt *RewriteModernObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) {
strType, nullptr, SC_Static);
DeclRefExpr *DRE = new (Context)
DeclRefExpr(*Context, NewVD, false, strType, VK_LValue, SourceLocation());
- Expr *Unop = new (Context)
- UnaryOperator(DRE, UO_AddrOf, Context->getPointerType(DRE->getType()),
- VK_RValue, OK_Ordinary, SourceLocation(), false);
+ Expr *Unop = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), DRE, UO_AddrOf,
+ Context->getPointerType(DRE->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
// cast to NSConstantString *
CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Exp->getType(),
CK_CPointerToObjCPointerCast, Unop);
@@ -2688,7 +2690,7 @@ Stmt *RewriteModernObjC::RewriteObjCBoxedExpr(ObjCBoxedExpr *Exp) {
// Don't forget the parens to enforce the proper binding.
ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
- const FunctionType *FT = msgSendType->getAs<FunctionType>();
+ auto *FT = msgSendType->castAs<FunctionType>();
CallExpr *CE = CallExpr::Create(*Context, PE, MsgExprs, FT->getReturnType(),
VK_RValue, EndLoc);
ReplaceStmt(Exp, CE);
@@ -3282,10 +3284,10 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
// we need the cast below. For example:
// (struct __rw_objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER"))
//
- SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
- Context->getPointerType(SuperRep->getType()),
- VK_RValue, OK_Ordinary,
- SourceLocation(), false);
+ SuperRep = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
SuperRep = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(superType),
CK_BitCast, SuperRep);
@@ -3300,10 +3302,10 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
superType, VK_LValue,
ILE, false);
// struct __rw_objc_super *
- SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
- Context->getPointerType(SuperRep->getType()),
- VK_RValue, OK_Ordinary,
- SourceLocation(), false);
+ SuperRep = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
}
MsgExprs.push_back(SuperRep);
break;
@@ -3377,10 +3379,10 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
// we need the cast below. For example:
// (struct __rw_objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER"))
//
- SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
- Context->getPointerType(SuperRep->getType()),
- VK_RValue, OK_Ordinary,
- SourceLocation(), false);
+ SuperRep = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
SuperRep = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(superType),
CK_BitCast, SuperRep);
@@ -4442,7 +4444,7 @@ void RewriteModernObjC::InsertBlockLiteralsWithinFunction(FunctionDecl *FD) {
static void BuildUniqueMethodName(std::string &Name,
ObjCMethodDecl *MD) {
ObjCInterfaceDecl *IFace = MD->getClassInterface();
- Name = IFace->getName();
+ Name = std::string(IFace->getName());
Name += "__" + MD->getSelector().getAsString();
// Convert colons to underscores.
std::string::size_type loc = 0;
@@ -4704,9 +4706,9 @@ Stmt *RewriteModernObjC::RewriteLocalVariableExternalStorage(DeclRefExpr *DRE) {
if (VarDecl *Var = dyn_cast<VarDecl>(VD))
if (!ImportedLocalExternalDecls.count(Var))
return DRE;
- Expr *Exp = new (Context) UnaryOperator(DRE, UO_Deref, DRE->getType(),
- VK_LValue, OK_Ordinary,
- DRE->getLocation(), false);
+ Expr *Exp = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), DRE, UO_Deref, DRE->getType(),
+ VK_LValue, OK_Ordinary, DRE->getLocation(), false, FPOptionsOverride());
// Need parens to enforce precedence.
ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
Exp);
@@ -5292,11 +5294,12 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
VarDecl *NewVD = VarDecl::Create(
*Context, TUDecl, SourceLocation(), SourceLocation(),
&Context->Idents.get(DescData), Context->VoidPtrTy, nullptr, SC_Static);
- UnaryOperator *DescRefExpr = new (Context) UnaryOperator(
+ UnaryOperator *DescRefExpr = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context),
new (Context) DeclRefExpr(*Context, NewVD, false, Context->VoidPtrTy,
VK_LValue, SourceLocation()),
UO_AddrOf, Context->getPointerType(Context->VoidPtrTy), VK_RValue,
- OK_Ordinary, SourceLocation(), false);
+ OK_Ordinary, SourceLocation(), false, FPOptionsOverride());
InitExprs.push_back(DescRefExpr);
// Add initializers for any closure decl refs.
@@ -5313,9 +5316,9 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
if (HasLocalVariableExternalStorage(*I)) {
QualType QT = (*I)->getType();
QT = Context->getPointerType(QT);
- Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
- OK_Ordinary, SourceLocation(),
- false);
+ Exp = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), Exp, UO_AddrOf, QT, VK_RValue,
+ OK_Ordinary, SourceLocation(), false, FPOptionsOverride());
}
} else if (isTopLevelBlockPointerType((*I)->getType())) {
FD = SynthBlockInitFunctionDecl((*I)->getName());
@@ -5330,9 +5333,9 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
if (HasLocalVariableExternalStorage(*I)) {
QualType QT = (*I)->getType();
QT = Context->getPointerType(QT);
- Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
- OK_Ordinary, SourceLocation(),
- false);
+ Exp = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), Exp, UO_AddrOf, QT, VK_RValue,
+ OK_Ordinary, SourceLocation(), false, FPOptionsOverride());
}
}
@@ -5370,10 +5373,10 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
// captured nested byref variable has its address passed. Do not take
// its address again.
if (!isNestedCapturedVar)
- Exp = new (Context) UnaryOperator(Exp, UO_AddrOf,
- Context->getPointerType(Exp->getType()),
- VK_RValue, OK_Ordinary, SourceLocation(),
- false);
+ Exp = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), Exp, UO_AddrOf,
+ Context->getPointerType(Exp->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
Exp = NoTypeInfoCStyleCastExpr(Context, castT, CK_BitCast, Exp);
InitExprs.push_back(Exp);
}
@@ -5397,9 +5400,10 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
NewRep = DRE;
}
- NewRep = new (Context) UnaryOperator(NewRep, UO_AddrOf,
- Context->getPointerType(NewRep->getType()),
- VK_RValue, OK_Ordinary, SourceLocation(), false);
+ NewRep = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), NewRep, UO_AddrOf,
+ Context->getPointerType(NewRep->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
NewRep = NoTypeInfoCStyleCastExpr(Context, FType, CK_BitCast,
NewRep);
// Put Paren around the call.
@@ -7484,10 +7488,10 @@ Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
DeclRefExpr *DRE = new (Context)
DeclRefExpr(*Context, NewVD, false, Context->UnsignedLongTy,
VK_LValue, SourceLocation());
- BinaryOperator *addExpr =
- new (Context) BinaryOperator(castExpr, DRE, BO_Add,
- Context->getPointerType(Context->CharTy),
- VK_RValue, OK_Ordinary, SourceLocation(), FPOptions());
+ BinaryOperator *addExpr = BinaryOperator::Create(
+ *Context, castExpr, DRE, BO_Add,
+ Context->getPointerType(Context->CharTy), VK_RValue, OK_Ordinary,
+ SourceLocation(), FPOptionsOverride());
// Don't forget the parens to enforce the proper binding.
ParenExpr *PE = new (Context) ParenExpr(SourceLocation(),
SourceLocation(),
@@ -7501,12 +7505,11 @@ Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
RD = RD->getDefinition();
if (RD && !RD->getDeclName().getAsIdentifierInfo()) {
// decltype(((Foo_IMPL*)0)->bar) *
- ObjCContainerDecl *CDecl =
- dyn_cast<ObjCContainerDecl>(D->getDeclContext());
+ auto *CDecl = cast<ObjCContainerDecl>(D->getDeclContext());
// ivar in class extensions requires special treatment.
if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(CDecl))
CDecl = CatDecl->getClassInterface();
- std::string RecName = CDecl->getName();
+ std::string RecName = std::string(CDecl->getName());
RecName += "_IMPL";
RecordDecl *RD = RecordDecl::Create(
*Context, TTK_Struct, TUDecl, SourceLocation(), SourceLocation(),
@@ -7539,10 +7542,9 @@ Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
CK_BitCast,
PE);
-
- Expr *Exp = new (Context) UnaryOperator(castExpr, UO_Deref, IvarT,
- VK_LValue, OK_Ordinary,
- SourceLocation(), false);
+ Expr *Exp = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), castExpr, UO_Deref, IvarT,
+ VK_LValue, OK_Ordinary, SourceLocation(), false, FPOptionsOverride());
PE = new (Context) ParenExpr(OldRange.getBegin(),
OldRange.getEnd(),
Exp);
diff --git a/clang/lib/Frontend/Rewrite/RewriteObjC.cpp b/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
index 0cb7592b9982..3f320dc57aa6 100644
--- a/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
+++ b/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
@@ -789,9 +789,10 @@ void RewriteObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
unsigned Attributes = PD->getPropertyAttributes();
if (PID->getGetterMethodDecl() && !PID->getGetterMethodDecl()->isDefined()) {
- bool GenGetProperty = !(Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) &&
- (Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
- ObjCPropertyDecl::OBJC_PR_copy));
+ bool GenGetProperty =
+ !(Attributes & ObjCPropertyAttribute::kind_nonatomic) &&
+ (Attributes & (ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_copy));
std::string Getr;
if (GenGetProperty && !objcGetPropertyDefined) {
objcGetPropertyDefined = true;
@@ -850,8 +851,8 @@ void RewriteObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
// Generate the 'setter' function.
std::string Setr;
- bool GenSetProperty = Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
- ObjCPropertyDecl::OBJC_PR_copy);
+ bool GenSetProperty = Attributes & (ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_copy);
if (GenSetProperty && !objcSetPropertyDefined) {
objcSetPropertyDefined = true;
// FIXME. Is this attribute correct in all cases?
@@ -870,11 +871,11 @@ void RewriteObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
Setr += ", (id)";
Setr += PD->getName();
Setr += ", ";
- if (Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ if (Attributes & ObjCPropertyAttribute::kind_nonatomic)
Setr += "0, ";
else
Setr += "1, ";
- if (Attributes & ObjCPropertyDecl::OBJC_PR_copy)
+ if (Attributes & ObjCPropertyAttribute::kind_copy)
Setr += "1)";
else
Setr += "0)";
@@ -2513,9 +2514,10 @@ Stmt *RewriteObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) {
strType, nullptr, SC_Static);
DeclRefExpr *DRE = new (Context)
DeclRefExpr(*Context, NewVD, false, strType, VK_LValue, SourceLocation());
- Expr *Unop = new (Context)
- UnaryOperator(DRE, UO_AddrOf, Context->getPointerType(DRE->getType()),
- VK_RValue, OK_Ordinary, SourceLocation(), false);
+ Expr *Unop = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), DRE, UO_AddrOf,
+ Context->getPointerType(DRE->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
// cast to NSConstantString *
CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Exp->getType(),
CK_CPointerToObjCPointerCast, Unop);
@@ -2713,10 +2715,10 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
// we need the cast below. For example:
// (struct objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER"))
//
- SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
- Context->getPointerType(SuperRep->getType()),
- VK_RValue, OK_Ordinary,
- SourceLocation(), false);
+ SuperRep = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
SuperRep = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(superType),
CK_BitCast, SuperRep);
@@ -2731,10 +2733,10 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
superType, VK_LValue,
ILE, false);
// struct objc_super *
- SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
- Context->getPointerType(SuperRep->getType()),
- VK_RValue, OK_Ordinary,
- SourceLocation(), false);
+ SuperRep = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
}
MsgExprs.push_back(SuperRep);
break;
@@ -2808,10 +2810,10 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
// we need the cast below. For example:
// (struct objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER"))
//
- SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
- Context->getPointerType(SuperRep->getType()),
- VK_RValue, OK_Ordinary,
- SourceLocation(), false);
+ SuperRep = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
SuperRep = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(superType),
CK_BitCast, SuperRep);
@@ -2995,10 +2997,9 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
llvm::APInt(IntSize, 8),
Context->IntTy,
SourceLocation());
- BinaryOperator *lessThanExpr =
- new (Context) BinaryOperator(sizeofExpr, limit, BO_LE, Context->IntTy,
- VK_RValue, OK_Ordinary, SourceLocation(),
- FPOptions());
+ BinaryOperator *lessThanExpr = BinaryOperator::Create(
+ *Context, sizeofExpr, limit, BO_LE, Context->IntTy, VK_RValue,
+ OK_Ordinary, SourceLocation(), FPOptionsOverride());
// (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...))
ConditionalOperator *CondExpr =
new (Context) ConditionalOperator(lessThanExpr,
@@ -3048,9 +3049,10 @@ Stmt *RewriteObjC::RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp) {
nullptr, SC_Extern);
DeclRefExpr *DRE = new (Context) DeclRefExpr(
*Context, VD, false, getProtocolType(), VK_LValue, SourceLocation());
- Expr *DerefExpr = new (Context) UnaryOperator(DRE, UO_AddrOf,
- Context->getPointerType(DRE->getType()),
- VK_RValue, OK_Ordinary, SourceLocation(), false);
+ Expr *DerefExpr = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), DRE, UO_AddrOf,
+ Context->getPointerType(DRE->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, DerefExpr->getType(),
CK_BitCast,
DerefExpr);
@@ -3631,7 +3633,7 @@ void RewriteObjC::InsertBlockLiteralsWithinFunction(FunctionDecl *FD) {
static void BuildUniqueMethodName(std::string &Name,
ObjCMethodDecl *MD) {
ObjCInterfaceDecl *IFace = MD->getClassInterface();
- Name = IFace->getName();
+ Name = std::string(IFace->getName());
Name += "__" + MD->getSelector().getAsString();
// Convert colons to underscores.
std::string::size_type loc = 0;
@@ -3875,9 +3877,9 @@ Stmt *RewriteObjC::RewriteLocalVariableExternalStorage(DeclRefExpr *DRE) {
if (VarDecl *Var = dyn_cast<VarDecl>(VD))
if (!ImportedLocalExternalDecls.count(Var))
return DRE;
- Expr *Exp = new (Context) UnaryOperator(DRE, UO_Deref, DRE->getType(),
- VK_LValue, OK_Ordinary,
- DRE->getLocation(), false);
+ Expr *Exp = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), DRE, UO_Deref, DRE->getType(),
+ VK_LValue, OK_Ordinary, DRE->getLocation(), false, FPOptionsOverride());
// Need parens to enforce precedence.
ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
Exp);
@@ -4432,11 +4434,12 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
VarDecl *NewVD = VarDecl::Create(
*Context, TUDecl, SourceLocation(), SourceLocation(),
&Context->Idents.get(DescData), Context->VoidPtrTy, nullptr, SC_Static);
- UnaryOperator *DescRefExpr = new (Context) UnaryOperator(
+ UnaryOperator *DescRefExpr = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context),
new (Context) DeclRefExpr(*Context, NewVD, false, Context->VoidPtrTy,
VK_LValue, SourceLocation()),
UO_AddrOf, Context->getPointerType(Context->VoidPtrTy), VK_RValue,
- OK_Ordinary, SourceLocation(), false);
+ OK_Ordinary, SourceLocation(), false, FPOptionsOverride());
InitExprs.push_back(DescRefExpr);
// Add initializers for any closure decl refs.
@@ -4453,9 +4456,9 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
if (HasLocalVariableExternalStorage(*I)) {
QualType QT = (*I)->getType();
QT = Context->getPointerType(QT);
- Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
- OK_Ordinary, SourceLocation(),
- false);
+ Exp = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), Exp, UO_AddrOf, QT, VK_RValue,
+ OK_Ordinary, SourceLocation(), false, FPOptionsOverride());
}
} else if (isTopLevelBlockPointerType((*I)->getType())) {
FD = SynthBlockInitFunctionDecl((*I)->getName());
@@ -4470,9 +4473,9 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
if (HasLocalVariableExternalStorage(*I)) {
QualType QT = (*I)->getType();
QT = Context->getPointerType(QT);
- Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
- OK_Ordinary, SourceLocation(),
- false);
+ Exp = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), Exp, UO_AddrOf, QT, VK_RValue,
+ OK_Ordinary, SourceLocation(), false, FPOptionsOverride());
}
}
InitExprs.push_back(Exp);
@@ -4509,9 +4512,10 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
// captured nested byref variable has its address passed. Do not take
// its address again.
if (!isNestedCapturedVar)
- Exp = new (Context) UnaryOperator(
- Exp, UO_AddrOf, Context->getPointerType(Exp->getType()), VK_RValue,
- OK_Ordinary, SourceLocation(), false);
+ Exp = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), Exp, UO_AddrOf,
+ Context->getPointerType(Exp->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
Exp = NoTypeInfoCStyleCastExpr(Context, castT, CK_BitCast, Exp);
InitExprs.push_back(Exp);
}
@@ -4527,9 +4531,10 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
}
NewRep = CallExpr::Create(*Context, DRE, InitExprs, FType, VK_LValue,
SourceLocation());
- NewRep = new (Context) UnaryOperator(
- NewRep, UO_AddrOf, Context->getPointerType(NewRep->getType()), VK_RValue,
- OK_Ordinary, SourceLocation(), false);
+ NewRep = UnaryOperator::Create(
+ const_cast<ASTContext &>(*Context), NewRep, UO_AddrOf,
+ Context->getPointerType(NewRep->getType()), VK_RValue, OK_Ordinary,
+ SourceLocation(), false, FPOptionsOverride());
NewRep = NoTypeInfoCStyleCastExpr(Context, FType, CK_BitCast,
NewRep);
BlockDeclRefs.clear();
@@ -5819,7 +5824,8 @@ Stmt *RewriteObjCFragileABI::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class");
// Synthesize an explicit cast to gain access to the ivar.
- std::string RecName = clsDeclared->getIdentifier()->getName();
+ std::string RecName =
+ std::string(clsDeclared->getIdentifier()->getName());
RecName += "_IMPL";
IdentifierInfo *II = &Context->Idents.get(RecName);
RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
@@ -5859,7 +5865,8 @@ Stmt *RewriteObjCFragileABI::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class");
// Synthesize an explicit cast to gain access to the ivar.
- std::string RecName = clsDeclared->getIdentifier()->getName();
+ std::string RecName =
+ std::string(clsDeclared->getIdentifier()->getName());
RecName += "_IMPL";
IdentifierInfo *II = &Context->Idents.get(RecName);
RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
diff --git a/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp b/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
index 8042b52ddc03..462aeda6e027 100644
--- a/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
+++ b/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
@@ -22,6 +22,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/Bitstream/BitCodes.h"
#include "llvm/Bitstream/BitstreamReader.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <utility>
@@ -238,6 +239,9 @@ private:
/// generated from child processes.
bool MergeChildRecords;
+ /// Whether we've started finishing and tearing down this instance.
+ bool IsFinishing = false;
+
/// State that is shared among the various clones of this diagnostic
/// consumer.
struct SharedState {
@@ -567,6 +571,17 @@ unsigned SDiagsWriter::getEmitDiagnosticFlag(StringRef FlagName) {
void SDiagsWriter::HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
const Diagnostic &Info) {
+ assert(!IsFinishing &&
+ "Received a diagnostic after we've already started teardown.");
+ if (IsFinishing) {
+ SmallString<256> diagnostic;
+ Info.FormatDiagnostic(diagnostic);
+ getMetaDiags()->Report(
+ diag::warn_fe_serialized_diag_failure_during_finalisation)
+ << diagnostic;
+ return;
+ }
+
// Enter the block for a non-note diagnostic immediately, rather than waiting
// for beginDiagnostic, in case associated notes are emitted before we get
// there.
@@ -760,6 +775,9 @@ void SDiagsWriter::RemoveOldDiagnostics() {
}
void SDiagsWriter::finish() {
+ assert(!IsFinishing);
+ IsFinishing = true;
+
// The original instance is responsible for writing the file.
if (!OriginalInstance)
return;
@@ -785,12 +803,20 @@ void SDiagsWriter::finish() {
if (EC) {
getMetaDiags()->Report(diag::warn_fe_serialized_diag_failure)
<< State->OutputFile << EC.message();
+ OS->clear_error();
return;
}
// Write the generated bitstream to "Out".
OS->write((char *)&State->Buffer.front(), State->Buffer.size());
OS->flush();
+
+ assert(!OS->has_error());
+ if (OS->has_error()) {
+ getMetaDiags()->Report(diag::warn_fe_serialized_diag_failure)
+ << State->OutputFile << OS->error().message();
+ OS->clear_error();
+ }
}
std::error_code SDiagsMerger::visitStartOfDiagnostic() {
diff --git a/clang/lib/Frontend/TextDiagnosticBuffer.cpp b/clang/lib/Frontend/TextDiagnosticBuffer.cpp
index b2497f56cbcd..90f273e65f88 100644
--- a/clang/lib/Frontend/TextDiagnosticBuffer.cpp
+++ b/clang/lib/Frontend/TextDiagnosticBuffer.cpp
@@ -32,20 +32,20 @@ void TextDiagnosticBuffer::HandleDiagnostic(DiagnosticsEngine::Level Level,
"Diagnostic not handled during diagnostic buffering!");
case DiagnosticsEngine::Note:
All.emplace_back(Level, Notes.size());
- Notes.emplace_back(Info.getLocation(), Buf.str());
+ Notes.emplace_back(Info.getLocation(), std::string(Buf.str()));
break;
case DiagnosticsEngine::Warning:
All.emplace_back(Level, Warnings.size());
- Warnings.emplace_back(Info.getLocation(), Buf.str());
+ Warnings.emplace_back(Info.getLocation(), std::string(Buf.str()));
break;
case DiagnosticsEngine::Remark:
All.emplace_back(Level, Remarks.size());
- Remarks.emplace_back(Info.getLocation(), Buf.str());
+ Remarks.emplace_back(Info.getLocation(), std::string(Buf.str()));
break;
case DiagnosticsEngine::Error:
case DiagnosticsEngine::Fatal:
All.emplace_back(Level, Errors.size());
- Errors.emplace_back(Info.getLocation(), Buf.str());
+ Errors.emplace_back(Info.getLocation(), std::string(Buf.str()));
break;
}
}
diff --git a/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp b/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
index 82c2af87706e..56e05242f7c9 100644
--- a/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
+++ b/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
@@ -89,9 +89,10 @@ namespace {
class StandardDirective : public Directive {
public:
StandardDirective(SourceLocation DirectiveLoc, SourceLocation DiagnosticLoc,
- bool MatchAnyLine, StringRef Text, unsigned Min,
- unsigned Max)
- : Directive(DirectiveLoc, DiagnosticLoc, MatchAnyLine, Text, Min, Max) {}
+ bool MatchAnyFileAndLine, bool MatchAnyLine, StringRef Text,
+ unsigned Min, unsigned Max)
+ : Directive(DirectiveLoc, DiagnosticLoc, MatchAnyFileAndLine,
+ MatchAnyLine, Text, Min, Max) {}
bool isValid(std::string &Error) override {
// all strings are considered valid; even empty ones
@@ -107,9 +108,10 @@ public:
class RegexDirective : public Directive {
public:
RegexDirective(SourceLocation DirectiveLoc, SourceLocation DiagnosticLoc,
- bool MatchAnyLine, StringRef Text, unsigned Min, unsigned Max,
- StringRef RegexStr)
- : Directive(DirectiveLoc, DiagnosticLoc, MatchAnyLine, Text, Min, Max),
+ bool MatchAnyFileAndLine, bool MatchAnyLine, StringRef Text,
+ unsigned Min, unsigned Max, StringRef RegexStr)
+ : Directive(DirectiveLoc, DiagnosticLoc, MatchAnyFileAndLine,
+ MatchAnyLine, Text, Min, Max),
Regex(RegexStr) {}
bool isValid(std::string &Error) override {
@@ -294,11 +296,13 @@ struct UnattachedDirective {
// Attach the specified directive to the line of code indicated by
// \p ExpectedLoc.
void attachDirective(DiagnosticsEngine &Diags, const UnattachedDirective &UD,
- SourceLocation ExpectedLoc, bool MatchAnyLine = false) {
+ SourceLocation ExpectedLoc,
+ bool MatchAnyFileAndLine = false,
+ bool MatchAnyLine = false) {
// Construct new directive.
- std::unique_ptr<Directive> D =
- Directive::create(UD.RegexKind, UD.DirectivePos, ExpectedLoc,
- MatchAnyLine, UD.Text, UD.Min, UD.Max);
+ std::unique_ptr<Directive> D = Directive::create(
+ UD.RegexKind, UD.DirectivePos, ExpectedLoc, MatchAnyFileAndLine,
+ MatchAnyLine, UD.Text, UD.Min, UD.Max);
std::string Error;
if (!D->isValid(Error)) {
@@ -498,6 +502,7 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
// Next optional token: @
SourceLocation ExpectedLoc;
StringRef Marker;
+ bool MatchAnyFileAndLine = false;
bool MatchAnyLine = false;
if (!PH.Next("@")) {
ExpectedLoc = Pos;
@@ -526,26 +531,39 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
StringRef Filename(PH.C, PH.P-PH.C);
PH.Advance();
- // Lookup file via Preprocessor, like a #include.
- const DirectoryLookup *CurDir;
- Optional<FileEntryRef> File =
- PP->LookupFile(Pos, Filename, false, nullptr, nullptr, CurDir,
- nullptr, nullptr, nullptr, nullptr, nullptr);
- if (!File) {
- Diags.Report(Pos.getLocWithOffset(PH.C-PH.Begin),
- diag::err_verify_missing_file) << Filename << KindStr;
- continue;
- }
-
- const FileEntry *FE = &File->getFileEntry();
- if (SM.translateFile(FE).isInvalid())
- SM.createFileID(FE, Pos, SrcMgr::C_User);
-
- if (PH.Next(Line) && Line > 0)
- ExpectedLoc = SM.translateFileLineCol(FE, Line, 1);
- else if (PH.Next("*")) {
+ if (Filename == "*") {
+ MatchAnyFileAndLine = true;
+ if (!PH.Next("*")) {
+ Diags.Report(Pos.getLocWithOffset(PH.C - PH.Begin),
+ diag::err_verify_missing_line)
+ << "'*'";
+ continue;
+ }
MatchAnyLine = true;
- ExpectedLoc = SM.translateFileLineCol(FE, 1, 1);
+ ExpectedLoc = SourceLocation();
+ } else {
+ // Lookup file via Preprocessor, like a #include.
+ const DirectoryLookup *CurDir;
+ Optional<FileEntryRef> File =
+ PP->LookupFile(Pos, Filename, false, nullptr, nullptr, CurDir,
+ nullptr, nullptr, nullptr, nullptr, nullptr);
+ if (!File) {
+ Diags.Report(Pos.getLocWithOffset(PH.C - PH.Begin),
+ diag::err_verify_missing_file)
+ << Filename << KindStr;
+ continue;
+ }
+
+ const FileEntry *FE = &File->getFileEntry();
+ if (SM.translateFile(FE).isInvalid())
+ SM.createFileID(FE, Pos, SrcMgr::C_User);
+
+ if (PH.Next(Line) && Line > 0)
+ ExpectedLoc = SM.translateFileLineCol(FE, Line, 1);
+ else if (PH.Next("*")) {
+ MatchAnyLine = true;
+ ExpectedLoc = SM.translateFileLineCol(FE, 1, 1);
+ }
}
} else if (PH.Next("*")) {
MatchAnyLine = true;
@@ -631,7 +649,7 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
}
if (Marker.empty())
- attachDirective(Diags, D, ExpectedLoc, MatchAnyLine);
+ attachDirective(Diags, D, ExpectedLoc, MatchAnyFileAndLine, MatchAnyLine);
else
Markers.addDirective(Marker, D);
FoundDirective = true;
@@ -877,7 +895,7 @@ static unsigned PrintExpected(DiagnosticsEngine &Diags,
SmallString<256> Fmt;
llvm::raw_svector_ostream OS(Fmt);
for (const auto *D : DL) {
- if (D->DiagnosticLoc.isInvalid())
+ if (D->DiagnosticLoc.isInvalid() || D->MatchAnyFileAndLine)
OS << "\n File *";
else
OS << "\n File " << SourceMgr.getFilename(D->DiagnosticLoc);
@@ -937,7 +955,7 @@ static unsigned CheckLists(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
continue;
}
- if (!D.DiagnosticLoc.isInvalid() &&
+ if (!D.DiagnosticLoc.isInvalid() && !D.MatchAnyFileAndLine &&
!IsFromSameFile(SourceMgr, D.DiagnosticLoc, II->first))
continue;
@@ -1114,11 +1132,13 @@ void VerifyDiagnosticConsumer::CheckDiagnostics() {
std::unique_ptr<Directive> Directive::create(bool RegexKind,
SourceLocation DirectiveLoc,
SourceLocation DiagnosticLoc,
+ bool MatchAnyFileAndLine,
bool MatchAnyLine, StringRef Text,
unsigned Min, unsigned Max) {
if (!RegexKind)
return std::make_unique<StandardDirective>(DirectiveLoc, DiagnosticLoc,
- MatchAnyLine, Text, Min, Max);
+ MatchAnyFileAndLine,
+ MatchAnyLine, Text, Min, Max);
// Parse the directive into a regular expression.
std::string RegexStr;
@@ -1143,6 +1163,7 @@ std::unique_ptr<Directive> Directive::create(bool RegexKind,
}
}
- return std::make_unique<RegexDirective>(
- DirectiveLoc, DiagnosticLoc, MatchAnyLine, Text, Min, Max, RegexStr);
+ return std::make_unique<RegexDirective>(DirectiveLoc, DiagnosticLoc,
+ MatchAnyFileAndLine, MatchAnyLine,
+ Text, Min, Max, RegexStr);
}
diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
index 9bf70b793d9b..ac64e1708da6 100644
--- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
+++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
@@ -23,6 +23,7 @@
#include "clang/Frontend/Utils.h"
#include "clang/FrontendTool/Utils.h"
#include "clang/Rewrite/Frontend/FrontendActions.h"
+#include "clang/StaticAnalyzer/Frontend/AnalyzerHelpFlags.h"
#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
#include "llvm/Option/OptTable.h"
#include "llvm/Option/Option.h"
@@ -64,8 +65,8 @@ CreateFrontendBaseAction(CompilerInstance &CI) {
case GenerateHeaderModule:
return std::make_unique<GenerateHeaderModuleAction>();
case GeneratePCH: return std::make_unique<GeneratePCHAction>();
- case GenerateInterfaceIfsExpV1:
- return std::make_unique<GenerateInterfaceIfsExpV1Action>();
+ case GenerateInterfaceStubs:
+ return std::make_unique<GenerateInterfaceStubsAction>();
case InitOnly: return std::make_unique<InitOnlyAction>();
case ParseSyntaxOnly: return std::make_unique<SyntaxOnlyAction>();
case ModuleFileInfo: return std::make_unique<DumpModuleInfoAction>();
@@ -73,14 +74,15 @@ CreateFrontendBaseAction(CompilerInstance &CI) {
case TemplightDump: return std::make_unique<TemplightDumpAction>();
case PluginAction: {
- for (FrontendPluginRegistry::iterator it =
- FrontendPluginRegistry::begin(), ie = FrontendPluginRegistry::end();
- it != ie; ++it) {
- if (it->getName() == CI.getFrontendOpts().ActionName) {
- std::unique_ptr<PluginASTAction> P(it->instantiate());
+ for (const FrontendPluginRegistry::entry &Plugin :
+ FrontendPluginRegistry::entries()) {
+ if (Plugin.getName() == CI.getFrontendOpts().ActionName) {
+ std::unique_ptr<PluginASTAction> P(Plugin.instantiate());
if ((P->getActionType() != PluginASTAction::ReplaceAction &&
P->getActionType() != PluginASTAction::Cmdline) ||
- !P->ParseArgs(CI, CI.getFrontendOpts().PluginArgs[it->getName()]))
+ !P->ParseArgs(
+ CI,
+ CI.getFrontendOpts().PluginArgs[std::string(Plugin.getName())]))
return nullptr;
return std::move(P);
}
@@ -202,9 +204,7 @@ bool ExecuteCompilerInvocation(CompilerInstance *Clang) {
}
// Load any requested plugins.
- for (unsigned i = 0,
- e = Clang->getFrontendOpts().Plugins.size(); i != e; ++i) {
- const std::string &Path = Clang->getFrontendOpts().Plugins[i];
+ for (const std::string &Path : Clang->getFrontendOpts().Plugins) {
std::string Error;
if (llvm::sys::DynamicLibrary::LoadLibraryPermanently(Path.c_str(), &Error))
Clang->getDiagnostics().Report(diag::err_fe_unable_to_load_plugin)
@@ -212,13 +212,12 @@ bool ExecuteCompilerInvocation(CompilerInstance *Clang) {
}
// Check if any of the loaded plugins replaces the main AST action
- for (FrontendPluginRegistry::iterator it = FrontendPluginRegistry::begin(),
- ie = FrontendPluginRegistry::end();
- it != ie; ++it) {
- std::unique_ptr<PluginASTAction> P(it->instantiate());
+ for (const FrontendPluginRegistry::entry &Plugin :
+ FrontendPluginRegistry::entries()) {
+ std::unique_ptr<PluginASTAction> P(Plugin.instantiate());
if (P->getActionType() == PluginASTAction::ReplaceAction) {
Clang->getFrontendOpts().ProgramAction = clang::frontend::PluginAction;
- Clang->getFrontendOpts().ActionName = it->getName();
+ Clang->getFrontendOpts().ActionName = Plugin.getName().str();
break;
}
}
@@ -241,35 +240,24 @@ bool ExecuteCompilerInvocation(CompilerInstance *Clang) {
// These should happen AFTER plugins have been loaded!
AnalyzerOptions &AnOpts = *Clang->getAnalyzerOpts();
+
// Honor -analyzer-checker-help and -analyzer-checker-help-hidden.
if (AnOpts.ShowCheckerHelp || AnOpts.ShowCheckerHelpAlpha ||
AnOpts.ShowCheckerHelpDeveloper) {
- ento::printCheckerHelp(llvm::outs(),
- Clang->getFrontendOpts().Plugins,
- AnOpts,
- Clang->getDiagnostics(),
- Clang->getLangOpts());
+ ento::printCheckerHelp(llvm::outs(), *Clang);
return true;
}
// Honor -analyzer-checker-option-help.
if (AnOpts.ShowCheckerOptionList || AnOpts.ShowCheckerOptionAlphaList ||
AnOpts.ShowCheckerOptionDeveloperList) {
- ento::printCheckerConfigList(llvm::outs(),
- Clang->getFrontendOpts().Plugins,
- *Clang->getAnalyzerOpts(),
- Clang->getDiagnostics(),
- Clang->getLangOpts());
+ ento::printCheckerConfigList(llvm::outs(), *Clang);
return true;
}
// Honor -analyzer-list-enabled-checkers.
if (AnOpts.ShowEnabledCheckerList) {
- ento::printEnabledCheckerList(llvm::outs(),
- Clang->getFrontendOpts().Plugins,
- AnOpts,
- Clang->getDiagnostics(),
- Clang->getLangOpts());
+ ento::printEnabledCheckerList(llvm::outs(), *Clang);
return true;
}
diff --git a/clang/lib/Headers/__clang_cuda_cmath.h b/clang/lib/Headers/__clang_cuda_cmath.h
index 834a2e3fd134..8ba182689a4f 100644
--- a/clang/lib/Headers/__clang_cuda_cmath.h
+++ b/clang/lib/Headers/__clang_cuda_cmath.h
@@ -12,7 +12,9 @@
#error "This file is for CUDA compilation only."
#endif
+#ifndef __OPENMP_NVPTX__
#include <limits>
+#endif
// CUDA lets us use various std math functions on the device side. This file
// works in concert with __clang_cuda_math_forward_declares.h to make this work.
@@ -30,32 +32,16 @@
// implementation. Declaring in the global namespace and pulling into namespace
// std covers all of the known knowns.
-#ifdef _OPENMP
-#define __DEVICE__ static __attribute__((always_inline))
+#ifdef __OPENMP_NVPTX__
+#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
#else
#define __DEVICE__ static __device__ __inline__ __attribute__((always_inline))
#endif
-// For C++ 17 we need to include noexcept attribute to be compatible
-// with the header-defined version. This may be removed once
-// variant is supported.
-#if defined(_OPENMP) && defined(__cplusplus) && __cplusplus >= 201703L
-#define __NOEXCEPT noexcept
-#else
-#define __NOEXCEPT
-#endif
-
-#if !(defined(_OPENMP) && defined(__cplusplus))
__DEVICE__ long long abs(long long __n) { return ::llabs(__n); }
__DEVICE__ long abs(long __n) { return ::labs(__n); }
__DEVICE__ float abs(float __x) { return ::fabsf(__x); }
__DEVICE__ double abs(double __x) { return ::fabs(__x); }
-#endif
-// TODO: remove once variat is supported.
-#if defined(_OPENMP) && defined(__cplusplus)
-__DEVICE__ const float abs(const float __x) { return ::fabsf((float)__x); }
-__DEVICE__ const double abs(const double __x) { return ::fabs((double)__x); }
-#endif
__DEVICE__ float acos(float __x) { return ::acosf(__x); }
__DEVICE__ float asin(float __x) { return ::asinf(__x); }
__DEVICE__ float atan(float __x) { return ::atanf(__x); }
@@ -64,11 +50,9 @@ __DEVICE__ float ceil(float __x) { return ::ceilf(__x); }
__DEVICE__ float cos(float __x) { return ::cosf(__x); }
__DEVICE__ float cosh(float __x) { return ::coshf(__x); }
__DEVICE__ float exp(float __x) { return ::expf(__x); }
-__DEVICE__ float fabs(float __x) __NOEXCEPT { return ::fabsf(__x); }
+__DEVICE__ float fabs(float __x) { return ::fabsf(__x); }
__DEVICE__ float floor(float __x) { return ::floorf(__x); }
__DEVICE__ float fmod(float __x, float __y) { return ::fmodf(__x, __y); }
-// TODO: remove when variant is supported
-#ifndef _OPENMP
__DEVICE__ int fpclassify(float __x) {
return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
FP_ZERO, __x);
@@ -77,14 +61,15 @@ __DEVICE__ int fpclassify(double __x) {
return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
FP_ZERO, __x);
}
-#endif
__DEVICE__ float frexp(float __arg, int *__exp) {
return ::frexpf(__arg, __exp);
}
// For inscrutable reasons, the CUDA headers define these functions for us on
-// Windows.
-#ifndef _MSC_VER
+// Windows. For OpenMP we omit these as some old system headers have
+// non-conforming `isinf(float)` and `isnan(float)` implementations that return
+// an `int`. The system versions of these functions should be fine anyway.
+#if !defined(_MSC_VER) && !defined(__OPENMP_NVPTX__)
__DEVICE__ bool isinf(float __x) { return ::__isinff(__x); }
__DEVICE__ bool isinf(double __x) { return ::__isinf(__x); }
__DEVICE__ bool isfinite(float __x) { return ::__finitef(__x); }
@@ -161,6 +146,8 @@ __DEVICE__ float tanh(float __x) { return ::tanhf(__x); }
// libdevice doesn't provide an implementation, and we don't want to be in the
// business of implementing tricky libm functions in this header.
+#ifndef __OPENMP_NVPTX__
+
// Now we've defined everything we promised we'd define in
// __clang_cuda_math_forward_declares.h. We need to do two additional things to
// fix up our math functions.
@@ -457,10 +444,7 @@ using ::remainderf;
using ::remquof;
using ::rintf;
using ::roundf;
-// TODO: remove once variant is supported
-#ifndef _OPENMP
using ::scalblnf;
-#endif
using ::scalbnf;
using ::sinf;
using ::sinhf;
@@ -479,7 +463,8 @@ _GLIBCXX_END_NAMESPACE_VERSION
} // namespace std
#endif
-#undef __NOEXCEPT
+#endif // __OPENMP_NVPTX__
+
#undef __DEVICE__
#endif
diff --git a/clang/lib/Headers/__clang_cuda_complex_builtins.h b/clang/lib/Headers/__clang_cuda_complex_builtins.h
index 576a958b16bb..8c10ff6b461f 100644
--- a/clang/lib/Headers/__clang_cuda_complex_builtins.h
+++ b/clang/lib/Headers/__clang_cuda_complex_builtins.h
@@ -13,10 +13,57 @@
// This header defines __muldc3, __mulsc3, __divdc3, and __divsc3. These are
// libgcc functions that clang assumes are available when compiling c99 complex
// operations. (These implementations come from libc++, and have been modified
-// to work with CUDA.)
+// to work with CUDA and OpenMP target offloading [in C and C++ mode].)
-extern "C" inline __device__ double _Complex __muldc3(double __a, double __b,
- double __c, double __d) {
+#pragma push_macro("__DEVICE__")
+#ifdef _OPENMP
+#pragma omp declare target
+#define __DEVICE__ __attribute__((noinline, nothrow, cold, weak))
+#else
+#define __DEVICE__ __device__ inline
+#endif
+
+// To make the algorithms available for C and C++ in CUDA and OpenMP we select
+// different but equivalent function versions. TODO: For OpenMP we currently
+// select the native builtins as the overload support for templates is lacking.
+#if !defined(_OPENMP)
+#define _ISNANd std::isnan
+#define _ISNANf std::isnan
+#define _ISINFd std::isinf
+#define _ISINFf std::isinf
+#define _ISFINITEd std::isfinite
+#define _ISFINITEf std::isfinite
+#define _COPYSIGNd std::copysign
+#define _COPYSIGNf std::copysign
+#define _SCALBNd std::scalbn
+#define _SCALBNf std::scalbn
+#define _ABSd std::abs
+#define _ABSf std::abs
+#define _LOGBd std::logb
+#define _LOGBf std::logb
+#else
+#define _ISNANd __nv_isnand
+#define _ISNANf __nv_isnanf
+#define _ISINFd __nv_isinfd
+#define _ISINFf __nv_isinff
+#define _ISFINITEd __nv_isfinited
+#define _ISFINITEf __nv_finitef
+#define _COPYSIGNd __nv_copysign
+#define _COPYSIGNf __nv_copysignf
+#define _SCALBNd __nv_scalbn
+#define _SCALBNf __nv_scalbnf
+#define _ABSd __nv_fabs
+#define _ABSf __nv_fabsf
+#define _LOGBd __nv_logb
+#define _LOGBf __nv_logbf
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+__DEVICE__ double _Complex __muldc3(double __a, double __b, double __c,
+ double __d) {
double __ac = __a * __c;
double __bd = __b * __d;
double __ad = __a * __d;
@@ -24,50 +71,49 @@ extern "C" inline __device__ double _Complex __muldc3(double __a, double __b,
double _Complex z;
__real__(z) = __ac - __bd;
__imag__(z) = __ad + __bc;
- if (std::isnan(__real__(z)) && std::isnan(__imag__(z))) {
+ if (_ISNANd(__real__(z)) && _ISNANd(__imag__(z))) {
int __recalc = 0;
- if (std::isinf(__a) || std::isinf(__b)) {
- __a = std::copysign(std::isinf(__a) ? 1 : 0, __a);
- __b = std::copysign(std::isinf(__b) ? 1 : 0, __b);
- if (std::isnan(__c))
- __c = std::copysign(0, __c);
- if (std::isnan(__d))
- __d = std::copysign(0, __d);
+ if (_ISINFd(__a) || _ISINFd(__b)) {
+ __a = _COPYSIGNd(_ISINFd(__a) ? 1 : 0, __a);
+ __b = _COPYSIGNd(_ISINFd(__b) ? 1 : 0, __b);
+ if (_ISNANd(__c))
+ __c = _COPYSIGNd(0, __c);
+ if (_ISNANd(__d))
+ __d = _COPYSIGNd(0, __d);
__recalc = 1;
}
- if (std::isinf(__c) || std::isinf(__d)) {
- __c = std::copysign(std::isinf(__c) ? 1 : 0, __c);
- __d = std::copysign(std::isinf(__d) ? 1 : 0, __d);
- if (std::isnan(__a))
- __a = std::copysign(0, __a);
- if (std::isnan(__b))
- __b = std::copysign(0, __b);
+ if (_ISINFd(__c) || _ISINFd(__d)) {
+ __c = _COPYSIGNd(_ISINFd(__c) ? 1 : 0, __c);
+ __d = _COPYSIGNd(_ISINFd(__d) ? 1 : 0, __d);
+ if (_ISNANd(__a))
+ __a = _COPYSIGNd(0, __a);
+ if (_ISNANd(__b))
+ __b = _COPYSIGNd(0, __b);
__recalc = 1;
}
- if (!__recalc && (std::isinf(__ac) || std::isinf(__bd) ||
- std::isinf(__ad) || std::isinf(__bc))) {
- if (std::isnan(__a))
- __a = std::copysign(0, __a);
- if (std::isnan(__b))
- __b = std::copysign(0, __b);
- if (std::isnan(__c))
- __c = std::copysign(0, __c);
- if (std::isnan(__d))
- __d = std::copysign(0, __d);
+ if (!__recalc &&
+ (_ISINFd(__ac) || _ISINFd(__bd) || _ISINFd(__ad) || _ISINFd(__bc))) {
+ if (_ISNANd(__a))
+ __a = _COPYSIGNd(0, __a);
+ if (_ISNANd(__b))
+ __b = _COPYSIGNd(0, __b);
+ if (_ISNANd(__c))
+ __c = _COPYSIGNd(0, __c);
+ if (_ISNANd(__d))
+ __d = _COPYSIGNd(0, __d);
__recalc = 1;
}
if (__recalc) {
// Can't use std::numeric_limits<double>::infinity() -- that doesn't have
// a device overload (and isn't constexpr before C++11, naturally).
- __real__(z) = __builtin_huge_valf() * (__a * __c - __b * __d);
- __imag__(z) = __builtin_huge_valf() * (__a * __d + __b * __c);
+ __real__(z) = __builtin_huge_val() * (__a * __c - __b * __d);
+ __imag__(z) = __builtin_huge_val() * (__a * __d + __b * __c);
}
}
return z;
}
-extern "C" inline __device__ float _Complex __mulsc3(float __a, float __b,
- float __c, float __d) {
+__DEVICE__ float _Complex __mulsc3(float __a, float __b, float __c, float __d) {
float __ac = __a * __c;
float __bd = __b * __d;
float __ad = __a * __d;
@@ -75,36 +121,36 @@ extern "C" inline __device__ float _Complex __mulsc3(float __a, float __b,
float _Complex z;
__real__(z) = __ac - __bd;
__imag__(z) = __ad + __bc;
- if (std::isnan(__real__(z)) && std::isnan(__imag__(z))) {
+ if (_ISNANf(__real__(z)) && _ISNANf(__imag__(z))) {
int __recalc = 0;
- if (std::isinf(__a) || std::isinf(__b)) {
- __a = std::copysign(std::isinf(__a) ? 1 : 0, __a);
- __b = std::copysign(std::isinf(__b) ? 1 : 0, __b);
- if (std::isnan(__c))
- __c = std::copysign(0, __c);
- if (std::isnan(__d))
- __d = std::copysign(0, __d);
+ if (_ISINFf(__a) || _ISINFf(__b)) {
+ __a = _COPYSIGNf(_ISINFf(__a) ? 1 : 0, __a);
+ __b = _COPYSIGNf(_ISINFf(__b) ? 1 : 0, __b);
+ if (_ISNANf(__c))
+ __c = _COPYSIGNf(0, __c);
+ if (_ISNANf(__d))
+ __d = _COPYSIGNf(0, __d);
__recalc = 1;
}
- if (std::isinf(__c) || std::isinf(__d)) {
- __c = std::copysign(std::isinf(__c) ? 1 : 0, __c);
- __d = std::copysign(std::isinf(__d) ? 1 : 0, __d);
- if (std::isnan(__a))
- __a = std::copysign(0, __a);
- if (std::isnan(__b))
- __b = std::copysign(0, __b);
+ if (_ISINFf(__c) || _ISINFf(__d)) {
+ __c = _COPYSIGNf(_ISINFf(__c) ? 1 : 0, __c);
+ __d = _COPYSIGNf(_ISINFf(__d) ? 1 : 0, __d);
+ if (_ISNANf(__a))
+ __a = _COPYSIGNf(0, __a);
+ if (_ISNANf(__b))
+ __b = _COPYSIGNf(0, __b);
__recalc = 1;
}
- if (!__recalc && (std::isinf(__ac) || std::isinf(__bd) ||
- std::isinf(__ad) || std::isinf(__bc))) {
- if (std::isnan(__a))
- __a = std::copysign(0, __a);
- if (std::isnan(__b))
- __b = std::copysign(0, __b);
- if (std::isnan(__c))
- __c = std::copysign(0, __c);
- if (std::isnan(__d))
- __d = std::copysign(0, __d);
+ if (!__recalc &&
+ (_ISINFf(__ac) || _ISINFf(__bd) || _ISINFf(__ad) || _ISINFf(__bc))) {
+ if (_ISNANf(__a))
+ __a = _COPYSIGNf(0, __a);
+ if (_ISNANf(__b))
+ __b = _COPYSIGNf(0, __b);
+ if (_ISNANf(__c))
+ __c = _COPYSIGNf(0, __c);
+ if (_ISNANf(__d))
+ __d = _COPYSIGNf(0, __d);
__recalc = 1;
}
if (__recalc) {
@@ -115,36 +161,36 @@ extern "C" inline __device__ float _Complex __mulsc3(float __a, float __b,
return z;
}
-extern "C" inline __device__ double _Complex __divdc3(double __a, double __b,
- double __c, double __d) {
+__DEVICE__ double _Complex __divdc3(double __a, double __b, double __c,
+ double __d) {
int __ilogbw = 0;
// Can't use std::max, because that's defined in <algorithm>, and we don't
// want to pull that in for every compile. The CUDA headers define
// ::max(float, float) and ::max(double, double), which is sufficient for us.
- double __logbw = std::logb(max(std::abs(__c), std::abs(__d)));
- if (std::isfinite(__logbw)) {
+ double __logbw = _LOGBd(max(_ABSd(__c), _ABSd(__d)));
+ if (_ISFINITEd(__logbw)) {
__ilogbw = (int)__logbw;
- __c = std::scalbn(__c, -__ilogbw);
- __d = std::scalbn(__d, -__ilogbw);
+ __c = _SCALBNd(__c, -__ilogbw);
+ __d = _SCALBNd(__d, -__ilogbw);
}
double __denom = __c * __c + __d * __d;
double _Complex z;
- __real__(z) = std::scalbn((__a * __c + __b * __d) / __denom, -__ilogbw);
- __imag__(z) = std::scalbn((__b * __c - __a * __d) / __denom, -__ilogbw);
- if (std::isnan(__real__(z)) && std::isnan(__imag__(z))) {
- if ((__denom == 0.0) && (!std::isnan(__a) || !std::isnan(__b))) {
- __real__(z) = std::copysign(__builtin_huge_valf(), __c) * __a;
- __imag__(z) = std::copysign(__builtin_huge_valf(), __c) * __b;
- } else if ((std::isinf(__a) || std::isinf(__b)) && std::isfinite(__c) &&
- std::isfinite(__d)) {
- __a = std::copysign(std::isinf(__a) ? 1.0 : 0.0, __a);
- __b = std::copysign(std::isinf(__b) ? 1.0 : 0.0, __b);
- __real__(z) = __builtin_huge_valf() * (__a * __c + __b * __d);
- __imag__(z) = __builtin_huge_valf() * (__b * __c - __a * __d);
- } else if (std::isinf(__logbw) && __logbw > 0.0 && std::isfinite(__a) &&
- std::isfinite(__b)) {
- __c = std::copysign(std::isinf(__c) ? 1.0 : 0.0, __c);
- __d = std::copysign(std::isinf(__d) ? 1.0 : 0.0, __d);
+ __real__(z) = _SCALBNd((__a * __c + __b * __d) / __denom, -__ilogbw);
+ __imag__(z) = _SCALBNd((__b * __c - __a * __d) / __denom, -__ilogbw);
+ if (_ISNANd(__real__(z)) && _ISNANd(__imag__(z))) {
+ if ((__denom == 0.0) && (!_ISNANd(__a) || !_ISNANd(__b))) {
+ __real__(z) = _COPYSIGNd(__builtin_huge_val(), __c) * __a;
+ __imag__(z) = _COPYSIGNd(__builtin_huge_val(), __c) * __b;
+ } else if ((_ISINFd(__a) || _ISINFd(__b)) && _ISFINITEd(__c) &&
+ _ISFINITEd(__d)) {
+ __a = _COPYSIGNd(_ISINFd(__a) ? 1.0 : 0.0, __a);
+ __b = _COPYSIGNd(_ISINFd(__b) ? 1.0 : 0.0, __b);
+ __real__(z) = __builtin_huge_val() * (__a * __c + __b * __d);
+ __imag__(z) = __builtin_huge_val() * (__b * __c - __a * __d);
+ } else if (_ISINFd(__logbw) && __logbw > 0.0 && _ISFINITEd(__a) &&
+ _ISFINITEd(__b)) {
+ __c = _COPYSIGNd(_ISINFd(__c) ? 1.0 : 0.0, __c);
+ __d = _COPYSIGNd(_ISINFd(__d) ? 1.0 : 0.0, __d);
__real__(z) = 0.0 * (__a * __c + __b * __d);
__imag__(z) = 0.0 * (__b * __c - __a * __d);
}
@@ -152,33 +198,32 @@ extern "C" inline __device__ double _Complex __divdc3(double __a, double __b,
return z;
}
-extern "C" inline __device__ float _Complex __divsc3(float __a, float __b,
- float __c, float __d) {
+__DEVICE__ float _Complex __divsc3(float __a, float __b, float __c, float __d) {
int __ilogbw = 0;
- float __logbw = std::logb(max(std::abs(__c), std::abs(__d)));
- if (std::isfinite(__logbw)) {
+ float __logbw = _LOGBf(max(_ABSf(__c), _ABSf(__d)));
+ if (_ISFINITEf(__logbw)) {
__ilogbw = (int)__logbw;
- __c = std::scalbn(__c, -__ilogbw);
- __d = std::scalbn(__d, -__ilogbw);
+ __c = _SCALBNf(__c, -__ilogbw);
+ __d = _SCALBNf(__d, -__ilogbw);
}
float __denom = __c * __c + __d * __d;
float _Complex z;
- __real__(z) = std::scalbn((__a * __c + __b * __d) / __denom, -__ilogbw);
- __imag__(z) = std::scalbn((__b * __c - __a * __d) / __denom, -__ilogbw);
- if (std::isnan(__real__(z)) && std::isnan(__imag__(z))) {
- if ((__denom == 0) && (!std::isnan(__a) || !std::isnan(__b))) {
- __real__(z) = std::copysign(__builtin_huge_valf(), __c) * __a;
- __imag__(z) = std::copysign(__builtin_huge_valf(), __c) * __b;
- } else if ((std::isinf(__a) || std::isinf(__b)) && std::isfinite(__c) &&
- std::isfinite(__d)) {
- __a = std::copysign(std::isinf(__a) ? 1 : 0, __a);
- __b = std::copysign(std::isinf(__b) ? 1 : 0, __b);
+ __real__(z) = _SCALBNf((__a * __c + __b * __d) / __denom, -__ilogbw);
+ __imag__(z) = _SCALBNf((__b * __c - __a * __d) / __denom, -__ilogbw);
+ if (_ISNANf(__real__(z)) && _ISNANf(__imag__(z))) {
+ if ((__denom == 0) && (!_ISNANf(__a) || !_ISNANf(__b))) {
+ __real__(z) = _COPYSIGNf(__builtin_huge_valf(), __c) * __a;
+ __imag__(z) = _COPYSIGNf(__builtin_huge_valf(), __c) * __b;
+ } else if ((_ISINFf(__a) || _ISINFf(__b)) && _ISFINITEf(__c) &&
+ _ISFINITEf(__d)) {
+ __a = _COPYSIGNf(_ISINFf(__a) ? 1 : 0, __a);
+ __b = _COPYSIGNf(_ISINFf(__b) ? 1 : 0, __b);
__real__(z) = __builtin_huge_valf() * (__a * __c + __b * __d);
__imag__(z) = __builtin_huge_valf() * (__b * __c - __a * __d);
- } else if (std::isinf(__logbw) && __logbw > 0 && std::isfinite(__a) &&
- std::isfinite(__b)) {
- __c = std::copysign(std::isinf(__c) ? 1 : 0, __c);
- __d = std::copysign(std::isinf(__d) ? 1 : 0, __d);
+ } else if (_ISINFf(__logbw) && __logbw > 0 && _ISFINITEf(__a) &&
+ _ISFINITEf(__b)) {
+ __c = _COPYSIGNf(_ISINFf(__c) ? 1 : 0, __c);
+ __d = _COPYSIGNf(_ISINFf(__d) ? 1 : 0, __d);
__real__(z) = 0 * (__a * __c + __b * __d);
__imag__(z) = 0 * (__b * __c - __a * __d);
}
@@ -186,4 +231,29 @@ extern "C" inline __device__ float _Complex __divsc3(float __a, float __b,
return z;
}
+#if defined(__cplusplus)
+} // extern "C"
+#endif
+
+#undef _ISNANd
+#undef _ISNANf
+#undef _ISINFd
+#undef _ISINFf
+#undef _COPYSIGNd
+#undef _COPYSIGNf
+#undef _ISFINITEd
+#undef _ISFINITEf
+#undef _SCALBNd
+#undef _SCALBNf
+#undef _ABSd
+#undef _ABSf
+#undef _LOGBd
+#undef _LOGBf
+
+#ifdef _OPENMP
+#pragma omp end declare target
+#endif
+
+#pragma pop_macro("__DEVICE__")
+
#endif // __CLANG_CUDA_COMPLEX_BUILTINS
diff --git a/clang/lib/Headers/__clang_cuda_device_functions.h b/clang/lib/Headers/__clang_cuda_device_functions.h
index 50ad674f9483..f801e5426aa4 100644
--- a/clang/lib/Headers/__clang_cuda_device_functions.h
+++ b/clang/lib/Headers/__clang_cuda_device_functions.h
@@ -10,7 +10,7 @@
#ifndef __CLANG_CUDA_DEVICE_FUNCTIONS_H__
#define __CLANG_CUDA_DEVICE_FUNCTIONS_H__
-#ifndef _OPENMP
+#ifndef __OPENMP_NVPTX__
#if CUDA_VERSION < 9000
#error This file is intended to be used with CUDA-9+ only.
#endif
@@ -20,32 +20,12 @@
// we implement in this file. We need static in order to avoid emitting unused
// functions and __forceinline__ helps inlining these wrappers at -O1.
#pragma push_macro("__DEVICE__")
-#ifdef _OPENMP
-#define __DEVICE__ static __attribute__((always_inline))
+#ifdef __OPENMP_NVPTX__
+#define __DEVICE__ static __attribute__((always_inline, nothrow))
#else
#define __DEVICE__ static __device__ __forceinline__
#endif
-// libdevice provides fast low precision and slow full-recision implementations
-// for some functions. Which one gets selected depends on
-// __CLANG_CUDA_APPROX_TRANSCENDENTALS__ which gets defined by clang if
-// -ffast-math or -fcuda-approx-transcendentals are in effect.
-#pragma push_macro("__FAST_OR_SLOW")
-#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)
-#define __FAST_OR_SLOW(fast, slow) fast
-#else
-#define __FAST_OR_SLOW(fast, slow) slow
-#endif
-
-// For C++ 17 we need to include noexcept attribute to be compatible
-// with the header-defined version. This may be removed once
-// variant is supported.
-#if defined(_OPENMP) && defined(__cplusplus) && __cplusplus >= 201703L
-#define __NOEXCEPT noexcept
-#else
-#define __NOEXCEPT
-#endif
-
__DEVICE__ int __all(int __a) { return __nvvm_vote_all(__a); }
__DEVICE__ int __any(int __a) { return __nvvm_vote_any(__a); }
__DEVICE__ unsigned int __ballot(int __a) { return __nvvm_vote_ballot(__a); }
@@ -359,10 +339,10 @@ __DEVICE__ int __iAtomicAdd(int *__p, int __v) {
return __nvvm_atom_add_gen_i(__p, __v);
}
__DEVICE__ int __iAtomicAdd_block(int *__p, int __v) {
- __nvvm_atom_cta_add_gen_i(__p, __v);
+ return __nvvm_atom_cta_add_gen_i(__p, __v);
}
__DEVICE__ int __iAtomicAdd_system(int *__p, int __v) {
- __nvvm_atom_sys_add_gen_i(__p, __v);
+ return __nvvm_atom_sys_add_gen_i(__p, __v);
}
__DEVICE__ int __iAtomicAnd(int *__p, int __v) {
return __nvvm_atom_and_gen_i(__p, __v);
@@ -1483,152 +1463,17 @@ __DEVICE__ unsigned int __vsubus4(unsigned int __a, unsigned int __b) {
return r;
}
#endif // CUDA_VERSION >= 9020
-__DEVICE__ int abs(int __a) __NOEXCEPT { return __nv_abs(__a); }
-__DEVICE__ double fabs(double __a) __NOEXCEPT { return __nv_fabs(__a); }
-__DEVICE__ double acos(double __a) { return __nv_acos(__a); }
-__DEVICE__ float acosf(float __a) { return __nv_acosf(__a); }
-__DEVICE__ double acosh(double __a) { return __nv_acosh(__a); }
-__DEVICE__ float acoshf(float __a) { return __nv_acoshf(__a); }
-__DEVICE__ double asin(double __a) { return __nv_asin(__a); }
-__DEVICE__ float asinf(float __a) { return __nv_asinf(__a); }
-__DEVICE__ double asinh(double __a) { return __nv_asinh(__a); }
-__DEVICE__ float asinhf(float __a) { return __nv_asinhf(__a); }
-__DEVICE__ double atan(double __a) { return __nv_atan(__a); }
-__DEVICE__ double atan2(double __a, double __b) { return __nv_atan2(__a, __b); }
-__DEVICE__ float atan2f(float __a, float __b) { return __nv_atan2f(__a, __b); }
-__DEVICE__ float atanf(float __a) { return __nv_atanf(__a); }
-__DEVICE__ double atanh(double __a) { return __nv_atanh(__a); }
-__DEVICE__ float atanhf(float __a) { return __nv_atanhf(__a); }
-__DEVICE__ double cbrt(double __a) { return __nv_cbrt(__a); }
-__DEVICE__ float cbrtf(float __a) { return __nv_cbrtf(__a); }
-__DEVICE__ double ceil(double __a) { return __nv_ceil(__a); }
-__DEVICE__ float ceilf(float __a) { return __nv_ceilf(__a); }
-#ifndef _OPENMP
-__DEVICE__ int clock() { return __nvvm_read_ptx_sreg_clock(); }
-__DEVICE__ long long clock64() { return __nvvm_read_ptx_sreg_clock64(); }
-#endif
-__DEVICE__ double copysign(double __a, double __b) {
- return __nv_copysign(__a, __b);
-}
-__DEVICE__ float copysignf(float __a, float __b) {
- return __nv_copysignf(__a, __b);
-}
-__DEVICE__ double cos(double __a) { return __nv_cos(__a); }
-__DEVICE__ float cosf(float __a) {
- return __FAST_OR_SLOW(__nv_fast_cosf, __nv_cosf)(__a);
-}
-__DEVICE__ double cosh(double __a) { return __nv_cosh(__a); }
-__DEVICE__ float coshf(float __a) { return __nv_coshf(__a); }
-__DEVICE__ double cospi(double __a) { return __nv_cospi(__a); }
-__DEVICE__ float cospif(float __a) { return __nv_cospif(__a); }
-__DEVICE__ double cyl_bessel_i0(double __a) { return __nv_cyl_bessel_i0(__a); }
-__DEVICE__ float cyl_bessel_i0f(float __a) { return __nv_cyl_bessel_i0f(__a); }
-__DEVICE__ double cyl_bessel_i1(double __a) { return __nv_cyl_bessel_i1(__a); }
-__DEVICE__ float cyl_bessel_i1f(float __a) { return __nv_cyl_bessel_i1f(__a); }
-__DEVICE__ double erf(double __a) { return __nv_erf(__a); }
-__DEVICE__ double erfc(double __a) { return __nv_erfc(__a); }
-__DEVICE__ float erfcf(float __a) { return __nv_erfcf(__a); }
-__DEVICE__ double erfcinv(double __a) { return __nv_erfcinv(__a); }
-__DEVICE__ float erfcinvf(float __a) { return __nv_erfcinvf(__a); }
-__DEVICE__ double erfcx(double __a) { return __nv_erfcx(__a); }
-__DEVICE__ float erfcxf(float __a) { return __nv_erfcxf(__a); }
-__DEVICE__ float erff(float __a) { return __nv_erff(__a); }
-__DEVICE__ double erfinv(double __a) { return __nv_erfinv(__a); }
-__DEVICE__ float erfinvf(float __a) { return __nv_erfinvf(__a); }
-__DEVICE__ double exp(double __a) { return __nv_exp(__a); }
-__DEVICE__ double exp10(double __a) { return __nv_exp10(__a); }
-__DEVICE__ float exp10f(float __a) { return __nv_exp10f(__a); }
-__DEVICE__ double exp2(double __a) { return __nv_exp2(__a); }
-__DEVICE__ float exp2f(float __a) { return __nv_exp2f(__a); }
-__DEVICE__ float expf(float __a) { return __nv_expf(__a); }
-__DEVICE__ double expm1(double __a) { return __nv_expm1(__a); }
-__DEVICE__ float expm1f(float __a) { return __nv_expm1f(__a); }
-__DEVICE__ float fabsf(float __a) { return __nv_fabsf(__a); }
-__DEVICE__ double fdim(double __a, double __b) { return __nv_fdim(__a, __b); }
-__DEVICE__ float fdimf(float __a, float __b) { return __nv_fdimf(__a, __b); }
-__DEVICE__ double fdivide(double __a, double __b) { return __a / __b; }
-__DEVICE__ float fdividef(float __a, float __b) {
-#if __FAST_MATH__ && !__CUDA_PREC_DIV
- return __nv_fast_fdividef(__a, __b);
-#else
- return __a / __b;
-#endif
-}
-__DEVICE__ double floor(double __f) { return __nv_floor(__f); }
-__DEVICE__ float floorf(float __f) { return __nv_floorf(__f); }
-__DEVICE__ double fma(double __a, double __b, double __c) {
- return __nv_fma(__a, __b, __c);
-}
-__DEVICE__ float fmaf(float __a, float __b, float __c) {
- return __nv_fmaf(__a, __b, __c);
-}
-__DEVICE__ double fmax(double __a, double __b) { return __nv_fmax(__a, __b); }
-__DEVICE__ float fmaxf(float __a, float __b) { return __nv_fmaxf(__a, __b); }
-__DEVICE__ double fmin(double __a, double __b) { return __nv_fmin(__a, __b); }
-__DEVICE__ float fminf(float __a, float __b) { return __nv_fminf(__a, __b); }
-__DEVICE__ double fmod(double __a, double __b) { return __nv_fmod(__a, __b); }
-__DEVICE__ float fmodf(float __a, float __b) { return __nv_fmodf(__a, __b); }
-__DEVICE__ double frexp(double __a, int *__b) { return __nv_frexp(__a, __b); }
-__DEVICE__ float frexpf(float __a, int *__b) { return __nv_frexpf(__a, __b); }
-__DEVICE__ double hypot(double __a, double __b) { return __nv_hypot(__a, __b); }
-__DEVICE__ float hypotf(float __a, float __b) { return __nv_hypotf(__a, __b); }
-__DEVICE__ int ilogb(double __a) { return __nv_ilogb(__a); }
-__DEVICE__ int ilogbf(float __a) { return __nv_ilogbf(__a); }
-__DEVICE__ double j0(double __a) { return __nv_j0(__a); }
-__DEVICE__ float j0f(float __a) { return __nv_j0f(__a); }
-__DEVICE__ double j1(double __a) { return __nv_j1(__a); }
-__DEVICE__ float j1f(float __a) { return __nv_j1f(__a); }
-__DEVICE__ double jn(int __n, double __a) { return __nv_jn(__n, __a); }
-__DEVICE__ float jnf(int __n, float __a) { return __nv_jnf(__n, __a); }
-#if defined(__LP64__) || defined(_WIN64)
-__DEVICE__ long labs(long __a) __NOEXCEPT { return __nv_llabs(__a); };
-#else
-__DEVICE__ long labs(long __a) __NOEXCEPT { return __nv_abs(__a); };
-#endif
-__DEVICE__ double ldexp(double __a, int __b) { return __nv_ldexp(__a, __b); }
-__DEVICE__ float ldexpf(float __a, int __b) { return __nv_ldexpf(__a, __b); }
-__DEVICE__ double lgamma(double __a) { return __nv_lgamma(__a); }
-__DEVICE__ float lgammaf(float __a) { return __nv_lgammaf(__a); }
-__DEVICE__ long long llabs(long long __a) __NOEXCEPT { return __nv_llabs(__a); }
-__DEVICE__ long long llmax(long long __a, long long __b) {
- return __nv_llmax(__a, __b);
-}
-__DEVICE__ long long llmin(long long __a, long long __b) {
- return __nv_llmin(__a, __b);
-}
-__DEVICE__ long long llrint(double __a) { return __nv_llrint(__a); }
-__DEVICE__ long long llrintf(float __a) { return __nv_llrintf(__a); }
-__DEVICE__ long long llround(double __a) { return __nv_llround(__a); }
-__DEVICE__ long long llroundf(float __a) { return __nv_llroundf(__a); }
-__DEVICE__ double log(double __a) { return __nv_log(__a); }
-__DEVICE__ double log10(double __a) { return __nv_log10(__a); }
-__DEVICE__ float log10f(float __a) { return __nv_log10f(__a); }
-__DEVICE__ double log1p(double __a) { return __nv_log1p(__a); }
-__DEVICE__ float log1pf(float __a) { return __nv_log1pf(__a); }
-__DEVICE__ double log2(double __a) { return __nv_log2(__a); }
-__DEVICE__ float log2f(float __a) {
- return __FAST_OR_SLOW(__nv_fast_log2f, __nv_log2f)(__a);
-}
-__DEVICE__ double logb(double __a) { return __nv_logb(__a); }
-__DEVICE__ float logbf(float __a) { return __nv_logbf(__a); }
-__DEVICE__ float logf(float __a) {
- return __FAST_OR_SLOW(__nv_fast_logf, __nv_logf)(__a);
-}
-#if defined(__LP64__) || defined(_WIN64)
-__DEVICE__ long lrint(double __a) { return llrint(__a); }
-__DEVICE__ long lrintf(float __a) { return __float2ll_rn(__a); }
-__DEVICE__ long lround(double __a) { return llround(__a); }
-__DEVICE__ long lroundf(float __a) { return llroundf(__a); }
-#else
-__DEVICE__ long lrint(double __a) { return (long)rint(__a); }
-__DEVICE__ long lrintf(float __a) { return __float2int_rn(__a); }
-__DEVICE__ long lround(double __a) { return round(__a); }
-__DEVICE__ long lroundf(float __a) { return roundf(__a); }
+
+// For OpenMP we require the user to include <time.h> as we need to know what
+// clock_t is on the system.
+#ifndef __OPENMP_NVPTX__
+__DEVICE__ /* clock_t= */ int clock() { return __nvvm_read_ptx_sreg_clock(); }
#endif
-__DEVICE__ int max(int __a, int __b) { return __nv_max(__a, __b); }
+__DEVICE__ long long clock64() { return __nvvm_read_ptx_sreg_clock64(); }
+
// These functions shouldn't be declared when including this header
// for math function resolution purposes.
-#ifndef _OPENMP
+#ifndef __OPENMP_NVPTX__
__DEVICE__ void *memcpy(void *__a, const void *__b, size_t __c) {
return __builtin_memcpy(__a, __b, __c);
}
@@ -1636,158 +1481,6 @@ __DEVICE__ void *memset(void *__a, int __b, size_t __c) {
return __builtin_memset(__a, __b, __c);
}
#endif
-__DEVICE__ int min(int __a, int __b) { return __nv_min(__a, __b); }
-__DEVICE__ double modf(double __a, double *__b) { return __nv_modf(__a, __b); }
-__DEVICE__ float modff(float __a, float *__b) { return __nv_modff(__a, __b); }
-__DEVICE__ double nearbyint(double __a) { return __nv_nearbyint(__a); }
-__DEVICE__ float nearbyintf(float __a) { return __nv_nearbyintf(__a); }
-__DEVICE__ double nextafter(double __a, double __b) {
- return __nv_nextafter(__a, __b);
-}
-__DEVICE__ float nextafterf(float __a, float __b) {
- return __nv_nextafterf(__a, __b);
-}
-__DEVICE__ double norm(int __dim, const double *__t) {
- return __nv_norm(__dim, __t);
-}
-__DEVICE__ double norm3d(double __a, double __b, double __c) {
- return __nv_norm3d(__a, __b, __c);
-}
-__DEVICE__ float norm3df(float __a, float __b, float __c) {
- return __nv_norm3df(__a, __b, __c);
-}
-__DEVICE__ double norm4d(double __a, double __b, double __c, double __d) {
- return __nv_norm4d(__a, __b, __c, __d);
-}
-__DEVICE__ float norm4df(float __a, float __b, float __c, float __d) {
- return __nv_norm4df(__a, __b, __c, __d);
-}
-__DEVICE__ double normcdf(double __a) { return __nv_normcdf(__a); }
-__DEVICE__ float normcdff(float __a) { return __nv_normcdff(__a); }
-__DEVICE__ double normcdfinv(double __a) { return __nv_normcdfinv(__a); }
-__DEVICE__ float normcdfinvf(float __a) { return __nv_normcdfinvf(__a); }
-__DEVICE__ float normf(int __dim, const float *__t) {
- return __nv_normf(__dim, __t);
-}
-__DEVICE__ double pow(double __a, double __b) { return __nv_pow(__a, __b); }
-__DEVICE__ float powf(float __a, float __b) { return __nv_powf(__a, __b); }
-__DEVICE__ double powi(double __a, int __b) { return __nv_powi(__a, __b); }
-__DEVICE__ float powif(float __a, int __b) { return __nv_powif(__a, __b); }
-__DEVICE__ double rcbrt(double __a) { return __nv_rcbrt(__a); }
-__DEVICE__ float rcbrtf(float __a) { return __nv_rcbrtf(__a); }
-__DEVICE__ double remainder(double __a, double __b) {
- return __nv_remainder(__a, __b);
-}
-__DEVICE__ float remainderf(float __a, float __b) {
- return __nv_remainderf(__a, __b);
-}
-__DEVICE__ double remquo(double __a, double __b, int *__c) {
- return __nv_remquo(__a, __b, __c);
-}
-__DEVICE__ float remquof(float __a, float __b, int *__c) {
- return __nv_remquof(__a, __b, __c);
-}
-__DEVICE__ double rhypot(double __a, double __b) {
- return __nv_rhypot(__a, __b);
-}
-__DEVICE__ float rhypotf(float __a, float __b) {
- return __nv_rhypotf(__a, __b);
-}
-__DEVICE__ double rint(double __a) { return __nv_rint(__a); }
-__DEVICE__ float rintf(float __a) { return __nv_rintf(__a); }
-__DEVICE__ double rnorm(int __a, const double *__b) {
- return __nv_rnorm(__a, __b);
-}
-__DEVICE__ double rnorm3d(double __a, double __b, double __c) {
- return __nv_rnorm3d(__a, __b, __c);
-}
-__DEVICE__ float rnorm3df(float __a, float __b, float __c) {
- return __nv_rnorm3df(__a, __b, __c);
-}
-__DEVICE__ double rnorm4d(double __a, double __b, double __c, double __d) {
- return __nv_rnorm4d(__a, __b, __c, __d);
-}
-__DEVICE__ float rnorm4df(float __a, float __b, float __c, float __d) {
- return __nv_rnorm4df(__a, __b, __c, __d);
-}
-__DEVICE__ float rnormf(int __dim, const float *__t) {
- return __nv_rnormf(__dim, __t);
-}
-__DEVICE__ double round(double __a) { return __nv_round(__a); }
-__DEVICE__ float roundf(float __a) { return __nv_roundf(__a); }
-__DEVICE__ double rsqrt(double __a) { return __nv_rsqrt(__a); }
-__DEVICE__ float rsqrtf(float __a) { return __nv_rsqrtf(__a); }
-__DEVICE__ double scalbn(double __a, int __b) { return __nv_scalbn(__a, __b); }
-__DEVICE__ float scalbnf(float __a, int __b) { return __nv_scalbnf(__a, __b); }
-// TODO: remove once variant is supported
-#ifndef _OPENMP
-__DEVICE__ double scalbln(double __a, long __b) {
- if (__b > INT_MAX)
- return __a > 0 ? HUGE_VAL : -HUGE_VAL;
- if (__b < INT_MIN)
- return __a > 0 ? 0.0 : -0.0;
- return scalbn(__a, (int)__b);
-}
-__DEVICE__ float scalblnf(float __a, long __b) {
- if (__b > INT_MAX)
- return __a > 0 ? HUGE_VALF : -HUGE_VALF;
- if (__b < INT_MIN)
- return __a > 0 ? 0.f : -0.f;
- return scalbnf(__a, (int)__b);
-}
-#endif
-__DEVICE__ double sin(double __a) { return __nv_sin(__a); }
-__DEVICE__ void sincos(double __a, double *__s, double *__c) {
- return __nv_sincos(__a, __s, __c);
-}
-__DEVICE__ void sincosf(float __a, float *__s, float *__c) {
- return __FAST_OR_SLOW(__nv_fast_sincosf, __nv_sincosf)(__a, __s, __c);
-}
-__DEVICE__ void sincospi(double __a, double *__s, double *__c) {
- return __nv_sincospi(__a, __s, __c);
-}
-__DEVICE__ void sincospif(float __a, float *__s, float *__c) {
- return __nv_sincospif(__a, __s, __c);
-}
-__DEVICE__ float sinf(float __a) {
- return __FAST_OR_SLOW(__nv_fast_sinf, __nv_sinf)(__a);
-}
-__DEVICE__ double sinh(double __a) { return __nv_sinh(__a); }
-__DEVICE__ float sinhf(float __a) { return __nv_sinhf(__a); }
-__DEVICE__ double sinpi(double __a) { return __nv_sinpi(__a); }
-__DEVICE__ float sinpif(float __a) { return __nv_sinpif(__a); }
-__DEVICE__ double sqrt(double __a) { return __nv_sqrt(__a); }
-__DEVICE__ float sqrtf(float __a) { return __nv_sqrtf(__a); }
-__DEVICE__ double tan(double __a) { return __nv_tan(__a); }
-__DEVICE__ float tanf(float __a) { return __nv_tanf(__a); }
-__DEVICE__ double tanh(double __a) { return __nv_tanh(__a); }
-__DEVICE__ float tanhf(float __a) { return __nv_tanhf(__a); }
-__DEVICE__ double tgamma(double __a) { return __nv_tgamma(__a); }
-__DEVICE__ float tgammaf(float __a) { return __nv_tgammaf(__a); }
-__DEVICE__ double trunc(double __a) { return __nv_trunc(__a); }
-__DEVICE__ float truncf(float __a) { return __nv_truncf(__a); }
-__DEVICE__ unsigned long long ullmax(unsigned long long __a,
- unsigned long long __b) {
- return __nv_ullmax(__a, __b);
-}
-__DEVICE__ unsigned long long ullmin(unsigned long long __a,
- unsigned long long __b) {
- return __nv_ullmin(__a, __b);
-}
-__DEVICE__ unsigned int umax(unsigned int __a, unsigned int __b) {
- return __nv_umax(__a, __b);
-}
-__DEVICE__ unsigned int umin(unsigned int __a, unsigned int __b) {
- return __nv_umin(__a, __b);
-}
-__DEVICE__ double y0(double __a) { return __nv_y0(__a); }
-__DEVICE__ float y0f(float __a) { return __nv_y0f(__a); }
-__DEVICE__ double y1(double __a) { return __nv_y1(__a); }
-__DEVICE__ float y1f(float __a) { return __nv_y1f(__a); }
-__DEVICE__ double yn(int __a, double __b) { return __nv_yn(__a, __b); }
-__DEVICE__ float ynf(int __a, float __b) { return __nv_ynf(__a, __b); }
-#undef __NOEXCEPT
#pragma pop_macro("__DEVICE__")
-#pragma pop_macro("__FAST_OR_SLOW")
#endif // __CLANG_CUDA_DEVICE_FUNCTIONS_H__
diff --git a/clang/lib/Headers/__clang_cuda_intrinsics.h b/clang/lib/Headers/__clang_cuda_intrinsics.h
index b67461a146fc..c7bff6a9d8fe 100644
--- a/clang/lib/Headers/__clang_cuda_intrinsics.h
+++ b/clang/lib/Headers/__clang_cuda_intrinsics.h
@@ -45,7 +45,7 @@
_Static_assert(sizeof(__val) == sizeof(__Bits)); \
_Static_assert(sizeof(__Bits) == 2 * sizeof(int)); \
__Bits __tmp; \
- memcpy(&__val, &__tmp, sizeof(__val)); \
+ memcpy(&__tmp, &__val, sizeof(__val)); \
__tmp.__a = ::__FnName(__tmp.__a, __offset, __width); \
__tmp.__b = ::__FnName(__tmp.__b, __offset, __width); \
long long __ret; \
@@ -129,7 +129,7 @@ __MAKE_SHUFFLES(__shfl_xor, __nvvm_shfl_bfly_i32, __nvvm_shfl_bfly_f32, 0x1f,
_Static_assert(sizeof(__val) == sizeof(__Bits)); \
_Static_assert(sizeof(__Bits) == 2 * sizeof(int)); \
__Bits __tmp; \
- memcpy(&__val, &__tmp, sizeof(__val)); \
+ memcpy(&__tmp, &__val, sizeof(__val)); \
__tmp.__a = ::__FnName(__mask, __tmp.__a, __offset, __width); \
__tmp.__b = ::__FnName(__mask, __tmp.__b, __offset, __width); \
long long __ret; \
diff --git a/clang/lib/Headers/__clang_cuda_libdevice_declares.h b/clang/lib/Headers/__clang_cuda_libdevice_declares.h
index 4d70353394c8..6173b589e3ef 100644
--- a/clang/lib/Headers/__clang_cuda_libdevice_declares.h
+++ b/clang/lib/Headers/__clang_cuda_libdevice_declares.h
@@ -14,7 +14,7 @@
extern "C" {
#endif
-#if defined(_OPENMP)
+#if defined(__OPENMP_NVPTX__)
#define __DEVICE__
#elif defined(__CUDA__)
#define __DEVICE__ __device__
diff --git a/clang/lib/Headers/__clang_cuda_math.h b/clang/lib/Headers/__clang_cuda_math.h
new file mode 100644
index 000000000000..332e616702ac
--- /dev/null
+++ b/clang/lib/Headers/__clang_cuda_math.h
@@ -0,0 +1,347 @@
+/*===---- __clang_cuda_math.h - Device-side CUDA math support --------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __CLANG_CUDA_MATH_H__
+#define __CLANG_CUDA_MATH_H__
+#ifndef __CUDA__
+#error "This file is for CUDA compilation only."
+#endif
+
+#ifndef __OPENMP_NVPTX__
+#if CUDA_VERSION < 9000
+#error This file is intended to be used with CUDA-9+ only.
+#endif
+#endif
+
+// __DEVICE__ is a helper macro with common set of attributes for the wrappers
+// we implement in this file. We need static in order to avoid emitting unused
+// functions and __forceinline__ helps inlining these wrappers at -O1.
+#pragma push_macro("__DEVICE__")
+#ifdef __OPENMP_NVPTX__
+#if defined(__cplusplus)
+#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
+#else
+#define __DEVICE__ static __attribute__((always_inline, nothrow))
+#endif
+#else
+#define __DEVICE__ static __device__ __forceinline__
+#endif
+
+// Specialized version of __DEVICE__ for functions with void return type. Needed
+// because the OpenMP overlay requires constexpr functions here but prior to
+// c++14 void return functions could not be constexpr.
+#pragma push_macro("__DEVICE_VOID__")
+#if defined(__OPENMP_NVPTX__) && defined(__cplusplus) && __cplusplus < 201402L
+#define __DEVICE_VOID__ static __attribute__((always_inline, nothrow))
+#else
+#define __DEVICE_VOID__ __DEVICE__
+#endif
+
+// libdevice provides fast low precision and slow full-precision implementations
+// for some functions. Which one gets selected depends on
+// __CLANG_CUDA_APPROX_TRANSCENDENTALS__ which gets defined by clang if
+// -ffast-math or -fcuda-approx-transcendentals are in effect.
+#pragma push_macro("__FAST_OR_SLOW")
+#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)
+#define __FAST_OR_SLOW(fast, slow) fast
+#else
+#define __FAST_OR_SLOW(fast, slow) slow
+#endif
+
+__DEVICE__ int abs(int __a) { return __nv_abs(__a); }
+__DEVICE__ double fabs(double __a) { return __nv_fabs(__a); }
+__DEVICE__ double acos(double __a) { return __nv_acos(__a); }
+__DEVICE__ float acosf(float __a) { return __nv_acosf(__a); }
+__DEVICE__ double acosh(double __a) { return __nv_acosh(__a); }
+__DEVICE__ float acoshf(float __a) { return __nv_acoshf(__a); }
+__DEVICE__ double asin(double __a) { return __nv_asin(__a); }
+__DEVICE__ float asinf(float __a) { return __nv_asinf(__a); }
+__DEVICE__ double asinh(double __a) { return __nv_asinh(__a); }
+__DEVICE__ float asinhf(float __a) { return __nv_asinhf(__a); }
+__DEVICE__ double atan(double __a) { return __nv_atan(__a); }
+__DEVICE__ double atan2(double __a, double __b) { return __nv_atan2(__a, __b); }
+__DEVICE__ float atan2f(float __a, float __b) { return __nv_atan2f(__a, __b); }
+__DEVICE__ float atanf(float __a) { return __nv_atanf(__a); }
+__DEVICE__ double atanh(double __a) { return __nv_atanh(__a); }
+__DEVICE__ float atanhf(float __a) { return __nv_atanhf(__a); }
+__DEVICE__ double cbrt(double __a) { return __nv_cbrt(__a); }
+__DEVICE__ float cbrtf(float __a) { return __nv_cbrtf(__a); }
+__DEVICE__ double ceil(double __a) { return __nv_ceil(__a); }
+__DEVICE__ float ceilf(float __a) { return __nv_ceilf(__a); }
+__DEVICE__ double copysign(double __a, double __b) {
+ return __nv_copysign(__a, __b);
+}
+__DEVICE__ float copysignf(float __a, float __b) {
+ return __nv_copysignf(__a, __b);
+}
+__DEVICE__ double cos(double __a) { return __nv_cos(__a); }
+__DEVICE__ float cosf(float __a) {
+ return __FAST_OR_SLOW(__nv_fast_cosf, __nv_cosf)(__a);
+}
+__DEVICE__ double cosh(double __a) { return __nv_cosh(__a); }
+__DEVICE__ float coshf(float __a) { return __nv_coshf(__a); }
+__DEVICE__ double cospi(double __a) { return __nv_cospi(__a); }
+__DEVICE__ float cospif(float __a) { return __nv_cospif(__a); }
+__DEVICE__ double cyl_bessel_i0(double __a) { return __nv_cyl_bessel_i0(__a); }
+__DEVICE__ float cyl_bessel_i0f(float __a) { return __nv_cyl_bessel_i0f(__a); }
+__DEVICE__ double cyl_bessel_i1(double __a) { return __nv_cyl_bessel_i1(__a); }
+__DEVICE__ float cyl_bessel_i1f(float __a) { return __nv_cyl_bessel_i1f(__a); }
+__DEVICE__ double erf(double __a) { return __nv_erf(__a); }
+__DEVICE__ double erfc(double __a) { return __nv_erfc(__a); }
+__DEVICE__ float erfcf(float __a) { return __nv_erfcf(__a); }
+__DEVICE__ double erfcinv(double __a) { return __nv_erfcinv(__a); }
+__DEVICE__ float erfcinvf(float __a) { return __nv_erfcinvf(__a); }
+__DEVICE__ double erfcx(double __a) { return __nv_erfcx(__a); }
+__DEVICE__ float erfcxf(float __a) { return __nv_erfcxf(__a); }
+__DEVICE__ float erff(float __a) { return __nv_erff(__a); }
+__DEVICE__ double erfinv(double __a) { return __nv_erfinv(__a); }
+__DEVICE__ float erfinvf(float __a) { return __nv_erfinvf(__a); }
+__DEVICE__ double exp(double __a) { return __nv_exp(__a); }
+__DEVICE__ double exp10(double __a) { return __nv_exp10(__a); }
+__DEVICE__ float exp10f(float __a) { return __nv_exp10f(__a); }
+__DEVICE__ double exp2(double __a) { return __nv_exp2(__a); }
+__DEVICE__ float exp2f(float __a) { return __nv_exp2f(__a); }
+__DEVICE__ float expf(float __a) { return __nv_expf(__a); }
+__DEVICE__ double expm1(double __a) { return __nv_expm1(__a); }
+__DEVICE__ float expm1f(float __a) { return __nv_expm1f(__a); }
+__DEVICE__ float fabsf(float __a) { return __nv_fabsf(__a); }
+__DEVICE__ double fdim(double __a, double __b) { return __nv_fdim(__a, __b); }
+__DEVICE__ float fdimf(float __a, float __b) { return __nv_fdimf(__a, __b); }
+__DEVICE__ double fdivide(double __a, double __b) { return __a / __b; }
+__DEVICE__ float fdividef(float __a, float __b) {
+#if __FAST_MATH__ && !__CUDA_PREC_DIV
+ return __nv_fast_fdividef(__a, __b);
+#else
+ return __a / __b;
+#endif
+}
+__DEVICE__ double floor(double __f) { return __nv_floor(__f); }
+__DEVICE__ float floorf(float __f) { return __nv_floorf(__f); }
+__DEVICE__ double fma(double __a, double __b, double __c) {
+ return __nv_fma(__a, __b, __c);
+}
+__DEVICE__ float fmaf(float __a, float __b, float __c) {
+ return __nv_fmaf(__a, __b, __c);
+}
+__DEVICE__ double fmax(double __a, double __b) { return __nv_fmax(__a, __b); }
+__DEVICE__ float fmaxf(float __a, float __b) { return __nv_fmaxf(__a, __b); }
+__DEVICE__ double fmin(double __a, double __b) { return __nv_fmin(__a, __b); }
+__DEVICE__ float fminf(float __a, float __b) { return __nv_fminf(__a, __b); }
+__DEVICE__ double fmod(double __a, double __b) { return __nv_fmod(__a, __b); }
+__DEVICE__ float fmodf(float __a, float __b) { return __nv_fmodf(__a, __b); }
+__DEVICE__ double frexp(double __a, int *__b) { return __nv_frexp(__a, __b); }
+__DEVICE__ float frexpf(float __a, int *__b) { return __nv_frexpf(__a, __b); }
+__DEVICE__ double hypot(double __a, double __b) { return __nv_hypot(__a, __b); }
+__DEVICE__ float hypotf(float __a, float __b) { return __nv_hypotf(__a, __b); }
+__DEVICE__ int ilogb(double __a) { return __nv_ilogb(__a); }
+__DEVICE__ int ilogbf(float __a) { return __nv_ilogbf(__a); }
+__DEVICE__ double j0(double __a) { return __nv_j0(__a); }
+__DEVICE__ float j0f(float __a) { return __nv_j0f(__a); }
+__DEVICE__ double j1(double __a) { return __nv_j1(__a); }
+__DEVICE__ float j1f(float __a) { return __nv_j1f(__a); }
+__DEVICE__ double jn(int __n, double __a) { return __nv_jn(__n, __a); }
+__DEVICE__ float jnf(int __n, float __a) { return __nv_jnf(__n, __a); }
+#if defined(__LP64__) || defined(_WIN64)
+__DEVICE__ long labs(long __a) { return __nv_llabs(__a); };
+#else
+__DEVICE__ long labs(long __a) { return __nv_abs(__a); };
+#endif
+__DEVICE__ double ldexp(double __a, int __b) { return __nv_ldexp(__a, __b); }
+__DEVICE__ float ldexpf(float __a, int __b) { return __nv_ldexpf(__a, __b); }
+__DEVICE__ double lgamma(double __a) { return __nv_lgamma(__a); }
+__DEVICE__ float lgammaf(float __a) { return __nv_lgammaf(__a); }
+__DEVICE__ long long llabs(long long __a) { return __nv_llabs(__a); }
+__DEVICE__ long long llmax(long long __a, long long __b) {
+ return __nv_llmax(__a, __b);
+}
+__DEVICE__ long long llmin(long long __a, long long __b) {
+ return __nv_llmin(__a, __b);
+}
+__DEVICE__ long long llrint(double __a) { return __nv_llrint(__a); }
+__DEVICE__ long long llrintf(float __a) { return __nv_llrintf(__a); }
+__DEVICE__ long long llround(double __a) { return __nv_llround(__a); }
+__DEVICE__ long long llroundf(float __a) { return __nv_llroundf(__a); }
+__DEVICE__ double log(double __a) { return __nv_log(__a); }
+__DEVICE__ double log10(double __a) { return __nv_log10(__a); }
+__DEVICE__ float log10f(float __a) { return __nv_log10f(__a); }
+__DEVICE__ double log1p(double __a) { return __nv_log1p(__a); }
+__DEVICE__ float log1pf(float __a) { return __nv_log1pf(__a); }
+__DEVICE__ double log2(double __a) { return __nv_log2(__a); }
+__DEVICE__ float log2f(float __a) {
+ return __FAST_OR_SLOW(__nv_fast_log2f, __nv_log2f)(__a);
+}
+__DEVICE__ double logb(double __a) { return __nv_logb(__a); }
+__DEVICE__ float logbf(float __a) { return __nv_logbf(__a); }
+__DEVICE__ float logf(float __a) {
+ return __FAST_OR_SLOW(__nv_fast_logf, __nv_logf)(__a);
+}
+#if defined(__LP64__) || defined(_WIN64)
+__DEVICE__ long lrint(double __a) { return llrint(__a); }
+__DEVICE__ long lrintf(float __a) { return __float2ll_rn(__a); }
+__DEVICE__ long lround(double __a) { return llround(__a); }
+__DEVICE__ long lroundf(float __a) { return llroundf(__a); }
+#else
+__DEVICE__ long lrint(double __a) { return (long)rint(__a); }
+__DEVICE__ long lrintf(float __a) { return __float2int_rn(__a); }
+__DEVICE__ long lround(double __a) { return round(__a); }
+__DEVICE__ long lroundf(float __a) { return roundf(__a); }
+#endif
+__DEVICE__ int max(int __a, int __b) { return __nv_max(__a, __b); }
+__DEVICE__ int min(int __a, int __b) { return __nv_min(__a, __b); }
+__DEVICE__ double modf(double __a, double *__b) { return __nv_modf(__a, __b); }
+__DEVICE__ float modff(float __a, float *__b) { return __nv_modff(__a, __b); }
+__DEVICE__ double nearbyint(double __a) { return __nv_nearbyint(__a); }
+__DEVICE__ float nearbyintf(float __a) { return __nv_nearbyintf(__a); }
+__DEVICE__ double nextafter(double __a, double __b) {
+ return __nv_nextafter(__a, __b);
+}
+__DEVICE__ float nextafterf(float __a, float __b) {
+ return __nv_nextafterf(__a, __b);
+}
+__DEVICE__ double norm(int __dim, const double *__t) {
+ return __nv_norm(__dim, __t);
+}
+__DEVICE__ double norm3d(double __a, double __b, double __c) {
+ return __nv_norm3d(__a, __b, __c);
+}
+__DEVICE__ float norm3df(float __a, float __b, float __c) {
+ return __nv_norm3df(__a, __b, __c);
+}
+__DEVICE__ double norm4d(double __a, double __b, double __c, double __d) {
+ return __nv_norm4d(__a, __b, __c, __d);
+}
+__DEVICE__ float norm4df(float __a, float __b, float __c, float __d) {
+ return __nv_norm4df(__a, __b, __c, __d);
+}
+__DEVICE__ double normcdf(double __a) { return __nv_normcdf(__a); }
+__DEVICE__ float normcdff(float __a) { return __nv_normcdff(__a); }
+__DEVICE__ double normcdfinv(double __a) { return __nv_normcdfinv(__a); }
+__DEVICE__ float normcdfinvf(float __a) { return __nv_normcdfinvf(__a); }
+__DEVICE__ float normf(int __dim, const float *__t) {
+ return __nv_normf(__dim, __t);
+}
+__DEVICE__ double pow(double __a, double __b) { return __nv_pow(__a, __b); }
+__DEVICE__ float powf(float __a, float __b) { return __nv_powf(__a, __b); }
+__DEVICE__ double powi(double __a, int __b) { return __nv_powi(__a, __b); }
+__DEVICE__ float powif(float __a, int __b) { return __nv_powif(__a, __b); }
+__DEVICE__ double rcbrt(double __a) { return __nv_rcbrt(__a); }
+__DEVICE__ float rcbrtf(float __a) { return __nv_rcbrtf(__a); }
+__DEVICE__ double remainder(double __a, double __b) {
+ return __nv_remainder(__a, __b);
+}
+__DEVICE__ float remainderf(float __a, float __b) {
+ return __nv_remainderf(__a, __b);
+}
+__DEVICE__ double remquo(double __a, double __b, int *__c) {
+ return __nv_remquo(__a, __b, __c);
+}
+__DEVICE__ float remquof(float __a, float __b, int *__c) {
+ return __nv_remquof(__a, __b, __c);
+}
+__DEVICE__ double rhypot(double __a, double __b) {
+ return __nv_rhypot(__a, __b);
+}
+__DEVICE__ float rhypotf(float __a, float __b) {
+ return __nv_rhypotf(__a, __b);
+}
+__DEVICE__ double rint(double __a) { return __nv_rint(__a); }
+__DEVICE__ float rintf(float __a) { return __nv_rintf(__a); }
+__DEVICE__ double rnorm(int __a, const double *__b) {
+ return __nv_rnorm(__a, __b);
+}
+__DEVICE__ double rnorm3d(double __a, double __b, double __c) {
+ return __nv_rnorm3d(__a, __b, __c);
+}
+__DEVICE__ float rnorm3df(float __a, float __b, float __c) {
+ return __nv_rnorm3df(__a, __b, __c);
+}
+__DEVICE__ double rnorm4d(double __a, double __b, double __c, double __d) {
+ return __nv_rnorm4d(__a, __b, __c, __d);
+}
+__DEVICE__ float rnorm4df(float __a, float __b, float __c, float __d) {
+ return __nv_rnorm4df(__a, __b, __c, __d);
+}
+__DEVICE__ float rnormf(int __dim, const float *__t) {
+ return __nv_rnormf(__dim, __t);
+}
+__DEVICE__ double round(double __a) { return __nv_round(__a); }
+__DEVICE__ float roundf(float __a) { return __nv_roundf(__a); }
+__DEVICE__ double rsqrt(double __a) { return __nv_rsqrt(__a); }
+__DEVICE__ float rsqrtf(float __a) { return __nv_rsqrtf(__a); }
+__DEVICE__ double scalbn(double __a, int __b) { return __nv_scalbn(__a, __b); }
+__DEVICE__ float scalbnf(float __a, int __b) { return __nv_scalbnf(__a, __b); }
+__DEVICE__ double scalbln(double __a, long __b) {
+ if (__b > INT_MAX)
+ return __a > 0 ? HUGE_VAL : -HUGE_VAL;
+ if (__b < INT_MIN)
+ return __a > 0 ? 0.0 : -0.0;
+ return scalbn(__a, (int)__b);
+}
+__DEVICE__ float scalblnf(float __a, long __b) {
+ if (__b > INT_MAX)
+ return __a > 0 ? HUGE_VALF : -HUGE_VALF;
+ if (__b < INT_MIN)
+ return __a > 0 ? 0.f : -0.f;
+ return scalbnf(__a, (int)__b);
+}
+__DEVICE__ double sin(double __a) { return __nv_sin(__a); }
+__DEVICE_VOID__ void sincos(double __a, double *__s, double *__c) {
+ return __nv_sincos(__a, __s, __c);
+}
+__DEVICE_VOID__ void sincosf(float __a, float *__s, float *__c) {
+ return __FAST_OR_SLOW(__nv_fast_sincosf, __nv_sincosf)(__a, __s, __c);
+}
+__DEVICE_VOID__ void sincospi(double __a, double *__s, double *__c) {
+ return __nv_sincospi(__a, __s, __c);
+}
+__DEVICE_VOID__ void sincospif(float __a, float *__s, float *__c) {
+ return __nv_sincospif(__a, __s, __c);
+}
+__DEVICE__ float sinf(float __a) {
+ return __FAST_OR_SLOW(__nv_fast_sinf, __nv_sinf)(__a);
+}
+__DEVICE__ double sinh(double __a) { return __nv_sinh(__a); }
+__DEVICE__ float sinhf(float __a) { return __nv_sinhf(__a); }
+__DEVICE__ double sinpi(double __a) { return __nv_sinpi(__a); }
+__DEVICE__ float sinpif(float __a) { return __nv_sinpif(__a); }
+__DEVICE__ double sqrt(double __a) { return __nv_sqrt(__a); }
+__DEVICE__ float sqrtf(float __a) { return __nv_sqrtf(__a); }
+__DEVICE__ double tan(double __a) { return __nv_tan(__a); }
+__DEVICE__ float tanf(float __a) { return __nv_tanf(__a); }
+__DEVICE__ double tanh(double __a) { return __nv_tanh(__a); }
+__DEVICE__ float tanhf(float __a) { return __nv_tanhf(__a); }
+__DEVICE__ double tgamma(double __a) { return __nv_tgamma(__a); }
+__DEVICE__ float tgammaf(float __a) { return __nv_tgammaf(__a); }
+__DEVICE__ double trunc(double __a) { return __nv_trunc(__a); }
+__DEVICE__ float truncf(float __a) { return __nv_truncf(__a); }
+__DEVICE__ unsigned long long ullmax(unsigned long long __a,
+ unsigned long long __b) {
+ return __nv_ullmax(__a, __b);
+}
+__DEVICE__ unsigned long long ullmin(unsigned long long __a,
+ unsigned long long __b) {
+ return __nv_ullmin(__a, __b);
+}
+__DEVICE__ unsigned int umax(unsigned int __a, unsigned int __b) {
+ return __nv_umax(__a, __b);
+}
+__DEVICE__ unsigned int umin(unsigned int __a, unsigned int __b) {
+ return __nv_umin(__a, __b);
+}
+__DEVICE__ double y0(double __a) { return __nv_y0(__a); }
+__DEVICE__ float y0f(float __a) { return __nv_y0f(__a); }
+__DEVICE__ double y1(double __a) { return __nv_y1(__a); }
+__DEVICE__ float y1f(float __a) { return __nv_y1f(__a); }
+__DEVICE__ double yn(int __a, double __b) { return __nv_yn(__a, __b); }
+__DEVICE__ float ynf(int __a, float __b) { return __nv_ynf(__a, __b); }
+
+#pragma pop_macro("__DEVICE__")
+#pragma pop_macro("__DEVICE_VOID__")
+#pragma pop_macro("__FAST_OR_SLOW")
+
+#endif // __CLANG_CUDA_MATH_H__
diff --git a/clang/lib/Headers/__clang_cuda_math_forward_declares.h b/clang/lib/Headers/__clang_cuda_math_forward_declares.h
index 0afe4db556db..8a270859e4a5 100644
--- a/clang/lib/Headers/__clang_cuda_math_forward_declares.h
+++ b/clang/lib/Headers/__clang_cuda_math_forward_declares.h
@@ -8,8 +8,8 @@
*/
#ifndef __CLANG__CUDA_MATH_FORWARD_DECLARES_H__
#define __CLANG__CUDA_MATH_FORWARD_DECLARES_H__
-#ifndef __CUDA__
-#error "This file is for CUDA compilation only."
+#if !defined(__CUDA__) && !__HIP__
+#error "This file is for CUDA/HIP compilation only."
#endif
// This file forward-declares of some math functions we (or the CUDA headers)
@@ -20,37 +20,14 @@
// would preclude the use of our own __device__ overloads for these functions.
#pragma push_macro("__DEVICE__")
-#ifdef _OPENMP
-#define __DEVICE__ static __inline__ __attribute__((always_inline))
-#else
#define __DEVICE__ \
static __inline__ __attribute__((always_inline)) __attribute__((device))
-#endif
-
-// For C++ 17 we need to include noexcept attribute to be compatible
-// with the header-defined version. This may be removed once
-// variant is supported.
-#if defined(_OPENMP) && defined(__cplusplus) && __cplusplus >= 201703L
-#define __NOEXCEPT noexcept
-#else
-#define __NOEXCEPT
-#endif
-#if !(defined(_OPENMP) && defined(__cplusplus))
__DEVICE__ long abs(long);
__DEVICE__ long long abs(long long);
__DEVICE__ double abs(double);
__DEVICE__ float abs(float);
-#endif
-// While providing the CUDA declarations and definitions for math functions,
-// we may manually define additional functions.
-// TODO: Once variant is supported the additional functions will have
-// to be removed.
-#if defined(_OPENMP) && defined(__cplusplus)
-__DEVICE__ const double abs(const double);
-__DEVICE__ const float abs(const float);
-#endif
-__DEVICE__ int abs(int) __NOEXCEPT;
+__DEVICE__ int abs(int);
__DEVICE__ double acos(double);
__DEVICE__ float acos(float);
__DEVICE__ double acosh(double);
@@ -85,8 +62,8 @@ __DEVICE__ double exp(double);
__DEVICE__ float exp(float);
__DEVICE__ double expm1(double);
__DEVICE__ float expm1(float);
-__DEVICE__ double fabs(double) __NOEXCEPT;
-__DEVICE__ float fabs(float) __NOEXCEPT;
+__DEVICE__ double fabs(double);
+__DEVICE__ float fabs(float);
__DEVICE__ double fdim(double, double);
__DEVICE__ float fdim(float, float);
__DEVICE__ double floor(double);
@@ -136,12 +113,12 @@ __DEVICE__ bool isnormal(double);
__DEVICE__ bool isnormal(float);
__DEVICE__ bool isunordered(double, double);
__DEVICE__ bool isunordered(float, float);
-__DEVICE__ long labs(long) __NOEXCEPT;
+__DEVICE__ long labs(long);
__DEVICE__ double ldexp(double, int);
__DEVICE__ float ldexp(float, int);
__DEVICE__ double lgamma(double);
__DEVICE__ float lgamma(float);
-__DEVICE__ long long llabs(long long) __NOEXCEPT;
+__DEVICE__ long long llabs(long long);
__DEVICE__ long long llrint(double);
__DEVICE__ long long llrint(float);
__DEVICE__ double log10(double);
@@ -152,9 +129,6 @@ __DEVICE__ double log2(double);
__DEVICE__ float log2(float);
__DEVICE__ double logb(double);
__DEVICE__ float logb(float);
-#if defined(_OPENMP) && defined(__cplusplus)
-__DEVICE__ long double log(long double);
-#endif
__DEVICE__ double log(double);
__DEVICE__ float log(float);
__DEVICE__ long lrint(double);
@@ -302,7 +276,6 @@ _GLIBCXX_END_NAMESPACE_VERSION
} // namespace std
#endif
-#undef __NOEXCEPT
#pragma pop_macro("__DEVICE__")
#endif
diff --git a/clang/lib/Headers/__clang_cuda_runtime_wrapper.h b/clang/lib/Headers/__clang_cuda_runtime_wrapper.h
index 3e362dd967db..f43ed55de489 100644
--- a/clang/lib/Headers/__clang_cuda_runtime_wrapper.h
+++ b/clang/lib/Headers/__clang_cuda_runtime_wrapper.h
@@ -31,11 +31,17 @@
// Include some forward declares that must come before cmath.
#include <__clang_cuda_math_forward_declares.h>
+// Define __CUDACC__ early as libstdc++ standard headers with GNU extensions
+// enabled depend on it to avoid using __float128, which is unsupported in
+// CUDA.
+#define __CUDACC__
+
// Include some standard headers to avoid CUDA headers including them
// while some required macros (like __THROW) are in a weird state.
#include <cmath>
#include <cstdlib>
#include <stdlib.h>
+#undef __CUDACC__
// Preserve common macros that will be changed below by us or by CUDA
// headers.
@@ -48,7 +54,7 @@
#include "cuda.h"
#if !defined(CUDA_VERSION)
#error "cuda.h did not define CUDA_VERSION"
-#elif CUDA_VERSION < 7000 || CUDA_VERSION > 10010
+#elif CUDA_VERSION < 7000
#error "Unsupported CUDA version!"
#endif
@@ -83,13 +89,15 @@
#if CUDA_VERSION < 9000
#define __CUDABE__
#else
+#define __CUDACC__
#define __CUDA_LIBDEVICE__
#endif
// Disables definitions of device-side runtime support stubs in
// cuda_device_runtime_api.h
+#include "host_defines.h"
+#undef __CUDACC__
#include "driver_types.h"
#include "host_config.h"
-#include "host_defines.h"
// Temporarily replace "nv_weak" with weak, so __attribute__((nv_weak)) in
// cuda_device_runtime_api.h ends up being __attribute__((weak)) which is the
@@ -141,11 +149,12 @@ inline __host__ double __signbitd(double x) {
// to provide our own.
#include <__clang_cuda_libdevice_declares.h>
-// Wrappers for many device-side standard library functions became compiler
-// builtins in CUDA-9 and have been removed from the CUDA headers. Clang now
-// provides its own implementation of the wrappers.
+// Wrappers for many device-side standard library functions, incl. math
+// functions, became compiler builtins in CUDA-9 and have been removed from the
+// CUDA headers. Clang now provides its own implementation of the wrappers.
#if CUDA_VERSION >= 9000
#include <__clang_cuda_device_functions.h>
+#include <__clang_cuda_math.h>
#endif
// __THROW is redefined to be empty by device_functions_decls.h in CUDA. Clang's
diff --git a/clang/lib/Headers/__clang_hip_libdevice_declares.h b/clang/lib/Headers/__clang_hip_libdevice_declares.h
new file mode 100644
index 000000000000..e1cd49a39c65
--- /dev/null
+++ b/clang/lib/Headers/__clang_hip_libdevice_declares.h
@@ -0,0 +1,326 @@
+/*===---- __clang_hip_libdevice_declares.h - HIP device library decls -------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_HIP_LIBDEVICE_DECLARES_H__
+#define __CLANG_HIP_LIBDEVICE_DECLARES_H__
+
+extern "C" {
+
+// BEGIN FLOAT
+__device__ __attribute__((const)) float __ocml_acos_f32(float);
+__device__ __attribute__((pure)) float __ocml_acosh_f32(float);
+__device__ __attribute__((const)) float __ocml_asin_f32(float);
+__device__ __attribute__((pure)) float __ocml_asinh_f32(float);
+__device__ __attribute__((const)) float __ocml_atan2_f32(float, float);
+__device__ __attribute__((const)) float __ocml_atan_f32(float);
+__device__ __attribute__((pure)) float __ocml_atanh_f32(float);
+__device__ __attribute__((pure)) float __ocml_cbrt_f32(float);
+__device__ __attribute__((const)) float __ocml_ceil_f32(float);
+__device__ __attribute__((const)) __device__ float __ocml_copysign_f32(float,
+ float);
+__device__ float __ocml_cos_f32(float);
+__device__ float __ocml_native_cos_f32(float);
+__device__ __attribute__((pure)) __device__ float __ocml_cosh_f32(float);
+__device__ float __ocml_cospi_f32(float);
+__device__ float __ocml_i0_f32(float);
+__device__ float __ocml_i1_f32(float);
+__device__ __attribute__((pure)) float __ocml_erfc_f32(float);
+__device__ __attribute__((pure)) float __ocml_erfcinv_f32(float);
+__device__ __attribute__((pure)) float __ocml_erfcx_f32(float);
+__device__ __attribute__((pure)) float __ocml_erf_f32(float);
+__device__ __attribute__((pure)) float __ocml_erfinv_f32(float);
+__device__ __attribute__((pure)) float __ocml_exp10_f32(float);
+__device__ __attribute__((pure)) float __ocml_native_exp10_f32(float);
+__device__ __attribute__((pure)) float __ocml_exp2_f32(float);
+__device__ __attribute__((pure)) float __ocml_exp_f32(float);
+__device__ __attribute__((pure)) float __ocml_native_exp_f32(float);
+__device__ __attribute__((pure)) float __ocml_expm1_f32(float);
+__device__ __attribute__((const)) float __ocml_fabs_f32(float);
+__device__ __attribute__((const)) float __ocml_fdim_f32(float, float);
+__device__ __attribute__((const)) float __ocml_floor_f32(float);
+__device__ __attribute__((const)) float __ocml_fma_f32(float, float, float);
+__device__ __attribute__((const)) float __ocml_fmax_f32(float, float);
+__device__ __attribute__((const)) float __ocml_fmin_f32(float, float);
+__device__ __attribute__((const)) __device__ float __ocml_fmod_f32(float,
+ float);
+__device__ float __ocml_frexp_f32(float,
+ __attribute__((address_space(5))) int *);
+__device__ __attribute__((const)) float __ocml_hypot_f32(float, float);
+__device__ __attribute__((const)) int __ocml_ilogb_f32(float);
+__device__ __attribute__((const)) int __ocml_isfinite_f32(float);
+__device__ __attribute__((const)) int __ocml_isinf_f32(float);
+__device__ __attribute__((const)) int __ocml_isnan_f32(float);
+__device__ float __ocml_j0_f32(float);
+__device__ float __ocml_j1_f32(float);
+__device__ __attribute__((const)) float __ocml_ldexp_f32(float, int);
+__device__ float __ocml_lgamma_f32(float);
+__device__ __attribute__((pure)) float __ocml_log10_f32(float);
+__device__ __attribute__((pure)) float __ocml_native_log10_f32(float);
+__device__ __attribute__((pure)) float __ocml_log1p_f32(float);
+__device__ __attribute__((pure)) float __ocml_log2_f32(float);
+__device__ __attribute__((pure)) float __ocml_native_log2_f32(float);
+__device__ __attribute__((const)) float __ocml_logb_f32(float);
+__device__ __attribute__((pure)) float __ocml_log_f32(float);
+__device__ __attribute__((pure)) float __ocml_native_log_f32(float);
+__device__ float __ocml_modf_f32(float,
+ __attribute__((address_space(5))) float *);
+__device__ __attribute__((const)) float __ocml_nearbyint_f32(float);
+__device__ __attribute__((const)) float __ocml_nextafter_f32(float, float);
+__device__ __attribute__((const)) float __ocml_len3_f32(float, float, float);
+__device__ __attribute__((const)) float __ocml_len4_f32(float, float, float,
+ float);
+__device__ __attribute__((pure)) float __ocml_ncdf_f32(float);
+__device__ __attribute__((pure)) float __ocml_ncdfinv_f32(float);
+__device__ __attribute__((pure)) float __ocml_pow_f32(float, float);
+__device__ __attribute__((pure)) float __ocml_rcbrt_f32(float);
+__device__ __attribute__((const)) float __ocml_remainder_f32(float, float);
+__device__ float __ocml_remquo_f32(float, float,
+ __attribute__((address_space(5))) int *);
+__device__ __attribute__((const)) float __ocml_rhypot_f32(float, float);
+__device__ __attribute__((const)) float __ocml_rint_f32(float);
+__device__ __attribute__((const)) float __ocml_rlen3_f32(float, float, float);
+__device__ __attribute__((const)) float __ocml_rlen4_f32(float, float, float,
+ float);
+__device__ __attribute__((const)) float __ocml_round_f32(float);
+__device__ __attribute__((pure)) float __ocml_rsqrt_f32(float);
+__device__ __attribute__((const)) float __ocml_scalb_f32(float, float);
+__device__ __attribute__((const)) float __ocml_scalbn_f32(float, int);
+__device__ __attribute__((const)) int __ocml_signbit_f32(float);
+__device__ float __ocml_sincos_f32(float,
+ __attribute__((address_space(5))) float *);
+__device__ float __ocml_sincospi_f32(float,
+ __attribute__((address_space(5))) float *);
+__device__ float __ocml_sin_f32(float);
+__device__ float __ocml_native_sin_f32(float);
+__device__ __attribute__((pure)) float __ocml_sinh_f32(float);
+__device__ float __ocml_sinpi_f32(float);
+__device__ __attribute__((const)) float __ocml_sqrt_f32(float);
+__device__ __attribute__((const)) float __ocml_native_sqrt_f32(float);
+__device__ float __ocml_tan_f32(float);
+__device__ __attribute__((pure)) float __ocml_tanh_f32(float);
+__device__ float __ocml_tgamma_f32(float);
+__device__ __attribute__((const)) float __ocml_trunc_f32(float);
+__device__ float __ocml_y0_f32(float);
+__device__ float __ocml_y1_f32(float);
+
+// BEGIN INTRINSICS
+__device__ __attribute__((const)) float __ocml_add_rte_f32(float, float);
+__device__ __attribute__((const)) float __ocml_add_rtn_f32(float, float);
+__device__ __attribute__((const)) float __ocml_add_rtp_f32(float, float);
+__device__ __attribute__((const)) float __ocml_add_rtz_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sub_rte_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sub_rtn_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sub_rtp_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sub_rtz_f32(float, float);
+__device__ __attribute__((const)) float __ocml_mul_rte_f32(float, float);
+__device__ __attribute__((const)) float __ocml_mul_rtn_f32(float, float);
+__device__ __attribute__((const)) float __ocml_mul_rtp_f32(float, float);
+__device__ __attribute__((const)) float __ocml_mul_rtz_f32(float, float);
+__device__ __attribute__((const)) float __ocml_div_rte_f32(float, float);
+__device__ __attribute__((const)) float __ocml_div_rtn_f32(float, float);
+__device__ __attribute__((const)) float __ocml_div_rtp_f32(float, float);
+__device__ __attribute__((const)) float __ocml_div_rtz_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sqrt_rte_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sqrt_rtn_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sqrt_rtp_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sqrt_rtz_f32(float, float);
+__device__ __attribute__((const)) float __ocml_fma_rte_f32(float, float, float);
+__device__ __attribute__((const)) float __ocml_fma_rtn_f32(float, float, float);
+__device__ __attribute__((const)) float __ocml_fma_rtp_f32(float, float, float);
+__device__ __attribute__((const)) float __ocml_fma_rtz_f32(float, float, float);
+
+__device__ __attribute__((const)) float
+__llvm_amdgcn_cos_f32(float) __asm("llvm.amdgcn.cos.f32");
+__device__ __attribute__((const)) float
+__llvm_amdgcn_rcp_f32(float) __asm("llvm.amdgcn.rcp.f32");
+__device__ __attribute__((const)) float
+__llvm_amdgcn_rsq_f32(float) __asm("llvm.amdgcn.rsq.f32");
+__device__ __attribute__((const)) float
+__llvm_amdgcn_sin_f32(float) __asm("llvm.amdgcn.sin.f32");
+// END INTRINSICS
+// END FLOAT
+
+// BEGIN DOUBLE
+__device__ __attribute__((const)) double __ocml_acos_f64(double);
+__device__ __attribute__((pure)) double __ocml_acosh_f64(double);
+__device__ __attribute__((const)) double __ocml_asin_f64(double);
+__device__ __attribute__((pure)) double __ocml_asinh_f64(double);
+__device__ __attribute__((const)) double __ocml_atan2_f64(double, double);
+__device__ __attribute__((const)) double __ocml_atan_f64(double);
+__device__ __attribute__((pure)) double __ocml_atanh_f64(double);
+__device__ __attribute__((pure)) double __ocml_cbrt_f64(double);
+__device__ __attribute__((const)) double __ocml_ceil_f64(double);
+__device__ __attribute__((const)) double __ocml_copysign_f64(double, double);
+__device__ double __ocml_cos_f64(double);
+__device__ __attribute__((pure)) double __ocml_cosh_f64(double);
+__device__ double __ocml_cospi_f64(double);
+__device__ double __ocml_i0_f64(double);
+__device__ double __ocml_i1_f64(double);
+__device__ __attribute__((pure)) double __ocml_erfc_f64(double);
+__device__ __attribute__((pure)) double __ocml_erfcinv_f64(double);
+__device__ __attribute__((pure)) double __ocml_erfcx_f64(double);
+__device__ __attribute__((pure)) double __ocml_erf_f64(double);
+__device__ __attribute__((pure)) double __ocml_erfinv_f64(double);
+__device__ __attribute__((pure)) double __ocml_exp10_f64(double);
+__device__ __attribute__((pure)) double __ocml_exp2_f64(double);
+__device__ __attribute__((pure)) double __ocml_exp_f64(double);
+__device__ __attribute__((pure)) double __ocml_expm1_f64(double);
+__device__ __attribute__((const)) double __ocml_fabs_f64(double);
+__device__ __attribute__((const)) double __ocml_fdim_f64(double, double);
+__device__ __attribute__((const)) double __ocml_floor_f64(double);
+__device__ __attribute__((const)) double __ocml_fma_f64(double, double, double);
+__device__ __attribute__((const)) double __ocml_fmax_f64(double, double);
+__device__ __attribute__((const)) double __ocml_fmin_f64(double, double);
+__device__ __attribute__((const)) double __ocml_fmod_f64(double, double);
+__device__ double __ocml_frexp_f64(double,
+ __attribute__((address_space(5))) int *);
+__device__ __attribute__((const)) double __ocml_hypot_f64(double, double);
+__device__ __attribute__((const)) int __ocml_ilogb_f64(double);
+__device__ __attribute__((const)) int __ocml_isfinite_f64(double);
+__device__ __attribute__((const)) int __ocml_isinf_f64(double);
+__device__ __attribute__((const)) int __ocml_isnan_f64(double);
+__device__ double __ocml_j0_f64(double);
+__device__ double __ocml_j1_f64(double);
+__device__ __attribute__((const)) double __ocml_ldexp_f64(double, int);
+__device__ double __ocml_lgamma_f64(double);
+__device__ __attribute__((pure)) double __ocml_log10_f64(double);
+__device__ __attribute__((pure)) double __ocml_log1p_f64(double);
+__device__ __attribute__((pure)) double __ocml_log2_f64(double);
+__device__ __attribute__((const)) double __ocml_logb_f64(double);
+__device__ __attribute__((pure)) double __ocml_log_f64(double);
+__device__ double __ocml_modf_f64(double,
+ __attribute__((address_space(5))) double *);
+__device__ __attribute__((const)) double __ocml_nearbyint_f64(double);
+__device__ __attribute__((const)) double __ocml_nextafter_f64(double, double);
+__device__ __attribute__((const)) double __ocml_len3_f64(double, double,
+ double);
+__device__ __attribute__((const)) double __ocml_len4_f64(double, double, double,
+ double);
+__device__ __attribute__((pure)) double __ocml_ncdf_f64(double);
+__device__ __attribute__((pure)) double __ocml_ncdfinv_f64(double);
+__device__ __attribute__((pure)) double __ocml_pow_f64(double, double);
+__device__ __attribute__((pure)) double __ocml_rcbrt_f64(double);
+__device__ __attribute__((const)) double __ocml_remainder_f64(double, double);
+__device__ double __ocml_remquo_f64(double, double,
+ __attribute__((address_space(5))) int *);
+__device__ __attribute__((const)) double __ocml_rhypot_f64(double, double);
+__device__ __attribute__((const)) double __ocml_rint_f64(double);
+__device__ __attribute__((const)) double __ocml_rlen3_f64(double, double,
+ double);
+__device__ __attribute__((const)) double __ocml_rlen4_f64(double, double,
+ double, double);
+__device__ __attribute__((const)) double __ocml_round_f64(double);
+__device__ __attribute__((pure)) double __ocml_rsqrt_f64(double);
+__device__ __attribute__((const)) double __ocml_scalb_f64(double, double);
+__device__ __attribute__((const)) double __ocml_scalbn_f64(double, int);
+__device__ __attribute__((const)) int __ocml_signbit_f64(double);
+__device__ double __ocml_sincos_f64(double,
+ __attribute__((address_space(5))) double *);
+__device__ double
+__ocml_sincospi_f64(double, __attribute__((address_space(5))) double *);
+__device__ double __ocml_sin_f64(double);
+__device__ __attribute__((pure)) double __ocml_sinh_f64(double);
+__device__ double __ocml_sinpi_f64(double);
+__device__ __attribute__((const)) double __ocml_sqrt_f64(double);
+__device__ double __ocml_tan_f64(double);
+__device__ __attribute__((pure)) double __ocml_tanh_f64(double);
+__device__ double __ocml_tgamma_f64(double);
+__device__ __attribute__((const)) double __ocml_trunc_f64(double);
+__device__ double __ocml_y0_f64(double);
+__device__ double __ocml_y1_f64(double);
+
+// BEGIN INTRINSICS
+__device__ __attribute__((const)) double __ocml_add_rte_f64(double, double);
+__device__ __attribute__((const)) double __ocml_add_rtn_f64(double, double);
+__device__ __attribute__((const)) double __ocml_add_rtp_f64(double, double);
+__device__ __attribute__((const)) double __ocml_add_rtz_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sub_rte_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sub_rtn_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sub_rtp_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sub_rtz_f64(double, double);
+__device__ __attribute__((const)) double __ocml_mul_rte_f64(double, double);
+__device__ __attribute__((const)) double __ocml_mul_rtn_f64(double, double);
+__device__ __attribute__((const)) double __ocml_mul_rtp_f64(double, double);
+__device__ __attribute__((const)) double __ocml_mul_rtz_f64(double, double);
+__device__ __attribute__((const)) double __ocml_div_rte_f64(double, double);
+__device__ __attribute__((const)) double __ocml_div_rtn_f64(double, double);
+__device__ __attribute__((const)) double __ocml_div_rtp_f64(double, double);
+__device__ __attribute__((const)) double __ocml_div_rtz_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sqrt_rte_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sqrt_rtn_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sqrt_rtp_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sqrt_rtz_f64(double, double);
+__device__ __attribute__((const)) double __ocml_fma_rte_f64(double, double,
+ double);
+__device__ __attribute__((const)) double __ocml_fma_rtn_f64(double, double,
+ double);
+__device__ __attribute__((const)) double __ocml_fma_rtp_f64(double, double,
+ double);
+__device__ __attribute__((const)) double __ocml_fma_rtz_f64(double, double,
+ double);
+
+__device__ __attribute__((const)) double
+__llvm_amdgcn_rcp_f64(double) __asm("llvm.amdgcn.rcp.f64");
+__device__ __attribute__((const)) double
+__llvm_amdgcn_rsq_f64(double) __asm("llvm.amdgcn.rsq.f64");
+
+__device__ __attribute__((const)) _Float16 __ocml_ceil_f16(_Float16);
+__device__ _Float16 __ocml_cos_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_exp_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_exp10_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_exp2_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_floor_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_fma_f16(_Float16, _Float16,
+ _Float16);
+__device__ __attribute__((const)) _Float16 __ocml_fabs_f16(_Float16);
+__device__ __attribute__((const)) int __ocml_isinf_f16(_Float16);
+__device__ __attribute__((const)) int __ocml_isnan_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_log_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_log10_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_log2_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __llvm_amdgcn_rcp_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_rint_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_rsqrt_f16(_Float16);
+__device__ _Float16 __ocml_sin_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_sqrt_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_trunc_f16(_Float16);
+
+typedef _Float16 __2f16 __attribute__((ext_vector_type(2)));
+typedef short __2i16 __attribute__((ext_vector_type(2)));
+
+__device__ __attribute__((const)) float __ockl_fdot2(__2f16 a, __2f16 b,
+ float c, bool s);
+__device__ __attribute__((const)) __2f16 __ocml_ceil_2f16(__2f16);
+__device__ __attribute__((const)) __2f16 __ocml_fabs_2f16(__2f16);
+__device__ __2f16 __ocml_cos_2f16(__2f16);
+__device__ __attribute__((pure)) __2f16 __ocml_exp_2f16(__2f16);
+__device__ __attribute__((pure)) __2f16 __ocml_exp10_2f16(__2f16);
+__device__ __attribute__((pure)) __2f16 __ocml_exp2_2f16(__2f16);
+__device__ __attribute__((const)) __2f16 __ocml_floor_2f16(__2f16);
+__device__ __attribute__((const))
+__2f16 __ocml_fma_2f16(__2f16, __2f16, __2f16);
+__device__ __attribute__((const)) __2i16 __ocml_isinf_2f16(__2f16);
+__device__ __attribute__((const)) __2i16 __ocml_isnan_2f16(__2f16);
+__device__ __attribute__((pure)) __2f16 __ocml_log_2f16(__2f16);
+__device__ __attribute__((pure)) __2f16 __ocml_log10_2f16(__2f16);
+__device__ __attribute__((pure)) __2f16 __ocml_log2_2f16(__2f16);
+__device__ inline __2f16
+__llvm_amdgcn_rcp_2f16(__2f16 __x) // Not currently exposed by ROCDL.
+{
+ return __2f16{__llvm_amdgcn_rcp_f16(__x.x), __llvm_amdgcn_rcp_f16(__x.y)};
+}
+__device__ __attribute__((const)) __2f16 __ocml_rint_2f16(__2f16);
+__device__ __attribute__((const)) __2f16 __ocml_rsqrt_2f16(__2f16);
+__device__ __2f16 __ocml_sin_2f16(__2f16);
+__device__ __attribute__((const)) __2f16 __ocml_sqrt_2f16(__2f16);
+__device__ __attribute__((const)) __2f16 __ocml_trunc_2f16(__2f16);
+
+} // extern "C"
+
+#endif // __CLANG_HIP_LIBDEVICE_DECLARES_H__
diff --git a/clang/lib/Headers/__clang_hip_math.h b/clang/lib/Headers/__clang_hip_math.h
new file mode 100644
index 000000000000..cf7014b9aefe
--- /dev/null
+++ b/clang/lib/Headers/__clang_hip_math.h
@@ -0,0 +1,1185 @@
+/*===---- __clang_hip_math.h - HIP math decls -------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_HIP_MATH_H__
+#define __CLANG_HIP_MATH_H__
+
+#include <algorithm>
+#include <limits.h>
+#include <limits>
+#include <stdint.h>
+
+#pragma push_macro("__DEVICE__")
+#pragma push_macro("__RETURN_TYPE")
+
+// to be consistent with __clang_cuda_math_forward_declares
+#define __DEVICE__ static __device__
+#define __RETURN_TYPE bool
+
+__DEVICE__
+inline uint64_t __make_mantissa_base8(const char *__tagp) {
+ uint64_t __r = 0;
+ while (__tagp) {
+ char __tmp = *__tagp;
+
+ if (__tmp >= '0' && __tmp <= '7')
+ __r = (__r * 8u) + __tmp - '0';
+ else
+ return 0;
+
+ ++__tagp;
+ }
+
+ return __r;
+}
+
+__DEVICE__
+inline uint64_t __make_mantissa_base10(const char *__tagp) {
+ uint64_t __r = 0;
+ while (__tagp) {
+ char __tmp = *__tagp;
+
+ if (__tmp >= '0' && __tmp <= '9')
+ __r = (__r * 10u) + __tmp - '0';
+ else
+ return 0;
+
+ ++__tagp;
+ }
+
+ return __r;
+}
+
+__DEVICE__
+inline uint64_t __make_mantissa_base16(const char *__tagp) {
+ uint64_t __r = 0;
+ while (__tagp) {
+ char __tmp = *__tagp;
+
+ if (__tmp >= '0' && __tmp <= '9')
+ __r = (__r * 16u) + __tmp - '0';
+ else if (__tmp >= 'a' && __tmp <= 'f')
+ __r = (__r * 16u) + __tmp - 'a' + 10;
+ else if (__tmp >= 'A' && __tmp <= 'F')
+ __r = (__r * 16u) + __tmp - 'A' + 10;
+ else
+ return 0;
+
+ ++__tagp;
+ }
+
+ return __r;
+}
+
+__DEVICE__
+inline uint64_t __make_mantissa(const char *__tagp) {
+ if (!__tagp)
+ return 0u;
+
+ if (*__tagp == '0') {
+ ++__tagp;
+
+ if (*__tagp == 'x' || *__tagp == 'X')
+ return __make_mantissa_base16(__tagp);
+ else
+ return __make_mantissa_base8(__tagp);
+ }
+
+ return __make_mantissa_base10(__tagp);
+}
+
+// BEGIN FLOAT
+__DEVICE__
+inline float abs(float __x) { return __ocml_fabs_f32(__x); }
+__DEVICE__
+inline float acosf(float __x) { return __ocml_acos_f32(__x); }
+__DEVICE__
+inline float acoshf(float __x) { return __ocml_acosh_f32(__x); }
+__DEVICE__
+inline float asinf(float __x) { return __ocml_asin_f32(__x); }
+__DEVICE__
+inline float asinhf(float __x) { return __ocml_asinh_f32(__x); }
+__DEVICE__
+inline float atan2f(float __x, float __y) { return __ocml_atan2_f32(__x, __y); }
+__DEVICE__
+inline float atanf(float __x) { return __ocml_atan_f32(__x); }
+__DEVICE__
+inline float atanhf(float __x) { return __ocml_atanh_f32(__x); }
+__DEVICE__
+inline float cbrtf(float __x) { return __ocml_cbrt_f32(__x); }
+__DEVICE__
+inline float ceilf(float __x) { return __ocml_ceil_f32(__x); }
+__DEVICE__
+inline float copysignf(float __x, float __y) {
+ return __ocml_copysign_f32(__x, __y);
+}
+__DEVICE__
+inline float cosf(float __x) { return __ocml_cos_f32(__x); }
+__DEVICE__
+inline float coshf(float __x) { return __ocml_cosh_f32(__x); }
+__DEVICE__
+inline float cospif(float __x) { return __ocml_cospi_f32(__x); }
+__DEVICE__
+inline float cyl_bessel_i0f(float __x) { return __ocml_i0_f32(__x); }
+__DEVICE__
+inline float cyl_bessel_i1f(float __x) { return __ocml_i1_f32(__x); }
+__DEVICE__
+inline float erfcf(float __x) { return __ocml_erfc_f32(__x); }
+__DEVICE__
+inline float erfcinvf(float __x) { return __ocml_erfcinv_f32(__x); }
+__DEVICE__
+inline float erfcxf(float __x) { return __ocml_erfcx_f32(__x); }
+__DEVICE__
+inline float erff(float __x) { return __ocml_erf_f32(__x); }
+__DEVICE__
+inline float erfinvf(float __x) { return __ocml_erfinv_f32(__x); }
+__DEVICE__
+inline float exp10f(float __x) { return __ocml_exp10_f32(__x); }
+__DEVICE__
+inline float exp2f(float __x) { return __ocml_exp2_f32(__x); }
+__DEVICE__
+inline float expf(float __x) { return __ocml_exp_f32(__x); }
+__DEVICE__
+inline float expm1f(float __x) { return __ocml_expm1_f32(__x); }
+__DEVICE__
+inline float fabsf(float __x) { return __ocml_fabs_f32(__x); }
+__DEVICE__
+inline float fdimf(float __x, float __y) { return __ocml_fdim_f32(__x, __y); }
+__DEVICE__
+inline float fdividef(float __x, float __y) { return __x / __y; }
+__DEVICE__
+inline float floorf(float __x) { return __ocml_floor_f32(__x); }
+__DEVICE__
+inline float fmaf(float __x, float __y, float __z) {
+ return __ocml_fma_f32(__x, __y, __z);
+}
+__DEVICE__
+inline float fmaxf(float __x, float __y) { return __ocml_fmax_f32(__x, __y); }
+__DEVICE__
+inline float fminf(float __x, float __y) { return __ocml_fmin_f32(__x, __y); }
+__DEVICE__
+inline float fmodf(float __x, float __y) { return __ocml_fmod_f32(__x, __y); }
+__DEVICE__
+inline float frexpf(float __x, int *__nptr) {
+ int __tmp;
+ float __r =
+ __ocml_frexp_f32(__x, (__attribute__((address_space(5))) int *)&__tmp);
+ *__nptr = __tmp;
+
+ return __r;
+}
+__DEVICE__
+inline float hypotf(float __x, float __y) { return __ocml_hypot_f32(__x, __y); }
+__DEVICE__
+inline int ilogbf(float __x) { return __ocml_ilogb_f32(__x); }
+__DEVICE__
+inline __RETURN_TYPE isfinite(float __x) { return __ocml_isfinite_f32(__x); }
+__DEVICE__
+inline __RETURN_TYPE isinf(float __x) { return __ocml_isinf_f32(__x); }
+__DEVICE__
+inline __RETURN_TYPE isnan(float __x) { return __ocml_isnan_f32(__x); }
+__DEVICE__
+inline float j0f(float __x) { return __ocml_j0_f32(__x); }
+__DEVICE__
+inline float j1f(float __x) { return __ocml_j1_f32(__x); }
+__DEVICE__
+inline float jnf(int __n,
+ float __x) { // TODO: we could use Ahmes multiplication
+ // and the Miller & Brown algorithm
+ // for linear recurrences to get O(log n) steps, but it's unclear if
+ // it'd be beneficial in this case.
+ if (__n == 0)
+ return j0f(__x);
+ if (__n == 1)
+ return j1f(__x);
+
+ float __x0 = j0f(__x);
+ float __x1 = j1f(__x);
+ for (int __i = 1; __i < __n; ++__i) {
+ float __x2 = (2 * __i) / __x * __x1 - __x0;
+ __x0 = __x1;
+ __x1 = __x2;
+ }
+
+ return __x1;
+}
+__DEVICE__
+inline float ldexpf(float __x, int __e) { return __ocml_ldexp_f32(__x, __e); }
+__DEVICE__
+inline float lgammaf(float __x) { return __ocml_lgamma_f32(__x); }
+__DEVICE__
+inline long long int llrintf(float __x) { return __ocml_rint_f32(__x); }
+__DEVICE__
+inline long long int llroundf(float __x) { return __ocml_round_f32(__x); }
+__DEVICE__
+inline float log10f(float __x) { return __ocml_log10_f32(__x); }
+__DEVICE__
+inline float log1pf(float __x) { return __ocml_log1p_f32(__x); }
+__DEVICE__
+inline float log2f(float __x) { return __ocml_log2_f32(__x); }
+__DEVICE__
+inline float logbf(float __x) { return __ocml_logb_f32(__x); }
+__DEVICE__
+inline float logf(float __x) { return __ocml_log_f32(__x); }
+__DEVICE__
+inline long int lrintf(float __x) { return __ocml_rint_f32(__x); }
+__DEVICE__
+inline long int lroundf(float __x) { return __ocml_round_f32(__x); }
+__DEVICE__
+inline float modff(float __x, float *__iptr) {
+ float __tmp;
+ float __r =
+ __ocml_modf_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
+ *__iptr = __tmp;
+
+ return __r;
+}
+__DEVICE__
+inline float nanf(const char *__tagp) {
+ union {
+ float val;
+ struct ieee_float {
+ uint32_t mantissa : 22;
+ uint32_t quiet : 1;
+ uint32_t exponent : 8;
+ uint32_t sign : 1;
+ } bits;
+
+ static_assert(sizeof(float) == sizeof(ieee_float), "");
+ } __tmp;
+
+ __tmp.bits.sign = 0u;
+ __tmp.bits.exponent = ~0u;
+ __tmp.bits.quiet = 1u;
+ __tmp.bits.mantissa = __make_mantissa(__tagp);
+
+ return __tmp.val;
+}
+__DEVICE__
+inline float nearbyintf(float __x) { return __ocml_nearbyint_f32(__x); }
+__DEVICE__
+inline float nextafterf(float __x, float __y) {
+ return __ocml_nextafter_f32(__x, __y);
+}
+__DEVICE__
+inline float norm3df(float __x, float __y, float __z) {
+ return __ocml_len3_f32(__x, __y, __z);
+}
+__DEVICE__
+inline float norm4df(float __x, float __y, float __z, float __w) {
+ return __ocml_len4_f32(__x, __y, __z, __w);
+}
+__DEVICE__
+inline float normcdff(float __x) { return __ocml_ncdf_f32(__x); }
+__DEVICE__
+inline float normcdfinvf(float __x) { return __ocml_ncdfinv_f32(__x); }
+__DEVICE__
+inline float
+normf(int __dim,
+ const float *__a) { // TODO: placeholder until OCML adds support.
+ float __r = 0;
+ while (__dim--) {
+ __r += __a[0] * __a[0];
+ ++__a;
+ }
+
+ return __ocml_sqrt_f32(__r);
+}
+__DEVICE__
+inline float powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
+__DEVICE__
+inline float rcbrtf(float __x) { return __ocml_rcbrt_f32(__x); }
+__DEVICE__
+inline float remainderf(float __x, float __y) {
+ return __ocml_remainder_f32(__x, __y);
+}
+__DEVICE__
+inline float remquof(float __x, float __y, int *__quo) {
+ int __tmp;
+ float __r = __ocml_remquo_f32(
+ __x, __y, (__attribute__((address_space(5))) int *)&__tmp);
+ *__quo = __tmp;
+
+ return __r;
+}
+__DEVICE__
+inline float rhypotf(float __x, float __y) {
+ return __ocml_rhypot_f32(__x, __y);
+}
+__DEVICE__
+inline float rintf(float __x) { return __ocml_rint_f32(__x); }
+__DEVICE__
+inline float rnorm3df(float __x, float __y, float __z) {
+ return __ocml_rlen3_f32(__x, __y, __z);
+}
+
+__DEVICE__
+inline float rnorm4df(float __x, float __y, float __z, float __w) {
+ return __ocml_rlen4_f32(__x, __y, __z, __w);
+}
+__DEVICE__
+inline float
+rnormf(int __dim,
+ const float *__a) { // TODO: placeholder until OCML adds support.
+ float __r = 0;
+ while (__dim--) {
+ __r += __a[0] * __a[0];
+ ++__a;
+ }
+
+ return __ocml_rsqrt_f32(__r);
+}
+__DEVICE__
+inline float roundf(float __x) { return __ocml_round_f32(__x); }
+__DEVICE__
+inline float rsqrtf(float __x) { return __ocml_rsqrt_f32(__x); }
+__DEVICE__
+inline float scalblnf(float __x, long int __n) {
+ return (__n < INT_MAX) ? __ocml_scalbn_f32(__x, __n)
+ : __ocml_scalb_f32(__x, __n);
+}
+__DEVICE__
+inline float scalbnf(float __x, int __n) { return __ocml_scalbn_f32(__x, __n); }
+__DEVICE__
+inline __RETURN_TYPE signbit(float __x) { return __ocml_signbit_f32(__x); }
+__DEVICE__
+inline void sincosf(float __x, float *__sinptr, float *__cosptr) {
+ float __tmp;
+
+ *__sinptr =
+ __ocml_sincos_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
+ *__cosptr = __tmp;
+}
+__DEVICE__
+inline void sincospif(float __x, float *__sinptr, float *__cosptr) {
+ float __tmp;
+
+ *__sinptr = __ocml_sincospi_f32(
+ __x, (__attribute__((address_space(5))) float *)&__tmp);
+ *__cosptr = __tmp;
+}
+__DEVICE__
+inline float sinf(float __x) { return __ocml_sin_f32(__x); }
+__DEVICE__
+inline float sinhf(float __x) { return __ocml_sinh_f32(__x); }
+__DEVICE__
+inline float sinpif(float __x) { return __ocml_sinpi_f32(__x); }
+__DEVICE__
+inline float sqrtf(float __x) { return __ocml_sqrt_f32(__x); }
+__DEVICE__
+inline float tanf(float __x) { return __ocml_tan_f32(__x); }
+__DEVICE__
+inline float tanhf(float __x) { return __ocml_tanh_f32(__x); }
+__DEVICE__
+inline float tgammaf(float __x) { return __ocml_tgamma_f32(__x); }
+__DEVICE__
+inline float truncf(float __x) { return __ocml_trunc_f32(__x); }
+__DEVICE__
+inline float y0f(float __x) { return __ocml_y0_f32(__x); }
+__DEVICE__
+inline float y1f(float __x) { return __ocml_y1_f32(__x); }
+__DEVICE__
+inline float ynf(int __n,
+ float __x) { // TODO: we could use Ahmes multiplication
+ // and the Miller & Brown algorithm
+ // for linear recurrences to get O(log n) steps, but it's unclear if
+ // it'd be beneficial in this case. Placeholder until OCML adds
+ // support.
+ if (__n == 0)
+ return y0f(__x);
+ if (__n == 1)
+ return y1f(__x);
+
+ float __x0 = y0f(__x);
+ float __x1 = y1f(__x);
+ for (int __i = 1; __i < __n; ++__i) {
+ float __x2 = (2 * __i) / __x * __x1 - __x0;
+ __x0 = __x1;
+ __x1 = __x2;
+ }
+
+ return __x1;
+}
+
+// BEGIN INTRINSICS
+__DEVICE__
+inline float __cosf(float __x) { return __ocml_native_cos_f32(__x); }
+__DEVICE__
+inline float __exp10f(float __x) { return __ocml_native_exp10_f32(__x); }
+__DEVICE__
+inline float __expf(float __x) { return __ocml_native_exp_f32(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fadd_rd(float __x, float __y) {
+ return __ocml_add_rtn_f32(__x, __y);
+}
+#endif
+__DEVICE__
+inline float __fadd_rn(float __x, float __y) { return __x + __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fadd_ru(float __x, float __y) {
+ return __ocml_add_rtp_f32(__x, __y);
+}
+__DEVICE__
+inline float __fadd_rz(float __x, float __y) {
+ return __ocml_add_rtz_f32(__x, __y);
+}
+__DEVICE__
+inline float __fdiv_rd(float __x, float __y) {
+ return __ocml_div_rtn_f32(__x, __y);
+}
+#endif
+__DEVICE__
+inline float __fdiv_rn(float __x, float __y) { return __x / __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fdiv_ru(float __x, float __y) {
+ return __ocml_div_rtp_f32(__x, __y);
+}
+__DEVICE__
+inline float __fdiv_rz(float __x, float __y) {
+ return __ocml_div_rtz_f32(__x, __y);
+}
+#endif
+__DEVICE__
+inline float __fdividef(float __x, float __y) { return __x / __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fmaf_rd(float __x, float __y, float __z) {
+ return __ocml_fma_rtn_f32(__x, __y, __z);
+}
+#endif
+__DEVICE__
+inline float __fmaf_rn(float __x, float __y, float __z) {
+ return __ocml_fma_f32(__x, __y, __z);
+}
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fmaf_ru(float __x, float __y, float __z) {
+ return __ocml_fma_rtp_f32(__x, __y, __z);
+}
+__DEVICE__
+inline float __fmaf_rz(float __x, float __y, float __z) {
+ return __ocml_fma_rtz_f32(__x, __y, __z);
+}
+__DEVICE__
+inline float __fmul_rd(float __x, float __y) {
+ return __ocml_mul_rtn_f32(__x, __y);
+}
+#endif
+__DEVICE__
+inline float __fmul_rn(float __x, float __y) { return __x * __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fmul_ru(float __x, float __y) {
+ return __ocml_mul_rtp_f32(__x, __y);
+}
+__DEVICE__
+inline float __fmul_rz(float __x, float __y) {
+ return __ocml_mul_rtz_f32(__x, __y);
+}
+__DEVICE__
+inline float __frcp_rd(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+#endif
+__DEVICE__
+inline float __frcp_rn(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __frcp_ru(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+__DEVICE__
+inline float __frcp_rz(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+#endif
+__DEVICE__
+inline float __frsqrt_rn(float __x) { return __llvm_amdgcn_rsq_f32(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fsqrt_rd(float __x) { return __ocml_sqrt_rtn_f32(__x); }
+#endif
+__DEVICE__
+inline float __fsqrt_rn(float __x) { return __ocml_native_sqrt_f32(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fsqrt_ru(float __x) { return __ocml_sqrt_rtp_f32(__x); }
+__DEVICE__
+inline float __fsqrt_rz(float __x) { return __ocml_sqrt_rtz_f32(__x); }
+__DEVICE__
+inline float __fsub_rd(float __x, float __y) {
+ return __ocml_sub_rtn_f32(__x, __y);
+}
+#endif
+__DEVICE__
+inline float __fsub_rn(float __x, float __y) { return __x - __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fsub_ru(float __x, float __y) {
+ return __ocml_sub_rtp_f32(__x, __y);
+}
+__DEVICE__
+inline float __fsub_rz(float __x, float __y) {
+ return __ocml_sub_rtz_f32(__x, __y);
+}
+#endif
+__DEVICE__
+inline float __log10f(float __x) { return __ocml_native_log10_f32(__x); }
+__DEVICE__
+inline float __log2f(float __x) { return __ocml_native_log2_f32(__x); }
+__DEVICE__
+inline float __logf(float __x) { return __ocml_native_log_f32(__x); }
+__DEVICE__
+inline float __powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
+__DEVICE__
+inline float __saturatef(float __x) {
+ return (__x < 0) ? 0 : ((__x > 1) ? 1 : __x);
+}
+__DEVICE__
+inline void __sincosf(float __x, float *__sinptr, float *__cosptr) {
+ *__sinptr = __ocml_native_sin_f32(__x);
+ *__cosptr = __ocml_native_cos_f32(__x);
+}
+__DEVICE__
+inline float __sinf(float __x) { return __ocml_native_sin_f32(__x); }
+__DEVICE__
+inline float __tanf(float __x) { return __ocml_tan_f32(__x); }
+// END INTRINSICS
+// END FLOAT
+
+// BEGIN DOUBLE
+__DEVICE__
+inline double abs(double __x) { return __ocml_fabs_f64(__x); }
+__DEVICE__
+inline double acos(double __x) { return __ocml_acos_f64(__x); }
+__DEVICE__
+inline double acosh(double __x) { return __ocml_acosh_f64(__x); }
+__DEVICE__
+inline double asin(double __x) { return __ocml_asin_f64(__x); }
+__DEVICE__
+inline double asinh(double __x) { return __ocml_asinh_f64(__x); }
+__DEVICE__
+inline double atan(double __x) { return __ocml_atan_f64(__x); }
+__DEVICE__
+inline double atan2(double __x, double __y) {
+ return __ocml_atan2_f64(__x, __y);
+}
+__DEVICE__
+inline double atanh(double __x) { return __ocml_atanh_f64(__x); }
+__DEVICE__
+inline double cbrt(double __x) { return __ocml_cbrt_f64(__x); }
+__DEVICE__
+inline double ceil(double __x) { return __ocml_ceil_f64(__x); }
+__DEVICE__
+inline double copysign(double __x, double __y) {
+ return __ocml_copysign_f64(__x, __y);
+}
+__DEVICE__
+inline double cos(double __x) { return __ocml_cos_f64(__x); }
+__DEVICE__
+inline double cosh(double __x) { return __ocml_cosh_f64(__x); }
+__DEVICE__
+inline double cospi(double __x) { return __ocml_cospi_f64(__x); }
+__DEVICE__
+inline double cyl_bessel_i0(double __x) { return __ocml_i0_f64(__x); }
+__DEVICE__
+inline double cyl_bessel_i1(double __x) { return __ocml_i1_f64(__x); }
+__DEVICE__
+inline double erf(double __x) { return __ocml_erf_f64(__x); }
+__DEVICE__
+inline double erfc(double __x) { return __ocml_erfc_f64(__x); }
+__DEVICE__
+inline double erfcinv(double __x) { return __ocml_erfcinv_f64(__x); }
+__DEVICE__
+inline double erfcx(double __x) { return __ocml_erfcx_f64(__x); }
+__DEVICE__
+inline double erfinv(double __x) { return __ocml_erfinv_f64(__x); }
+__DEVICE__
+inline double exp(double __x) { return __ocml_exp_f64(__x); }
+__DEVICE__
+inline double exp10(double __x) { return __ocml_exp10_f64(__x); }
+__DEVICE__
+inline double exp2(double __x) { return __ocml_exp2_f64(__x); }
+__DEVICE__
+inline double expm1(double __x) { return __ocml_expm1_f64(__x); }
+__DEVICE__
+inline double fabs(double __x) { return __ocml_fabs_f64(__x); }
+__DEVICE__
+inline double fdim(double __x, double __y) { return __ocml_fdim_f64(__x, __y); }
+__DEVICE__
+inline double floor(double __x) { return __ocml_floor_f64(__x); }
+__DEVICE__
+inline double fma(double __x, double __y, double __z) {
+ return __ocml_fma_f64(__x, __y, __z);
+}
+__DEVICE__
+inline double fmax(double __x, double __y) { return __ocml_fmax_f64(__x, __y); }
+__DEVICE__
+inline double fmin(double __x, double __y) { return __ocml_fmin_f64(__x, __y); }
+__DEVICE__
+inline double fmod(double __x, double __y) { return __ocml_fmod_f64(__x, __y); }
__DEVICE__
// Decompose __x into a normalized fraction and an integral power of two
// stored through __nptr (C99 frexp semantics, delegated to OCML).
// The OCML entry point takes its out-parameter in address space 5 (the
// AMDGPU private/stack address space), so the exponent is staged through
// a local temporary instead of passing the generic pointer __nptr directly.
inline double frexp(double __x, int *__nptr) {
  int __tmp;
  double __r =
      __ocml_frexp_f64(__x, (__attribute__((address_space(5))) int *)&__tmp);
  *__nptr = __tmp;

  return __r;
}
+__DEVICE__
+inline double hypot(double __x, double __y) {
+ return __ocml_hypot_f64(__x, __y);
+}
+__DEVICE__
+inline int ilogb(double __x) { return __ocml_ilogb_f64(__x); }
+__DEVICE__
+inline __RETURN_TYPE isfinite(double __x) { return __ocml_isfinite_f64(__x); }
+__DEVICE__
+inline __RETURN_TYPE isinf(double __x) { return __ocml_isinf_f64(__x); }
+__DEVICE__
+inline __RETURN_TYPE isnan(double __x) { return __ocml_isnan_f64(__x); }
+__DEVICE__
+inline double j0(double __x) { return __ocml_j0_f64(__x); }
+__DEVICE__
+inline double j1(double __x) { return __ocml_j1_f64(__x); }
+__DEVICE__
+inline double jn(int __n,
+ double __x) { // TODO: we could use Ahmes multiplication
+ // and the Miller & Brown algorithm
+ // for linear recurrences to get O(log n) steps, but it's unclear if
+ // it'd be beneficial in this case. Placeholder until OCML adds
+ // support.
+ if (__n == 0)
+ return j0f(__x);
+ if (__n == 1)
+ return j1f(__x);
+
+ double __x0 = j0f(__x);
+ double __x1 = j1f(__x);
+ for (int __i = 1; __i < __n; ++__i) {
+ double __x2 = (2 * __i) / __x * __x1 - __x0;
+ __x0 = __x1;
+ __x1 = __x2;
+ }
+
+ return __x1;
+}
+__DEVICE__
+inline double ldexp(double __x, int __e) { return __ocml_ldexp_f64(__x, __e); }
+__DEVICE__
+inline double lgamma(double __x) { return __ocml_lgamma_f64(__x); }
+__DEVICE__
+inline long long int llrint(double __x) { return __ocml_rint_f64(__x); }
+__DEVICE__
+inline long long int llround(double __x) { return __ocml_round_f64(__x); }
+__DEVICE__
+inline double log(double __x) { return __ocml_log_f64(__x); }
+__DEVICE__
+inline double log10(double __x) { return __ocml_log10_f64(__x); }
+__DEVICE__
+inline double log1p(double __x) { return __ocml_log1p_f64(__x); }
+__DEVICE__
+inline double log2(double __x) { return __ocml_log2_f64(__x); }
+__DEVICE__
+inline double logb(double __x) { return __ocml_logb_f64(__x); }
+__DEVICE__
+inline long int lrint(double __x) { return __ocml_rint_f64(__x); }
+__DEVICE__
+inline long int lround(double __x) { return __ocml_round_f64(__x); }
+__DEVICE__
+inline double modf(double __x, double *__iptr) {
+ double __tmp;
+ double __r =
+ __ocml_modf_f64(__x, (__attribute__((address_space(5))) double *)&__tmp);
+ *__iptr = __tmp;
+
+ return __r;
+}
+__DEVICE__
+inline double nan(const char *__tagp) {
+#if !_WIN32
+ union {
+ double val;
+ struct ieee_double {
+ uint64_t mantissa : 51;
+ uint32_t quiet : 1;
+ uint32_t exponent : 11;
+ uint32_t sign : 1;
+ } bits;
+ static_assert(sizeof(double) == sizeof(ieee_double), "");
+ } __tmp;
+
+ __tmp.bits.sign = 0u;
+ __tmp.bits.exponent = ~0u;
+ __tmp.bits.quiet = 1u;
+ __tmp.bits.mantissa = __make_mantissa(__tagp);
+
+ return __tmp.val;
+#else
+ static_assert(sizeof(uint64_t) == sizeof(double));
+ uint64_t val = __make_mantissa(__tagp);
+ val |= 0xFFF << 51;
+ return *reinterpret_cast<double *>(&val);
+#endif
+}
+__DEVICE__
+inline double nearbyint(double __x) { return __ocml_nearbyint_f64(__x); }
+__DEVICE__
+inline double nextafter(double __x, double __y) {
+ return __ocml_nextafter_f64(__x, __y);
+}
+__DEVICE__
+inline double
+norm(int __dim,
+ const double *__a) { // TODO: placeholder until OCML adds support.
+ double __r = 0;
+ while (__dim--) {
+ __r += __a[0] * __a[0];
+ ++__a;
+ }
+
+ return __ocml_sqrt_f64(__r);
+}
+__DEVICE__
+inline double norm3d(double __x, double __y, double __z) {
+ return __ocml_len3_f64(__x, __y, __z);
+}
+__DEVICE__
+inline double norm4d(double __x, double __y, double __z, double __w) {
+ return __ocml_len4_f64(__x, __y, __z, __w);
+}
+__DEVICE__
+inline double normcdf(double __x) { return __ocml_ncdf_f64(__x); }
+__DEVICE__
+inline double normcdfinv(double __x) { return __ocml_ncdfinv_f64(__x); }
+__DEVICE__
+inline double pow(double __x, double __y) { return __ocml_pow_f64(__x, __y); }
+__DEVICE__
+inline double rcbrt(double __x) { return __ocml_rcbrt_f64(__x); }
+__DEVICE__
+inline double remainder(double __x, double __y) {
+ return __ocml_remainder_f64(__x, __y);
+}
+__DEVICE__
+inline double remquo(double __x, double __y, int *__quo) {
+ int __tmp;
+ double __r = __ocml_remquo_f64(
+ __x, __y, (__attribute__((address_space(5))) int *)&__tmp);
+ *__quo = __tmp;
+
+ return __r;
+}
+__DEVICE__
+inline double rhypot(double __x, double __y) {
+ return __ocml_rhypot_f64(__x, __y);
+}
+__DEVICE__
+inline double rint(double __x) { return __ocml_rint_f64(__x); }
+__DEVICE__
+inline double
+rnorm(int __dim,
+ const double *__a) { // TODO: placeholder until OCML adds support.
+ double __r = 0;
+ while (__dim--) {
+ __r += __a[0] * __a[0];
+ ++__a;
+ }
+
+ return __ocml_rsqrt_f64(__r);
+}
+__DEVICE__
+inline double rnorm3d(double __x, double __y, double __z) {
+ return __ocml_rlen3_f64(__x, __y, __z);
+}
+__DEVICE__
+inline double rnorm4d(double __x, double __y, double __z, double __w) {
+ return __ocml_rlen4_f64(__x, __y, __z, __w);
+}
+__DEVICE__
+inline double round(double __x) { return __ocml_round_f64(__x); }
+__DEVICE__
+inline double rsqrt(double __x) { return __ocml_rsqrt_f64(__x); }
+__DEVICE__
+inline double scalbln(double __x, long int __n) {
+ return (__n < INT_MAX) ? __ocml_scalbn_f64(__x, __n)
+ : __ocml_scalb_f64(__x, __n);
+}
+__DEVICE__
+inline double scalbn(double __x, int __n) {
+ return __ocml_scalbn_f64(__x, __n);
+}
+__DEVICE__
+inline __RETURN_TYPE signbit(double __x) { return __ocml_signbit_f64(__x); }
+__DEVICE__
+inline double sin(double __x) { return __ocml_sin_f64(__x); }
+__DEVICE__
+inline void sincos(double __x, double *__sinptr, double *__cosptr) {
+ double __tmp;
+ *__sinptr = __ocml_sincos_f64(
+ __x, (__attribute__((address_space(5))) double *)&__tmp);
+ *__cosptr = __tmp;
+}
+__DEVICE__
+inline void sincospi(double __x, double *__sinptr, double *__cosptr) {
+ double __tmp;
+ *__sinptr = __ocml_sincospi_f64(
+ __x, (__attribute__((address_space(5))) double *)&__tmp);
+ *__cosptr = __tmp;
+}
+__DEVICE__
+inline double sinh(double __x) { return __ocml_sinh_f64(__x); }
+__DEVICE__
+inline double sinpi(double __x) { return __ocml_sinpi_f64(__x); }
+__DEVICE__
+inline double sqrt(double __x) { return __ocml_sqrt_f64(__x); }
+__DEVICE__
+inline double tan(double __x) { return __ocml_tan_f64(__x); }
+__DEVICE__
+inline double tanh(double __x) { return __ocml_tanh_f64(__x); }
+__DEVICE__
+inline double tgamma(double __x) { return __ocml_tgamma_f64(__x); }
+__DEVICE__
+inline double trunc(double __x) { return __ocml_trunc_f64(__x); }
+__DEVICE__
+inline double y0(double __x) { return __ocml_y0_f64(__x); }
+__DEVICE__
+inline double y1(double __x) { return __ocml_y1_f64(__x); }
+__DEVICE__
+inline double yn(int __n,
+ double __x) { // TODO: we could use Ahmes multiplication
+ // and the Miller & Brown algorithm
+ // for linear recurrences to get O(log n) steps, but it's unclear if
+ // it'd be beneficial in this case. Placeholder until OCML adds
+ // support.
+ if (__n == 0)
+ return j0f(__x);
+ if (__n == 1)
+ return j1f(__x);
+
+ double __x0 = j0f(__x);
+ double __x1 = j1f(__x);
+ for (int __i = 1; __i < __n; ++__i) {
+ double __x2 = (2 * __i) / __x * __x1 - __x0;
+ __x0 = __x1;
+ __x1 = __x2;
+ }
+
+ return __x1;
+}
+
+// BEGIN INTRINSICS
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __dadd_rd(double __x, double __y) {
+ return __ocml_add_rtn_f64(__x, __y);
+}
+#endif
+__DEVICE__
+inline double __dadd_rn(double __x, double __y) { return __x + __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __dadd_ru(double __x, double __y) {
+ return __ocml_add_rtp_f64(__x, __y);
+}
+__DEVICE__
+inline double __dadd_rz(double __x, double __y) {
+ return __ocml_add_rtz_f64(__x, __y);
+}
+__DEVICE__
+inline double __ddiv_rd(double __x, double __y) {
+ return __ocml_div_rtn_f64(__x, __y);
+}
+#endif
+__DEVICE__
+inline double __ddiv_rn(double __x, double __y) { return __x / __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __ddiv_ru(double __x, double __y) {
+ return __ocml_div_rtp_f64(__x, __y);
+}
+__DEVICE__
+inline double __ddiv_rz(double __x, double __y) {
+ return __ocml_div_rtz_f64(__x, __y);
+}
+__DEVICE__
+inline double __dmul_rd(double __x, double __y) {
+ return __ocml_mul_rtn_f64(__x, __y);
+}
+#endif
+__DEVICE__
+inline double __dmul_rn(double __x, double __y) { return __x * __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __dmul_ru(double __x, double __y) {
+ return __ocml_mul_rtp_f64(__x, __y);
+}
+__DEVICE__
+inline double __dmul_rz(double __x, double __y) {
+ return __ocml_mul_rtz_f64(__x, __y);
+}
+__DEVICE__
+inline double __drcp_rd(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+#endif
+__DEVICE__
+inline double __drcp_rn(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __drcp_ru(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+__DEVICE__
+inline double __drcp_rz(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+__DEVICE__
+inline double __dsqrt_rd(double __x) { return __ocml_sqrt_rtn_f64(__x); }
+#endif
+__DEVICE__
+inline double __dsqrt_rn(double __x) { return __ocml_sqrt_f64(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __dsqrt_ru(double __x) { return __ocml_sqrt_rtp_f64(__x); }
+__DEVICE__
+inline double __dsqrt_rz(double __x) { return __ocml_sqrt_rtz_f64(__x); }
+__DEVICE__
+inline double __dsub_rd(double __x, double __y) {
+ return __ocml_sub_rtn_f64(__x, __y);
+}
+#endif
+__DEVICE__
+inline double __dsub_rn(double __x, double __y) { return __x - __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __dsub_ru(double __x, double __y) {
+ return __ocml_sub_rtp_f64(__x, __y);
+}
+__DEVICE__
+inline double __dsub_rz(double __x, double __y) {
+ return __ocml_sub_rtz_f64(__x, __y);
+}
+__DEVICE__
+inline double __fma_rd(double __x, double __y, double __z) {
+ return __ocml_fma_rtn_f64(__x, __y, __z);
+}
+#endif
+__DEVICE__
+inline double __fma_rn(double __x, double __y, double __z) {
+ return __ocml_fma_f64(__x, __y, __z);
+}
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __fma_ru(double __x, double __y, double __z) {
+ return __ocml_fma_rtp_f64(__x, __y, __z);
+}
+__DEVICE__
+inline double __fma_rz(double __x, double __y, double __z) {
+ return __ocml_fma_rtz_f64(__x, __y, __z);
+}
+#endif
+// END INTRINSICS
+// END DOUBLE
+
+// BEGIN INTEGER
__DEVICE__
// Branch-free integer absolute value. __sgn is 0 when __x >= 0 and all
// ones (-1) when __x < 0 -- assuming the usual arithmetic right shift of
// negative signed values (implementation-defined, but true on the targets
// this header serves); (__x ^ __sgn) - __sgn then negates iff negative.
// As with std::abs, the result is undefined for __x == INT_MIN.
inline int abs(int __x) {
  int __sgn = __x >> (sizeof(int) * CHAR_BIT - 1);
  return (__x ^ __sgn) - __sgn;
}
+__DEVICE__
+inline long labs(long __x) {
+ long __sgn = __x >> (sizeof(long) * CHAR_BIT - 1);
+ return (__x ^ __sgn) - __sgn;
+}
+__DEVICE__
+inline long long llabs(long long __x) {
+ long long __sgn = __x >> (sizeof(long long) * CHAR_BIT - 1);
+ return (__x ^ __sgn) - __sgn;
+}
+
+#if defined(__cplusplus)
+__DEVICE__
+inline long abs(long __x) { return labs(__x); }
+__DEVICE__
+inline long long abs(long long __x) { return llabs(__x); }
+#endif
+// END INTEGER
+
+__DEVICE__
+inline _Float16 fma(_Float16 __x, _Float16 __y, _Float16 __z) {
+ return __ocml_fma_f16(__x, __y, __z);
+}
+
+__DEVICE__
+inline float fma(float __x, float __y, float __z) {
+ return fmaf(__x, __y, __z);
+}
+
+#pragma push_macro("__DEF_FUN1")
+#pragma push_macro("__DEF_FUN2")
+#pragma push_macro("__DEF_FUNI")
+#pragma push_macro("__DEF_FLOAT_FUN2I")
+#pragma push_macro("__HIP_OVERLOAD1")
+#pragma push_macro("__HIP_OVERLOAD2")
+
+// __hip_enable_if::type is a type function which returns __T if __B is true.
+template <bool __B, class __T = void> struct __hip_enable_if {};
+
+template <class __T> struct __hip_enable_if<true, __T> { typedef __T type; };
+
+// __HIP_OVERLOAD1 is used to resolve function calls with an integer argument
+// to avoid a compilation error due to ambiguity, e.g. floor(5) is resolved
+// with floor(double).
+#define __HIP_OVERLOAD1(__retty, __fn) \
+ template <typename __T> \
+ __DEVICE__ typename __hip_enable_if<std::numeric_limits<__T>::is_integer, \
+ __retty>::type \
+ __fn(__T __x) { \
+ return ::__fn((double)__x); \
+ }
+
// __HIP_OVERLOAD2 is used to resolve function calls with mixed float/double
// or integer arguments to avoid a compilation error due to ambiguity, e.g.
// max(5.0f, 6.0) is resolved with max(double, double). The call is
// qualified with `::`, consistent with __HIP_OVERLOAD1, so name lookup
// finds the global (double, double) overload rather than recursing into
// this template.
#define __HIP_OVERLOAD2(__retty, __fn)                                         \
  template <typename __T1, typename __T2>                                      \
  __DEVICE__                                                                   \
      typename __hip_enable_if<std::numeric_limits<__T1>::is_specialized &&    \
                                   std::numeric_limits<__T2>::is_specialized,  \
                               __retty>::type                                  \
  __fn(__T1 __x, __T2 __y) {                                                   \
    return ::__fn((double)__x, (double)__y);                                   \
  }
+
+// Define cmath functions with float argument and returns float.
+#define __DEF_FUN1(__retty, __func) \
+ __DEVICE__ \
+ inline float __func(float __x) { return __func##f(__x); } \
+ __HIP_OVERLOAD1(__retty, __func)
+
+// Define cmath functions with float argument and returns __retty.
+#define __DEF_FUNI(__retty, __func) \
+ __DEVICE__ \
+ inline __retty __func(float __x) { return __func##f(__x); } \
+ __HIP_OVERLOAD1(__retty, __func)
+
+// define cmath functions with two float arguments.
+#define __DEF_FUN2(__retty, __func) \
+ __DEVICE__ \
+ inline float __func(float __x, float __y) { return __func##f(__x, __y); } \
+ __HIP_OVERLOAD2(__retty, __func)
+
+__DEF_FUN1(double, acos)
+__DEF_FUN1(double, acosh)
+__DEF_FUN1(double, asin)
+__DEF_FUN1(double, asinh)
+__DEF_FUN1(double, atan)
+__DEF_FUN2(double, atan2);
+__DEF_FUN1(double, atanh)
+__DEF_FUN1(double, cbrt)
+__DEF_FUN1(double, ceil)
+__DEF_FUN2(double, copysign);
+__DEF_FUN1(double, cos)
+__DEF_FUN1(double, cosh)
+__DEF_FUN1(double, erf)
+__DEF_FUN1(double, erfc)
+__DEF_FUN1(double, exp)
+__DEF_FUN1(double, exp2)
+__DEF_FUN1(double, expm1)
+__DEF_FUN1(double, fabs)
+__DEF_FUN2(double, fdim);
+__DEF_FUN1(double, floor)
+__DEF_FUN2(double, fmax);
+__DEF_FUN2(double, fmin);
+__DEF_FUN2(double, fmod);
+//__HIP_OVERLOAD1(int, fpclassify)
+__DEF_FUN2(double, hypot);
+__DEF_FUNI(int, ilogb)
+__HIP_OVERLOAD1(bool, isfinite)
+__HIP_OVERLOAD2(bool, isgreater);
+__HIP_OVERLOAD2(bool, isgreaterequal);
+__HIP_OVERLOAD1(bool, isinf);
+__HIP_OVERLOAD2(bool, isless);
+__HIP_OVERLOAD2(bool, islessequal);
+__HIP_OVERLOAD2(bool, islessgreater);
+__HIP_OVERLOAD1(bool, isnan);
+//__HIP_OVERLOAD1(bool, isnormal)
+__HIP_OVERLOAD2(bool, isunordered);
+__DEF_FUN1(double, lgamma)
+__DEF_FUN1(double, log)
+__DEF_FUN1(double, log10)
+__DEF_FUN1(double, log1p)
+__DEF_FUN1(double, log2)
+__DEF_FUN1(double, logb)
+__DEF_FUNI(long long, llrint)
+__DEF_FUNI(long long, llround)
+__DEF_FUNI(long, lrint)
+__DEF_FUNI(long, lround)
+__DEF_FUN1(double, nearbyint);
+__DEF_FUN2(double, nextafter);
+__DEF_FUN2(double, pow);
+__DEF_FUN2(double, remainder);
+__DEF_FUN1(double, rint);
+__DEF_FUN1(double, round);
+__HIP_OVERLOAD1(bool, signbit)
+__DEF_FUN1(double, sin)
+__DEF_FUN1(double, sinh)
+__DEF_FUN1(double, sqrt)
+__DEF_FUN1(double, tan)
+__DEF_FUN1(double, tanh)
+__DEF_FUN1(double, tgamma)
+__DEF_FUN1(double, trunc);
+
+// define cmath functions with a float and an integer argument.
+#define __DEF_FLOAT_FUN2I(__func) \
+ __DEVICE__ \
+ inline float __func(float __x, int __y) { return __func##f(__x, __y); }
+__DEF_FLOAT_FUN2I(scalbn)
+
+template <class T> __DEVICE__ inline T min(T __arg1, T __arg2) {
+ return (__arg1 < __arg2) ? __arg1 : __arg2;
+}
+
+template <class T> __DEVICE__ inline T max(T __arg1, T __arg2) {
+ return (__arg1 > __arg2) ? __arg1 : __arg2;
+}
+
+__DEVICE__ inline int min(int __arg1, int __arg2) {
+ return (__arg1 < __arg2) ? __arg1 : __arg2;
+}
+__DEVICE__ inline int max(int __arg1, int __arg2) {
+ return (__arg1 > __arg2) ? __arg1 : __arg2;
+}
+
+__DEVICE__
+inline float max(float __x, float __y) { return fmaxf(__x, __y); }
+
+__DEVICE__
+inline double max(double __x, double __y) { return fmax(__x, __y); }
+
+__DEVICE__
+inline float min(float __x, float __y) { return fminf(__x, __y); }
+
+__DEVICE__
+inline double min(double __x, double __y) { return fmin(__x, __y); }
+
+__HIP_OVERLOAD2(double, max)
+__HIP_OVERLOAD2(double, min)
+
+__host__ inline static int min(int __arg1, int __arg2) {
+ return std::min(__arg1, __arg2);
+}
+
+__host__ inline static int max(int __arg1, int __arg2) {
+ return std::max(__arg1, __arg2);
+}
+
+#pragma pop_macro("__DEF_FUN1")
+#pragma pop_macro("__DEF_FUN2")
+#pragma pop_macro("__DEF_FUNI")
+#pragma pop_macro("__DEF_FLOAT_FUN2I")
+#pragma pop_macro("__HIP_OVERLOAD1")
+#pragma pop_macro("__HIP_OVERLOAD2")
+#pragma pop_macro("__DEVICE__")
+#pragma pop_macro("__RETURN_TYPE")
+
+#endif // __CLANG_HIP_MATH_H__
diff --git a/clang/lib/Headers/__clang_hip_runtime_wrapper.h b/clang/lib/Headers/__clang_hip_runtime_wrapper.h
new file mode 100644
index 000000000000..addae5605a5b
--- /dev/null
+++ b/clang/lib/Headers/__clang_hip_runtime_wrapper.h
@@ -0,0 +1,64 @@
+/*===---- __clang_hip_runtime_wrapper.h - HIP runtime support ---------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/*
+ * WARNING: This header is intended to be directly -include'd by
+ * the compiler and is not supposed to be included by users.
+ *
+ */
+
+#ifndef __CLANG_HIP_RUNTIME_WRAPPER_H__
+#define __CLANG_HIP_RUNTIME_WRAPPER_H__
+
+#if __HIP__
+
+#include <cmath>
+#include <cstdlib>
+#include <stdlib.h>
+
+#define __host__ __attribute__((host))
+#define __device__ __attribute__((device))
+#define __global__ __attribute__((global))
+#define __shared__ __attribute__((shared))
+#define __constant__ __attribute__((constant))
+
#if __HIP_ENABLE_DEVICE_MALLOC__
// Device-side heap: thin wrappers over the HIP runtime's exported entry
// points.
extern "C" __device__ void *__hip_malloc(size_t __size);
// NOTE(review): __hip_free returning void* (rather than void) mirrors the
// signature declared here for the runtime symbol -- confirm against the
// HIP runtime before "fixing" it.
extern "C" __device__ void *__hip_free(void *__ptr);
static inline __device__ void *malloc(size_t __size) {
  return __hip_malloc(__size);
}
static inline __device__ void *free(void *__ptr) { return __hip_free(__ptr); }
#else
// Device malloc/free are unsupported in this configuration: any call traps
// at runtime rather than silently misbehaving.
static inline __device__ void *malloc(size_t __size) {
  __builtin_trap();
  return nullptr;
}
static inline __device__ void *free(void *__ptr) {
  __builtin_trap();
  return nullptr;
}
#endif
+
+#include <__clang_hip_libdevice_declares.h>
+#include <__clang_hip_math.h>
+
+#if !_OPENMP || __HIP_ENABLE_CUDA_WRAPPER_FOR_OPENMP__
+#include <__clang_cuda_math_forward_declares.h>
+#include <__clang_cuda_complex_builtins.h>
+
+#include <algorithm>
+#include <complex>
+#include <new>
+#endif // !_OPENMP || __HIP_ENABLE_CUDA_WRAPPER_FOR_OPENMP__
+
+#define __CLANG_HIP_RUNTIME_WRAPPER_INCLUDED__ 1
+
+#endif // __HIP__
+#endif // __CLANG_HIP_RUNTIME_WRAPPER_H__
diff --git a/clang/lib/Headers/altivec.h b/clang/lib/Headers/altivec.h
index 7e231a2a428e..ac5f43836316 100644
--- a/clang/lib/Headers/altivec.h
+++ b/clang/lib/Headers/altivec.h
@@ -16761,6 +16761,408 @@ static vector signed short __ATTRS_o_ai vec_nabs(vector signed short __a) {
static vector signed char __ATTRS_o_ai vec_nabs(vector signed char __a) {
return __builtin_altivec_vminsb(__a, -__a);
}
+
+#ifdef __POWER10_VECTOR__
+/* vec_pdep */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_pdep(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vpdepd(__a, __b);
+}
+
+/* vec_pext */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_pext(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vpextd(__a, __b);
+}
+
+/* vec_cfuge */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_cfuge(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vcfuged(__a, __b);
+}
+
+/* vec_gnb */
+
+#define vec_gnb(__a, __b) __builtin_altivec_vgnb(__a, __b)
+
+/* vec_ternarylogic */
+#ifdef __VSX__
+#define vec_ternarylogic(__a, __b, __c, __imm) \
+ _Generic((__a), vector unsigned char \
+ : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
+ vector unsigned short \
+ : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
+ vector unsigned int \
+ : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
+ vector unsigned long long \
+ : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
+ vector unsigned __int128 \
+ : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)))
+#endif /* __VSX__ */
+
+/* vec_genpcvm */
+
+#ifdef __VSX__
+#define vec_genpcvm(__a, __imm) \
+ _Generic((__a), vector unsigned char \
+ : __builtin_vsx_xxgenpcvbm((__a), (int)(__imm)), \
+ vector unsigned short \
+ : __builtin_vsx_xxgenpcvhm((__a), (int)(__imm)), \
+ vector unsigned int \
+ : __builtin_vsx_xxgenpcvwm((__a), (int)(__imm)), \
+ vector unsigned long long \
+ : __builtin_vsx_xxgenpcvdm((__a), (int)(__imm)))
+#endif /* __VSX__ */
+
+/* vec_clrl */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_clrl(vector signed char __a, unsigned int __n) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vclrrb(__a, __n);
+#else
+ return __builtin_altivec_vclrlb( __a, __n);
+#endif
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_clrl(vector unsigned char __a, unsigned int __n) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vclrrb((vector signed char)__a, __n);
+#else
+ return __builtin_altivec_vclrlb((vector signed char)__a, __n);
+#endif
+}
+
+/* vec_clrr */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_clrr(vector signed char __a, unsigned int __n) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vclrlb(__a, __n);
+#else
+ return __builtin_altivec_vclrrb( __a, __n);
+#endif
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_clrr(vector unsigned char __a, unsigned int __n) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vclrlb((vector signed char)__a, __n);
+#else
+ return __builtin_altivec_vclrrb((vector signed char)__a, __n);
+#endif
+}
+
+/* vec_cntlzm */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_cntlzm(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vclzdm(__a, __b);
+}
+
+/* vec_cnttzm */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_cnttzm(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_altivec_vctzdm(__a, __b);
+}
+
+/* vec_sldbi */
+
+#define vec_sldb(__a, __b, __c) __builtin_altivec_vsldbi(__a, __b, (__c & 0x7))
+
+/* vec_srdbi */
+
+#define vec_srdb(__a, __b, __c) __builtin_altivec_vsrdbi(__a, __b, (__c & 0x7))
+
+/* vec_insertl */
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_insertl(unsigned char __a, vector unsigned char __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinsbrx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinsblx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_insertl(unsigned short __a, vector unsigned short __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinshrx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinshlx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_insertl(unsigned int __a, vector unsigned int __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinswrx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinswlx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_insertl(unsigned long long __a, vector unsigned long long __b,
+ unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinsdrx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinsdlx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_insertl(vector unsigned char __a, vector unsigned char __b,
+ unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinsbvrx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinsbvlx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_insertl(vector unsigned short __a, vector unsigned short __b,
+ unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinshvrx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinshvlx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_insertl(vector unsigned int __a, vector unsigned int __b,
+ unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinswvrx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinswvlx(__b, __c, __a);
+#endif
+}
+
+/* vec_inserth */
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_inserth(unsigned char __a, vector unsigned char __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinsblx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinsbrx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_inserth(unsigned short __a, vector unsigned short __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinshlx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinshrx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_inserth(unsigned int __a, vector unsigned int __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinswlx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinswrx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_inserth(unsigned long long __a, vector unsigned long long __b,
+ unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinsdlx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinsdrx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_inserth(vector unsigned char __a, vector unsigned char __b,
+ unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinsbvlx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinsbvrx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_inserth(vector unsigned short __a, vector unsigned short __b,
+ unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinshvlx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinshvrx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_inserth(vector unsigned int __a, vector unsigned int __b,
+ unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vinswvlx(__b, __c, __a);
+#else
+ return __builtin_altivec_vinswvrx(__b, __c, __a);
+#endif
+}
+
+#ifdef __VSX__
+
+/* vec_permx */
+
+#define vec_permx(__a, __b, __c, __d) \
+ __builtin_vsx_xxpermx((__a), (__b), (__c), (__d))
+
+/* vec_blendv */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_blendv(vector signed char __a, vector signed char __b,
+ vector unsigned char __c) {
+ return __builtin_vsx_xxblendvb(__a, __b, __c);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_blendv(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_vsx_xxblendvb(__a, __b, __c);
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_blendv(vector signed short __a, vector signed short __b,
+ vector unsigned short __c) {
+ return __builtin_vsx_xxblendvh(__a, __b, __c);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_blendv(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return __builtin_vsx_xxblendvh(__a, __b, __c);
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_blendv(vector signed int __a, vector signed int __b,
+ vector unsigned int __c) {
+ return __builtin_vsx_xxblendvw(__a, __b, __c);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_blendv(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __builtin_vsx_xxblendvw(__a, __b, __c);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_blendv(vector signed long long __a, vector signed long long __b,
+ vector unsigned long long __c) {
+ return __builtin_vsx_xxblendvd(__a, __b, __c);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_blendv(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned long long __c) {
+ return __builtin_vsx_xxblendvd(__a, __b, __c);
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_blendv(vector float __a, vector float __b, vector unsigned int __c) {
+ return __builtin_vsx_xxblendvw(__a, __b, __c);
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_blendv(vector double __a, vector double __b,
+ vector unsigned long long __c) {
+ return __builtin_vsx_xxblendvd(__a, __b, __c);
+}
+
+/* vec_splati */
+
+#define vec_splati(__a) \
+ _Generic((__a), signed int \
+ : ((vector signed int)__a), unsigned int \
+ : ((vector unsigned int)__a), float \
+ : ((vector float)__a))
+
+/* vec_splatid */
+
+static __inline__ vector double __ATTRS_o_ai vec_splatid(const float __a) {
+ return ((vector double)((double)__a));
+}
+
+/* vec_splati_ins */
+
+static __inline__ vector signed int __ATTRS_o_ai vec_splati_ins(
+ vector signed int __a, const unsigned int __b, const signed int __c) {
+#ifdef __LITTLE_ENDIAN__
+ __a[1 - __b] = __c;
+ __a[3 - __b] = __c;
+#else
+ __a[__b] = __c;
+ __a[2 + __b] = __c;
+#endif
+ return __a;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai vec_splati_ins(
+ vector unsigned int __a, const unsigned int __b, const unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+ __a[1 - __b] = __c;
+ __a[3 - __b] = __c;
+#else
+ __a[__b] = __c;
+ __a[2 + __b] = __c;
+#endif
+ return __a;
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_splati_ins(vector float __a, const unsigned int __b, const float __c) {
+#ifdef __LITTLE_ENDIAN__
+ __a[1 - __b] = __c;
+ __a[3 - __b] = __c;
+#else
+ __a[__b] = __c;
+ __a[2 + __b] = __c;
+#endif
+ return __a;
+}
+
+/* vec_test_lsbb_all_ones */
+
+static __inline__ int __ATTRS_o_ai
+vec_test_lsbb_all_ones(vector unsigned char __a) {
+ return __builtin_vsx_xvtlsbb(__a, 1);
+}
+
+/* vec_test_lsbb_all_zeros */
+
+static __inline__ int __ATTRS_o_ai
+vec_test_lsbb_all_zeros(vector unsigned char __a) {
+ return __builtin_vsx_xvtlsbb(__a, 0);
+}
+#endif /* __VSX__ */
+#endif /* __POWER10_VECTOR__ */
+
#undef __ATTRS_o_ai
#endif /* __ALTIVEC_H */
diff --git a/clang/lib/Headers/amxintrin.h b/clang/lib/Headers/amxintrin.h
new file mode 100644
index 000000000000..58254e21c81a
--- /dev/null
+++ b/clang/lib/Headers/amxintrin.h
@@ -0,0 +1,225 @@
+/*===--------------- amxintrin.h - AMX intrinsics -*- C/C++ -*---------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===------------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <amxintrin.h> directly; include <immintrin.h> instead."
+#endif /* __IMMINTRIN_H */
+
+#ifndef __AMXINTRIN_H
+#define __AMXINTRIN_H
+#ifdef __x86_64__
+
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("amx-tile")))
+
+/// Load tile configuration from a 64-byte memory location specified by
+/// "mem_addr". The tile configuration includes the tile type palette, the
+/// number of bytes per row, and the number of rows. If the specified
+/// palette_id is zero, that signifies the init state for both the tile
+/// config and the tile data, and the tiles are zeroed. Any invalid
+/// configurations will result in #GP fault.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> LDTILECFG </c> instruction.
+///
+/// \param __config
+///    A pointer to the 512-bit configuration
+static __inline__ void __DEFAULT_FN_ATTRS
+_tile_loadconfig(const void *__config)
+{
+ __builtin_ia32_tile_loadconfig(__config);
+}
+
+/// Stores the current tile configuration to a 64-byte memory location
+/// specified by "mem_addr". The tile configuration includes the tile type
+/// palette, the number of bytes per row, and the number of rows. If tiles
+/// are not configured, all zeroes will be stored to memory.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> STTILECFG </c> instruction.
+///
+/// \param __config
+///    A pointer to the 512-bit configuration
+static __inline__ void __DEFAULT_FN_ATTRS
+_tile_storeconfig(void *__config)
+{
+ __builtin_ia32_tile_storeconfig(__config);
+}
+
+/// Release the tile configuration to return to the init state, which
+/// releases all storage it currently holds.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TILERELEASE </c> instruction.
+static __inline__ void __DEFAULT_FN_ATTRS
+_tile_release(void)
+{
+ __builtin_ia32_tilerelease();
+}
+
+/// Load tile rows from memory specified by "base" address and "stride" into
+/// destination tile "dst" using the tile configuration previously configured
+/// via "_tile_loadconfig".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TILELOADD </c> instruction.
+///
+/// \param dst
+/// A destination tile. Max size is 1024 Bytes.
+/// \param base
+/// A pointer to base address.
+/// \param stride
+/// The stride between the rows' data to be loaded in memory.
+#define _tile_loadd(dst, base, stride) \
+ __builtin_ia32_tileloadd64((dst), ((const void *)(base)), (__SIZE_TYPE__)(stride))
+
+/// Load tile rows from memory specified by "base" address and "stride" into
+/// destination tile "dst" using the tile configuration previously configured
+/// via "_tile_loadconfig". This intrinsic provides a hint to the implementation
+/// that the data will likely not be reused in the near future and the data
+/// caching can be optimized accordingly.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TILELOADDT1 </c> instruction.
+///
+/// \param dst
+/// A destination tile. Max size is 1024 Bytes.
+/// \param base
+/// A pointer to base address.
+/// \param stride
+/// The stride between the rows' data to be loaded in memory.
+#define _tile_stream_loadd(dst, base, stride) \
+ __builtin_ia32_tileloaddt164((dst), ((const void *)(base)), (__SIZE_TYPE__)(stride))
+
+/// Store the tile specified by "src" to memory specified by "base" address and
+/// "stride" using the tile configuration previously configured via
+/// "_tile_loadconfig".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TILESTORED </c> instruction.
+///
+/// \param dst
+/// A destination tile. Max size is 1024 Bytes.
+/// \param base
+/// A pointer to base address.
+/// \param stride
+/// The stride between the rows' data to be stored in memory.
+#define _tile_stored(dst, base, stride) \
+ __builtin_ia32_tilestored64((dst), ((void *)(base)), (__SIZE_TYPE__)(stride))
+
+/// Zero the tile specified by "tile".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TILEZERO </c> instruction.
+///
+/// \param tile
+/// The destination tile to be zero. Max size is 1024 Bytes.
+#define _tile_zero(tile) __builtin_ia32_tilezero((tile))
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with
+/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit
+/// results. Sum these 4 results with the corresponding 32-bit integer in "dst",
+/// and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBSSD </c> instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param src0
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+/// The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbssd(dst, src0, src1) __builtin_ia32_tdpbssd((dst), (src0), (src1))
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with
+/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate
+/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer
+/// in "dst", and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBSUD </c> instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param src0
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+/// The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbsud(dst, src0, src1) __builtin_ia32_tdpbsud((dst), (src0), (src1))
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with
+/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit
+/// results. Sum these 4 results with the corresponding 32-bit integer in "dst",
+/// and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBUSD </c> instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param src0
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+/// The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbusd(dst, src0, src1) __builtin_ia32_tdpbusd((dst), (src0), (src1))
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with
+/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate
+/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer in
+/// "dst", and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBUUD </c> instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param src0
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+/// The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbuud(dst, src0, src1) __builtin_ia32_tdpbuud((dst), (src0), (src1))
+
+/// Compute dot-product of BF16 (16-bit) floating-point pairs in tiles src0 and
+/// src1, accumulating the intermediate single-precision (32-bit) floating-point
+/// elements with elements in "dst", and store the 32-bit result back to tile
+/// "dst".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBF16PS </c> instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param src0
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+/// The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbf16ps(dst, src0, src1) \
+ __builtin_ia32_tdpbf16ps((dst), (src0), (src1))
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __x86_64__ */
+#endif /* __AMXINTRIN_H */
diff --git a/clang/lib/Headers/arm_acle.h b/clang/lib/Headers/arm_acle.h
index 596ea03cff2f..de568b4ff9c5 100644
--- a/clang/lib/Headers/arm_acle.h
+++ b/clang/lib/Headers/arm_acle.h
@@ -22,31 +22,43 @@ extern "C" {
/* 8 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
/* 8.3 Memory barriers */
-#if !defined(_MSC_VER)
+#if !__has_builtin(__dmb)
#define __dmb(i) __builtin_arm_dmb(i)
+#endif
+#if !__has_builtin(__dsb)
#define __dsb(i) __builtin_arm_dsb(i)
+#endif
+#if !__has_builtin(__isb)
#define __isb(i) __builtin_arm_isb(i)
#endif
/* 8.4 Hints */
-#if !defined(_MSC_VER)
+#if !__has_builtin(__wfi)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfi(void) {
__builtin_arm_wfi();
}
+#endif
+#if !__has_builtin(__wfe)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfe(void) {
__builtin_arm_wfe();
}
+#endif
+#if !__has_builtin(__sev)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sev(void) {
__builtin_arm_sev();
}
+#endif
+#if !__has_builtin(__sevl)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sevl(void) {
__builtin_arm_sevl();
}
+#endif
+#if !__has_builtin(__yield)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(void) {
__builtin_arm_yield();
}
diff --git a/clang/lib/Headers/avx2intrin.h b/clang/lib/Headers/avx2intrin.h
index 162e83ea2fbc..cc16720949ea 100644
--- a/clang/lib/Headers/avx2intrin.h
+++ b/clang/lib/Headers/avx2intrin.h
@@ -740,6 +740,8 @@ _mm256_broadcastsi128_si256(__m128i __X)
return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 1, 0, 1);
}
+#define _mm_broadcastsi128_si256(X) _mm256_broadcastsi128_si256(X)
+
#define _mm_blend_epi32(V1, V2, M) \
(__m128i)__builtin_ia32_pblendd128((__v4si)(__m128i)(V1), \
(__v4si)(__m128i)(V2), (int)(M))
diff --git a/clang/lib/Headers/avx512bwintrin.h b/clang/lib/Headers/avx512bwintrin.h
index 376558407683..4281a33d375c 100644
--- a/clang/lib/Headers/avx512bwintrin.h
+++ b/clang/lib/Headers/avx512bwintrin.h
@@ -1504,13 +1504,14 @@ _mm512_maskz_sll_epi16(__mmask32 __U, __m512i __A, __m128i __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi16(__m512i __A, int __B)
+_mm512_slli_epi16(__m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_psllwi512((__v32hi)__A, __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_slli_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
+_mm512_mask_slli_epi16(__m512i __W, __mmask32 __U, __m512i __A,
+ unsigned int __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_slli_epi16(__A, __B),
@@ -1518,7 +1519,7 @@ _mm512_mask_slli_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi16(__mmask32 __U, __m512i __A, int __B)
+_mm512_maskz_slli_epi16(__mmask32 __U, __m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_slli_epi16(__A, __B),
@@ -1595,13 +1596,14 @@ _mm512_maskz_sra_epi16(__mmask32 __U, __m512i __A, __m128i __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi16(__m512i __A, int __B)
+_mm512_srai_epi16(__m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_psrawi512((__v32hi)__A, __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srai_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
+_mm512_mask_srai_epi16(__m512i __W, __mmask32 __U, __m512i __A,
+ unsigned int __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_srai_epi16(__A, __B),
@@ -1609,7 +1611,7 @@ _mm512_mask_srai_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi16(__mmask32 __U, __m512i __A, int __B)
+_mm512_maskz_srai_epi16(__mmask32 __U, __m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_srai_epi16(__A, __B),
@@ -1639,13 +1641,14 @@ _mm512_maskz_srl_epi16(__mmask32 __U, __m512i __A, __m128i __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi16(__m512i __A, int __B)
+_mm512_srli_epi16(__m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_psrlwi512((__v32hi)__A, __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srli_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
+_mm512_mask_srli_epi16(__m512i __W, __mmask32 __U, __m512i __A,
+ unsigned int __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_srli_epi16(__A, __B),
diff --git a/clang/lib/Headers/avx512fintrin.h b/clang/lib/Headers/avx512fintrin.h
index 7465da379bdd..fa22ef3fdd18 100644
--- a/clang/lib/Headers/avx512fintrin.h
+++ b/clang/lib/Headers/avx512fintrin.h
@@ -5111,13 +5111,14 @@ _mm512_maskz_rolv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
(__v8di)_mm512_setzero_si512())
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi32(__m512i __A, int __B)
+_mm512_slli_epi32(__m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_pslldi512((__v16si)__A, __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_slli_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
+_mm512_mask_slli_epi32(__m512i __W, __mmask16 __U, __m512i __A,
+ unsigned int __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
(__v16si)_mm512_slli_epi32(__A, __B),
@@ -5125,20 +5126,20 @@ _mm512_mask_slli_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi32(__mmask16 __U, __m512i __A, int __B) {
+_mm512_maskz_slli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) {
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
(__v16si)_mm512_slli_epi32(__A, __B),
(__v16si)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi64(__m512i __A, int __B)
+_mm512_slli_epi64(__m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_psllqi512((__v8di)__A, __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_slli_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
+_mm512_mask_slli_epi64(__m512i __W, __mmask8 __U, __m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
(__v8di)_mm512_slli_epi64(__A, __B),
@@ -5146,7 +5147,7 @@ _mm512_mask_slli_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, int __B)
+_mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
(__v8di)_mm512_slli_epi64(__A, __B),
@@ -5154,13 +5155,14 @@ _mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, int __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi32(__m512i __A, int __B)
+_mm512_srli_epi32(__m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_psrldi512((__v16si)__A, __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srli_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
+_mm512_mask_srli_epi32(__m512i __W, __mmask16 __U, __m512i __A,
+ unsigned int __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
(__v16si)_mm512_srli_epi32(__A, __B),
@@ -5168,20 +5170,21 @@ _mm512_mask_srli_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srli_epi32(__mmask16 __U, __m512i __A, int __B) {
+_mm512_maskz_srli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) {
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
(__v16si)_mm512_srli_epi32(__A, __B),
(__v16si)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi64(__m512i __A, int __B)
+_mm512_srli_epi64(__m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_psrlqi512((__v8di)__A, __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srli_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
+_mm512_mask_srli_epi64(__m512i __W, __mmask8 __U, __m512i __A,
+ unsigned int __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
(__v8di)_mm512_srli_epi64(__A, __B),
@@ -5189,7 +5192,8 @@ _mm512_mask_srli_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srli_epi64(__mmask8 __U, __m512i __A, int __B)
+_mm512_maskz_srli_epi64(__mmask8 __U, __m512i __A,
+ unsigned int __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
(__v8di)_mm512_srli_epi64(__A, __B),
@@ -6593,13 +6597,14 @@ _mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B)
(int)(R))
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi32(__m512i __A, int __B)
+_mm512_srai_epi32(__m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_psradi512((__v16si)__A, __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srai_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
+_mm512_mask_srai_epi32(__m512i __W, __mmask16 __U, __m512i __A,
+ unsigned int __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
(__v16si)_mm512_srai_epi32(__A, __B),
@@ -6607,20 +6612,21 @@ _mm512_mask_srai_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi32(__mmask16 __U, __m512i __A, int __B) {
+_mm512_maskz_srai_epi32(__mmask16 __U, __m512i __A,
+ unsigned int __B) {
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
(__v16si)_mm512_srai_epi32(__A, __B),
(__v16si)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi64(__m512i __A, int __B)
+_mm512_srai_epi64(__m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_psraqi512((__v8di)__A, __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srai_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
+_mm512_mask_srai_epi64(__m512i __W, __mmask8 __U, __m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
(__v8di)_mm512_srai_epi64(__A, __B),
@@ -6628,7 +6634,7 @@ _mm512_mask_srai_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A, int __B)
+_mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A, unsigned int __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
(__v8di)_mm512_srai_epi64(__A, __B),
diff --git a/clang/lib/Headers/avx512vlbwintrin.h b/clang/lib/Headers/avx512vlbwintrin.h
index cd9f2400daa0..6ed10ed9803b 100644
--- a/clang/lib/Headers/avx512vlbwintrin.h
+++ b/clang/lib/Headers/avx512vlbwintrin.h
@@ -1939,7 +1939,7 @@ _mm256_maskz_sll_epi16(__mmask16 __U, __m256i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_slli_epi16(__A, __B),
@@ -1947,7 +1947,7 @@ _mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_slli_epi16(__A, __B),
@@ -1955,7 +1955,8 @@ _mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_slli_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B)
+_mm256_mask_slli_epi16(__m256i __W, __mmask16 __U, __m256i __A,
+ unsigned int __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_slli_epi16(__A, __B),
@@ -1963,7 +1964,7 @@ _mm256_mask_slli_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A, int __B)
+_mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_slli_epi16(__A, __B),
@@ -2091,7 +2092,7 @@ _mm256_maskz_sra_epi16(__mmask16 __U, __m256i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_srai_epi16(__A, __B),
@@ -2099,7 +2100,7 @@ _mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_srai_epi16(__A, __B),
@@ -2107,7 +2108,8 @@ _mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srai_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B)
+_mm256_mask_srai_epi16(__m256i __W, __mmask16 __U, __m256i __A,
+ unsigned int __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_srai_epi16(__A, __B),
@@ -2115,7 +2117,7 @@ _mm256_mask_srai_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A, int __B)
+_mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_srai_epi16(__A, __B),
diff --git a/clang/lib/Headers/avx512vlintrin.h b/clang/lib/Headers/avx512vlintrin.h
index 9d1d791bb248..968c10efeac0 100644
--- a/clang/lib/Headers/avx512vlintrin.h
+++ b/clang/lib/Headers/avx512vlintrin.h
@@ -4522,7 +4522,7 @@ _mm256_maskz_sll_epi32(__mmask8 __U, __m256i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
(__v4si)_mm_slli_epi32(__A, __B),
@@ -4530,7 +4530,7 @@ _mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
(__v4si)_mm_slli_epi32(__A, __B),
@@ -4538,7 +4538,7 @@ _mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+_mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_slli_epi32(__A, __B),
@@ -4546,7 +4546,7 @@ _mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, int __B)
+_mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_slli_epi32(__A, __B),
@@ -4586,7 +4586,7 @@ _mm256_maskz_sll_epi64(__mmask8 __U, __m256i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_slli_epi64(__A, __B),
@@ -4594,7 +4594,7 @@ _mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_slli_epi64(__A, __B),
@@ -4602,7 +4602,7 @@ _mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+_mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
(__v4di)_mm256_slli_epi64(__A, __B),
@@ -4610,7 +4610,7 @@ _mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, int __B)
+_mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
(__v4di)_mm256_slli_epi64(__A, __B),
@@ -4866,7 +4866,7 @@ _mm256_maskz_srl_epi32(__mmask8 __U, __m256i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
(__v4si)_mm_srli_epi32(__A, __B),
@@ -4874,7 +4874,7 @@ _mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
(__v4si)_mm_srli_epi32(__A, __B),
@@ -4882,7 +4882,7 @@ _mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+_mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_srli_epi32(__A, __B),
@@ -4890,7 +4890,7 @@ _mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, int __B)
+_mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_srli_epi32(__A, __B),
@@ -4930,7 +4930,7 @@ _mm256_maskz_srl_epi64(__mmask8 __U, __m256i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_srli_epi64(__A, __B),
@@ -4938,7 +4938,7 @@ _mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_srli_epi64(__A, __B),
@@ -4946,7 +4946,7 @@ _mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+_mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
(__v4di)_mm256_srli_epi64(__A, __B),
@@ -4954,7 +4954,7 @@ _mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, int __B)
+_mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
(__v4di)_mm256_srli_epi64(__A, __B),
@@ -6405,7 +6405,7 @@ _mm256_maskz_sra_epi32(__mmask8 __U, __m256i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
(__v4si)_mm_srai_epi32(__A, __B),
@@ -6413,7 +6413,7 @@ _mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
(__v4si)_mm_srai_epi32(__A, __B),
@@ -6421,7 +6421,7 @@ _mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+_mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_srai_epi32(__A, __B),
@@ -6429,7 +6429,7 @@ _mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, int __B)
+_mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_srai_epi32(__A, __B),
@@ -6481,13 +6481,13 @@ _mm256_maskz_sra_epi64(__mmask8 __U, __m256i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_srai_epi64(__m128i __A, int __imm)
+_mm_srai_epi64(__m128i __A, unsigned int __imm)
{
return (__m128i)__builtin_ia32_psraqi128((__v2di)__A, __imm);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __imm)
+_mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __imm)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \
(__v2di)_mm_srai_epi64(__A, __imm), \
@@ -6495,7 +6495,7 @@ _mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __imm)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, int __imm)
+_mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, unsigned int __imm)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \
(__v2di)_mm_srai_epi64(__A, __imm), \
@@ -6503,13 +6503,14 @@ _mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, int __imm)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srai_epi64(__m256i __A, int __imm)
+_mm256_srai_epi64(__m256i __A, unsigned int __imm)
{
return (__m256i)__builtin_ia32_psraqi256((__v4di)__A, __imm);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __imm)
+_mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A,
+ unsigned int __imm)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \
(__v4di)_mm256_srai_epi64(__A, __imm), \
@@ -6517,7 +6518,7 @@ _mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __imm)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, int __imm)
+_mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, unsigned int __imm)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \
(__v4di)_mm256_srai_epi64(__A, __imm), \
diff --git a/clang/lib/Headers/bmiintrin.h b/clang/lib/Headers/bmiintrin.h
index 841bd84070e8..f583c215f919 100644
--- a/clang/lib/Headers/bmiintrin.h
+++ b/clang/lib/Headers/bmiintrin.h
@@ -111,7 +111,8 @@ _mm_tzcnt_64(unsigned long long __X)
#undef __RELAXED_FN_ATTRS
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__BMI__)
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi")))
@@ -192,6 +193,28 @@ _bextr_u32(unsigned int __X, unsigned int __Y, unsigned int __Z)
return __builtin_ia32_bextr_u32 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
}
+/* Intel-specified, single-leading-underscore version of BEXTR2 */
+/// Extracts the specified bits from the first operand and returns them
+/// in the least significant bits of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
+///
+/// \param __X
+/// An unsigned integer whose bits are to be extracted.
+/// \param __Y
+/// An unsigned integer used to specify which bits are extracted. Bits [7:0]
+/// specify the index of the least significant bit. Bits [15:8] specify the
+/// number of bits to be extracted.
+/// \returns An unsigned integer whose least significant bits contain the
+/// extracted bits.
+/// \see __bextr_u32
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_bextr2_u32(unsigned int __X, unsigned int __Y) {
+ return __builtin_ia32_bextr_u32(__X, __Y);
+}
+
/// Clears all bits in the source except for the least significant bit
/// containing a value of 1 and returns the result.
///
@@ -321,6 +344,28 @@ _bextr_u64(unsigned long long __X, unsigned int __Y, unsigned int __Z)
return __builtin_ia32_bextr_u64 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
}
+/* Intel-specified, single-leading-underscore version of BEXTR2 */
+/// Extracts the specified bits from the first operand and returns them
+/// in the least significant bits of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
+///
+/// \param __X
+/// An unsigned 64-bit integer whose bits are to be extracted.
+/// \param __Y
+/// An unsigned 64-bit integer used to specify which bits are extracted. Bits
+/// [7:0] specify the index of the least significant bit. Bits [15:8] specify
+/// the number of bits to be extracted.
+/// \returns An unsigned 64-bit integer whose least significant bits contain the
+/// extracted bits.
+/// \see __bextr_u64
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_bextr2_u64(unsigned long long __X, unsigned long long __Y) {
+ return __builtin_ia32_bextr_u64(__X, __Y);
+}
+
/// Clears all bits in the source except for the least significant bit
/// containing a value of 1 and returns the result.
///
@@ -376,6 +421,7 @@ __blsr_u64(unsigned long long __X)
#undef __DEFAULT_FN_ATTRS
-#endif /* !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI__) */
+#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \
+ || defined(__BMI__) */
#endif /* __BMIINTRIN_H */
diff --git a/clang/lib/Headers/cet.h b/clang/lib/Headers/cet.h
new file mode 100644
index 000000000000..ffb19dec8f2b
--- /dev/null
+++ b/clang/lib/Headers/cet.h
@@ -0,0 +1,66 @@
+/*===------ cet.h -Control-flow Enforcement Technology feature ------------===
+ * Add x86 feature with IBT and/or SHSTK bits to ELF program property if they
+ * are enabled. Otherwise, contents in this header file are unused. This file
+ * is mainly design for assembly source code which want to enable CET.
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __CET_H
+#define __CET_H
+
+#ifdef __ASSEMBLER__
+
+#ifndef __CET__
+# define _CET_ENDBR
+#endif
+
+#ifdef __CET__
+
+# ifdef __LP64__
+# if __CET__ & 0x1
+# define _CET_ENDBR endbr64
+# else
+# define _CET_ENDBR
+# endif
+# else
+# if __CET__ & 0x1
+# define _CET_ENDBR endbr32
+# else
+# define _CET_ENDBR
+# endif
+# endif
+
+
+# ifdef __LP64__
+# define __PROPERTY_ALIGN 3
+# else
+# define __PROPERTY_ALIGN 2
+# endif
+
+ .pushsection ".note.gnu.property", "a"
+ .p2align __PROPERTY_ALIGN
+ .long 1f - 0f /* name length. */
+ .long 4f - 1f /* data length. */
+ /* NT_GNU_PROPERTY_TYPE_0. */
+ .long 5 /* note type. */
+0:
+ .asciz "GNU" /* vendor name. */
+1:
+ .p2align __PROPERTY_ALIGN
+ /* GNU_PROPERTY_X86_FEATURE_1_AND. */
+ .long 0xc0000002 /* pr_type. */
+ .long 3f - 2f /* pr_datasz. */
+2:
+ /* GNU_PROPERTY_X86_FEATURE_1_XXX. */
+ .long __CET__
+3:
+ .p2align __PROPERTY_ALIGN
+4:
+ .popsection
+#endif
+#endif
+#endif
diff --git a/clang/lib/Headers/cldemoteintrin.h b/clang/lib/Headers/cldemoteintrin.h
index 2413e7dea7a1..cfb951c1b4a9 100644
--- a/clang/lib/Headers/cldemoteintrin.h
+++ b/clang/lib/Headers/cldemoteintrin.h
@@ -18,11 +18,19 @@
#define __DEFAULT_FN_ATTRS \
__attribute__((__always_inline__, __nodebug__, __target__("cldemote")))
+/// Hint to hardware that the cache line that contains \p __P should be demoted
+/// from the cache closest to the processor core to a level more distant from
+/// the processor core.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> CLDEMOTE </c> instruction.
static __inline__ void __DEFAULT_FN_ATTRS
_cldemote(const void * __P) {
__builtin_ia32_cldemote(__P);
}
+#define _mm_cldemote(p) _cldemote(p)
#undef __DEFAULT_FN_ATTRS
#endif
diff --git a/clang/lib/Headers/cpuid.h b/clang/lib/Headers/cpuid.h
index 4ddd64847c32..2a88c042d046 100644
--- a/clang/lib/Headers/cpuid.h
+++ b/clang/lib/Headers/cpuid.h
@@ -24,6 +24,10 @@
#define signature_CYRIX_ebx 0x69727943
#define signature_CYRIX_edx 0x736e4978
#define signature_CYRIX_ecx 0x64616574
+/* HYGON: "HygonGenuine" */
+#define signature_HYGON_ebx 0x6f677948
+#define signature_HYGON_edx 0x6e65476e
+#define signature_HYGON_ecx 0x656e6975
/* INTEL: "GenuineIntel" */
#define signature_INTEL_ebx 0x756e6547
#define signature_INTEL_edx 0x49656e69
@@ -182,8 +186,13 @@
/* Features in %edx for leaf 7 sub-leaf 0 */
#define bit_AVX5124VNNIW 0x00000004
#define bit_AVX5124FMAPS 0x00000008
+#define bit_SERIALIZE 0x00004000
+#define bit_TSXLDTRK 0x00010000
#define bit_PCONFIG 0x00040000
#define bit_IBT 0x00100000
+#define bit_AMXBF16 0x00400000
+#define bit_AMXTILE 0x01000000
+#define bit_AMXINT8 0x02000000
/* Features in %eax for leaf 7 sub-leaf 1 */
#define bit_AVX512BF16 0x00000020
diff --git a/clang/lib/Headers/emmintrin.h b/clang/lib/Headers/emmintrin.h
index 993c688ce818..73a777b107c6 100644
--- a/clang/lib/Headers/emmintrin.h
+++ b/clang/lib/Headers/emmintrin.h
@@ -4970,10 +4970,10 @@ void _mm_pause(void);
#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
-#define _MM_DENORMALS_ZERO_ON (0x0040)
-#define _MM_DENORMALS_ZERO_OFF (0x0000)
+#define _MM_DENORMALS_ZERO_ON (0x0040U)
+#define _MM_DENORMALS_ZERO_OFF (0x0000U)
-#define _MM_DENORMALS_ZERO_MASK (0x0040)
+#define _MM_DENORMALS_ZERO_MASK (0x0040U)
#define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
#define _MM_SET_DENORMALS_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
diff --git a/clang/lib/Headers/immintrin.h b/clang/lib/Headers/immintrin.h
index edf8c42ec491..e9dff2310fdf 100644
--- a/clang/lib/Headers/immintrin.h
+++ b/clang/lib/Headers/immintrin.h
@@ -10,198 +10,231 @@
#ifndef __IMMINTRIN_H
#define __IMMINTRIN_H
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__MMX__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__MMX__)
#include <mmintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__SSE__)
#include <xmmintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE2__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__SSE2__)
#include <emmintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE3__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__SSE3__)
#include <pmmintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSSE3__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__SSSE3__)
#include <tmmintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
(defined(__SSE4_2__) || defined(__SSE4_1__))
#include <smmintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
(defined(__AES__) || defined(__PCLMUL__))
#include <wmmintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLFLUSHOPT__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__CLFLUSHOPT__)
#include <clflushoptintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLWB__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__CLWB__)
#include <clwbintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX__)
#include <avxintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX2__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX2__)
#include <avx2intrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__F16C__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__F16C__)
#include <f16cintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__VPCLMULQDQ__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__VPCLMULQDQ__)
#include <vpclmulqdqintrin.h>
#endif
/* No feature check desired due to internal checks */
#include <bmiintrin.h>
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI2__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__BMI2__)
#include <bmi2intrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__LZCNT__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__LZCNT__)
#include <lzcntintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__POPCNT__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__POPCNT__)
#include <popcntintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FMA__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__FMA__)
#include <fmaintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512F__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512F__)
#include <avx512fintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VL__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512VL__)
#include <avx512vlintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512BW__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512BW__)
#include <avx512bwintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512BITALG__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512BITALG__)
#include <avx512bitalgintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512CD__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512CD__)
#include <avx512cdintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VPOPCNTDQ__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512VPOPCNTDQ__)
#include <avx512vpopcntdqintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512VPOPCNTDQ__))
#include <avx512vpopcntdqvlintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VNNI__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512VNNI__)
#include <avx512vnniintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512VNNI__))
#include <avx512vlvnniintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512DQ__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512DQ__)
#include <avx512dqintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512BITALG__))
#include <avx512vlbitalgintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512BW__))
#include <avx512vlbwintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512CD__))
#include <avx512vlcdintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512DQ__))
#include <avx512vldqintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512ER__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512ER__)
#include <avx512erintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512IFMA__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512IFMA__)
#include <avx512ifmaintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
(defined(__AVX512IFMA__) && defined(__AVX512VL__))
#include <avx512ifmavlintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VBMI__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512VBMI__)
#include <avx512vbmiintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
(defined(__AVX512VBMI__) && defined(__AVX512VL__))
#include <avx512vbmivlintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VBMI2__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512VBMI2__)
#include <avx512vbmi2intrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
(defined(__AVX512VBMI2__) && defined(__AVX512VL__))
#include <avx512vlvbmi2intrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512PF__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512PF__)
#include <avx512pfintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512BF16__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512BF16__)
#include <avx512bf16intrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
(defined(__AVX512VL__) && defined(__AVX512BF16__))
#include <avx512vlbf16intrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PKU__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__PKU__)
#include <pkuintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__VAES__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__VAES__)
#include <vaesintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__GFNI__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__GFNI__)
#include <gfniintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDPID__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__RDPID__)
/// Returns the value of the IA32_TSC_AUX MSR (0xc0000103).
///
/// \headerfile <immintrin.h>
@@ -213,7 +246,8 @@ _rdpid_u32(void) {
}
#endif // __RDPID__
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDRND__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__RDRND__)
static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
_rdrand16_step(unsigned short *__p)
{
@@ -235,7 +269,8 @@ _rdrand64_step(unsigned long long *__p)
#endif
#endif /* __RDRND__ */
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FSGSBASE__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__FSGSBASE__)
#ifdef __x86_64__
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_readfsbase_u32(void)
@@ -288,7 +323,8 @@ _writegsbase_u64(unsigned long long __V)
#endif
#endif /* __FSGSBASE__ */
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__MOVBE__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__MOVBE__)
/* The structs used below are to force the load/store to be unaligned. This
* is accomplished with the __packed__ attribute. The __may_alias__ prevents
@@ -347,35 +383,42 @@ _storebe_i64(void * __P, long long __D) {
#endif
#endif /* __MOVBE */
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RTM__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__RTM__)
#include <rtmintrin.h>
#include <xtestintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SHA__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__SHA__)
#include <shaintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FXSR__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__FXSR__)
#include <fxsrintrin.h>
#endif
/* No feature check desired due to internal MSC_VER checks */
#include <xsaveintrin.h>
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVEOPT__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__XSAVEOPT__)
#include <xsaveoptintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVEC__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__XSAVEC__)
#include <xsavecintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVES__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__XSAVES__)
#include <xsavesintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SHSTK__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__SHSTK__)
#include <cetintrin.h>
#endif
@@ -383,57 +426,81 @@ _storebe_i64(void * __P, long long __D) {
* whereas others are also available at all times. */
#include <adxintrin.h>
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDSEED__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__RDSEED__)
#include <rdseedintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__WBNOINVD__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__WBNOINVD__)
#include <wbnoinvdintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLDEMOTE__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__CLDEMOTE__)
#include <cldemoteintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__WAITPKG__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__WAITPKG__)
#include <waitpkgintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
- defined(__MOVDIRI__) || defined(__MOVDIR64B__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__MOVDIRI__) || defined(__MOVDIR64B__)
#include <movdirintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PCONFIG__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__PCONFIG__)
#include <pconfigintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SGX__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__SGX__)
#include <sgxintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PTWRITE__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__PTWRITE__)
#include <ptwriteintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__INVPCID__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__INVPCID__)
#include <invpcidintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
- defined(__AVX512VP2INTERSECT__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AMXTILE__) || defined(__AMXINT8__) || defined(__AMXBF16__)
+#include <amxintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512VP2INTERSECT__)
#include <avx512vp2intersectintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || \
- (defined(__AVX512VL__) && defined(__AVX512VP2INTERSECT__))
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ (defined(__AVX512VL__) && defined(__AVX512VP2INTERSECT__))
#include <avx512vlvp2intersectintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__ENQCMD__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__ENQCMD__)
#include <enqcmdintrin.h>
#endif
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__SERIALIZE__)
+#include <serializeintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__TSXLDTRK__)
+#include <tsxldtrkintrin.h>
+#endif
+
#if defined(_MSC_VER) && __has_extension(gnu_asm)
/* Define the default attributes for these intrinsics */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
diff --git a/clang/lib/Headers/intrin.h b/clang/lib/Headers/intrin.h
index f85f7a2beb49..871b47ca8267 100644
--- a/clang/lib/Headers/intrin.h
+++ b/clang/lib/Headers/intrin.h
@@ -289,6 +289,9 @@ unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
static __inline__
unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
+#endif
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
static __inline__
__int64 _InterlockedDecrement64(__int64 volatile *_Addend);
static __inline__
diff --git a/clang/lib/Headers/module.modulemap b/clang/lib/Headers/module.modulemap
index 7954a77a4125..6894672ef052 100644
--- a/clang/lib/Headers/module.modulemap
+++ b/clang/lib/Headers/module.modulemap
@@ -27,6 +27,12 @@ module _Builtin_intrinsics [system] [extern_c] {
header "arm_fp16.h"
export *
}
+
+ explicit module sve {
+ requires sve
+ header "arm_sve.h"
+ export *
+ }
}
explicit module intel {
diff --git a/clang/lib/Headers/msa.h b/clang/lib/Headers/msa.h
index 19ea6071aa93..0ca4900870f1 100644
--- a/clang/lib/Headers/msa.h
+++ b/clang/lib/Headers/msa.h
@@ -212,10 +212,14 @@ typedef double v2f64_d __attribute__ ((vector_size(16), aligned(8)));
#define __msa_ld_h __builtin_msa_ld_h
#define __msa_ld_w __builtin_msa_ld_w
#define __msa_ld_d __builtin_msa_ld_d
+#define __msa_ldr_d __builtin_msa_ldr_d
+#define __msa_ldr_w __builtin_msa_ldrq_w
#define __msa_st_b __builtin_msa_st_b
#define __msa_st_h __builtin_msa_st_h
#define __msa_st_w __builtin_msa_st_w
#define __msa_st_d __builtin_msa_st_d
+#define __msa_str_d __builtin_msa_str_d
+#define __msa_str_w __builtin_msa_strq_w
#define __msa_sat_s_b __builtin_msa_sat_s_b
#define __msa_sat_s_h __builtin_msa_sat_s_h
#define __msa_sat_s_w __builtin_msa_sat_s_w
diff --git a/clang/lib/Headers/opencl-c.h b/clang/lib/Headers/opencl-c.h
index 06c5ab6a72f0..66e18bdd47bb 100644
--- a/clang/lib/Headers/opencl-c.h
+++ b/clang/lib/Headers/opencl-c.h
@@ -13432,18 +13432,12 @@ int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, m
uint __ovld atomic_fetch_min(volatile atomic_uint *object, uint operand);
uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order);
uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_min(volatile atomic_uint *object, int operand);
-uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, int operand, memory_order order);
-uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, int operand, memory_order order, memory_scope scope);
int __ovld atomic_fetch_max(volatile atomic_int *object, int operand);
int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order);
int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
uint __ovld atomic_fetch_max(volatile atomic_uint *object, uint operand);
uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order);
uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_max(volatile atomic_uint *object, int operand);
-uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, int operand, memory_order order);
-uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, int operand, memory_order order, memory_scope scope);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
long __ovld atomic_fetch_add(volatile atomic_long *object, long operand);
@@ -13482,18 +13476,12 @@ long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand
ulong __ovld atomic_fetch_min(volatile atomic_ulong *object, ulong operand);
ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_min(volatile atomic_ulong *object, long operand);
-ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, long operand, memory_order order, memory_scope scope);
long __ovld atomic_fetch_max(volatile atomic_long *object, long operand);
long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order);
long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
ulong __ovld atomic_fetch_max(volatile atomic_ulong *object, ulong operand);
ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_max(volatile atomic_ulong *object, long operand);
-ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, long operand, memory_order order, memory_scope scope);
#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
// OpenCL v2.0 s6.13.11.7.5:
@@ -14682,7 +14670,7 @@ void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, flo
// OpenCL Extension v2.0 s9.18 - Mipmaps
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#ifdef cl_khr_mipmap_image
+#if defined(cl_khr_mipmap_image_writes)
void __ovld write_imagef(write_only image1d_t image, int coord, int lod, float4 color);
void __ovld write_imagei(write_only image1d_t image, int coord, int lod, int4 color);
void __ovld write_imageui(write_only image1d_t image, int coord, int lod, uint4 color);
@@ -14699,15 +14687,16 @@ void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, int
void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int lod, int4 color);
void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, int lod, uint4 color);
-void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, int lod, float color);
-void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, int lod, float color);
+void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, int lod, float depth);
+void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, int lod, float depth);
#ifdef cl_khr_3d_image_writes
void __ovld write_imagef(write_only image3d_t image, int4 coord, int lod, float4 color);
void __ovld write_imagei(write_only image3d_t image, int4 coord, int lod, int4 color);
void __ovld write_imageui(write_only image3d_t image, int4 coord, int lod, uint4 color);
-#endif
-#endif //cl_khr_mipmap_image
+#endif //cl_khr_3d_image_writes
+
+#endif //defined(cl_khr_mipmap_image_writes)
#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// Image write functions for half4 type
@@ -14756,7 +14745,7 @@ void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, flo
#endif //cl_khr_depth_images
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#ifdef cl_khr_mipmap_image
+#if defined(cl_khr_mipmap_image_writes)
void __ovld write_imagef(read_write image1d_t image, int coord, int lod, float4 color);
void __ovld write_imagei(read_write image1d_t image, int coord, int lod, int4 color);
void __ovld write_imageui(read_write image1d_t image, int coord, int lod, uint4 color);
@@ -14780,8 +14769,9 @@ void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, int
void __ovld write_imagef(read_write image3d_t image, int4 coord, int lod, float4 color);
void __ovld write_imagei(read_write image3d_t image, int4 coord, int lod, int4 color);
void __ovld write_imageui(read_write image3d_t image, int4 coord, int lod, uint4 color);
-#endif
-#endif //cl_khr_mipmap_image
+#endif //cl_khr_3d_image_writes
+
+#endif //defined(cl_khr_mipmap_image_writes)
#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// Image write functions for half4 type
@@ -15470,6 +15460,674 @@ double __ovld __conv sub_group_scan_inclusive_max(double x);
#endif //cl_khr_subgroups cl_intel_subgroups
+#if defined(cl_khr_subgroup_extended_types)
+char __ovld __conv sub_group_broadcast( char value, uint index );
+char2 __ovld __conv sub_group_broadcast( char2 value, uint index );
+char3 __ovld __conv sub_group_broadcast( char3 value, uint index );
+char4 __ovld __conv sub_group_broadcast( char4 value, uint index );
+char8 __ovld __conv sub_group_broadcast( char8 value, uint index );
+char16 __ovld __conv sub_group_broadcast( char16 value, uint index );
+
+uchar __ovld __conv sub_group_broadcast( uchar value, uint index );
+uchar2 __ovld __conv sub_group_broadcast( uchar2 value, uint index );
+uchar3 __ovld __conv sub_group_broadcast( uchar3 value, uint index );
+uchar4 __ovld __conv sub_group_broadcast( uchar4 value, uint index );
+uchar8 __ovld __conv sub_group_broadcast( uchar8 value, uint index );
+uchar16 __ovld __conv sub_group_broadcast( uchar16 value, uint index );
+
+short __ovld __conv sub_group_broadcast( short value, uint index );
+short2 __ovld __conv sub_group_broadcast( short2 value, uint index );
+short3 __ovld __conv sub_group_broadcast( short3 value, uint index );
+short4 __ovld __conv sub_group_broadcast( short4 value, uint index );
+short8 __ovld __conv sub_group_broadcast( short8 value, uint index );
+short16 __ovld __conv sub_group_broadcast( short16 value, uint index );
+
+ushort __ovld __conv sub_group_broadcast( ushort value, uint index );
+ushort2 __ovld __conv sub_group_broadcast( ushort2 value, uint index );
+ushort3 __ovld __conv sub_group_broadcast( ushort3 value, uint index );
+ushort4 __ovld __conv sub_group_broadcast( ushort4 value, uint index );
+ushort8 __ovld __conv sub_group_broadcast( ushort8 value, uint index );
+ushort16 __ovld __conv sub_group_broadcast( ushort16 value, uint index );
+
+// scalar int broadcast is part of cl_khr_subgroups
+int2 __ovld __conv sub_group_broadcast( int2 value, uint index );
+int3 __ovld __conv sub_group_broadcast( int3 value, uint index );
+int4 __ovld __conv sub_group_broadcast( int4 value, uint index );
+int8 __ovld __conv sub_group_broadcast( int8 value, uint index );
+int16 __ovld __conv sub_group_broadcast( int16 value, uint index );
+
+// scalar uint broadcast is part of cl_khr_subgroups
+uint2 __ovld __conv sub_group_broadcast( uint2 value, uint index );
+uint3 __ovld __conv sub_group_broadcast( uint3 value, uint index );
+uint4 __ovld __conv sub_group_broadcast( uint4 value, uint index );
+uint8 __ovld __conv sub_group_broadcast( uint8 value, uint index );
+uint16 __ovld __conv sub_group_broadcast( uint16 value, uint index );
+
+// scalar long broadcast is part of cl_khr_subgroups
+long2 __ovld __conv sub_group_broadcast( long2 value, uint index );
+long3 __ovld __conv sub_group_broadcast( long3 value, uint index );
+long4 __ovld __conv sub_group_broadcast( long4 value, uint index );
+long8 __ovld __conv sub_group_broadcast( long8 value, uint index );
+long16 __ovld __conv sub_group_broadcast( long16 value, uint index );
+
+// scalar ulong broadcast is part of cl_khr_subgroups
+ulong2 __ovld __conv sub_group_broadcast( ulong2 value, uint index );
+ulong3 __ovld __conv sub_group_broadcast( ulong3 value, uint index );
+ulong4 __ovld __conv sub_group_broadcast( ulong4 value, uint index );
+ulong8 __ovld __conv sub_group_broadcast( ulong8 value, uint index );
+ulong16 __ovld __conv sub_group_broadcast( ulong16 value, uint index );
+
+// scalar float broadcast is part of cl_khr_subgroups
+float2 __ovld __conv sub_group_broadcast( float2 value, uint index );
+float3 __ovld __conv sub_group_broadcast( float3 value, uint index );
+float4 __ovld __conv sub_group_broadcast( float4 value, uint index );
+float8 __ovld __conv sub_group_broadcast( float8 value, uint index );
+float16 __ovld __conv sub_group_broadcast( float16 value, uint index );
+
+char __ovld __conv sub_group_reduce_add( char value );
+uchar __ovld __conv sub_group_reduce_add( uchar value );
+short __ovld __conv sub_group_reduce_add( short value );
+ushort __ovld __conv sub_group_reduce_add( ushort value );
+
+char __ovld __conv sub_group_reduce_min( char value );
+uchar __ovld __conv sub_group_reduce_min( uchar value );
+short __ovld __conv sub_group_reduce_min( short value );
+ushort __ovld __conv sub_group_reduce_min( ushort value );
+
+char __ovld __conv sub_group_reduce_max( char value );
+uchar __ovld __conv sub_group_reduce_max( uchar value );
+short __ovld __conv sub_group_reduce_max( short value );
+ushort __ovld __conv sub_group_reduce_max( ushort value );
+
+char __ovld __conv sub_group_scan_inclusive_add( char value );
+uchar __ovld __conv sub_group_scan_inclusive_add( uchar value );
+short __ovld __conv sub_group_scan_inclusive_add( short value );
+ushort __ovld __conv sub_group_scan_inclusive_add( ushort value );
+
+char __ovld __conv sub_group_scan_inclusive_min( char value );
+uchar __ovld __conv sub_group_scan_inclusive_min( uchar value );
+short __ovld __conv sub_group_scan_inclusive_min( short value );
+ushort __ovld __conv sub_group_scan_inclusive_min( ushort value );
+
+char __ovld __conv sub_group_scan_inclusive_max( char value );
+uchar __ovld __conv sub_group_scan_inclusive_max( uchar value );
+short __ovld __conv sub_group_scan_inclusive_max( short value );
+ushort __ovld __conv sub_group_scan_inclusive_max( ushort value );
+
+char __ovld __conv sub_group_scan_exclusive_add( char value );
+uchar __ovld __conv sub_group_scan_exclusive_add( uchar value );
+short __ovld __conv sub_group_scan_exclusive_add( short value );
+ushort __ovld __conv sub_group_scan_exclusive_add( ushort value );
+
+char __ovld __conv sub_group_scan_exclusive_min( char value );
+uchar __ovld __conv sub_group_scan_exclusive_min( uchar value );
+short __ovld __conv sub_group_scan_exclusive_min( short value );
+ushort __ovld __conv sub_group_scan_exclusive_min( ushort value );
+
+char __ovld __conv sub_group_scan_exclusive_max( char value );
+uchar __ovld __conv sub_group_scan_exclusive_max( uchar value );
+short __ovld __conv sub_group_scan_exclusive_max( short value );
+ushort __ovld __conv sub_group_scan_exclusive_max( ushort value );
+
+#if defined(cl_khr_fp16)
+// scalar half broadcast is part of cl_khr_subgroups
+half2 __ovld __conv sub_group_broadcast( half2 value, uint index );
+half3 __ovld __conv sub_group_broadcast( half3 value, uint index );
+half4 __ovld __conv sub_group_broadcast( half4 value, uint index );
+half8 __ovld __conv sub_group_broadcast( half8 value, uint index );
+half16 __ovld __conv sub_group_broadcast( half16 value, uint index );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+// scalar double broadcast is part of cl_khr_subgroups
+double2 __ovld __conv sub_group_broadcast( double2 value, uint index );
+double3 __ovld __conv sub_group_broadcast( double3 value, uint index );
+double4 __ovld __conv sub_group_broadcast( double4 value, uint index );
+double8 __ovld __conv sub_group_broadcast( double8 value, uint index );
+double16 __ovld __conv sub_group_broadcast( double16 value, uint index );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_extended_types
+
+#if defined(cl_khr_subgroup_non_uniform_vote)
+int __ovld sub_group_elect(void);
+int __ovld sub_group_non_uniform_all( int predicate );
+int __ovld sub_group_non_uniform_any( int predicate );
+
+int __ovld sub_group_non_uniform_all_equal( char value );
+int __ovld sub_group_non_uniform_all_equal( uchar value );
+int __ovld sub_group_non_uniform_all_equal( short value );
+int __ovld sub_group_non_uniform_all_equal( ushort value );
+int __ovld sub_group_non_uniform_all_equal( int value );
+int __ovld sub_group_non_uniform_all_equal( uint value );
+int __ovld sub_group_non_uniform_all_equal( long value );
+int __ovld sub_group_non_uniform_all_equal( ulong value );
+int __ovld sub_group_non_uniform_all_equal( float value );
+
+#if defined(cl_khr_fp16)
+int __ovld sub_group_non_uniform_all_equal( half value );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+int __ovld sub_group_non_uniform_all_equal( double value );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_non_uniform_vote
+
+#if defined(cl_khr_subgroup_ballot)
+char __ovld sub_group_non_uniform_broadcast( char value, uint index );
+char2 __ovld sub_group_non_uniform_broadcast( char2 value, uint index );
+char3 __ovld sub_group_non_uniform_broadcast( char3 value, uint index );
+char4 __ovld sub_group_non_uniform_broadcast( char4 value, uint index );
+char8 __ovld sub_group_non_uniform_broadcast( char8 value, uint index );
+char16 __ovld sub_group_non_uniform_broadcast( char16 value, uint index );
+
+uchar __ovld sub_group_non_uniform_broadcast( uchar value, uint index );
+uchar2 __ovld sub_group_non_uniform_broadcast( uchar2 value, uint index );
+uchar3 __ovld sub_group_non_uniform_broadcast( uchar3 value, uint index );
+uchar4 __ovld sub_group_non_uniform_broadcast( uchar4 value, uint index );
+uchar8 __ovld sub_group_non_uniform_broadcast( uchar8 value, uint index );
+uchar16 __ovld sub_group_non_uniform_broadcast( uchar16 value, uint index );
+
+short __ovld sub_group_non_uniform_broadcast( short value, uint index );
+short2 __ovld sub_group_non_uniform_broadcast( short2 value, uint index );
+short3 __ovld sub_group_non_uniform_broadcast( short3 value, uint index );
+short4 __ovld sub_group_non_uniform_broadcast( short4 value, uint index );
+short8 __ovld sub_group_non_uniform_broadcast( short8 value, uint index );
+short16 __ovld sub_group_non_uniform_broadcast( short16 value, uint index );
+
+ushort __ovld sub_group_non_uniform_broadcast( ushort value, uint index );
+ushort2 __ovld sub_group_non_uniform_broadcast( ushort2 value, uint index );
+ushort3 __ovld sub_group_non_uniform_broadcast( ushort3 value, uint index );
+ushort4 __ovld sub_group_non_uniform_broadcast( ushort4 value, uint index );
+ushort8 __ovld sub_group_non_uniform_broadcast( ushort8 value, uint index );
+ushort16 __ovld sub_group_non_uniform_broadcast( ushort16 value, uint index );
+
+int __ovld sub_group_non_uniform_broadcast( int value, uint index );
+int2 __ovld sub_group_non_uniform_broadcast( int2 value, uint index );
+int3 __ovld sub_group_non_uniform_broadcast( int3 value, uint index );
+int4 __ovld sub_group_non_uniform_broadcast( int4 value, uint index );
+int8 __ovld sub_group_non_uniform_broadcast( int8 value, uint index );
+int16 __ovld sub_group_non_uniform_broadcast( int16 value, uint index );
+
+uint __ovld sub_group_non_uniform_broadcast( uint value, uint index );
+uint2 __ovld sub_group_non_uniform_broadcast( uint2 value, uint index );
+uint3 __ovld sub_group_non_uniform_broadcast( uint3 value, uint index );
+uint4 __ovld sub_group_non_uniform_broadcast( uint4 value, uint index );
+uint8 __ovld sub_group_non_uniform_broadcast( uint8 value, uint index );
+uint16 __ovld sub_group_non_uniform_broadcast( uint16 value, uint index );
+
+long __ovld sub_group_non_uniform_broadcast( long value, uint index );
+long2 __ovld sub_group_non_uniform_broadcast( long2 value, uint index );
+long3 __ovld sub_group_non_uniform_broadcast( long3 value, uint index );
+long4 __ovld sub_group_non_uniform_broadcast( long4 value, uint index );
+long8 __ovld sub_group_non_uniform_broadcast( long8 value, uint index );
+long16 __ovld sub_group_non_uniform_broadcast( long16 value, uint index );
+
+ulong __ovld sub_group_non_uniform_broadcast( ulong value, uint index );
+ulong2 __ovld sub_group_non_uniform_broadcast( ulong2 value, uint index );
+ulong3 __ovld sub_group_non_uniform_broadcast( ulong3 value, uint index );
+ulong4 __ovld sub_group_non_uniform_broadcast( ulong4 value, uint index );
+ulong8 __ovld sub_group_non_uniform_broadcast( ulong8 value, uint index );
+ulong16 __ovld sub_group_non_uniform_broadcast( ulong16 value, uint index );
+
+float __ovld sub_group_non_uniform_broadcast( float value, uint index );
+float2 __ovld sub_group_non_uniform_broadcast( float2 value, uint index );
+float3 __ovld sub_group_non_uniform_broadcast( float3 value, uint index );
+float4 __ovld sub_group_non_uniform_broadcast( float4 value, uint index );
+float8 __ovld sub_group_non_uniform_broadcast( float8 value, uint index );
+float16 __ovld sub_group_non_uniform_broadcast( float16 value, uint index );
+
+char __ovld sub_group_broadcast_first( char value );
+uchar __ovld sub_group_broadcast_first( uchar value );
+short __ovld sub_group_broadcast_first( short value );
+ushort __ovld sub_group_broadcast_first( ushort value );
+int __ovld sub_group_broadcast_first( int value );
+uint __ovld sub_group_broadcast_first( uint value );
+long __ovld sub_group_broadcast_first( long value );
+ulong __ovld sub_group_broadcast_first( ulong value );
+float __ovld sub_group_broadcast_first( float value );
+
+uint4 __ovld sub_group_ballot( int predicate );
+int __ovld __cnfn sub_group_inverse_ballot( uint4 value );
+int __ovld __cnfn sub_group_ballot_bit_extract( uint4 value, uint index );
+uint __ovld __cnfn sub_group_ballot_bit_count( uint4 value );
+
+uint __ovld sub_group_ballot_inclusive_scan( uint4 value );
+uint __ovld sub_group_ballot_exclusive_scan( uint4 value );
+uint __ovld sub_group_ballot_find_lsb( uint4 value );
+uint __ovld sub_group_ballot_find_msb( uint4 value );
+
+uint4 __ovld __cnfn get_sub_group_eq_mask(void);
+uint4 __ovld __cnfn get_sub_group_ge_mask(void);
+uint4 __ovld __cnfn get_sub_group_gt_mask(void);
+uint4 __ovld __cnfn get_sub_group_le_mask(void);
+uint4 __ovld __cnfn get_sub_group_lt_mask(void);
+
+#if defined(cl_khr_fp16)
+half __ovld sub_group_non_uniform_broadcast( half value, uint index );
+half2 __ovld sub_group_non_uniform_broadcast( half2 value, uint index );
+half3 __ovld sub_group_non_uniform_broadcast( half3 value, uint index );
+half4 __ovld sub_group_non_uniform_broadcast( half4 value, uint index );
+half8 __ovld sub_group_non_uniform_broadcast( half8 value, uint index );
+half16 __ovld sub_group_non_uniform_broadcast( half16 value, uint index );
+
+half __ovld sub_group_broadcast_first( half value );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double __ovld sub_group_non_uniform_broadcast( double value, uint index );
+double2 __ovld sub_group_non_uniform_broadcast( double2 value, uint index );
+double3 __ovld sub_group_non_uniform_broadcast( double3 value, uint index );
+double4 __ovld sub_group_non_uniform_broadcast( double4 value, uint index );
+double8 __ovld sub_group_non_uniform_broadcast( double8 value, uint index );
+double16 __ovld sub_group_non_uniform_broadcast( double16 value, uint index );
+
+double __ovld sub_group_broadcast_first( double value );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_ballot
+
+#if defined(cl_khr_subgroup_non_uniform_arithmetic)
+char __ovld sub_group_non_uniform_reduce_add( char value );
+uchar __ovld sub_group_non_uniform_reduce_add( uchar value );
+short __ovld sub_group_non_uniform_reduce_add( short value );
+ushort __ovld sub_group_non_uniform_reduce_add( ushort value );
+int __ovld sub_group_non_uniform_reduce_add( int value );
+uint __ovld sub_group_non_uniform_reduce_add( uint value );
+long __ovld sub_group_non_uniform_reduce_add( long value );
+ulong __ovld sub_group_non_uniform_reduce_add( ulong value );
+float __ovld sub_group_non_uniform_reduce_add( float value );
+
+char __ovld sub_group_non_uniform_reduce_mul( char value );
+uchar __ovld sub_group_non_uniform_reduce_mul( uchar value );
+short __ovld sub_group_non_uniform_reduce_mul( short value );
+ushort __ovld sub_group_non_uniform_reduce_mul( ushort value );
+int __ovld sub_group_non_uniform_reduce_mul( int value );
+uint __ovld sub_group_non_uniform_reduce_mul( uint value );
+long __ovld sub_group_non_uniform_reduce_mul( long value );
+ulong __ovld sub_group_non_uniform_reduce_mul( ulong value );
+float __ovld sub_group_non_uniform_reduce_mul( float value );
+
+char __ovld sub_group_non_uniform_reduce_min( char value );
+uchar __ovld sub_group_non_uniform_reduce_min( uchar value );
+short __ovld sub_group_non_uniform_reduce_min( short value );
+ushort __ovld sub_group_non_uniform_reduce_min( ushort value );
+int __ovld sub_group_non_uniform_reduce_min( int value );
+uint __ovld sub_group_non_uniform_reduce_min( uint value );
+long __ovld sub_group_non_uniform_reduce_min( long value );
+ulong __ovld sub_group_non_uniform_reduce_min( ulong value );
+float __ovld sub_group_non_uniform_reduce_min( float value );
+
+char __ovld sub_group_non_uniform_reduce_max( char value );
+uchar __ovld sub_group_non_uniform_reduce_max( uchar value );
+short __ovld sub_group_non_uniform_reduce_max( short value );
+ushort __ovld sub_group_non_uniform_reduce_max( ushort value );
+int __ovld sub_group_non_uniform_reduce_max( int value );
+uint __ovld sub_group_non_uniform_reduce_max( uint value );
+long __ovld sub_group_non_uniform_reduce_max( long value );
+ulong __ovld sub_group_non_uniform_reduce_max( ulong value );
+float __ovld sub_group_non_uniform_reduce_max( float value );
+
+char __ovld sub_group_non_uniform_scan_inclusive_add( char value );
+uchar __ovld sub_group_non_uniform_scan_inclusive_add( uchar value );
+short __ovld sub_group_non_uniform_scan_inclusive_add( short value );
+ushort __ovld sub_group_non_uniform_scan_inclusive_add( ushort value );
+int __ovld sub_group_non_uniform_scan_inclusive_add( int value );
+uint __ovld sub_group_non_uniform_scan_inclusive_add( uint value );
+long __ovld sub_group_non_uniform_scan_inclusive_add( long value );
+ulong __ovld sub_group_non_uniform_scan_inclusive_add( ulong value );
+float __ovld sub_group_non_uniform_scan_inclusive_add( float value );
+
+char __ovld sub_group_non_uniform_scan_inclusive_mul( char value );
+uchar __ovld sub_group_non_uniform_scan_inclusive_mul( uchar value );
+short __ovld sub_group_non_uniform_scan_inclusive_mul( short value );
+ushort __ovld sub_group_non_uniform_scan_inclusive_mul( ushort value );
+int __ovld sub_group_non_uniform_scan_inclusive_mul( int value );
+uint __ovld sub_group_non_uniform_scan_inclusive_mul( uint value );
+long __ovld sub_group_non_uniform_scan_inclusive_mul( long value );
+ulong __ovld sub_group_non_uniform_scan_inclusive_mul( ulong value );
+float __ovld sub_group_non_uniform_scan_inclusive_mul( float value );
+
+char __ovld sub_group_non_uniform_scan_inclusive_min( char value );
+uchar __ovld sub_group_non_uniform_scan_inclusive_min( uchar value );
+short __ovld sub_group_non_uniform_scan_inclusive_min( short value );
+ushort __ovld sub_group_non_uniform_scan_inclusive_min( ushort value );
+int __ovld sub_group_non_uniform_scan_inclusive_min( int value );
+uint __ovld sub_group_non_uniform_scan_inclusive_min( uint value );
+long __ovld sub_group_non_uniform_scan_inclusive_min( long value );
+ulong __ovld sub_group_non_uniform_scan_inclusive_min( ulong value );
+float __ovld sub_group_non_uniform_scan_inclusive_min( float value );
+
+char __ovld sub_group_non_uniform_scan_inclusive_max( char value );
+uchar __ovld sub_group_non_uniform_scan_inclusive_max( uchar value );
+short __ovld sub_group_non_uniform_scan_inclusive_max( short value );
+ushort __ovld sub_group_non_uniform_scan_inclusive_max( ushort value );
+int __ovld sub_group_non_uniform_scan_inclusive_max( int value );
+uint __ovld sub_group_non_uniform_scan_inclusive_max( uint value );
+long __ovld sub_group_non_uniform_scan_inclusive_max( long value );
+ulong __ovld sub_group_non_uniform_scan_inclusive_max( ulong value );
+float __ovld sub_group_non_uniform_scan_inclusive_max( float value );
+
+char __ovld sub_group_non_uniform_scan_exclusive_add( char value );
+uchar __ovld sub_group_non_uniform_scan_exclusive_add( uchar value );
+short __ovld sub_group_non_uniform_scan_exclusive_add( short value );
+ushort __ovld sub_group_non_uniform_scan_exclusive_add( ushort value );
+int __ovld sub_group_non_uniform_scan_exclusive_add( int value );
+uint __ovld sub_group_non_uniform_scan_exclusive_add( uint value );
+long __ovld sub_group_non_uniform_scan_exclusive_add( long value );
+ulong __ovld sub_group_non_uniform_scan_exclusive_add( ulong value );
+float __ovld sub_group_non_uniform_scan_exclusive_add( float value );
+
+char __ovld sub_group_non_uniform_scan_exclusive_mul( char value );
+uchar __ovld sub_group_non_uniform_scan_exclusive_mul( uchar value );
+short __ovld sub_group_non_uniform_scan_exclusive_mul( short value );
+ushort __ovld sub_group_non_uniform_scan_exclusive_mul( ushort value );
+int __ovld sub_group_non_uniform_scan_exclusive_mul( int value );
+uint __ovld sub_group_non_uniform_scan_exclusive_mul( uint value );
+long __ovld sub_group_non_uniform_scan_exclusive_mul( long value );
+ulong __ovld sub_group_non_uniform_scan_exclusive_mul( ulong value );
+float __ovld sub_group_non_uniform_scan_exclusive_mul( float value );
+
+char __ovld sub_group_non_uniform_scan_exclusive_min( char value );
+uchar __ovld sub_group_non_uniform_scan_exclusive_min( uchar value );
+short __ovld sub_group_non_uniform_scan_exclusive_min( short value );
+ushort __ovld sub_group_non_uniform_scan_exclusive_min( ushort value );
+int __ovld sub_group_non_uniform_scan_exclusive_min( int value );
+uint __ovld sub_group_non_uniform_scan_exclusive_min( uint value );
+long __ovld sub_group_non_uniform_scan_exclusive_min( long value );
+ulong __ovld sub_group_non_uniform_scan_exclusive_min( ulong value );
+float __ovld sub_group_non_uniform_scan_exclusive_min( float value );
+
+char __ovld sub_group_non_uniform_scan_exclusive_max( char value );
+uchar __ovld sub_group_non_uniform_scan_exclusive_max( uchar value );
+short __ovld sub_group_non_uniform_scan_exclusive_max( short value );
+ushort __ovld sub_group_non_uniform_scan_exclusive_max( ushort value );
+int __ovld sub_group_non_uniform_scan_exclusive_max( int value );
+uint __ovld sub_group_non_uniform_scan_exclusive_max( uint value );
+long __ovld sub_group_non_uniform_scan_exclusive_max( long value );
+ulong __ovld sub_group_non_uniform_scan_exclusive_max( ulong value );
+float __ovld sub_group_non_uniform_scan_exclusive_max( float value );
+
+char __ovld sub_group_non_uniform_reduce_and( char value );
+uchar __ovld sub_group_non_uniform_reduce_and( uchar value );
+short __ovld sub_group_non_uniform_reduce_and( short value );
+ushort __ovld sub_group_non_uniform_reduce_and( ushort value );
+int __ovld sub_group_non_uniform_reduce_and( int value );
+uint __ovld sub_group_non_uniform_reduce_and( uint value );
+long __ovld sub_group_non_uniform_reduce_and( long value );
+ulong __ovld sub_group_non_uniform_reduce_and( ulong value );
+
+char __ovld sub_group_non_uniform_reduce_or( char value );
+uchar __ovld sub_group_non_uniform_reduce_or( uchar value );
+short __ovld sub_group_non_uniform_reduce_or( short value );
+ushort __ovld sub_group_non_uniform_reduce_or( ushort value );
+int __ovld sub_group_non_uniform_reduce_or( int value );
+uint __ovld sub_group_non_uniform_reduce_or( uint value );
+long __ovld sub_group_non_uniform_reduce_or( long value );
+ulong __ovld sub_group_non_uniform_reduce_or( ulong value );
+
+char __ovld sub_group_non_uniform_reduce_xor( char value );
+uchar __ovld sub_group_non_uniform_reduce_xor( uchar value );
+short __ovld sub_group_non_uniform_reduce_xor( short value );
+ushort __ovld sub_group_non_uniform_reduce_xor( ushort value );
+int __ovld sub_group_non_uniform_reduce_xor( int value );
+uint __ovld sub_group_non_uniform_reduce_xor( uint value );
+long __ovld sub_group_non_uniform_reduce_xor( long value );
+ulong __ovld sub_group_non_uniform_reduce_xor( ulong value );
+
+char __ovld sub_group_non_uniform_scan_inclusive_and( char value );
+uchar __ovld sub_group_non_uniform_scan_inclusive_and( uchar value );
+short __ovld sub_group_non_uniform_scan_inclusive_and( short value );
+ushort __ovld sub_group_non_uniform_scan_inclusive_and( ushort value );
+int __ovld sub_group_non_uniform_scan_inclusive_and( int value );
+uint __ovld sub_group_non_uniform_scan_inclusive_and( uint value );
+long __ovld sub_group_non_uniform_scan_inclusive_and( long value );
+ulong __ovld sub_group_non_uniform_scan_inclusive_and( ulong value );
+
+char __ovld sub_group_non_uniform_scan_inclusive_or( char value );
+uchar __ovld sub_group_non_uniform_scan_inclusive_or( uchar value );
+short __ovld sub_group_non_uniform_scan_inclusive_or( short value );
+ushort __ovld sub_group_non_uniform_scan_inclusive_or( ushort value );
+int __ovld sub_group_non_uniform_scan_inclusive_or( int value );
+uint __ovld sub_group_non_uniform_scan_inclusive_or( uint value );
+long __ovld sub_group_non_uniform_scan_inclusive_or( long value );
+ulong __ovld sub_group_non_uniform_scan_inclusive_or( ulong value );
+
+char __ovld sub_group_non_uniform_scan_inclusive_xor( char value );
+uchar __ovld sub_group_non_uniform_scan_inclusive_xor( uchar value );
+short __ovld sub_group_non_uniform_scan_inclusive_xor( short value );
+ushort __ovld sub_group_non_uniform_scan_inclusive_xor( ushort value );
+int __ovld sub_group_non_uniform_scan_inclusive_xor( int value );
+uint __ovld sub_group_non_uniform_scan_inclusive_xor( uint value );
+long __ovld sub_group_non_uniform_scan_inclusive_xor( long value );
+ulong __ovld sub_group_non_uniform_scan_inclusive_xor( ulong value );
+
+char __ovld sub_group_non_uniform_scan_exclusive_and( char value );
+uchar __ovld sub_group_non_uniform_scan_exclusive_and( uchar value );
+short __ovld sub_group_non_uniform_scan_exclusive_and( short value );
+ushort __ovld sub_group_non_uniform_scan_exclusive_and( ushort value );
+int __ovld sub_group_non_uniform_scan_exclusive_and( int value );
+uint __ovld sub_group_non_uniform_scan_exclusive_and( uint value );
+long __ovld sub_group_non_uniform_scan_exclusive_and( long value );
+ulong __ovld sub_group_non_uniform_scan_exclusive_and( ulong value );
+
+char __ovld sub_group_non_uniform_scan_exclusive_or( char value );
+uchar __ovld sub_group_non_uniform_scan_exclusive_or( uchar value );
+short __ovld sub_group_non_uniform_scan_exclusive_or( short value );
+ushort __ovld sub_group_non_uniform_scan_exclusive_or( ushort value );
+int __ovld sub_group_non_uniform_scan_exclusive_or( int value );
+uint __ovld sub_group_non_uniform_scan_exclusive_or( uint value );
+long __ovld sub_group_non_uniform_scan_exclusive_or( long value );
+ulong __ovld sub_group_non_uniform_scan_exclusive_or( ulong value );
+
+char __ovld sub_group_non_uniform_scan_exclusive_xor( char value );
+uchar __ovld sub_group_non_uniform_scan_exclusive_xor( uchar value );
+short __ovld sub_group_non_uniform_scan_exclusive_xor( short value );
+ushort __ovld sub_group_non_uniform_scan_exclusive_xor( ushort value );
+int __ovld sub_group_non_uniform_scan_exclusive_xor( int value );
+uint __ovld sub_group_non_uniform_scan_exclusive_xor( uint value );
+long __ovld sub_group_non_uniform_scan_exclusive_xor( long value );
+ulong __ovld sub_group_non_uniform_scan_exclusive_xor( ulong value );
+
+int __ovld sub_group_non_uniform_reduce_logical_and( int predicate );
+int __ovld sub_group_non_uniform_reduce_logical_or( int predicate );
+int __ovld sub_group_non_uniform_reduce_logical_xor( int predicate );
+
+int __ovld sub_group_non_uniform_scan_inclusive_logical_and( int predicate );
+int __ovld sub_group_non_uniform_scan_inclusive_logical_or( int predicate );
+int __ovld sub_group_non_uniform_scan_inclusive_logical_xor( int predicate );
+
+int __ovld sub_group_non_uniform_scan_exclusive_logical_and( int predicate );
+int __ovld sub_group_non_uniform_scan_exclusive_logical_or( int predicate );
+int __ovld sub_group_non_uniform_scan_exclusive_logical_xor( int predicate );
+
+#if defined(cl_khr_fp16)
+half __ovld sub_group_non_uniform_reduce_add( half value );
+half __ovld sub_group_non_uniform_reduce_mul( half value );
+half __ovld sub_group_non_uniform_reduce_min( half value );
+half __ovld sub_group_non_uniform_reduce_max( half value );
+half __ovld sub_group_non_uniform_scan_inclusive_add( half value );
+half __ovld sub_group_non_uniform_scan_inclusive_mul( half value );
+half __ovld sub_group_non_uniform_scan_inclusive_min( half value );
+half __ovld sub_group_non_uniform_scan_inclusive_max( half value );
+half __ovld sub_group_non_uniform_scan_exclusive_add( half value );
+half __ovld sub_group_non_uniform_scan_exclusive_mul( half value );
+half __ovld sub_group_non_uniform_scan_exclusive_min( half value );
+half __ovld sub_group_non_uniform_scan_exclusive_max( half value );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double __ovld sub_group_non_uniform_reduce_add( double value );
+double __ovld sub_group_non_uniform_reduce_mul( double value );
+double __ovld sub_group_non_uniform_reduce_min( double value );
+double __ovld sub_group_non_uniform_reduce_max( double value );
+double __ovld sub_group_non_uniform_scan_inclusive_add( double value );
+double __ovld sub_group_non_uniform_scan_inclusive_mul( double value );
+double __ovld sub_group_non_uniform_scan_inclusive_min( double value );
+double __ovld sub_group_non_uniform_scan_inclusive_max( double value );
+double __ovld sub_group_non_uniform_scan_exclusive_add( double value );
+double __ovld sub_group_non_uniform_scan_exclusive_mul( double value );
+double __ovld sub_group_non_uniform_scan_exclusive_min( double value );
+double __ovld sub_group_non_uniform_scan_exclusive_max( double value );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_non_uniform_arithmetic
+
+#if defined(cl_khr_subgroup_shuffle)
+char __ovld sub_group_shuffle( char value, uint index );
+uchar __ovld sub_group_shuffle( uchar value, uint index );
+short __ovld sub_group_shuffle( short value, uint index );
+ushort __ovld sub_group_shuffle( ushort value, uint index );
+int __ovld sub_group_shuffle( int value, uint index );
+uint __ovld sub_group_shuffle( uint value, uint index );
+long __ovld sub_group_shuffle( long value, uint index );
+ulong __ovld sub_group_shuffle( ulong value, uint index );
+float __ovld sub_group_shuffle( float value, uint index );
+
+char __ovld sub_group_shuffle_xor( char value, uint mask );
+uchar __ovld sub_group_shuffle_xor( uchar value, uint mask );
+short __ovld sub_group_shuffle_xor( short value, uint mask );
+ushort __ovld sub_group_shuffle_xor( ushort value, uint mask );
+int __ovld sub_group_shuffle_xor( int value, uint mask );
+uint __ovld sub_group_shuffle_xor( uint value, uint mask );
+long __ovld sub_group_shuffle_xor( long value, uint mask );
+ulong __ovld sub_group_shuffle_xor( ulong value, uint mask );
+float __ovld sub_group_shuffle_xor( float value, uint mask );
+
+#if defined(cl_khr_fp16)
+half __ovld sub_group_shuffle( half value, uint index );
+half __ovld sub_group_shuffle_xor( half value, uint mask );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double __ovld sub_group_shuffle( double value, uint index );
+double __ovld sub_group_shuffle_xor( double value, uint mask );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_shuffle
+
+#if defined(cl_khr_subgroup_shuffle_relative)
+char __ovld sub_group_shuffle_up( char value, uint delta );
+uchar __ovld sub_group_shuffle_up( uchar value, uint delta );
+short __ovld sub_group_shuffle_up( short value, uint delta );
+ushort __ovld sub_group_shuffle_up( ushort value, uint delta );
+int __ovld sub_group_shuffle_up( int value, uint delta );
+uint __ovld sub_group_shuffle_up( uint value, uint delta );
+long __ovld sub_group_shuffle_up( long value, uint delta );
+ulong __ovld sub_group_shuffle_up( ulong value, uint delta );
+float __ovld sub_group_shuffle_up( float value, uint delta );
+
+char __ovld sub_group_shuffle_down( char value, uint delta );
+uchar __ovld sub_group_shuffle_down( uchar value, uint delta );
+short __ovld sub_group_shuffle_down( short value, uint delta );
+ushort __ovld sub_group_shuffle_down( ushort value, uint delta );
+int __ovld sub_group_shuffle_down( int value, uint delta );
+uint __ovld sub_group_shuffle_down( uint value, uint delta );
+long __ovld sub_group_shuffle_down( long value, uint delta );
+ulong __ovld sub_group_shuffle_down( ulong value, uint delta );
+float __ovld sub_group_shuffle_down( float value, uint delta );
+
+#if defined(cl_khr_fp16)
+half __ovld sub_group_shuffle_up( half value, uint delta );
+half __ovld sub_group_shuffle_down( half value, uint delta );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double __ovld sub_group_shuffle_up( double value, uint delta );
+double __ovld sub_group_shuffle_down( double value, uint delta );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_shuffle_relative
+
+#if defined(cl_khr_subgroup_clustered_reduce)
+char __ovld sub_group_clustered_reduce_add( char value, uint clustersize );
+uchar __ovld sub_group_clustered_reduce_add( uchar value, uint clustersize );
+short __ovld sub_group_clustered_reduce_add( short value, uint clustersize );
+ushort __ovld sub_group_clustered_reduce_add( ushort value, uint clustersize );
+int __ovld sub_group_clustered_reduce_add( int value, uint clustersize );
+uint __ovld sub_group_clustered_reduce_add( uint value, uint clustersize );
+long __ovld sub_group_clustered_reduce_add( long value, uint clustersize );
+ulong __ovld sub_group_clustered_reduce_add( ulong value, uint clustersize );
+float __ovld sub_group_clustered_reduce_add( float value, uint clustersize );
+
+char __ovld sub_group_clustered_reduce_mul( char value, uint clustersize );
+uchar __ovld sub_group_clustered_reduce_mul( uchar value, uint clustersize );
+short __ovld sub_group_clustered_reduce_mul( short value, uint clustersize );
+ushort __ovld sub_group_clustered_reduce_mul( ushort value, uint clustersize );
+int __ovld sub_group_clustered_reduce_mul( int value, uint clustersize );
+uint __ovld sub_group_clustered_reduce_mul( uint value, uint clustersize );
+long __ovld sub_group_clustered_reduce_mul( long value, uint clustersize );
+ulong __ovld sub_group_clustered_reduce_mul( ulong value, uint clustersize );
+float __ovld sub_group_clustered_reduce_mul( float value, uint clustersize );
+
+char __ovld sub_group_clustered_reduce_min( char value, uint clustersize );
+uchar __ovld sub_group_clustered_reduce_min( uchar value, uint clustersize );
+short __ovld sub_group_clustered_reduce_min( short value, uint clustersize );
+ushort __ovld sub_group_clustered_reduce_min( ushort value, uint clustersize );
+int __ovld sub_group_clustered_reduce_min( int value, uint clustersize );
+uint __ovld sub_group_clustered_reduce_min( uint value, uint clustersize );
+long __ovld sub_group_clustered_reduce_min( long value, uint clustersize );
+ulong __ovld sub_group_clustered_reduce_min( ulong value, uint clustersize );
+float __ovld sub_group_clustered_reduce_min( float value, uint clustersize );
+
+char __ovld sub_group_clustered_reduce_max( char value, uint clustersize );
+uchar __ovld sub_group_clustered_reduce_max( uchar value, uint clustersize );
+short __ovld sub_group_clustered_reduce_max( short value, uint clustersize );
+ushort __ovld sub_group_clustered_reduce_max( ushort value, uint clustersize );
+int __ovld sub_group_clustered_reduce_max( int value, uint clustersize );
+uint __ovld sub_group_clustered_reduce_max( uint value, uint clustersize );
+long __ovld sub_group_clustered_reduce_max( long value, uint clustersize );
+ulong __ovld sub_group_clustered_reduce_max( ulong value, uint clustersize );
+float __ovld sub_group_clustered_reduce_max( float value, uint clustersize );
+
+char __ovld sub_group_clustered_reduce_and( char value, uint clustersize );
+uchar __ovld sub_group_clustered_reduce_and( uchar value, uint clustersize );
+short __ovld sub_group_clustered_reduce_and( short value, uint clustersize );
+ushort __ovld sub_group_clustered_reduce_and( ushort value, uint clustersize );
+int __ovld sub_group_clustered_reduce_and( int value, uint clustersize );
+uint __ovld sub_group_clustered_reduce_and( uint value, uint clustersize );
+long __ovld sub_group_clustered_reduce_and( long value, uint clustersize );
+ulong __ovld sub_group_clustered_reduce_and( ulong value, uint clustersize );
+
+char __ovld sub_group_clustered_reduce_or( char value, uint clustersize );
+uchar __ovld sub_group_clustered_reduce_or( uchar value, uint clustersize );
+short __ovld sub_group_clustered_reduce_or( short value, uint clustersize );
+ushort __ovld sub_group_clustered_reduce_or( ushort value, uint clustersize );
+int __ovld sub_group_clustered_reduce_or( int value, uint clustersize );
+uint __ovld sub_group_clustered_reduce_or( uint value, uint clustersize );
+long __ovld sub_group_clustered_reduce_or( long value, uint clustersize );
+ulong __ovld sub_group_clustered_reduce_or( ulong value, uint clustersize );
+
+char __ovld sub_group_clustered_reduce_xor( char value, uint clustersize );
+uchar __ovld sub_group_clustered_reduce_xor( uchar value, uint clustersize );
+short __ovld sub_group_clustered_reduce_xor( short value, uint clustersize );
+ushort __ovld sub_group_clustered_reduce_xor( ushort value, uint clustersize );
+int __ovld sub_group_clustered_reduce_xor( int value, uint clustersize );
+uint __ovld sub_group_clustered_reduce_xor( uint value, uint clustersize );
+long __ovld sub_group_clustered_reduce_xor( long value, uint clustersize );
+ulong __ovld sub_group_clustered_reduce_xor( ulong value, uint clustersize );
+
+int __ovld sub_group_clustered_reduce_logical_and( int predicate, uint clustersize );
+int __ovld sub_group_clustered_reduce_logical_or( int predicate, uint clustersize );
+int __ovld sub_group_clustered_reduce_logical_xor( int predicate, uint clustersize );
+
+#if defined(cl_khr_fp16)
+half __ovld sub_group_clustered_reduce_add( half value, uint clustersize );
+half __ovld sub_group_clustered_reduce_mul( half value, uint clustersize );
+half __ovld sub_group_clustered_reduce_min( half value, uint clustersize );
+half __ovld sub_group_clustered_reduce_max( half value, uint clustersize );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double __ovld sub_group_clustered_reduce_add( double value, uint clustersize );
+double __ovld sub_group_clustered_reduce_mul( double value, uint clustersize );
+double __ovld sub_group_clustered_reduce_min( double value, uint clustersize );
+double __ovld sub_group_clustered_reduce_max( double value, uint clustersize );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_clustered_reduce
+
#if defined(cl_intel_subgroups)
// Intel-Specific Sub Group Functions
float __ovld __conv intel_sub_group_shuffle( float x, uint c );
diff --git a/clang/lib/Headers/openmp_wrappers/__clang_openmp_math_declares.h b/clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h
index a422c98bf97d..406c9748e286 100644
--- a/clang/lib/Headers/openmp_wrappers/__clang_openmp_math_declares.h
+++ b/clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h
@@ -1,4 +1,4 @@
-/*===---- __clang_openmp_math_declares.h - OpenMP math declares ------------===
+/*===- __clang_openmp_device_functions.h - OpenMP device function declares -===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
@@ -7,27 +7,36 @@
*===-----------------------------------------------------------------------===
*/
-#ifndef __CLANG_OPENMP_MATH_DECLARES_H__
-#define __CLANG_OPENMP_MATH_DECLARES_H__
+#ifndef __CLANG_OPENMP_DEVICE_FUNCTIONS_H__
+#define __CLANG_OPENMP_DEVICE_FUNCTIONS_H__
#ifndef _OPENMP
#error "This file is for OpenMP compilation only."
#endif
-#if defined(__NVPTX__) && defined(_OPENMP)
+#pragma omp begin declare variant match( \
+ device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
-#define __CUDA__
-
-#if defined(__cplusplus)
- #include <__clang_cuda_math_forward_declares.h>
+#ifdef __cplusplus
+extern "C" {
#endif
+#define __CUDA__
+#define __OPENMP_NVPTX__
+
/// Include declarations for libdevice functions.
#include <__clang_cuda_libdevice_declares.h>
+
/// Provide definitions for these functions.
#include <__clang_cuda_device_functions.h>
+#undef __OPENMP_NVPTX__
#undef __CUDA__
+#ifdef __cplusplus
+} // extern "C"
#endif
+
+#pragma omp end declare variant
+
#endif
diff --git a/clang/lib/Headers/openmp_wrappers/__clang_openmp_math.h b/clang/lib/Headers/openmp_wrappers/__clang_openmp_math.h
deleted file mode 100644
index 5d7ce9a965d3..000000000000
--- a/clang/lib/Headers/openmp_wrappers/__clang_openmp_math.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*===---- __clang_openmp_math.h - OpenMP target math support ---------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#if defined(__NVPTX__) && defined(_OPENMP)
-/// TODO:
-/// We are currently reusing the functionality of the Clang-CUDA code path
-/// as an alternative to the host declarations provided by math.h and cmath.
-/// This is suboptimal.
-///
-/// We should instead declare the device functions in a similar way, e.g.,
-/// through OpenMP 5.0 variants, and afterwards populate the module with the
-/// host declarations by unconditionally including the host math.h or cmath,
-/// respectively. This is actually what the Clang-CUDA code path does, using
-/// __device__ instead of variants to avoid redeclarations and get the desired
-/// overload resolution.
-
-#define __CUDA__
-
-#if defined(__cplusplus)
- #include <__clang_cuda_cmath.h>
-#endif
-
-#undef __CUDA__
-
-/// Magic macro for stopping the math.h/cmath host header from being included.
-#define __CLANG_NO_HOST_MATH__
-
-#endif
-
diff --git a/clang/lib/Headers/openmp_wrappers/cmath b/clang/lib/Headers/openmp_wrappers/cmath
index a5183a1d8d1b..bd6011eb6f6d 100644
--- a/clang/lib/Headers/openmp_wrappers/cmath
+++ b/clang/lib/Headers/openmp_wrappers/cmath
@@ -1,4 +1,4 @@
-/*===-------------- cmath - Alternative cmath header -----------------------===
+/*===-- __clang_openmp_device_functions.h - OpenMP math declares ------ c++ -===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
@@ -7,10 +7,69 @@
*===-----------------------------------------------------------------------===
*/
-#include <__clang_openmp_math.h>
+#ifndef __CLANG_OPENMP_CMATH_H__
+#define __CLANG_OPENMP_CMATH_H__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
-#ifndef __CLANG_NO_HOST_MATH__
#include_next <cmath>
-#else
-#undef __CLANG_NO_HOST_MATH__
+
+// Make sure we include our math.h overlay, it probably happend already but we
+// need to be sure.
+#include <math.h>
+
+// We (might) need cstdlib because __clang_cuda_cmath.h below declares `abs`
+// which might live in cstdlib.
+#include <cstdlib>
+
+#pragma omp begin declare variant match( \
+ device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
+
+#define __CUDA__
+#define __OPENMP_NVPTX__
+#include <__clang_cuda_cmath.h>
+#undef __OPENMP_NVPTX__
+#undef __CUDA__
+
+// Overloads not provided by the CUDA wrappers but by the CUDA system headers.
+// Since we do not include the latter we define them ourselves.
+#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
+
+__DEVICE__ float acosh(float __x) { return ::acoshf(__x); }
+__DEVICE__ float asinh(float __x) { return ::asinhf(__x); }
+__DEVICE__ float atanh(float __x) { return ::atanhf(__x); }
+__DEVICE__ float cbrt(float __x) { return ::cbrtf(__x); }
+__DEVICE__ float erf(float __x) { return ::erff(__x); }
+__DEVICE__ float erfc(float __x) { return ::erfcf(__x); }
+__DEVICE__ float exp2(float __x) { return ::exp2f(__x); }
+__DEVICE__ float expm1(float __x) { return ::expm1f(__x); }
+__DEVICE__ float fdim(float __x, float __y) { return ::fdimf(__x, __y); }
+__DEVICE__ float hypot(float __x, float __y) { return ::hypotf(__x, __y); }
+__DEVICE__ int ilogb(float __x) { return ::ilogbf(__x); }
+__DEVICE__ float lgamma(float __x) { return ::lgammaf(__x); }
+__DEVICE__ long long int llrint(float __x) { return ::llrintf(__x); }
+__DEVICE__ long long int llround(float __x) { return ::llroundf(__x); }
+__DEVICE__ float log1p(float __x) { return ::log1pf(__x); }
+__DEVICE__ float log2(float __x) { return ::log2f(__x); }
+__DEVICE__ float logb(float __x) { return ::logbf(__x); }
+__DEVICE__ long int lrint(float __x) { return ::lrintf(__x); }
+__DEVICE__ long int lround(float __x) { return ::lroundf(__x); }
+__DEVICE__ float nextafter(float __x, float __y) {
+ return ::nextafterf(__x, __y);
+}
+__DEVICE__ float remainder(float __x, float __y) {
+ return ::remainderf(__x, __y);
+}
+__DEVICE__ float scalbln(float __x, long int __y) {
+ return ::scalblnf(__x, __y);
+}
+__DEVICE__ float scalbn(float __x, int __y) { return ::scalbnf(__x, __y); }
+__DEVICE__ float tgamma(float __x) { return ::tgammaf(__x); }
+
+#undef __DEVICE__
+
+#pragma omp end declare variant
+
#endif
diff --git a/clang/lib/Headers/openmp_wrappers/complex b/clang/lib/Headers/openmp_wrappers/complex
new file mode 100644
index 000000000000..1ed0b14879ef
--- /dev/null
+++ b/clang/lib/Headers/openmp_wrappers/complex
@@ -0,0 +1,25 @@
+/*===-- complex --- OpenMP complex wrapper for target regions --------- c++ -===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_OPENMP_COMPLEX__
+#define __CLANG_OPENMP_COMPLEX__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+// We require std::math functions in the complex builtins below.
+#include <cmath>
+
+#define __CUDA__
+#include <__clang_cuda_complex_builtins.h>
+#endif
+
+// Grab the host header too.
+#include_next <complex>
diff --git a/clang/lib/Headers/openmp_wrappers/complex.h b/clang/lib/Headers/openmp_wrappers/complex.h
new file mode 100644
index 000000000000..829c7a785725
--- /dev/null
+++ b/clang/lib/Headers/openmp_wrappers/complex.h
@@ -0,0 +1,25 @@
+/*===-- complex --- OpenMP complex wrapper for target regions --------- c++ -===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_OPENMP_COMPLEX_H__
+#define __CLANG_OPENMP_COMPLEX_H__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+// We require math functions in the complex builtins below.
+#include <math.h>
+
+#define __CUDA__
+#include <__clang_cuda_complex_builtins.h>
+#endif
+
+// Grab the host header too.
+#include_next <complex.h>
diff --git a/clang/lib/Headers/openmp_wrappers/math.h b/clang/lib/Headers/openmp_wrappers/math.h
index d2786ecb2424..c64af8b13ece 100644
--- a/clang/lib/Headers/openmp_wrappers/math.h
+++ b/clang/lib/Headers/openmp_wrappers/math.h
@@ -1,4 +1,4 @@
-/*===------------- math.h - Alternative math.h header ----------------------===
+/*===---- openmp_wrapper/math.h -------- OpenMP math.h intercept ------ c++ -===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
@@ -7,11 +7,45 @@
*===-----------------------------------------------------------------------===
*/
-#include <__clang_openmp_math.h>
+// If we are in C++ mode and include <math.h> (not <cmath>) first, we still need
+// to make sure <cmath> is read first. The problem otherwise is that we haven't
+// seen the declarations of the math.h functions when the system math.h includes
+// our cmath overlay. However, our cmath overlay, or better the underlying
+// overlay, e.g. CUDA, uses the math.h functions. Since we haven't declared them
+// yet we get errors. CUDA avoids this by eagerly declaring all math functions
+// (in the __device__ space) but we cannot do this. Instead we break the
+// dependence by forcing cmath to go first. While our cmath will in turn include
+// this file, the cmath guards will prevent recursion.
+#ifdef __cplusplus
+#include <cmath>
+#endif
-#ifndef __CLANG_NO_HOST_MATH__
-#include_next <math.h>
-#else
-#undef __CLANG_NO_HOST_MATH__
+#ifndef __CLANG_OPENMP_MATH_H__
+#define __CLANG_OPENMP_MATH_H__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
#endif
+#include_next <math.h>
+
+// We need limits.h for __clang_cuda_math.h below and because it should not hurt
+// we include it eagerly here.
+#include <limits.h>
+
+// We need stdlib.h because (for now) __clang_cuda_math.h below declares `abs`
+// which should live in stdlib.h.
+#include <stdlib.h>
+
+#pragma omp begin declare variant match( \
+ device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
+
+#define __CUDA__
+#define __OPENMP_NVPTX__
+#include <__clang_cuda_math.h>
+#undef __OPENMP_NVPTX__
+#undef __CUDA__
+
+#pragma omp end declare variant
+
+#endif
diff --git a/clang/lib/Headers/openmp_wrappers/new b/clang/lib/Headers/openmp_wrappers/new
new file mode 100644
index 000000000000..1387d925b126
--- /dev/null
+++ b/clang/lib/Headers/openmp_wrappers/new
@@ -0,0 +1,70 @@
+//===--------- new - OPENMP wrapper for <new> ------------------------------===
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===-----------------------------------------------------------------------===
+
+#ifndef __CLANG_OPENMP_WRAPPERS_NEW
+#define __CLANG_OPENMP_WRAPPERS_NEW
+
+#include_next <new>
+
+#if defined(__NVPTX__) && defined(_OPENMP)
+
+#include <cstdlib>
+
+#pragma push_macro("OPENMP_NOEXCEPT")
+#if __cplusplus >= 201103L
+#define OPENMP_NOEXCEPT noexcept
+#else
+#define OPENMP_NOEXCEPT
+#endif
+
+// Device overrides for non-placement new and delete.
+inline void *operator new(__SIZE_TYPE__ size) {
+ if (size == 0)
+ size = 1;
+ return ::malloc(size);
+}
+inline void *operator new(__SIZE_TYPE__ size,
+ const std::nothrow_t &) OPENMP_NOEXCEPT {
+ return ::operator new(size);
+}
+
+inline void *operator new[](__SIZE_TYPE__ size) { return ::operator new(size); }
+inline void *operator new[](__SIZE_TYPE__ size, const std::nothrow_t &) {
+ return ::operator new(size);
+}
+
+inline void operator delete(void *ptr)OPENMP_NOEXCEPT {
+ if (ptr)
+ ::free(ptr);
+}
+inline void operator delete(void *ptr, const std::nothrow_t &)OPENMP_NOEXCEPT {
+ ::operator delete(ptr);
+}
+
+inline void operator delete[](void *ptr) OPENMP_NOEXCEPT {
+ ::operator delete(ptr);
+}
+inline void operator delete[](void *ptr,
+ const std::nothrow_t &) OPENMP_NOEXCEPT {
+ ::operator delete(ptr);
+}
+
+// Sized delete, C++14 only.
+#if __cplusplus >= 201402L
+inline void operator delete(void *ptr, __SIZE_TYPE__ size)OPENMP_NOEXCEPT {
+ ::operator delete(ptr);
+}
+inline void operator delete[](void *ptr, __SIZE_TYPE__ size) OPENMP_NOEXCEPT {
+ ::operator delete(ptr);
+}
+#endif
+
+#pragma pop_macro("OPENMP_NOEXCEPT")
+#endif
+
+#endif // include guard
diff --git a/clang/lib/Headers/openmp_wrappers/time.h b/clang/lib/Headers/openmp_wrappers/time.h
new file mode 100644
index 000000000000..c760dd1ed963
--- /dev/null
+++ b/clang/lib/Headers/openmp_wrappers/time.h
@@ -0,0 +1,32 @@
+/*===---- time.h - OpenMP time header wrapper ------------------------ c ---===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_OPENMP_TIME_H__
+#define __CLANG_OPENMP_TIME_H__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+#if defined(__cplusplus)
+#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
+#else
+#define __DEVICE__ static __attribute__((always_inline, nothrow))
+#endif
+
+#include_next <time.h>
+
+#pragma omp begin declare variant match( \
+ device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
+
+__DEVICE__ clock_t clock() { return __nvvm_read_ptx_sreg_clock(); }
+
+#pragma omp end declare variant
+
+#endif
diff --git a/clang/lib/Headers/ppc_wrappers/emmintrin.h b/clang/lib/Headers/ppc_wrappers/emmintrin.h
index 293276cc9be0..4dcb8485e2e9 100644
--- a/clang/lib/Headers/ppc_wrappers/emmintrin.h
+++ b/clang/lib/Headers/ppc_wrappers/emmintrin.h
@@ -1749,7 +1749,7 @@ _mm_sll_epi64 (__m128i __A, __m128i __B)
lshift = vec_splat ((__v2du) __B, 0);
shmask = vec_cmplt (lshift, shmax);
result = vec_sl ((__v2du) __A, lshift);
- result = vec_sel ((__v2du) shmask, result, shmask);
+ result = (__v2du)vec_sel ((__v2df) shmask, (__v2df)result, shmask);
return (__m128i) result;
}
@@ -1843,7 +1843,7 @@ _mm_srl_epi64 (__m128i __A, __m128i __B)
rshift = vec_splat ((__v2du) __B, 0);
shmask = vec_cmplt (rshift, shmax);
result = vec_sr ((__v2du) __A, rshift);
- result = vec_sel ((__v2du) shmask, result, shmask);
+ result = (__v2du)vec_sel ((__v2df) shmask, (__v2df)result, shmask);
return (__m128i) result;
}
diff --git a/clang/lib/Headers/serializeintrin.h b/clang/lib/Headers/serializeintrin.h
new file mode 100644
index 000000000000..b774e5a24a0b
--- /dev/null
+++ b/clang/lib/Headers/serializeintrin.h
@@ -0,0 +1,30 @@
+/*===--------------- serializeintrin.h - serialize intrinsics --------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <serializeintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __SERIALIZEINTRIN_H
+#define __SERIALIZEINTRIN_H
+
+/// Serialize instruction fetch and execution.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> SERIALIZE </c> instruction.
+///
+static __inline__ void
+__attribute__((__always_inline__, __nodebug__, __target__("serialize")))
+_serialize (void)
+{
+ __builtin_ia32_serialize ();
+}
+
+#endif /* __SERIALIZEINTRIN_H */
diff --git a/clang/lib/Headers/tsxldtrkintrin.h b/clang/lib/Headers/tsxldtrkintrin.h
new file mode 100644
index 000000000000..491823e93fc0
--- /dev/null
+++ b/clang/lib/Headers/tsxldtrkintrin.h
@@ -0,0 +1,56 @@
+/*===------------- tsxldtrkintrin.h - tsxldtrk intrinsics ------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <tsxldtrkintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __TSXLDTRKINTRIN_H
+#define __TSXLDTRKINTRIN_H
+
+/* Define the default attributes for the functions in this file */
+#define _DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("tsxldtrk")))
+
+/// Marks the start of an TSX (RTM) suspend load address tracking region. If
+/// this intrinsic is used inside a transactional region, subsequent loads
+/// are not added to the read set of the transaction. If it's used inside a
+/// suspend load address tracking region it will cause transaction abort.
+/// If it's used outside of a transactional region it behaves like a NOP.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c XSUSLDTRK instruction.
+///
+static __inline__ void _DEFAULT_FN_ATTRS
+_xsusldtrk (void)
+{
+ __builtin_ia32_xsusldtrk();
+}
+
+/// Marks the end of an TSX (RTM) suspend load address tracking region. If this
+/// intrinsic is used inside a suspend load address tracking region it will
+/// end the suspend region and all following load addresses will be added to
+/// the transaction read set. If it's used inside an active transaction but
+/// not in a suspend region it will cause transaction abort. If it's used
+/// outside of a transactional region it behaves like a NOP.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c XRESLDTRK instruction.
+///
+static __inline__ void _DEFAULT_FN_ATTRS
+_xresldtrk (void)
+{
+ __builtin_ia32_xresldtrk();
+}
+
+#undef _DEFAULT_FN_ATTRS
+
+#endif /* __TSXLDTRKINTRIN_H */
diff --git a/clang/lib/Headers/vecintrin.h b/clang/lib/Headers/vecintrin.h
index c71b76a3ee39..e58c9769e8cb 100644
--- a/clang/lib/Headers/vecintrin.h
+++ b/clang/lib/Headers/vecintrin.h
@@ -43,1281 +43,1341 @@ __lcbb(const void *__ptr, unsigned short __len)
/*-- vec_extract ------------------------------------------------------------*/
static inline __ATTRS_o_ai signed char
-vec_extract(vector signed char __vec, int __index) {
+vec_extract(__vector signed char __vec, int __index) {
return __vec[__index & 15];
}
static inline __ATTRS_o_ai unsigned char
-vec_extract(vector bool char __vec, int __index) {
+vec_extract(__vector __bool char __vec, int __index) {
return __vec[__index & 15];
}
static inline __ATTRS_o_ai unsigned char
-vec_extract(vector unsigned char __vec, int __index) {
+vec_extract(__vector unsigned char __vec, int __index) {
return __vec[__index & 15];
}
static inline __ATTRS_o_ai signed short
-vec_extract(vector signed short __vec, int __index) {
+vec_extract(__vector signed short __vec, int __index) {
return __vec[__index & 7];
}
static inline __ATTRS_o_ai unsigned short
-vec_extract(vector bool short __vec, int __index) {
+vec_extract(__vector __bool short __vec, int __index) {
return __vec[__index & 7];
}
static inline __ATTRS_o_ai unsigned short
-vec_extract(vector unsigned short __vec, int __index) {
+vec_extract(__vector unsigned short __vec, int __index) {
return __vec[__index & 7];
}
static inline __ATTRS_o_ai signed int
-vec_extract(vector signed int __vec, int __index) {
+vec_extract(__vector signed int __vec, int __index) {
return __vec[__index & 3];
}
static inline __ATTRS_o_ai unsigned int
-vec_extract(vector bool int __vec, int __index) {
+vec_extract(__vector __bool int __vec, int __index) {
return __vec[__index & 3];
}
static inline __ATTRS_o_ai unsigned int
-vec_extract(vector unsigned int __vec, int __index) {
+vec_extract(__vector unsigned int __vec, int __index) {
return __vec[__index & 3];
}
static inline __ATTRS_o_ai signed long long
-vec_extract(vector signed long long __vec, int __index) {
+vec_extract(__vector signed long long __vec, int __index) {
return __vec[__index & 1];
}
static inline __ATTRS_o_ai unsigned long long
-vec_extract(vector bool long long __vec, int __index) {
+vec_extract(__vector __bool long long __vec, int __index) {
return __vec[__index & 1];
}
static inline __ATTRS_o_ai unsigned long long
-vec_extract(vector unsigned long long __vec, int __index) {
+vec_extract(__vector unsigned long long __vec, int __index) {
return __vec[__index & 1];
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai float
-vec_extract(vector float __vec, int __index) {
+vec_extract(__vector float __vec, int __index) {
return __vec[__index & 3];
}
#endif
static inline __ATTRS_o_ai double
-vec_extract(vector double __vec, int __index) {
+vec_extract(__vector double __vec, int __index) {
return __vec[__index & 1];
}
/*-- vec_insert -------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_insert(signed char __scalar, vector signed char __vec, int __index) {
+static inline __ATTRS_o_ai __vector signed char
+vec_insert(signed char __scalar, __vector signed char __vec, int __index) {
__vec[__index & 15] = __scalar;
return __vec;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_insert(unsigned char __scalar, vector bool char __vec, int __index) {
- vector unsigned char __newvec = (vector unsigned char)__vec;
+static inline __ATTRS_o_ai __vector unsigned char
+vec_insert(unsigned char __scalar, __vector __bool char __vec, int __index) {
+ __vector unsigned char __newvec = (__vector unsigned char)__vec;
__newvec[__index & 15] = (unsigned char)__scalar;
return __newvec;
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_insert(unsigned char __scalar, vector unsigned char __vec, int __index) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_insert(unsigned char __scalar, __vector unsigned char __vec, int __index) {
__vec[__index & 15] = __scalar;
return __vec;
}
-static inline __ATTRS_o_ai vector signed short
-vec_insert(signed short __scalar, vector signed short __vec, int __index) {
+static inline __ATTRS_o_ai __vector signed short
+vec_insert(signed short __scalar, __vector signed short __vec, int __index) {
__vec[__index & 7] = __scalar;
return __vec;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_insert(unsigned short __scalar, vector bool short __vec, int __index) {
- vector unsigned short __newvec = (vector unsigned short)__vec;
+static inline __ATTRS_o_ai __vector unsigned short
+vec_insert(unsigned short __scalar, __vector __bool short __vec,
+ int __index) {
+ __vector unsigned short __newvec = (__vector unsigned short)__vec;
__newvec[__index & 7] = (unsigned short)__scalar;
return __newvec;
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_insert(unsigned short __scalar, vector unsigned short __vec, int __index) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_insert(unsigned short __scalar, __vector unsigned short __vec,
+ int __index) {
__vec[__index & 7] = __scalar;
return __vec;
}
-static inline __ATTRS_o_ai vector signed int
-vec_insert(signed int __scalar, vector signed int __vec, int __index) {
+static inline __ATTRS_o_ai __vector signed int
+vec_insert(signed int __scalar, __vector signed int __vec, int __index) {
__vec[__index & 3] = __scalar;
return __vec;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_insert(unsigned int __scalar, vector bool int __vec, int __index) {
- vector unsigned int __newvec = (vector unsigned int)__vec;
+static inline __ATTRS_o_ai __vector unsigned int
+vec_insert(unsigned int __scalar, __vector __bool int __vec, int __index) {
+ __vector unsigned int __newvec = (__vector unsigned int)__vec;
__newvec[__index & 3] = __scalar;
return __newvec;
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_insert(unsigned int __scalar, vector unsigned int __vec, int __index) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_insert(unsigned int __scalar, __vector unsigned int __vec, int __index) {
__vec[__index & 3] = __scalar;
return __vec;
}
-static inline __ATTRS_o_ai vector signed long long
-vec_insert(signed long long __scalar, vector signed long long __vec,
+static inline __ATTRS_o_ai __vector signed long long
+vec_insert(signed long long __scalar, __vector signed long long __vec,
int __index) {
__vec[__index & 1] = __scalar;
return __vec;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_insert(unsigned long long __scalar, vector bool long long __vec,
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_insert(unsigned long long __scalar, __vector __bool long long __vec,
int __index) {
- vector unsigned long long __newvec = (vector unsigned long long)__vec;
+ __vector unsigned long long __newvec = (__vector unsigned long long)__vec;
__newvec[__index & 1] = __scalar;
return __newvec;
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_insert(unsigned long long __scalar, vector unsigned long long __vec,
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_insert(unsigned long long __scalar, __vector unsigned long long __vec,
int __index) {
__vec[__index & 1] = __scalar;
return __vec;
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_insert(float __scalar, vector float __vec, int __index) {
+static inline __ATTRS_o_ai __vector float
+vec_insert(float __scalar, __vector float __vec, int __index) {
__vec[__index & 3] = __scalar;
return __vec;
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_insert(double __scalar, vector double __vec, int __index) {
+static inline __ATTRS_o_ai __vector double
+vec_insert(double __scalar, __vector double __vec, int __index) {
__vec[__index & 1] = __scalar;
return __vec;
}
/*-- vec_promote ------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
+static inline __ATTRS_o_ai __vector signed char
vec_promote(signed char __scalar, int __index) {
- const vector signed char __zero = (vector signed char)0;
- vector signed char __vec = __builtin_shufflevector(__zero, __zero,
+ const __vector signed char __zero = (__vector signed char)0;
+ __vector signed char __vec = __builtin_shufflevector(__zero, __zero,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
__vec[__index & 15] = __scalar;
return __vec;
}
-static inline __ATTRS_o_ai vector unsigned char
+static inline __ATTRS_o_ai __vector unsigned char
vec_promote(unsigned char __scalar, int __index) {
- const vector unsigned char __zero = (vector unsigned char)0;
- vector unsigned char __vec = __builtin_shufflevector(__zero, __zero,
+ const __vector unsigned char __zero = (__vector unsigned char)0;
+ __vector unsigned char __vec = __builtin_shufflevector(__zero, __zero,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
__vec[__index & 15] = __scalar;
return __vec;
}
-static inline __ATTRS_o_ai vector signed short
+static inline __ATTRS_o_ai __vector signed short
vec_promote(signed short __scalar, int __index) {
- const vector signed short __zero = (vector signed short)0;
- vector signed short __vec = __builtin_shufflevector(__zero, __zero,
+ const __vector signed short __zero = (__vector signed short)0;
+ __vector signed short __vec = __builtin_shufflevector(__zero, __zero,
-1, -1, -1, -1, -1, -1, -1, -1);
__vec[__index & 7] = __scalar;
return __vec;
}
-static inline __ATTRS_o_ai vector unsigned short
+static inline __ATTRS_o_ai __vector unsigned short
vec_promote(unsigned short __scalar, int __index) {
- const vector unsigned short __zero = (vector unsigned short)0;
- vector unsigned short __vec = __builtin_shufflevector(__zero, __zero,
+ const __vector unsigned short __zero = (__vector unsigned short)0;
+ __vector unsigned short __vec = __builtin_shufflevector(__zero, __zero,
-1, -1, -1, -1, -1, -1, -1, -1);
__vec[__index & 7] = __scalar;
return __vec;
}
-static inline __ATTRS_o_ai vector signed int
+static inline __ATTRS_o_ai __vector signed int
vec_promote(signed int __scalar, int __index) {
- const vector signed int __zero = (vector signed int)0;
- vector signed int __vec = __builtin_shufflevector(__zero, __zero,
- -1, -1, -1, -1);
+ const __vector signed int __zero = (__vector signed int)0;
+ __vector signed int __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1);
__vec[__index & 3] = __scalar;
return __vec;
}
-static inline __ATTRS_o_ai vector unsigned int
+static inline __ATTRS_o_ai __vector unsigned int
vec_promote(unsigned int __scalar, int __index) {
- const vector unsigned int __zero = (vector unsigned int)0;
- vector unsigned int __vec = __builtin_shufflevector(__zero, __zero,
- -1, -1, -1, -1);
+ const __vector unsigned int __zero = (__vector unsigned int)0;
+ __vector unsigned int __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1);
__vec[__index & 3] = __scalar;
return __vec;
}
-static inline __ATTRS_o_ai vector signed long long
+static inline __ATTRS_o_ai __vector signed long long
vec_promote(signed long long __scalar, int __index) {
- const vector signed long long __zero = (vector signed long long)0;
- vector signed long long __vec = __builtin_shufflevector(__zero, __zero,
- -1, -1);
+ const __vector signed long long __zero = (__vector signed long long)0;
+ __vector signed long long __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1);
__vec[__index & 1] = __scalar;
return __vec;
}
-static inline __ATTRS_o_ai vector unsigned long long
+static inline __ATTRS_o_ai __vector unsigned long long
vec_promote(unsigned long long __scalar, int __index) {
- const vector unsigned long long __zero = (vector unsigned long long)0;
- vector unsigned long long __vec = __builtin_shufflevector(__zero, __zero,
- -1, -1);
+ const __vector unsigned long long __zero = (__vector unsigned long long)0;
+ __vector unsigned long long __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1);
__vec[__index & 1] = __scalar;
return __vec;
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
+static inline __ATTRS_o_ai __vector float
vec_promote(float __scalar, int __index) {
- const vector float __zero = (vector float)0;
- vector float __vec = __builtin_shufflevector(__zero, __zero, -1, -1, -1, -1);
+ const __vector float __zero = (__vector float)0.0f;
+ __vector float __vec = __builtin_shufflevector(__zero, __zero,
+ -1, -1, -1, -1);
__vec[__index & 3] = __scalar;
return __vec;
}
#endif
-static inline __ATTRS_o_ai vector double
+static inline __ATTRS_o_ai __vector double
vec_promote(double __scalar, int __index) {
- const vector double __zero = (vector double)0;
- vector double __vec = __builtin_shufflevector(__zero, __zero, -1, -1);
+ const __vector double __zero = (__vector double)0.0;
+ __vector double __vec = __builtin_shufflevector(__zero, __zero, -1, -1);
__vec[__index & 1] = __scalar;
return __vec;
}
/*-- vec_insert_and_zero ----------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
+static inline __ATTRS_o_ai __vector signed char
vec_insert_and_zero(const signed char *__ptr) {
- vector signed char __vec = (vector signed char)0;
+ __vector signed char __vec = (__vector signed char)0;
__vec[7] = *__ptr;
return __vec;
}
-static inline __ATTRS_o_ai vector unsigned char
+static inline __ATTRS_o_ai __vector unsigned char
vec_insert_and_zero(const unsigned char *__ptr) {
- vector unsigned char __vec = (vector unsigned char)0;
+ __vector unsigned char __vec = (__vector unsigned char)0;
__vec[7] = *__ptr;
return __vec;
}
-static inline __ATTRS_o_ai vector signed short
+static inline __ATTRS_o_ai __vector signed short
vec_insert_and_zero(const signed short *__ptr) {
- vector signed short __vec = (vector signed short)0;
+ __vector signed short __vec = (__vector signed short)0;
__vec[3] = *__ptr;
return __vec;
}
-static inline __ATTRS_o_ai vector unsigned short
+static inline __ATTRS_o_ai __vector unsigned short
vec_insert_and_zero(const unsigned short *__ptr) {
- vector unsigned short __vec = (vector unsigned short)0;
+ __vector unsigned short __vec = (__vector unsigned short)0;
__vec[3] = *__ptr;
return __vec;
}
-static inline __ATTRS_o_ai vector signed int
+static inline __ATTRS_o_ai __vector signed int
vec_insert_and_zero(const signed int *__ptr) {
- vector signed int __vec = (vector signed int)0;
+ __vector signed int __vec = (__vector signed int)0;
__vec[1] = *__ptr;
return __vec;
}
-static inline __ATTRS_o_ai vector unsigned int
+static inline __ATTRS_o_ai __vector unsigned int
vec_insert_and_zero(const unsigned int *__ptr) {
- vector unsigned int __vec = (vector unsigned int)0;
+ __vector unsigned int __vec = (__vector unsigned int)0;
__vec[1] = *__ptr;
return __vec;
}
-static inline __ATTRS_o_ai vector signed long long
+static inline __ATTRS_o_ai __vector signed long long
vec_insert_and_zero(const signed long long *__ptr) {
- vector signed long long __vec = (vector signed long long)0;
+ __vector signed long long __vec = (__vector signed long long)0;
__vec[0] = *__ptr;
return __vec;
}
-static inline __ATTRS_o_ai vector unsigned long long
+static inline __ATTRS_o_ai __vector unsigned long long
vec_insert_and_zero(const unsigned long long *__ptr) {
- vector unsigned long long __vec = (vector unsigned long long)0;
+ __vector unsigned long long __vec = (__vector unsigned long long)0;
__vec[0] = *__ptr;
return __vec;
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
+static inline __ATTRS_o_ai __vector float
vec_insert_and_zero(const float *__ptr) {
- vector float __vec = (vector float)0;
+ __vector float __vec = (__vector float)0.0f;
__vec[1] = *__ptr;
return __vec;
}
#endif
-static inline __ATTRS_o_ai vector double
+static inline __ATTRS_o_ai __vector double
vec_insert_and_zero(const double *__ptr) {
- vector double __vec = (vector double)0;
+ __vector double __vec = (__vector double)0.0;
__vec[0] = *__ptr;
return __vec;
}
/*-- vec_perm ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_perm(vector signed char __a, vector signed char __b,
- vector unsigned char __c) {
- return (vector signed char)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector signed char
+vec_perm(__vector signed char __a, __vector signed char __b,
+ __vector unsigned char __c) {
+ return (__vector signed char)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_perm(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
- return (vector unsigned char)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_perm(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
+ return (__vector unsigned char)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
-static inline __ATTRS_o_ai vector bool char
-vec_perm(vector bool char __a, vector bool char __b,
- vector unsigned char __c) {
- return (vector bool char)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector __bool char
+vec_perm(__vector __bool char __a, __vector __bool char __b,
+ __vector unsigned char __c) {
+ return (__vector __bool char)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
-static inline __ATTRS_o_ai vector signed short
-vec_perm(vector signed short __a, vector signed short __b,
- vector unsigned char __c) {
- return (vector signed short)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector signed short
+vec_perm(__vector signed short __a, __vector signed short __b,
+ __vector unsigned char __c) {
+ return (__vector signed short)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_perm(vector unsigned short __a, vector unsigned short __b,
- vector unsigned char __c) {
- return (vector unsigned short)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_perm(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned char __c) {
+ return (__vector unsigned short)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
-static inline __ATTRS_o_ai vector bool short
-vec_perm(vector bool short __a, vector bool short __b,
- vector unsigned char __c) {
- return (vector bool short)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector __bool short
+vec_perm(__vector __bool short __a, __vector __bool short __b,
+ __vector unsigned char __c) {
+ return (__vector __bool short)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
-static inline __ATTRS_o_ai vector signed int
-vec_perm(vector signed int __a, vector signed int __b,
- vector unsigned char __c) {
- return (vector signed int)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector signed int
+vec_perm(__vector signed int __a, __vector signed int __b,
+ __vector unsigned char __c) {
+ return (__vector signed int)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_perm(vector unsigned int __a, vector unsigned int __b,
- vector unsigned char __c) {
- return (vector unsigned int)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_perm(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned char __c) {
+ return (__vector unsigned int)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
-static inline __ATTRS_o_ai vector bool int
-vec_perm(vector bool int __a, vector bool int __b,
- vector unsigned char __c) {
- return (vector bool int)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector __bool int
+vec_perm(__vector __bool int __a, __vector __bool int __b,
+ __vector unsigned char __c) {
+ return (__vector __bool int)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_perm(vector signed long long __a, vector signed long long __b,
- vector unsigned char __c) {
- return (vector signed long long)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector signed long long
+vec_perm(__vector signed long long __a, __vector signed long long __b,
+ __vector unsigned char __c) {
+ return (__vector signed long long)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_perm(vector unsigned long long __a, vector unsigned long long __b,
- vector unsigned char __c) {
- return (vector unsigned long long)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_perm(__vector unsigned long long __a, __vector unsigned long long __b,
+ __vector unsigned char __c) {
+ return (__vector unsigned long long)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_perm(vector bool long long __a, vector bool long long __b,
- vector unsigned char __c) {
- return (vector bool long long)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_perm(__vector __bool long long __a, __vector __bool long long __b,
+ __vector unsigned char __c) {
+ return (__vector __bool long long)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_perm(vector float __a, vector float __b,
- vector unsigned char __c) {
- return (vector float)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector float
+vec_perm(__vector float __a, __vector float __b,
+ __vector unsigned char __c) {
+ return (__vector float)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_perm(vector double __a, vector double __b,
- vector unsigned char __c) {
- return (vector double)__builtin_s390_vperm(
- (vector unsigned char)__a, (vector unsigned char)__b, __c);
+static inline __ATTRS_o_ai __vector double
+vec_perm(__vector double __a, __vector double __b,
+ __vector unsigned char __c) {
+ return (__vector double)__builtin_s390_vperm(
+ (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
}
/*-- vec_permi --------------------------------------------------------------*/
// This prototype is deprecated.
-extern __ATTRS_o vector signed long long
-vec_permi(vector signed long long __a, vector signed long long __b, int __c)
+extern __ATTRS_o __vector signed long long
+vec_permi(__vector signed long long __a, __vector signed long long __b,
+ int __c)
__constant_range(__c, 0, 3);
// This prototype is deprecated.
-extern __ATTRS_o vector unsigned long long
-vec_permi(vector unsigned long long __a, vector unsigned long long __b, int __c)
+extern __ATTRS_o __vector unsigned long long
+vec_permi(__vector unsigned long long __a, __vector unsigned long long __b,
+ int __c)
__constant_range(__c, 0, 3);
// This prototype is deprecated.
-extern __ATTRS_o vector bool long long
-vec_permi(vector bool long long __a, vector bool long long __b, int __c)
+extern __ATTRS_o __vector __bool long long
+vec_permi(__vector __bool long long __a, __vector __bool long long __b,
+ int __c)
__constant_range(__c, 0, 3);
// This prototype is deprecated.
-extern __ATTRS_o vector double
-vec_permi(vector double __a, vector double __b, int __c)
+extern __ATTRS_o __vector double
+vec_permi(__vector double __a, __vector double __b, int __c)
__constant_range(__c, 0, 3);
#define vec_permi(X, Y, Z) ((__typeof__((vec_permi)((X), (Y), (Z)))) \
- __builtin_s390_vpdi((vector unsigned long long)(X), \
- (vector unsigned long long)(Y), \
+ __builtin_s390_vpdi((__vector unsigned long long)(X), \
+ (__vector unsigned long long)(Y), \
(((Z) & 2) << 1) | ((Z) & 1)))
/*-- vec_bperm_u128 ---------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_ai vector unsigned long long
-vec_bperm_u128(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_ai __vector unsigned long long
+vec_bperm_u128(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vbperm(__a, __b);
}
#endif
/*-- vec_revb ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed short
-vec_revb(vector signed short __vec) {
- return (vector signed short)
- __builtin_s390_vlbrh((vector unsigned short)__vec);
+static inline __ATTRS_o_ai __vector signed short
+vec_revb(__vector signed short __vec) {
+ return (__vector signed short)
+ __builtin_s390_vlbrh((__vector unsigned short)__vec);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_revb(vector unsigned short __vec) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_revb(__vector unsigned short __vec) {
return __builtin_s390_vlbrh(__vec);
}
-static inline __ATTRS_o_ai vector signed int
-vec_revb(vector signed int __vec) {
- return (vector signed int)
- __builtin_s390_vlbrf((vector unsigned int)__vec);
+static inline __ATTRS_o_ai __vector signed int
+vec_revb(__vector signed int __vec) {
+ return (__vector signed int)
+ __builtin_s390_vlbrf((__vector unsigned int)__vec);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_revb(vector unsigned int __vec) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_revb(__vector unsigned int __vec) {
return __builtin_s390_vlbrf(__vec);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_revb(vector signed long long __vec) {
- return (vector signed long long)
- __builtin_s390_vlbrg((vector unsigned long long)__vec);
+static inline __ATTRS_o_ai __vector signed long long
+vec_revb(__vector signed long long __vec) {
+ return (__vector signed long long)
+ __builtin_s390_vlbrg((__vector unsigned long long)__vec);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_revb(vector unsigned long long __vec) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_revb(__vector unsigned long long __vec) {
return __builtin_s390_vlbrg(__vec);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_revb(vector float __vec) {
- return (vector float)
- __builtin_s390_vlbrf((vector unsigned int)__vec);
+static inline __ATTRS_o_ai __vector float
+vec_revb(__vector float __vec) {
+ return (__vector float)
+ __builtin_s390_vlbrf((__vector unsigned int)__vec);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_revb(vector double __vec) {
- return (vector double)
- __builtin_s390_vlbrg((vector unsigned long long)__vec);
+static inline __ATTRS_o_ai __vector double
+vec_revb(__vector double __vec) {
+ return (__vector double)
+ __builtin_s390_vlbrg((__vector unsigned long long)__vec);
}
/*-- vec_reve ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_reve(vector signed char __vec) {
- return (vector signed char) { __vec[15], __vec[14], __vec[13], __vec[12],
- __vec[11], __vec[10], __vec[9], __vec[8],
- __vec[7], __vec[6], __vec[5], __vec[4],
- __vec[3], __vec[2], __vec[1], __vec[0] };
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_reve(vector unsigned char __vec) {
- return (vector unsigned char) { __vec[15], __vec[14], __vec[13], __vec[12],
+static inline __ATTRS_o_ai __vector signed char
+vec_reve(__vector signed char __vec) {
+ return (__vector signed char) { __vec[15], __vec[14], __vec[13], __vec[12],
__vec[11], __vec[10], __vec[9], __vec[8],
__vec[7], __vec[6], __vec[5], __vec[4],
__vec[3], __vec[2], __vec[1], __vec[0] };
}
-static inline __ATTRS_o_ai vector bool char
-vec_reve(vector bool char __vec) {
- return (vector bool char) { __vec[15], __vec[14], __vec[13], __vec[12],
- __vec[11], __vec[10], __vec[9], __vec[8],
- __vec[7], __vec[6], __vec[5], __vec[4],
- __vec[3], __vec[2], __vec[1], __vec[0] };
+static inline __ATTRS_o_ai __vector unsigned char
+vec_reve(__vector unsigned char __vec) {
+ return (__vector unsigned char) { __vec[15], __vec[14], __vec[13], __vec[12],
+ __vec[11], __vec[10], __vec[9], __vec[8],
+ __vec[7], __vec[6], __vec[5], __vec[4],
+ __vec[3], __vec[2], __vec[1], __vec[0] };
}
-static inline __ATTRS_o_ai vector signed short
-vec_reve(vector signed short __vec) {
- return (vector signed short) { __vec[7], __vec[6], __vec[5], __vec[4],
- __vec[3], __vec[2], __vec[1], __vec[0] };
+static inline __ATTRS_o_ai __vector __bool char
+vec_reve(__vector __bool char __vec) {
+ return (__vector __bool char) { __vec[15], __vec[14], __vec[13], __vec[12],
+ __vec[11], __vec[10], __vec[9], __vec[8],
+ __vec[7], __vec[6], __vec[5], __vec[4],
+ __vec[3], __vec[2], __vec[1], __vec[0] };
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_reve(vector unsigned short __vec) {
- return (vector unsigned short) { __vec[7], __vec[6], __vec[5], __vec[4],
+static inline __ATTRS_o_ai __vector signed short
+vec_reve(__vector signed short __vec) {
+ return (__vector signed short) { __vec[7], __vec[6], __vec[5], __vec[4],
__vec[3], __vec[2], __vec[1], __vec[0] };
}
-static inline __ATTRS_o_ai vector bool short
-vec_reve(vector bool short __vec) {
- return (vector bool short) { __vec[7], __vec[6], __vec[5], __vec[4],
- __vec[3], __vec[2], __vec[1], __vec[0] };
+static inline __ATTRS_o_ai __vector unsigned short
+vec_reve(__vector unsigned short __vec) {
+ return (__vector unsigned short) { __vec[7], __vec[6], __vec[5], __vec[4],
+ __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_reve(__vector __bool short __vec) {
+ return (__vector __bool short) { __vec[7], __vec[6], __vec[5], __vec[4],
+ __vec[3], __vec[2], __vec[1], __vec[0] };
}
-static inline __ATTRS_o_ai vector signed int
-vec_reve(vector signed int __vec) {
- return (vector signed int) { __vec[3], __vec[2], __vec[1], __vec[0] };
+static inline __ATTRS_o_ai __vector signed int
+vec_reve(__vector signed int __vec) {
+ return (__vector signed int) { __vec[3], __vec[2], __vec[1], __vec[0] };
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_reve(vector unsigned int __vec) {
- return (vector unsigned int) { __vec[3], __vec[2], __vec[1], __vec[0] };
+static inline __ATTRS_o_ai __vector unsigned int
+vec_reve(__vector unsigned int __vec) {
+ return (__vector unsigned int) { __vec[3], __vec[2], __vec[1], __vec[0] };
}
-static inline __ATTRS_o_ai vector bool int
-vec_reve(vector bool int __vec) {
- return (vector bool int) { __vec[3], __vec[2], __vec[1], __vec[0] };
+static inline __ATTRS_o_ai __vector __bool int
+vec_reve(__vector __bool int __vec) {
+ return (__vector __bool int) { __vec[3], __vec[2], __vec[1], __vec[0] };
}
-static inline __ATTRS_o_ai vector signed long long
-vec_reve(vector signed long long __vec) {
- return (vector signed long long) { __vec[1], __vec[0] };
+static inline __ATTRS_o_ai __vector signed long long
+vec_reve(__vector signed long long __vec) {
+ return (__vector signed long long) { __vec[1], __vec[0] };
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_reve(vector unsigned long long __vec) {
- return (vector unsigned long long) { __vec[1], __vec[0] };
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_reve(__vector unsigned long long __vec) {
+ return (__vector unsigned long long) { __vec[1], __vec[0] };
}
-static inline __ATTRS_o_ai vector bool long long
-vec_reve(vector bool long long __vec) {
- return (vector bool long long) { __vec[1], __vec[0] };
+static inline __ATTRS_o_ai __vector __bool long long
+vec_reve(__vector __bool long long __vec) {
+ return (__vector __bool long long) { __vec[1], __vec[0] };
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_reve(vector float __vec) {
- return (vector float) { __vec[3], __vec[2], __vec[1], __vec[0] };
+static inline __ATTRS_o_ai __vector float
+vec_reve(__vector float __vec) {
+ return (__vector float) { __vec[3], __vec[2], __vec[1], __vec[0] };
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_reve(vector double __vec) {
- return (vector double) { __vec[1], __vec[0] };
+static inline __ATTRS_o_ai __vector double
+vec_reve(__vector double __vec) {
+ return (__vector double) { __vec[1], __vec[0] };
}
/*-- vec_sel ----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_sel(vector signed char __a, vector signed char __b,
- vector unsigned char __c) {
- return ((vector signed char)__c & __b) | (~(vector signed char)__c & __a);
+static inline __ATTRS_o_ai __vector signed char
+vec_sel(__vector signed char __a, __vector signed char __b,
+ __vector unsigned char __c) {
+ return (((__vector signed char)__c & __b) |
+ (~(__vector signed char)__c & __a));
}
-static inline __ATTRS_o_ai vector signed char
-vec_sel(vector signed char __a, vector signed char __b, vector bool char __c) {
- return ((vector signed char)__c & __b) | (~(vector signed char)__c & __a);
+static inline __ATTRS_o_ai __vector signed char
+vec_sel(__vector signed char __a, __vector signed char __b,
+ __vector __bool char __c) {
+ return (((__vector signed char)__c & __b) |
+ (~(__vector signed char)__c & __a));
}
-static inline __ATTRS_o_ai vector bool char
-vec_sel(vector bool char __a, vector bool char __b, vector unsigned char __c) {
- return ((vector bool char)__c & __b) | (~(vector bool char)__c & __a);
+static inline __ATTRS_o_ai __vector __bool char
+vec_sel(__vector __bool char __a, __vector __bool char __b,
+ __vector unsigned char __c) {
+ return (((__vector __bool char)__c & __b) |
+ (~(__vector __bool char)__c & __a));
}
-static inline __ATTRS_o_ai vector bool char
-vec_sel(vector bool char __a, vector bool char __b, vector bool char __c) {
+static inline __ATTRS_o_ai __vector __bool char
+vec_sel(__vector __bool char __a, __vector __bool char __b,
+ __vector __bool char __c) {
return (__c & __b) | (~__c & __a);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_sel(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sel(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
return (__c & __b) | (~__c & __a);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_sel(vector unsigned char __a, vector unsigned char __b,
- vector bool char __c) {
- return ((vector unsigned char)__c & __b) | (~(vector unsigned char)__c & __a);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sel(__vector unsigned char __a, __vector unsigned char __b,
+ __vector __bool char __c) {
+ return (((__vector unsigned char)__c & __b) |
+ (~(__vector unsigned char)__c & __a));
}
-static inline __ATTRS_o_ai vector signed short
-vec_sel(vector signed short __a, vector signed short __b,
- vector unsigned short __c) {
- return ((vector signed short)__c & __b) | (~(vector signed short)__c & __a);
+static inline __ATTRS_o_ai __vector signed short
+vec_sel(__vector signed short __a, __vector signed short __b,
+ __vector unsigned short __c) {
+ return (((__vector signed short)__c & __b) |
+ (~(__vector signed short)__c & __a));
}
-static inline __ATTRS_o_ai vector signed short
-vec_sel(vector signed short __a, vector signed short __b,
- vector bool short __c) {
- return ((vector signed short)__c & __b) | (~(vector signed short)__c & __a);
+static inline __ATTRS_o_ai __vector signed short
+vec_sel(__vector signed short __a, __vector signed short __b,
+ __vector __bool short __c) {
+ return (((__vector signed short)__c & __b) |
+ (~(__vector signed short)__c & __a));
}
-static inline __ATTRS_o_ai vector bool short
-vec_sel(vector bool short __a, vector bool short __b,
- vector unsigned short __c) {
- return ((vector bool short)__c & __b) | (~(vector bool short)__c & __a);
+static inline __ATTRS_o_ai __vector __bool short
+vec_sel(__vector __bool short __a, __vector __bool short __b,
+ __vector unsigned short __c) {
+ return (((__vector __bool short)__c & __b) |
+ (~(__vector __bool short)__c & __a));
}
-static inline __ATTRS_o_ai vector bool short
-vec_sel(vector bool short __a, vector bool short __b, vector bool short __c) {
+static inline __ATTRS_o_ai __vector __bool short
+vec_sel(__vector __bool short __a, __vector __bool short __b,
+ __vector __bool short __c) {
return (__c & __b) | (~__c & __a);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_sel(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sel(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c) {
return (__c & __b) | (~__c & __a);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_sel(vector unsigned short __a, vector unsigned short __b,
- vector bool short __c) {
- return (((vector unsigned short)__c & __b) |
- (~(vector unsigned short)__c & __a));
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sel(__vector unsigned short __a, __vector unsigned short __b,
+ __vector __bool short __c) {
+ return (((__vector unsigned short)__c & __b) |
+ (~(__vector unsigned short)__c & __a));
}
-static inline __ATTRS_o_ai vector signed int
-vec_sel(vector signed int __a, vector signed int __b,
- vector unsigned int __c) {
- return ((vector signed int)__c & __b) | (~(vector signed int)__c & __a);
+static inline __ATTRS_o_ai __vector signed int
+vec_sel(__vector signed int __a, __vector signed int __b,
+ __vector unsigned int __c) {
+ return (((__vector signed int)__c & __b) |
+ (~(__vector signed int)__c & __a));
}
-static inline __ATTRS_o_ai vector signed int
-vec_sel(vector signed int __a, vector signed int __b, vector bool int __c) {
- return ((vector signed int)__c & __b) | (~(vector signed int)__c & __a);
+static inline __ATTRS_o_ai __vector signed int
+vec_sel(__vector signed int __a, __vector signed int __b,
+ __vector __bool int __c) {
+ return (((__vector signed int)__c & __b) |
+ (~(__vector signed int)__c & __a));
}
-static inline __ATTRS_o_ai vector bool int
-vec_sel(vector bool int __a, vector bool int __b, vector unsigned int __c) {
- return ((vector bool int)__c & __b) | (~(vector bool int)__c & __a);
+static inline __ATTRS_o_ai __vector __bool int
+vec_sel(__vector __bool int __a, __vector __bool int __b,
+ __vector unsigned int __c) {
+ return (((__vector __bool int)__c & __b) |
+ (~(__vector __bool int)__c & __a));
}
-static inline __ATTRS_o_ai vector bool int
-vec_sel(vector bool int __a, vector bool int __b, vector bool int __c) {
+static inline __ATTRS_o_ai __vector __bool int
+vec_sel(__vector __bool int __a, __vector __bool int __b,
+ __vector __bool int __c) {
return (__c & __b) | (~__c & __a);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_sel(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sel(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c) {
return (__c & __b) | (~__c & __a);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_sel(vector unsigned int __a, vector unsigned int __b, vector bool int __c) {
- return ((vector unsigned int)__c & __b) | (~(vector unsigned int)__c & __a);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sel(__vector unsigned int __a, __vector unsigned int __b,
+ __vector __bool int __c) {
+ return (((__vector unsigned int)__c & __b) |
+ (~(__vector unsigned int)__c & __a));
}
-static inline __ATTRS_o_ai vector signed long long
-vec_sel(vector signed long long __a, vector signed long long __b,
- vector unsigned long long __c) {
- return (((vector signed long long)__c & __b) |
- (~(vector signed long long)__c & __a));
+static inline __ATTRS_o_ai __vector signed long long
+vec_sel(__vector signed long long __a, __vector signed long long __b,
+ __vector unsigned long long __c) {
+ return (((__vector signed long long)__c & __b) |
+ (~(__vector signed long long)__c & __a));
}
-static inline __ATTRS_o_ai vector signed long long
-vec_sel(vector signed long long __a, vector signed long long __b,
- vector bool long long __c) {
- return (((vector signed long long)__c & __b) |
- (~(vector signed long long)__c & __a));
+static inline __ATTRS_o_ai __vector signed long long
+vec_sel(__vector signed long long __a, __vector signed long long __b,
+ __vector __bool long long __c) {
+ return (((__vector signed long long)__c & __b) |
+ (~(__vector signed long long)__c & __a));
}
-static inline __ATTRS_o_ai vector bool long long
-vec_sel(vector bool long long __a, vector bool long long __b,
- vector unsigned long long __c) {
- return (((vector bool long long)__c & __b) |
- (~(vector bool long long)__c & __a));
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sel(__vector __bool long long __a, __vector __bool long long __b,
+ __vector unsigned long long __c) {
+ return (((__vector __bool long long)__c & __b) |
+ (~(__vector __bool long long)__c & __a));
}
-static inline __ATTRS_o_ai vector bool long long
-vec_sel(vector bool long long __a, vector bool long long __b,
- vector bool long long __c) {
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sel(__vector __bool long long __a, __vector __bool long long __b,
+ __vector __bool long long __c) {
return (__c & __b) | (~__c & __a);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sel(vector unsigned long long __a, vector unsigned long long __b,
- vector unsigned long long __c) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sel(__vector unsigned long long __a, __vector unsigned long long __b,
+ __vector unsigned long long __c) {
return (__c & __b) | (~__c & __a);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sel(vector unsigned long long __a, vector unsigned long long __b,
- vector bool long long __c) {
- return (((vector unsigned long long)__c & __b) |
- (~(vector unsigned long long)__c & __a));
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sel(__vector unsigned long long __a, __vector unsigned long long __b,
+ __vector __bool long long __c) {
+ return (((__vector unsigned long long)__c & __b) |
+ (~(__vector unsigned long long)__c & __a));
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_sel(vector float __a, vector float __b, vector unsigned int __c) {
- return (vector float)((__c & (vector unsigned int)__b) |
- (~__c & (vector unsigned int)__a));
+static inline __ATTRS_o_ai __vector float
+vec_sel(__vector float __a, __vector float __b, __vector unsigned int __c) {
+ return (__vector float)((__c & (__vector unsigned int)__b) |
+ (~__c & (__vector unsigned int)__a));
}
-static inline __ATTRS_o_ai vector float
-vec_sel(vector float __a, vector float __b, vector bool int __c) {
- vector unsigned int __ac = (vector unsigned int)__a;
- vector unsigned int __bc = (vector unsigned int)__b;
- vector unsigned int __cc = (vector unsigned int)__c;
- return (vector float)((__cc & __bc) | (~__cc & __ac));
+static inline __ATTRS_o_ai __vector float
+vec_sel(__vector float __a, __vector float __b, __vector __bool int __c) {
+ __vector unsigned int __ac = (__vector unsigned int)__a;
+ __vector unsigned int __bc = (__vector unsigned int)__b;
+ __vector unsigned int __cc = (__vector unsigned int)__c;
+ return (__vector float)((__cc & __bc) | (~__cc & __ac));
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_sel(vector double __a, vector double __b, vector unsigned long long __c) {
- return (vector double)((__c & (vector unsigned long long)__b) |
- (~__c & (vector unsigned long long)__a));
+static inline __ATTRS_o_ai __vector double
+vec_sel(__vector double __a, __vector double __b,
+ __vector unsigned long long __c) {
+ return (__vector double)((__c & (__vector unsigned long long)__b) |
+ (~__c & (__vector unsigned long long)__a));
}
-static inline __ATTRS_o_ai vector double
-vec_sel(vector double __a, vector double __b, vector bool long long __c) {
- vector unsigned long long __ac = (vector unsigned long long)__a;
- vector unsigned long long __bc = (vector unsigned long long)__b;
- vector unsigned long long __cc = (vector unsigned long long)__c;
- return (vector double)((__cc & __bc) | (~__cc & __ac));
+static inline __ATTRS_o_ai __vector double
+vec_sel(__vector double __a, __vector double __b,
+ __vector __bool long long __c) {
+ __vector unsigned long long __ac = (__vector unsigned long long)__a;
+ __vector unsigned long long __bc = (__vector unsigned long long)__b;
+ __vector unsigned long long __cc = (__vector unsigned long long)__c;
+ return (__vector double)((__cc & __bc) | (~__cc & __ac));
}
/*-- vec_gather_element -----------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed int
-vec_gather_element(vector signed int __vec, vector unsigned int __offset,
+static inline __ATTRS_o_ai __vector signed int
+vec_gather_element(__vector signed int __vec,
+ __vector unsigned int __offset,
const signed int *__ptr, int __index)
__constant_range(__index, 0, 3) {
__vec[__index] = *(const signed int *)(
- (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ (const char *)__ptr + __offset[__index]);
return __vec;
}
-static inline __ATTRS_o_ai vector bool int
-vec_gather_element(vector bool int __vec, vector unsigned int __offset,
+static inline __ATTRS_o_ai __vector __bool int
+vec_gather_element(__vector __bool int __vec,
+ __vector unsigned int __offset,
const unsigned int *__ptr, int __index)
__constant_range(__index, 0, 3) {
__vec[__index] = *(const unsigned int *)(
- (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ (const char *)__ptr + __offset[__index]);
return __vec;
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_gather_element(vector unsigned int __vec, vector unsigned int __offset,
+static inline __ATTRS_o_ai __vector unsigned int
+vec_gather_element(__vector unsigned int __vec,
+ __vector unsigned int __offset,
const unsigned int *__ptr, int __index)
__constant_range(__index, 0, 3) {
__vec[__index] = *(const unsigned int *)(
- (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ (const char *)__ptr + __offset[__index]);
return __vec;
}
-static inline __ATTRS_o_ai vector signed long long
-vec_gather_element(vector signed long long __vec,
- vector unsigned long long __offset,
+static inline __ATTRS_o_ai __vector signed long long
+vec_gather_element(__vector signed long long __vec,
+ __vector unsigned long long __offset,
const signed long long *__ptr, int __index)
__constant_range(__index, 0, 1) {
__vec[__index] = *(const signed long long *)(
- (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ (const char *)__ptr + __offset[__index]);
return __vec;
}
-static inline __ATTRS_o_ai vector bool long long
-vec_gather_element(vector bool long long __vec,
- vector unsigned long long __offset,
+static inline __ATTRS_o_ai __vector __bool long long
+vec_gather_element(__vector __bool long long __vec,
+ __vector unsigned long long __offset,
const unsigned long long *__ptr, int __index)
__constant_range(__index, 0, 1) {
__vec[__index] = *(const unsigned long long *)(
- (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ (const char *)__ptr + __offset[__index]);
return __vec;
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_gather_element(vector unsigned long long __vec,
- vector unsigned long long __offset,
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_gather_element(__vector unsigned long long __vec,
+ __vector unsigned long long __offset,
const unsigned long long *__ptr, int __index)
__constant_range(__index, 0, 1) {
__vec[__index] = *(const unsigned long long *)(
- (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ (const char *)__ptr + __offset[__index]);
return __vec;
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_gather_element(vector float __vec, vector unsigned int __offset,
+static inline __ATTRS_o_ai __vector float
+vec_gather_element(__vector float __vec,
+ __vector unsigned int __offset,
const float *__ptr, int __index)
__constant_range(__index, 0, 3) {
__vec[__index] = *(const float *)(
- (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ (const char *)__ptr + __offset[__index]);
return __vec;
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_gather_element(vector double __vec, vector unsigned long long __offset,
+static inline __ATTRS_o_ai __vector double
+vec_gather_element(__vector double __vec,
+ __vector unsigned long long __offset,
const double *__ptr, int __index)
__constant_range(__index, 0, 1) {
__vec[__index] = *(const double *)(
- (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
+ (const char *)__ptr + __offset[__index]);
return __vec;
}
/*-- vec_scatter_element ----------------------------------------------------*/
static inline __ATTRS_o_ai void
-vec_scatter_element(vector signed int __vec, vector unsigned int __offset,
+vec_scatter_element(__vector signed int __vec,
+ __vector unsigned int __offset,
signed int *__ptr, int __index)
__constant_range(__index, 0, 3) {
- *(signed int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ *(signed int *)((char *)__ptr + __offset[__index]) =
__vec[__index];
}
static inline __ATTRS_o_ai void
-vec_scatter_element(vector bool int __vec, vector unsigned int __offset,
+vec_scatter_element(__vector __bool int __vec,
+ __vector unsigned int __offset,
unsigned int *__ptr, int __index)
__constant_range(__index, 0, 3) {
- *(unsigned int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ *(unsigned int *)((char *)__ptr + __offset[__index]) =
__vec[__index];
}
static inline __ATTRS_o_ai void
-vec_scatter_element(vector unsigned int __vec, vector unsigned int __offset,
+vec_scatter_element(__vector unsigned int __vec,
+ __vector unsigned int __offset,
unsigned int *__ptr, int __index)
__constant_range(__index, 0, 3) {
- *(unsigned int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ *(unsigned int *)((char *)__ptr + __offset[__index]) =
__vec[__index];
}
static inline __ATTRS_o_ai void
-vec_scatter_element(vector signed long long __vec,
- vector unsigned long long __offset,
+vec_scatter_element(__vector signed long long __vec,
+ __vector unsigned long long __offset,
signed long long *__ptr, int __index)
__constant_range(__index, 0, 1) {
- *(signed long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ *(signed long long *)((char *)__ptr + __offset[__index]) =
__vec[__index];
}
static inline __ATTRS_o_ai void
-vec_scatter_element(vector bool long long __vec,
- vector unsigned long long __offset,
+vec_scatter_element(__vector __bool long long __vec,
+ __vector unsigned long long __offset,
unsigned long long *__ptr, int __index)
__constant_range(__index, 0, 1) {
- *(unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ *(unsigned long long *)((char *)__ptr + __offset[__index]) =
__vec[__index];
}
static inline __ATTRS_o_ai void
-vec_scatter_element(vector unsigned long long __vec,
- vector unsigned long long __offset,
+vec_scatter_element(__vector unsigned long long __vec,
+ __vector unsigned long long __offset,
unsigned long long *__ptr, int __index)
__constant_range(__index, 0, 1) {
- *(unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ *(unsigned long long *)((char *)__ptr + __offset[__index]) =
__vec[__index];
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai void
-vec_scatter_element(vector float __vec, vector unsigned int __offset,
+vec_scatter_element(__vector float __vec,
+ __vector unsigned int __offset,
float *__ptr, int __index)
__constant_range(__index, 0, 3) {
- *(float *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ *(float *)((char *)__ptr + __offset[__index]) =
__vec[__index];
}
#endif
static inline __ATTRS_o_ai void
-vec_scatter_element(vector double __vec, vector unsigned long long __offset,
+vec_scatter_element(__vector double __vec,
+ __vector unsigned long long __offset,
double *__ptr, int __index)
__constant_range(__index, 0, 1) {
- *(double *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
+ *(double *)((char *)__ptr + __offset[__index]) =
__vec[__index];
}
/*-- vec_xl -----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
+static inline __ATTRS_o_ai __vector signed char
vec_xl(long __offset, const signed char *__ptr) {
- return *(const vector signed char *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector signed char *)
+ ((const char *)__ptr + __offset);
}
-static inline __ATTRS_o_ai vector unsigned char
+static inline __ATTRS_o_ai __vector unsigned char
vec_xl(long __offset, const unsigned char *__ptr) {
- return *(const vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector unsigned char *)
+ ((const char *)__ptr + __offset);
}
-static inline __ATTRS_o_ai vector signed short
+static inline __ATTRS_o_ai __vector signed short
vec_xl(long __offset, const signed short *__ptr) {
- return *(const vector signed short *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector signed short *)
+ ((const char *)__ptr + __offset);
}
-static inline __ATTRS_o_ai vector unsigned short
+static inline __ATTRS_o_ai __vector unsigned short
vec_xl(long __offset, const unsigned short *__ptr) {
- return *(const vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector unsigned short *)
+ ((const char *)__ptr + __offset);
}
-static inline __ATTRS_o_ai vector signed int
+static inline __ATTRS_o_ai __vector signed int
vec_xl(long __offset, const signed int *__ptr) {
- return *(const vector signed int *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector signed int *)
+ ((const char *)__ptr + __offset);
}
-static inline __ATTRS_o_ai vector unsigned int
+static inline __ATTRS_o_ai __vector unsigned int
vec_xl(long __offset, const unsigned int *__ptr) {
- return *(const vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector unsigned int *)
+ ((const char *)__ptr + __offset);
}
-static inline __ATTRS_o_ai vector signed long long
+static inline __ATTRS_o_ai __vector signed long long
vec_xl(long __offset, const signed long long *__ptr) {
- return *(const vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector signed long long *)
+ ((const char *)__ptr + __offset);
}
-static inline __ATTRS_o_ai vector unsigned long long
+static inline __ATTRS_o_ai __vector unsigned long long
vec_xl(long __offset, const unsigned long long *__ptr) {
- return *(const vector unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector unsigned long long *)
+ ((const char *)__ptr + __offset);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
+static inline __ATTRS_o_ai __vector float
vec_xl(long __offset, const float *__ptr) {
- return *(const vector float *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector float *)
+ ((const char *)__ptr + __offset);
}
#endif
-static inline __ATTRS_o_ai vector double
+static inline __ATTRS_o_ai __vector double
vec_xl(long __offset, const double *__ptr) {
- return *(const vector double *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector double *)
+ ((const char *)__ptr + __offset);
}
/*-- vec_xld2 ---------------------------------------------------------------*/
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
+static inline __ATTRS_o_ai __vector signed char
vec_xld2(long __offset, const signed char *__ptr) {
- return *(const vector signed char *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector signed char *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
+static inline __ATTRS_o_ai __vector unsigned char
vec_xld2(long __offset, const unsigned char *__ptr) {
- return *(const vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector unsigned char *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
+static inline __ATTRS_o_ai __vector signed short
vec_xld2(long __offset, const signed short *__ptr) {
- return *(const vector signed short *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector signed short *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
+static inline __ATTRS_o_ai __vector unsigned short
vec_xld2(long __offset, const unsigned short *__ptr) {
- return *(const vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector unsigned short *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
+static inline __ATTRS_o_ai __vector signed int
vec_xld2(long __offset, const signed int *__ptr) {
- return *(const vector signed int *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector signed int *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
+static inline __ATTRS_o_ai __vector unsigned int
vec_xld2(long __offset, const unsigned int *__ptr) {
- return *(const vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector unsigned int *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
+static inline __ATTRS_o_ai __vector signed long long
vec_xld2(long __offset, const signed long long *__ptr) {
- return *(const vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector signed long long *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
+static inline __ATTRS_o_ai __vector unsigned long long
vec_xld2(long __offset, const unsigned long long *__ptr) {
- return *(const vector unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector unsigned long long *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
+static inline __ATTRS_o_ai __vector double
vec_xld2(long __offset, const double *__ptr) {
- return *(const vector double *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector double *)
+ ((const char *)__ptr + __offset);
}
/*-- vec_xlw4 ---------------------------------------------------------------*/
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
+static inline __ATTRS_o_ai __vector signed char
vec_xlw4(long __offset, const signed char *__ptr) {
- return *(const vector signed char *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector signed char *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
+static inline __ATTRS_o_ai __vector unsigned char
vec_xlw4(long __offset, const unsigned char *__ptr) {
- return *(const vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector unsigned char *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
+static inline __ATTRS_o_ai __vector signed short
vec_xlw4(long __offset, const signed short *__ptr) {
- return *(const vector signed short *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector signed short *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
+static inline __ATTRS_o_ai __vector unsigned short
vec_xlw4(long __offset, const unsigned short *__ptr) {
- return *(const vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector unsigned short *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
+static inline __ATTRS_o_ai __vector signed int
vec_xlw4(long __offset, const signed int *__ptr) {
- return *(const vector signed int *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector signed int *)
+ ((const char *)__ptr + __offset);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
+static inline __ATTRS_o_ai __vector unsigned int
vec_xlw4(long __offset, const unsigned int *__ptr) {
- return *(const vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset);
+ return *(const __vector unsigned int *)
+ ((const char *)__ptr + __offset);
}
/*-- vec_xst ----------------------------------------------------------------*/
static inline __ATTRS_o_ai void
-vec_xst(vector signed char __vec, long __offset, signed char *__ptr) {
- *(vector signed char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xst(__vector signed char __vec, long __offset, signed char *__ptr) {
+ *(__vector signed char *)((char *)__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void
-vec_xst(vector unsigned char __vec, long __offset, unsigned char *__ptr) {
- *(vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xst(__vector unsigned char __vec, long __offset, unsigned char *__ptr) {
+ *(__vector unsigned char *)((char *)__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void
-vec_xst(vector signed short __vec, long __offset, signed short *__ptr) {
- *(vector signed short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xst(__vector signed short __vec, long __offset, signed short *__ptr) {
+ *(__vector signed short *)((char *)__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void
-vec_xst(vector unsigned short __vec, long __offset, unsigned short *__ptr) {
- *(vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xst(__vector unsigned short __vec, long __offset, unsigned short *__ptr) {
+ *(__vector unsigned short *)((char *)__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void
-vec_xst(vector signed int __vec, long __offset, signed int *__ptr) {
- *(vector signed int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xst(__vector signed int __vec, long __offset, signed int *__ptr) {
+ *(__vector signed int *)((char *)__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void
-vec_xst(vector unsigned int __vec, long __offset, unsigned int *__ptr) {
- *(vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xst(__vector unsigned int __vec, long __offset, unsigned int *__ptr) {
+ *(__vector unsigned int *)((char *)__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void
-vec_xst(vector signed long long __vec, long __offset,
+vec_xst(__vector signed long long __vec, long __offset,
signed long long *__ptr) {
- *(vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+ *(__vector signed long long *)((char *)__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void
-vec_xst(vector unsigned long long __vec, long __offset,
+vec_xst(__vector unsigned long long __vec, long __offset,
unsigned long long *__ptr) {
- *(vector unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset) =
- __vec;
+ *(__vector unsigned long long *)((char *)__ptr + __offset) = __vec;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai void
-vec_xst(vector float __vec, long __offset, float *__ptr) {
- *(vector float *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xst(__vector float __vec, long __offset, float *__ptr) {
+ *(__vector float *)((char *)__ptr + __offset) = __vec;
}
#endif
static inline __ATTRS_o_ai void
-vec_xst(vector double __vec, long __offset, double *__ptr) {
- *(vector double *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xst(__vector double __vec, long __offset, double *__ptr) {
+ *(__vector double *)((char *)__ptr + __offset) = __vec;
}
/*-- vec_xstd2 --------------------------------------------------------------*/
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstd2(vector signed char __vec, long __offset, signed char *__ptr) {
- *(vector signed char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstd2(__vector signed char __vec, long __offset, signed char *__ptr) {
+ *(__vector signed char *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstd2(vector unsigned char __vec, long __offset, unsigned char *__ptr) {
- *(vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstd2(__vector unsigned char __vec, long __offset, unsigned char *__ptr) {
+ *(__vector unsigned char *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstd2(vector signed short __vec, long __offset, signed short *__ptr) {
- *(vector signed short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstd2(__vector signed short __vec, long __offset, signed short *__ptr) {
+ *(__vector signed short *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstd2(vector unsigned short __vec, long __offset, unsigned short *__ptr) {
- *(vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstd2(__vector unsigned short __vec, long __offset, unsigned short *__ptr) {
+ *(__vector unsigned short *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstd2(vector signed int __vec, long __offset, signed int *__ptr) {
- *(vector signed int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstd2(__vector signed int __vec, long __offset, signed int *__ptr) {
+ *(__vector signed int *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstd2(vector unsigned int __vec, long __offset, unsigned int *__ptr) {
- *(vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstd2(__vector unsigned int __vec, long __offset, unsigned int *__ptr) {
+ *(__vector unsigned int *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstd2(vector signed long long __vec, long __offset,
+vec_xstd2(__vector signed long long __vec, long __offset,
signed long long *__ptr) {
- *(vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+ *(__vector signed long long *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstd2(vector unsigned long long __vec, long __offset,
+vec_xstd2(__vector unsigned long long __vec, long __offset,
unsigned long long *__ptr) {
- *(vector unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset) =
- __vec;
+ *(__vector unsigned long long *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstd2(vector double __vec, long __offset, double *__ptr) {
- *(vector double *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstd2(__vector double __vec, long __offset, double *__ptr) {
+ *(__vector double *)((char *)__ptr + __offset) = __vec;
}
/*-- vec_xstw4 --------------------------------------------------------------*/
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstw4(vector signed char __vec, long __offset, signed char *__ptr) {
- *(vector signed char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstw4(__vector signed char __vec, long __offset, signed char *__ptr) {
+ *(__vector signed char *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstw4(vector unsigned char __vec, long __offset, unsigned char *__ptr) {
- *(vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstw4(__vector unsigned char __vec, long __offset, unsigned char *__ptr) {
+ *(__vector unsigned char *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstw4(vector signed short __vec, long __offset, signed short *__ptr) {
- *(vector signed short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstw4(__vector signed short __vec, long __offset, signed short *__ptr) {
+ *(__vector signed short *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstw4(vector unsigned short __vec, long __offset, unsigned short *__ptr) {
- *(vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstw4(__vector unsigned short __vec, long __offset, unsigned short *__ptr) {
+ *(__vector unsigned short *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstw4(vector signed int __vec, long __offset, signed int *__ptr) {
- *(vector signed int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstw4(__vector signed int __vec, long __offset, signed int *__ptr) {
+ *(__vector signed int *)((char *)__ptr + __offset) = __vec;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
-vec_xstw4(vector unsigned int __vec, long __offset, unsigned int *__ptr) {
- *(vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
+vec_xstw4(__vector unsigned int __vec, long __offset, unsigned int *__ptr) {
+ *(__vector unsigned int *)((char *)__ptr + __offset) = __vec;
}
/*-- vec_load_bndry ---------------------------------------------------------*/
-extern __ATTRS_o vector signed char
+extern __ATTRS_o __vector signed char
vec_load_bndry(const signed char *__ptr, unsigned short __len)
__constant_pow2_range(__len, 64, 4096);
-extern __ATTRS_o vector unsigned char
+extern __ATTRS_o __vector unsigned char
vec_load_bndry(const unsigned char *__ptr, unsigned short __len)
__constant_pow2_range(__len, 64, 4096);
-extern __ATTRS_o vector signed short
+extern __ATTRS_o __vector signed short
vec_load_bndry(const signed short *__ptr, unsigned short __len)
__constant_pow2_range(__len, 64, 4096);
-extern __ATTRS_o vector unsigned short
+extern __ATTRS_o __vector unsigned short
vec_load_bndry(const unsigned short *__ptr, unsigned short __len)
__constant_pow2_range(__len, 64, 4096);
-extern __ATTRS_o vector signed int
+extern __ATTRS_o __vector signed int
vec_load_bndry(const signed int *__ptr, unsigned short __len)
__constant_pow2_range(__len, 64, 4096);
-extern __ATTRS_o vector unsigned int
+extern __ATTRS_o __vector unsigned int
vec_load_bndry(const unsigned int *__ptr, unsigned short __len)
__constant_pow2_range(__len, 64, 4096);
-extern __ATTRS_o vector signed long long
+extern __ATTRS_o __vector signed long long
vec_load_bndry(const signed long long *__ptr, unsigned short __len)
__constant_pow2_range(__len, 64, 4096);
-extern __ATTRS_o vector unsigned long long
+extern __ATTRS_o __vector unsigned long long
vec_load_bndry(const unsigned long long *__ptr, unsigned short __len)
__constant_pow2_range(__len, 64, 4096);
#if __ARCH__ >= 12
-extern __ATTRS_o vector float
+extern __ATTRS_o __vector float
vec_load_bndry(const float *__ptr, unsigned short __len)
__constant_pow2_range(__len, 64, 4096);
#endif
-extern __ATTRS_o vector double
+extern __ATTRS_o __vector double
vec_load_bndry(const double *__ptr, unsigned short __len)
__constant_pow2_range(__len, 64, 4096);
@@ -1332,159 +1392,159 @@ vec_load_bndry(const double *__ptr, unsigned short __len)
/*-- vec_load_len -----------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
+static inline __ATTRS_o_ai __vector signed char
vec_load_len(const signed char *__ptr, unsigned int __len) {
- return (vector signed char)__builtin_s390_vll(__len, __ptr);
+ return (__vector signed char)__builtin_s390_vll(__len, __ptr);
}
-static inline __ATTRS_o_ai vector unsigned char
+static inline __ATTRS_o_ai __vector unsigned char
vec_load_len(const unsigned char *__ptr, unsigned int __len) {
- return (vector unsigned char)__builtin_s390_vll(__len, __ptr);
+ return (__vector unsigned char)__builtin_s390_vll(__len, __ptr);
}
-static inline __ATTRS_o_ai vector signed short
+static inline __ATTRS_o_ai __vector signed short
vec_load_len(const signed short *__ptr, unsigned int __len) {
- return (vector signed short)__builtin_s390_vll(__len, __ptr);
+ return (__vector signed short)__builtin_s390_vll(__len, __ptr);
}
-static inline __ATTRS_o_ai vector unsigned short
+static inline __ATTRS_o_ai __vector unsigned short
vec_load_len(const unsigned short *__ptr, unsigned int __len) {
- return (vector unsigned short)__builtin_s390_vll(__len, __ptr);
+ return (__vector unsigned short)__builtin_s390_vll(__len, __ptr);
}
-static inline __ATTRS_o_ai vector signed int
+static inline __ATTRS_o_ai __vector signed int
vec_load_len(const signed int *__ptr, unsigned int __len) {
- return (vector signed int)__builtin_s390_vll(__len, __ptr);
+ return (__vector signed int)__builtin_s390_vll(__len, __ptr);
}
-static inline __ATTRS_o_ai vector unsigned int
+static inline __ATTRS_o_ai __vector unsigned int
vec_load_len(const unsigned int *__ptr, unsigned int __len) {
- return (vector unsigned int)__builtin_s390_vll(__len, __ptr);
+ return (__vector unsigned int)__builtin_s390_vll(__len, __ptr);
}
-static inline __ATTRS_o_ai vector signed long long
+static inline __ATTRS_o_ai __vector signed long long
vec_load_len(const signed long long *__ptr, unsigned int __len) {
- return (vector signed long long)__builtin_s390_vll(__len, __ptr);
+ return (__vector signed long long)__builtin_s390_vll(__len, __ptr);
}
-static inline __ATTRS_o_ai vector unsigned long long
+static inline __ATTRS_o_ai __vector unsigned long long
vec_load_len(const unsigned long long *__ptr, unsigned int __len) {
- return (vector unsigned long long)__builtin_s390_vll(__len, __ptr);
+ return (__vector unsigned long long)__builtin_s390_vll(__len, __ptr);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
+static inline __ATTRS_o_ai __vector float
vec_load_len(const float *__ptr, unsigned int __len) {
- return (vector float)__builtin_s390_vll(__len, __ptr);
+ return (__vector float)__builtin_s390_vll(__len, __ptr);
}
#endif
-static inline __ATTRS_o_ai vector double
+static inline __ATTRS_o_ai __vector double
vec_load_len(const double *__ptr, unsigned int __len) {
- return (vector double)__builtin_s390_vll(__len, __ptr);
+ return (__vector double)__builtin_s390_vll(__len, __ptr);
}
/*-- vec_load_len_r ---------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_ai vector unsigned char
+static inline __ATTRS_ai __vector unsigned char
vec_load_len_r(const unsigned char *__ptr, unsigned int __len) {
- return (vector unsigned char)__builtin_s390_vlrl(__len, __ptr);
+ return (__vector unsigned char)__builtin_s390_vlrl(__len, __ptr);
}
#endif
/*-- vec_store_len ----------------------------------------------------------*/
static inline __ATTRS_o_ai void
-vec_store_len(vector signed char __vec, signed char *__ptr,
+vec_store_len(__vector signed char __vec, signed char *__ptr,
unsigned int __len) {
- __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+ __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
}
static inline __ATTRS_o_ai void
-vec_store_len(vector unsigned char __vec, unsigned char *__ptr,
+vec_store_len(__vector unsigned char __vec, unsigned char *__ptr,
unsigned int __len) {
- __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+ __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
}
static inline __ATTRS_o_ai void
-vec_store_len(vector signed short __vec, signed short *__ptr,
+vec_store_len(__vector signed short __vec, signed short *__ptr,
unsigned int __len) {
- __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+ __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
}
static inline __ATTRS_o_ai void
-vec_store_len(vector unsigned short __vec, unsigned short *__ptr,
+vec_store_len(__vector unsigned short __vec, unsigned short *__ptr,
unsigned int __len) {
- __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+ __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
}
static inline __ATTRS_o_ai void
-vec_store_len(vector signed int __vec, signed int *__ptr,
+vec_store_len(__vector signed int __vec, signed int *__ptr,
unsigned int __len) {
- __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+ __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
}
static inline __ATTRS_o_ai void
-vec_store_len(vector unsigned int __vec, unsigned int *__ptr,
+vec_store_len(__vector unsigned int __vec, unsigned int *__ptr,
unsigned int __len) {
- __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+ __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
}
static inline __ATTRS_o_ai void
-vec_store_len(vector signed long long __vec, signed long long *__ptr,
+vec_store_len(__vector signed long long __vec, signed long long *__ptr,
unsigned int __len) {
- __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+ __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
}
static inline __ATTRS_o_ai void
-vec_store_len(vector unsigned long long __vec, unsigned long long *__ptr,
+vec_store_len(__vector unsigned long long __vec, unsigned long long *__ptr,
unsigned int __len) {
- __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+ __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai void
-vec_store_len(vector float __vec, float *__ptr,
+vec_store_len(__vector float __vec, float *__ptr,
unsigned int __len) {
- __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+ __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
}
#endif
static inline __ATTRS_o_ai void
-vec_store_len(vector double __vec, double *__ptr,
+vec_store_len(__vector double __vec, double *__ptr,
unsigned int __len) {
- __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
+ __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
}
/*-- vec_store_len_r --------------------------------------------------------*/
#if __ARCH__ >= 12
static inline __ATTRS_ai void
-vec_store_len_r(vector unsigned char __vec, unsigned char *__ptr,
+vec_store_len_r(__vector unsigned char __vec, unsigned char *__ptr,
unsigned int __len) {
- __builtin_s390_vstrl((vector signed char)__vec, __len, __ptr);
+ __builtin_s390_vstrl((__vector signed char)__vec, __len, __ptr);
}
#endif
/*-- vec_load_pair ----------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed long long
+static inline __ATTRS_o_ai __vector signed long long
vec_load_pair(signed long long __a, signed long long __b) {
- return (vector signed long long)(__a, __b);
+ return (__vector signed long long)(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned long long
+static inline __ATTRS_o_ai __vector unsigned long long
vec_load_pair(unsigned long long __a, unsigned long long __b) {
- return (vector unsigned long long)(__a, __b);
+ return (__vector unsigned long long)(__a, __b);
}
/*-- vec_genmask ------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
+static inline __ATTRS_o_ai __vector unsigned char
vec_genmask(unsigned short __mask)
__constant(__mask) {
- return (vector unsigned char)(
+ return (__vector unsigned char)(
__mask & 0x8000 ? 0xff : 0,
__mask & 0x4000 ? 0xff : 0,
__mask & 0x2000 ? 0xff : 0,
@@ -1505,7 +1565,7 @@ vec_genmask(unsigned short __mask)
/*-- vec_genmasks_* ---------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
+static inline __ATTRS_o_ai __vector unsigned char
vec_genmasks_8(unsigned char __first, unsigned char __last)
__constant(__first) __constant(__last) {
unsigned char __bit1 = __first & 7;
@@ -1515,10 +1575,10 @@ vec_genmasks_8(unsigned char __first, unsigned char __last)
unsigned char __value = (__bit1 <= __bit2 ?
__mask1 & ~__mask2 :
__mask1 | ~__mask2);
- return (vector unsigned char)__value;
+ return (__vector unsigned char)__value;
}
-static inline __ATTRS_o_ai vector unsigned short
+static inline __ATTRS_o_ai __vector unsigned short
vec_genmasks_16(unsigned char __first, unsigned char __last)
__constant(__first) __constant(__last) {
unsigned char __bit1 = __first & 15;
@@ -1528,10 +1588,10 @@ vec_genmasks_16(unsigned char __first, unsigned char __last)
unsigned short __value = (__bit1 <= __bit2 ?
__mask1 & ~__mask2 :
__mask1 | ~__mask2);
- return (vector unsigned short)__value;
+ return (__vector unsigned short)__value;
}
-static inline __ATTRS_o_ai vector unsigned int
+static inline __ATTRS_o_ai __vector unsigned int
vec_genmasks_32(unsigned char __first, unsigned char __last)
__constant(__first) __constant(__last) {
unsigned char __bit1 = __first & 31;
@@ -1541,10 +1601,10 @@ vec_genmasks_32(unsigned char __first, unsigned char __last)
unsigned int __value = (__bit1 <= __bit2 ?
__mask1 & ~__mask2 :
__mask1 | ~__mask2);
- return (vector unsigned int)__value;
+ return (__vector unsigned int)__value;
}
-static inline __ATTRS_o_ai vector unsigned long long
+static inline __ATTRS_o_ai __vector unsigned long long
vec_genmasks_64(unsigned char __first, unsigned char __last)
__constant(__first) __constant(__last) {
unsigned char __bit1 = __first & 63;
@@ -1554,978 +1614,986 @@ vec_genmasks_64(unsigned char __first, unsigned char __last)
unsigned long long __value = (__bit1 <= __bit2 ?
__mask1 & ~__mask2 :
__mask1 | ~__mask2);
- return (vector unsigned long long)__value;
+ return (__vector unsigned long long)__value;
}
/*-- vec_splat --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_splat(vector signed char __vec, int __index)
+static inline __ATTRS_o_ai __vector signed char
+vec_splat(__vector signed char __vec, int __index)
__constant_range(__index, 0, 15) {
- return (vector signed char)__vec[__index];
+ return (__vector signed char)__vec[__index];
}
-static inline __ATTRS_o_ai vector bool char
-vec_splat(vector bool char __vec, int __index)
+static inline __ATTRS_o_ai __vector __bool char
+vec_splat(__vector __bool char __vec, int __index)
__constant_range(__index, 0, 15) {
- return (vector bool char)(vector unsigned char)__vec[__index];
+ return (__vector __bool char)(__vector unsigned char)__vec[__index];
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_splat(vector unsigned char __vec, int __index)
+static inline __ATTRS_o_ai __vector unsigned char
+vec_splat(__vector unsigned char __vec, int __index)
__constant_range(__index, 0, 15) {
- return (vector unsigned char)__vec[__index];
+ return (__vector unsigned char)__vec[__index];
}
-static inline __ATTRS_o_ai vector signed short
-vec_splat(vector signed short __vec, int __index)
+static inline __ATTRS_o_ai __vector signed short
+vec_splat(__vector signed short __vec, int __index)
__constant_range(__index, 0, 7) {
- return (vector signed short)__vec[__index];
+ return (__vector signed short)__vec[__index];
}
-static inline __ATTRS_o_ai vector bool short
-vec_splat(vector bool short __vec, int __index)
+static inline __ATTRS_o_ai __vector __bool short
+vec_splat(__vector __bool short __vec, int __index)
__constant_range(__index, 0, 7) {
- return (vector bool short)(vector unsigned short)__vec[__index];
+ return (__vector __bool short)(__vector unsigned short)__vec[__index];
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_splat(vector unsigned short __vec, int __index)
+static inline __ATTRS_o_ai __vector unsigned short
+vec_splat(__vector unsigned short __vec, int __index)
__constant_range(__index, 0, 7) {
- return (vector unsigned short)__vec[__index];
+ return (__vector unsigned short)__vec[__index];
}
-static inline __ATTRS_o_ai vector signed int
-vec_splat(vector signed int __vec, int __index)
+static inline __ATTRS_o_ai __vector signed int
+vec_splat(__vector signed int __vec, int __index)
__constant_range(__index, 0, 3) {
- return (vector signed int)__vec[__index];
+ return (__vector signed int)__vec[__index];
}
-static inline __ATTRS_o_ai vector bool int
-vec_splat(vector bool int __vec, int __index)
+static inline __ATTRS_o_ai __vector __bool int
+vec_splat(__vector __bool int __vec, int __index)
__constant_range(__index, 0, 3) {
- return (vector bool int)(vector unsigned int)__vec[__index];
+ return (__vector __bool int)(__vector unsigned int)__vec[__index];
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_splat(vector unsigned int __vec, int __index)
+static inline __ATTRS_o_ai __vector unsigned int
+vec_splat(__vector unsigned int __vec, int __index)
__constant_range(__index, 0, 3) {
- return (vector unsigned int)__vec[__index];
+ return (__vector unsigned int)__vec[__index];
}
-static inline __ATTRS_o_ai vector signed long long
-vec_splat(vector signed long long __vec, int __index)
+static inline __ATTRS_o_ai __vector signed long long
+vec_splat(__vector signed long long __vec, int __index)
__constant_range(__index, 0, 1) {
- return (vector signed long long)__vec[__index];
+ return (__vector signed long long)__vec[__index];
}
-static inline __ATTRS_o_ai vector bool long long
-vec_splat(vector bool long long __vec, int __index)
+static inline __ATTRS_o_ai __vector __bool long long
+vec_splat(__vector __bool long long __vec, int __index)
__constant_range(__index, 0, 1) {
- return (vector bool long long)(vector unsigned long long)__vec[__index];
+ return ((__vector __bool long long)
+ (__vector unsigned long long)__vec[__index]);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_splat(vector unsigned long long __vec, int __index)
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_splat(__vector unsigned long long __vec, int __index)
__constant_range(__index, 0, 1) {
- return (vector unsigned long long)__vec[__index];
+ return (__vector unsigned long long)__vec[__index];
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_splat(vector float __vec, int __index)
+static inline __ATTRS_o_ai __vector float
+vec_splat(__vector float __vec, int __index)
__constant_range(__index, 0, 3) {
- return (vector float)__vec[__index];
+ return (__vector float)__vec[__index];
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_splat(vector double __vec, int __index)
+static inline __ATTRS_o_ai __vector double
+vec_splat(__vector double __vec, int __index)
__constant_range(__index, 0, 1) {
- return (vector double)__vec[__index];
+ return (__vector double)__vec[__index];
}
/*-- vec_splat_s* -----------------------------------------------------------*/
-static inline __ATTRS_ai vector signed char
+static inline __ATTRS_ai __vector signed char
vec_splat_s8(signed char __scalar)
__constant(__scalar) {
- return (vector signed char)__scalar;
+ return (__vector signed char)__scalar;
}
-static inline __ATTRS_ai vector signed short
+static inline __ATTRS_ai __vector signed short
vec_splat_s16(signed short __scalar)
__constant(__scalar) {
- return (vector signed short)__scalar;
+ return (__vector signed short)__scalar;
}
-static inline __ATTRS_ai vector signed int
+static inline __ATTRS_ai __vector signed int
vec_splat_s32(signed short __scalar)
__constant(__scalar) {
- return (vector signed int)(signed int)__scalar;
+ return (__vector signed int)(signed int)__scalar;
}
-static inline __ATTRS_ai vector signed long long
+static inline __ATTRS_ai __vector signed long long
vec_splat_s64(signed short __scalar)
__constant(__scalar) {
- return (vector signed long long)(signed long)__scalar;
+ return (__vector signed long long)(signed long)__scalar;
}
/*-- vec_splat_u* -----------------------------------------------------------*/
-static inline __ATTRS_ai vector unsigned char
+static inline __ATTRS_ai __vector unsigned char
vec_splat_u8(unsigned char __scalar)
__constant(__scalar) {
- return (vector unsigned char)__scalar;
+ return (__vector unsigned char)__scalar;
}
-static inline __ATTRS_ai vector unsigned short
+static inline __ATTRS_ai __vector unsigned short
vec_splat_u16(unsigned short __scalar)
__constant(__scalar) {
- return (vector unsigned short)__scalar;
+ return (__vector unsigned short)__scalar;
}
-static inline __ATTRS_ai vector unsigned int
+static inline __ATTRS_ai __vector unsigned int
vec_splat_u32(signed short __scalar)
__constant(__scalar) {
- return (vector unsigned int)(signed int)__scalar;
+ return (__vector unsigned int)(signed int)__scalar;
}
-static inline __ATTRS_ai vector unsigned long long
+static inline __ATTRS_ai __vector unsigned long long
vec_splat_u64(signed short __scalar)
__constant(__scalar) {
- return (vector unsigned long long)(signed long long)__scalar;
+ return (__vector unsigned long long)(signed long long)__scalar;
}
/*-- vec_splats -------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
+static inline __ATTRS_o_ai __vector signed char
vec_splats(signed char __scalar) {
- return (vector signed char)__scalar;
+ return (__vector signed char)__scalar;
}
-static inline __ATTRS_o_ai vector unsigned char
+static inline __ATTRS_o_ai __vector unsigned char
vec_splats(unsigned char __scalar) {
- return (vector unsigned char)__scalar;
+ return (__vector unsigned char)__scalar;
}
-static inline __ATTRS_o_ai vector signed short
+static inline __ATTRS_o_ai __vector signed short
vec_splats(signed short __scalar) {
- return (vector signed short)__scalar;
+ return (__vector signed short)__scalar;
}
-static inline __ATTRS_o_ai vector unsigned short
+static inline __ATTRS_o_ai __vector unsigned short
vec_splats(unsigned short __scalar) {
- return (vector unsigned short)__scalar;
+ return (__vector unsigned short)__scalar;
}
-static inline __ATTRS_o_ai vector signed int
+static inline __ATTRS_o_ai __vector signed int
vec_splats(signed int __scalar) {
- return (vector signed int)__scalar;
+ return (__vector signed int)__scalar;
}
-static inline __ATTRS_o_ai vector unsigned int
+static inline __ATTRS_o_ai __vector unsigned int
vec_splats(unsigned int __scalar) {
- return (vector unsigned int)__scalar;
+ return (__vector unsigned int)__scalar;
}
-static inline __ATTRS_o_ai vector signed long long
+static inline __ATTRS_o_ai __vector signed long long
vec_splats(signed long long __scalar) {
- return (vector signed long long)__scalar;
+ return (__vector signed long long)__scalar;
}
-static inline __ATTRS_o_ai vector unsigned long long
+static inline __ATTRS_o_ai __vector unsigned long long
vec_splats(unsigned long long __scalar) {
- return (vector unsigned long long)__scalar;
+ return (__vector unsigned long long)__scalar;
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
+static inline __ATTRS_o_ai __vector float
vec_splats(float __scalar) {
- return (vector float)__scalar;
+ return (__vector float)__scalar;
}
#endif
-static inline __ATTRS_o_ai vector double
+static inline __ATTRS_o_ai __vector double
vec_splats(double __scalar) {
- return (vector double)__scalar;
+ return (__vector double)__scalar;
}
/*-- vec_extend_s64 ---------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed long long
-vec_extend_s64(vector signed char __a) {
- return (vector signed long long)(__a[7], __a[15]);
+static inline __ATTRS_o_ai __vector signed long long
+vec_extend_s64(__vector signed char __a) {
+ return (__vector signed long long)(__a[7], __a[15]);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_extend_s64(vector signed short __a) {
- return (vector signed long long)(__a[3], __a[7]);
+static inline __ATTRS_o_ai __vector signed long long
+vec_extend_s64(__vector signed short __a) {
+ return (__vector signed long long)(__a[3], __a[7]);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_extend_s64(vector signed int __a) {
- return (vector signed long long)(__a[1], __a[3]);
+static inline __ATTRS_o_ai __vector signed long long
+vec_extend_s64(__vector signed int __a) {
+ return (__vector signed long long)(__a[1], __a[3]);
}
/*-- vec_mergeh -------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_mergeh(vector signed char __a, vector signed char __b) {
- return (vector signed char)(
+static inline __ATTRS_o_ai __vector signed char
+vec_mergeh(__vector signed char __a, __vector signed char __b) {
+ return (__vector signed char)(
__a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
__a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
}
-static inline __ATTRS_o_ai vector bool char
-vec_mergeh(vector bool char __a, vector bool char __b) {
- return (vector bool char)(
+static inline __ATTRS_o_ai __vector __bool char
+vec_mergeh(__vector __bool char __a, __vector __bool char __b) {
+ return (__vector __bool char)(
__a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
__a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_mergeh(vector unsigned char __a, vector unsigned char __b) {
- return (vector unsigned char)(
+static inline __ATTRS_o_ai __vector unsigned char
+vec_mergeh(__vector unsigned char __a, __vector unsigned char __b) {
+ return (__vector unsigned char)(
__a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
__a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
}
-static inline __ATTRS_o_ai vector signed short
-vec_mergeh(vector signed short __a, vector signed short __b) {
- return (vector signed short)(
+static inline __ATTRS_o_ai __vector signed short
+vec_mergeh(__vector signed short __a, __vector signed short __b) {
+ return (__vector signed short)(
__a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
}
-static inline __ATTRS_o_ai vector bool short
-vec_mergeh(vector bool short __a, vector bool short __b) {
- return (vector bool short)(
+static inline __ATTRS_o_ai __vector __bool short
+vec_mergeh(__vector __bool short __a, __vector __bool short __b) {
+ return (__vector __bool short)(
__a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_mergeh(vector unsigned short __a, vector unsigned short __b) {
- return (vector unsigned short)(
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mergeh(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector unsigned short)(
__a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
}
-static inline __ATTRS_o_ai vector signed int
-vec_mergeh(vector signed int __a, vector signed int __b) {
- return (vector signed int)(__a[0], __b[0], __a[1], __b[1]);
+static inline __ATTRS_o_ai __vector signed int
+vec_mergeh(__vector signed int __a, __vector signed int __b) {
+ return (__vector signed int)(__a[0], __b[0], __a[1], __b[1]);
}
-static inline __ATTRS_o_ai vector bool int
-vec_mergeh(vector bool int __a, vector bool int __b) {
- return (vector bool int)(__a[0], __b[0], __a[1], __b[1]);
+static inline __ATTRS_o_ai __vector __bool int
+vec_mergeh(__vector __bool int __a, __vector __bool int __b) {
+ return (__vector __bool int)(__a[0], __b[0], __a[1], __b[1]);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_mergeh(vector unsigned int __a, vector unsigned int __b) {
- return (vector unsigned int)(__a[0], __b[0], __a[1], __b[1]);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mergeh(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector unsigned int)(__a[0], __b[0], __a[1], __b[1]);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_mergeh(vector signed long long __a, vector signed long long __b) {
- return (vector signed long long)(__a[0], __b[0]);
+static inline __ATTRS_o_ai __vector signed long long
+vec_mergeh(__vector signed long long __a, __vector signed long long __b) {
+ return (__vector signed long long)(__a[0], __b[0]);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_mergeh(vector bool long long __a, vector bool long long __b) {
- return (vector bool long long)(__a[0], __b[0]);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_mergeh(__vector __bool long long __a, __vector __bool long long __b) {
+ return (__vector __bool long long)(__a[0], __b[0]);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_mergeh(vector unsigned long long __a, vector unsigned long long __b) {
- return (vector unsigned long long)(__a[0], __b[0]);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_mergeh(__vector unsigned long long __a, __vector unsigned long long __b) {
+ return (__vector unsigned long long)(__a[0], __b[0]);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_mergeh(vector float __a, vector float __b) {
- return (vector float)(__a[0], __b[0], __a[1], __b[1]);
+static inline __ATTRS_o_ai __vector float
+vec_mergeh(__vector float __a, __vector float __b) {
+ return (__vector float)(__a[0], __b[0], __a[1], __b[1]);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_mergeh(vector double __a, vector double __b) {
- return (vector double)(__a[0], __b[0]);
+static inline __ATTRS_o_ai __vector double
+vec_mergeh(__vector double __a, __vector double __b) {
+ return (__vector double)(__a[0], __b[0]);
}
/*-- vec_mergel -------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_mergel(vector signed char __a, vector signed char __b) {
- return (vector signed char)(
+static inline __ATTRS_o_ai __vector signed char
+vec_mergel(__vector signed char __a, __vector signed char __b) {
+ return (__vector signed char)(
__a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
__a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
}
-static inline __ATTRS_o_ai vector bool char
-vec_mergel(vector bool char __a, vector bool char __b) {
- return (vector bool char)(
+static inline __ATTRS_o_ai __vector __bool char
+vec_mergel(__vector __bool char __a, __vector __bool char __b) {
+ return (__vector __bool char)(
__a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
__a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_mergel(vector unsigned char __a, vector unsigned char __b) {
- return (vector unsigned char)(
+static inline __ATTRS_o_ai __vector unsigned char
+vec_mergel(__vector unsigned char __a, __vector unsigned char __b) {
+ return (__vector unsigned char)(
__a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
__a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
}
-static inline __ATTRS_o_ai vector signed short
-vec_mergel(vector signed short __a, vector signed short __b) {
- return (vector signed short)(
+static inline __ATTRS_o_ai __vector signed short
+vec_mergel(__vector signed short __a, __vector signed short __b) {
+ return (__vector signed short)(
__a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
}
-static inline __ATTRS_o_ai vector bool short
-vec_mergel(vector bool short __a, vector bool short __b) {
- return (vector bool short)(
+static inline __ATTRS_o_ai __vector __bool short
+vec_mergel(__vector __bool short __a, __vector __bool short __b) {
+ return (__vector __bool short)(
__a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_mergel(vector unsigned short __a, vector unsigned short __b) {
- return (vector unsigned short)(
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mergel(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector unsigned short)(
__a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
}
-static inline __ATTRS_o_ai vector signed int
-vec_mergel(vector signed int __a, vector signed int __b) {
- return (vector signed int)(__a[2], __b[2], __a[3], __b[3]);
+static inline __ATTRS_o_ai __vector signed int
+vec_mergel(__vector signed int __a, __vector signed int __b) {
+ return (__vector signed int)(__a[2], __b[2], __a[3], __b[3]);
}
-static inline __ATTRS_o_ai vector bool int
-vec_mergel(vector bool int __a, vector bool int __b) {
- return (vector bool int)(__a[2], __b[2], __a[3], __b[3]);
+static inline __ATTRS_o_ai __vector __bool int
+vec_mergel(__vector __bool int __a, __vector __bool int __b) {
+ return (__vector __bool int)(__a[2], __b[2], __a[3], __b[3]);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_mergel(vector unsigned int __a, vector unsigned int __b) {
- return (vector unsigned int)(__a[2], __b[2], __a[3], __b[3]);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mergel(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector unsigned int)(__a[2], __b[2], __a[3], __b[3]);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_mergel(vector signed long long __a, vector signed long long __b) {
- return (vector signed long long)(__a[1], __b[1]);
+static inline __ATTRS_o_ai __vector signed long long
+vec_mergel(__vector signed long long __a, __vector signed long long __b) {
+ return (__vector signed long long)(__a[1], __b[1]);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_mergel(vector bool long long __a, vector bool long long __b) {
- return (vector bool long long)(__a[1], __b[1]);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_mergel(__vector __bool long long __a, __vector __bool long long __b) {
+ return (__vector __bool long long)(__a[1], __b[1]);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_mergel(vector unsigned long long __a, vector unsigned long long __b) {
- return (vector unsigned long long)(__a[1], __b[1]);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_mergel(__vector unsigned long long __a, __vector unsigned long long __b) {
+ return (__vector unsigned long long)(__a[1], __b[1]);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_mergel(vector float __a, vector float __b) {
- return (vector float)(__a[2], __b[2], __a[3], __b[3]);
+static inline __ATTRS_o_ai __vector float
+vec_mergel(__vector float __a, __vector float __b) {
+ return (__vector float)(__a[2], __b[2], __a[3], __b[3]);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_mergel(vector double __a, vector double __b) {
- return (vector double)(__a[1], __b[1]);
+static inline __ATTRS_o_ai __vector double
+vec_mergel(__vector double __a, __vector double __b) {
+ return (__vector double)(__a[1], __b[1]);
}
/*-- vec_pack ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_pack(vector signed short __a, vector signed short __b) {
- vector signed char __ac = (vector signed char)__a;
- vector signed char __bc = (vector signed char)__b;
- return (vector signed char)(
+static inline __ATTRS_o_ai __vector signed char
+vec_pack(__vector signed short __a, __vector signed short __b) {
+ __vector signed char __ac = (__vector signed char)__a;
+ __vector signed char __bc = (__vector signed char)__b;
+ return (__vector signed char)(
__ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
__bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
}
-static inline __ATTRS_o_ai vector bool char
-vec_pack(vector bool short __a, vector bool short __b) {
- vector bool char __ac = (vector bool char)__a;
- vector bool char __bc = (vector bool char)__b;
- return (vector bool char)(
+static inline __ATTRS_o_ai __vector __bool char
+vec_pack(__vector __bool short __a, __vector __bool short __b) {
+ __vector __bool char __ac = (__vector __bool char)__a;
+ __vector __bool char __bc = (__vector __bool char)__b;
+ return (__vector __bool char)(
__ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
__bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_pack(vector unsigned short __a, vector unsigned short __b) {
- vector unsigned char __ac = (vector unsigned char)__a;
- vector unsigned char __bc = (vector unsigned char)__b;
- return (vector unsigned char)(
+static inline __ATTRS_o_ai __vector unsigned char
+vec_pack(__vector unsigned short __a, __vector unsigned short __b) {
+ __vector unsigned char __ac = (__vector unsigned char)__a;
+ __vector unsigned char __bc = (__vector unsigned char)__b;
+ return (__vector unsigned char)(
__ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
__bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
}
-static inline __ATTRS_o_ai vector signed short
-vec_pack(vector signed int __a, vector signed int __b) {
- vector signed short __ac = (vector signed short)__a;
- vector signed short __bc = (vector signed short)__b;
- return (vector signed short)(
+static inline __ATTRS_o_ai __vector signed short
+vec_pack(__vector signed int __a, __vector signed int __b) {
+ __vector signed short __ac = (__vector signed short)__a;
+ __vector signed short __bc = (__vector signed short)__b;
+ return (__vector signed short)(
__ac[1], __ac[3], __ac[5], __ac[7],
__bc[1], __bc[3], __bc[5], __bc[7]);
}
-static inline __ATTRS_o_ai vector bool short
-vec_pack(vector bool int __a, vector bool int __b) {
- vector bool short __ac = (vector bool short)__a;
- vector bool short __bc = (vector bool short)__b;
- return (vector bool short)(
+static inline __ATTRS_o_ai __vector __bool short
+vec_pack(__vector __bool int __a, __vector __bool int __b) {
+ __vector __bool short __ac = (__vector __bool short)__a;
+ __vector __bool short __bc = (__vector __bool short)__b;
+ return (__vector __bool short)(
__ac[1], __ac[3], __ac[5], __ac[7],
__bc[1], __bc[3], __bc[5], __bc[7]);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_pack(vector unsigned int __a, vector unsigned int __b) {
- vector unsigned short __ac = (vector unsigned short)__a;
- vector unsigned short __bc = (vector unsigned short)__b;
- return (vector unsigned short)(
+static inline __ATTRS_o_ai __vector unsigned short
+vec_pack(__vector unsigned int __a, __vector unsigned int __b) {
+ __vector unsigned short __ac = (__vector unsigned short)__a;
+ __vector unsigned short __bc = (__vector unsigned short)__b;
+ return (__vector unsigned short)(
__ac[1], __ac[3], __ac[5], __ac[7],
__bc[1], __bc[3], __bc[5], __bc[7]);
}
-static inline __ATTRS_o_ai vector signed int
-vec_pack(vector signed long long __a, vector signed long long __b) {
- vector signed int __ac = (vector signed int)__a;
- vector signed int __bc = (vector signed int)__b;
- return (vector signed int)(__ac[1], __ac[3], __bc[1], __bc[3]);
+static inline __ATTRS_o_ai __vector signed int
+vec_pack(__vector signed long long __a, __vector signed long long __b) {
+ __vector signed int __ac = (__vector signed int)__a;
+ __vector signed int __bc = (__vector signed int)__b;
+ return (__vector signed int)(__ac[1], __ac[3], __bc[1], __bc[3]);
}
-static inline __ATTRS_o_ai vector bool int
-vec_pack(vector bool long long __a, vector bool long long __b) {
- vector bool int __ac = (vector bool int)__a;
- vector bool int __bc = (vector bool int)__b;
- return (vector bool int)(__ac[1], __ac[3], __bc[1], __bc[3]);
+static inline __ATTRS_o_ai __vector __bool int
+vec_pack(__vector __bool long long __a, __vector __bool long long __b) {
+ __vector __bool int __ac = (__vector __bool int)__a;
+ __vector __bool int __bc = (__vector __bool int)__b;
+ return (__vector __bool int)(__ac[1], __ac[3], __bc[1], __bc[3]);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_pack(vector unsigned long long __a, vector unsigned long long __b) {
- vector unsigned int __ac = (vector unsigned int)__a;
- vector unsigned int __bc = (vector unsigned int)__b;
- return (vector unsigned int)(__ac[1], __ac[3], __bc[1], __bc[3]);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_pack(__vector unsigned long long __a, __vector unsigned long long __b) {
+ __vector unsigned int __ac = (__vector unsigned int)__a;
+ __vector unsigned int __bc = (__vector unsigned int)__b;
+ return (__vector unsigned int)(__ac[1], __ac[3], __bc[1], __bc[3]);
}
/*-- vec_packs --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_packs(vector signed short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_packs(__vector signed short __a, __vector signed short __b) {
return __builtin_s390_vpksh(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_packs(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_packs(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vpklsh(__a, __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_packs(vector signed int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_packs(__vector signed int __a, __vector signed int __b) {
return __builtin_s390_vpksf(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_packs(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_packs(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vpklsf(__a, __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_packs(vector signed long long __a, vector signed long long __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_packs(__vector signed long long __a, __vector signed long long __b) {
return __builtin_s390_vpksg(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_packs(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_packs(__vector unsigned long long __a, __vector unsigned long long __b) {
return __builtin_s390_vpklsg(__a, __b);
}
/*-- vec_packs_cc -----------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_packs_cc(vector signed short __a, vector signed short __b, int *__cc) {
+static inline __ATTRS_o_ai __vector signed char
+vec_packs_cc(__vector signed short __a, __vector signed short __b, int *__cc) {
return __builtin_s390_vpkshs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_packs_cc(vector unsigned short __a, vector unsigned short __b, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_packs_cc(__vector unsigned short __a, __vector unsigned short __b,
+ int *__cc) {
return __builtin_s390_vpklshs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector signed short
-vec_packs_cc(vector signed int __a, vector signed int __b, int *__cc) {
+static inline __ATTRS_o_ai __vector signed short
+vec_packs_cc(__vector signed int __a, __vector signed int __b, int *__cc) {
return __builtin_s390_vpksfs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_packs_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_packs_cc(__vector unsigned int __a, __vector unsigned int __b, int *__cc) {
return __builtin_s390_vpklsfs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector signed int
-vec_packs_cc(vector signed long long __a, vector signed long long __b,
+static inline __ATTRS_o_ai __vector signed int
+vec_packs_cc(__vector signed long long __a, __vector signed long long __b,
int *__cc) {
return __builtin_s390_vpksgs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_packs_cc(vector unsigned long long __a, vector unsigned long long __b,
+static inline __ATTRS_o_ai __vector unsigned int
+vec_packs_cc(__vector unsigned long long __a, __vector unsigned long long __b,
int *__cc) {
return __builtin_s390_vpklsgs(__a, __b, __cc);
}
/*-- vec_packsu -------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_packsu(vector signed short __a, vector signed short __b) {
- const vector signed short __zero = (vector signed short)0;
+static inline __ATTRS_o_ai __vector unsigned char
+vec_packsu(__vector signed short __a, __vector signed short __b) {
+ const __vector signed short __zero = (__vector signed short)0;
return __builtin_s390_vpklsh(
- (vector unsigned short)(__a >= __zero) & (vector unsigned short)__a,
- (vector unsigned short)(__b >= __zero) & (vector unsigned short)__b);
+ (__vector unsigned short)(__a >= __zero) & (__vector unsigned short)__a,
+ (__vector unsigned short)(__b >= __zero) & (__vector unsigned short)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_packsu(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_packsu(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vpklsh(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_packsu(vector signed int __a, vector signed int __b) {
- const vector signed int __zero = (vector signed int)0;
+static inline __ATTRS_o_ai __vector unsigned short
+vec_packsu(__vector signed int __a, __vector signed int __b) {
+ const __vector signed int __zero = (__vector signed int)0;
return __builtin_s390_vpklsf(
- (vector unsigned int)(__a >= __zero) & (vector unsigned int)__a,
- (vector unsigned int)(__b >= __zero) & (vector unsigned int)__b);
+ (__vector unsigned int)(__a >= __zero) & (__vector unsigned int)__a,
+ (__vector unsigned int)(__b >= __zero) & (__vector unsigned int)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_packsu(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_packsu(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vpklsf(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_packsu(vector signed long long __a, vector signed long long __b) {
- const vector signed long long __zero = (vector signed long long)0;
+static inline __ATTRS_o_ai __vector unsigned int
+vec_packsu(__vector signed long long __a, __vector signed long long __b) {
+ const __vector signed long long __zero = (__vector signed long long)0;
return __builtin_s390_vpklsg(
- (vector unsigned long long)(__a >= __zero) &
- (vector unsigned long long)__a,
- (vector unsigned long long)(__b >= __zero) &
- (vector unsigned long long)__b);
+ (__vector unsigned long long)(__a >= __zero) &
+ (__vector unsigned long long)__a,
+ (__vector unsigned long long)(__b >= __zero) &
+ (__vector unsigned long long)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_packsu(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_packsu(__vector unsigned long long __a, __vector unsigned long long __b) {
return __builtin_s390_vpklsg(__a, __b);
}
/*-- vec_packsu_cc ----------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_packsu_cc(vector unsigned short __a, vector unsigned short __b, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_packsu_cc(__vector unsigned short __a, __vector unsigned short __b,
+ int *__cc) {
return __builtin_s390_vpklshs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_packsu_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_packsu_cc(__vector unsigned int __a, __vector unsigned int __b, int *__cc) {
return __builtin_s390_vpklsfs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_packsu_cc(vector unsigned long long __a, vector unsigned long long __b,
+static inline __ATTRS_o_ai __vector unsigned int
+vec_packsu_cc(__vector unsigned long long __a, __vector unsigned long long __b,
int *__cc) {
return __builtin_s390_vpklsgs(__a, __b, __cc);
}
/*-- vec_unpackh ------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed short
-vec_unpackh(vector signed char __a) {
+static inline __ATTRS_o_ai __vector signed short
+vec_unpackh(__vector signed char __a) {
return __builtin_s390_vuphb(__a);
}
-static inline __ATTRS_o_ai vector bool short
-vec_unpackh(vector bool char __a) {
- return (vector bool short)__builtin_s390_vuphb((vector signed char)__a);
+static inline __ATTRS_o_ai __vector __bool short
+vec_unpackh(__vector __bool char __a) {
+ return ((__vector __bool short)
+ __builtin_s390_vuphb((__vector signed char)__a));
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_unpackh(vector unsigned char __a) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_unpackh(__vector unsigned char __a) {
return __builtin_s390_vuplhb(__a);
}
-static inline __ATTRS_o_ai vector signed int
-vec_unpackh(vector signed short __a) {
+static inline __ATTRS_o_ai __vector signed int
+vec_unpackh(__vector signed short __a) {
return __builtin_s390_vuphh(__a);
}
-static inline __ATTRS_o_ai vector bool int
-vec_unpackh(vector bool short __a) {
- return (vector bool int)__builtin_s390_vuphh((vector signed short)__a);
+static inline __ATTRS_o_ai __vector __bool int
+vec_unpackh(__vector __bool short __a) {
+ return (__vector __bool int)__builtin_s390_vuphh((__vector signed short)__a);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_unpackh(vector unsigned short __a) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_unpackh(__vector unsigned short __a) {
return __builtin_s390_vuplhh(__a);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_unpackh(vector signed int __a) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_unpackh(__vector signed int __a) {
return __builtin_s390_vuphf(__a);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_unpackh(vector bool int __a) {
- return (vector bool long long)__builtin_s390_vuphf((vector signed int)__a);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_unpackh(__vector __bool int __a) {
+ return ((__vector __bool long long)
+ __builtin_s390_vuphf((__vector signed int)__a));
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_unpackh(vector unsigned int __a) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_unpackh(__vector unsigned int __a) {
return __builtin_s390_vuplhf(__a);
}
/*-- vec_unpackl ------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed short
-vec_unpackl(vector signed char __a) {
+static inline __ATTRS_o_ai __vector signed short
+vec_unpackl(__vector signed char __a) {
return __builtin_s390_vuplb(__a);
}
-static inline __ATTRS_o_ai vector bool short
-vec_unpackl(vector bool char __a) {
- return (vector bool short)__builtin_s390_vuplb((vector signed char)__a);
+static inline __ATTRS_o_ai __vector __bool short
+vec_unpackl(__vector __bool char __a) {
+ return ((__vector __bool short)
+ __builtin_s390_vuplb((__vector signed char)__a));
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_unpackl(vector unsigned char __a) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_unpackl(__vector unsigned char __a) {
return __builtin_s390_vupllb(__a);
}
-static inline __ATTRS_o_ai vector signed int
-vec_unpackl(vector signed short __a) {
+static inline __ATTRS_o_ai __vector signed int
+vec_unpackl(__vector signed short __a) {
return __builtin_s390_vuplhw(__a);
}
-static inline __ATTRS_o_ai vector bool int
-vec_unpackl(vector bool short __a) {
- return (vector bool int)__builtin_s390_vuplhw((vector signed short)__a);
+static inline __ATTRS_o_ai __vector __bool int
+vec_unpackl(__vector __bool short __a) {
+ return ((__vector __bool int)
+ __builtin_s390_vuplhw((__vector signed short)__a));
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_unpackl(vector unsigned short __a) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_unpackl(__vector unsigned short __a) {
return __builtin_s390_vupllh(__a);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_unpackl(vector signed int __a) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_unpackl(__vector signed int __a) {
return __builtin_s390_vuplf(__a);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_unpackl(vector bool int __a) {
- return (vector bool long long)__builtin_s390_vuplf((vector signed int)__a);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_unpackl(__vector __bool int __a) {
+ return ((__vector __bool long long)
+ __builtin_s390_vuplf((__vector signed int)__a));
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_unpackl(vector unsigned int __a) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_unpackl(__vector unsigned int __a) {
return __builtin_s390_vupllf(__a);
}
/*-- vec_cmpeq --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_cmpeq(vector bool char __a, vector bool char __b) {
- return (vector bool char)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpeq(__vector __bool char __a, __vector __bool char __b) {
+ return (__vector __bool char)(__a == __b);
}
-static inline __ATTRS_o_ai vector bool char
-vec_cmpeq(vector signed char __a, vector signed char __b) {
- return (vector bool char)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpeq(__vector signed char __a, __vector signed char __b) {
+ return (__vector __bool char)(__a == __b);
}
-static inline __ATTRS_o_ai vector bool char
-vec_cmpeq(vector unsigned char __a, vector unsigned char __b) {
- return (vector bool char)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpeq(__vector unsigned char __a, __vector unsigned char __b) {
+ return (__vector __bool char)(__a == __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmpeq(vector bool short __a, vector bool short __b) {
- return (vector bool short)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpeq(__vector __bool short __a, __vector __bool short __b) {
+ return (__vector __bool short)(__a == __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmpeq(vector signed short __a, vector signed short __b) {
- return (vector bool short)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpeq(__vector signed short __a, __vector signed short __b) {
+ return (__vector __bool short)(__a == __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmpeq(vector unsigned short __a, vector unsigned short __b) {
- return (vector bool short)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpeq(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector __bool short)(__a == __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmpeq(vector bool int __a, vector bool int __b) {
- return (vector bool int)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpeq(__vector __bool int __a, __vector __bool int __b) {
+ return (__vector __bool int)(__a == __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmpeq(vector signed int __a, vector signed int __b) {
- return (vector bool int)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpeq(__vector signed int __a, __vector signed int __b) {
+ return (__vector __bool int)(__a == __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmpeq(vector unsigned int __a, vector unsigned int __b) {
- return (vector bool int)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpeq(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector __bool int)(__a == __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpeq(vector bool long long __a, vector bool long long __b) {
- return (vector bool long long)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpeq(__vector __bool long long __a, __vector __bool long long __b) {
+ return (__vector __bool long long)(__a == __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpeq(vector signed long long __a, vector signed long long __b) {
- return (vector bool long long)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpeq(__vector signed long long __a, __vector signed long long __b) {
+ return (__vector __bool long long)(__a == __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) {
- return (vector bool long long)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpeq(__vector unsigned long long __a, __vector unsigned long long __b) {
+ return (__vector __bool long long)(__a == __b);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool int
-vec_cmpeq(vector float __a, vector float __b) {
- return (vector bool int)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpeq(__vector float __a, __vector float __b) {
+ return (__vector __bool int)(__a == __b);
}
#endif
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpeq(vector double __a, vector double __b) {
- return (vector bool long long)(__a == __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpeq(__vector double __a, __vector double __b) {
+ return (__vector __bool long long)(__a == __b);
}
/*-- vec_cmpge --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_cmpge(vector signed char __a, vector signed char __b) {
- return (vector bool char)(__a >= __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpge(__vector signed char __a, __vector signed char __b) {
+ return (__vector __bool char)(__a >= __b);
}
-static inline __ATTRS_o_ai vector bool char
-vec_cmpge(vector unsigned char __a, vector unsigned char __b) {
- return (vector bool char)(__a >= __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpge(__vector unsigned char __a, __vector unsigned char __b) {
+ return (__vector __bool char)(__a >= __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmpge(vector signed short __a, vector signed short __b) {
- return (vector bool short)(__a >= __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpge(__vector signed short __a, __vector signed short __b) {
+ return (__vector __bool short)(__a >= __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmpge(vector unsigned short __a, vector unsigned short __b) {
- return (vector bool short)(__a >= __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpge(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector __bool short)(__a >= __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmpge(vector signed int __a, vector signed int __b) {
- return (vector bool int)(__a >= __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpge(__vector signed int __a, __vector signed int __b) {
+ return (__vector __bool int)(__a >= __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmpge(vector unsigned int __a, vector unsigned int __b) {
- return (vector bool int)(__a >= __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpge(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector __bool int)(__a >= __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpge(vector signed long long __a, vector signed long long __b) {
- return (vector bool long long)(__a >= __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpge(__vector signed long long __a, __vector signed long long __b) {
+ return (__vector __bool long long)(__a >= __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) {
- return (vector bool long long)(__a >= __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpge(__vector unsigned long long __a, __vector unsigned long long __b) {
+ return (__vector __bool long long)(__a >= __b);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool int
-vec_cmpge(vector float __a, vector float __b) {
- return (vector bool int)(__a >= __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpge(__vector float __a, __vector float __b) {
+ return (__vector __bool int)(__a >= __b);
}
#endif
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpge(vector double __a, vector double __b) {
- return (vector bool long long)(__a >= __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpge(__vector double __a, __vector double __b) {
+ return (__vector __bool long long)(__a >= __b);
}
/*-- vec_cmpgt --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_cmpgt(vector signed char __a, vector signed char __b) {
- return (vector bool char)(__a > __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpgt(__vector signed char __a, __vector signed char __b) {
+ return (__vector __bool char)(__a > __b);
}
-static inline __ATTRS_o_ai vector bool char
-vec_cmpgt(vector unsigned char __a, vector unsigned char __b) {
- return (vector bool char)(__a > __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpgt(__vector unsigned char __a, __vector unsigned char __b) {
+ return (__vector __bool char)(__a > __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmpgt(vector signed short __a, vector signed short __b) {
- return (vector bool short)(__a > __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpgt(__vector signed short __a, __vector signed short __b) {
+ return (__vector __bool short)(__a > __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmpgt(vector unsigned short __a, vector unsigned short __b) {
- return (vector bool short)(__a > __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpgt(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector __bool short)(__a > __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmpgt(vector signed int __a, vector signed int __b) {
- return (vector bool int)(__a > __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpgt(__vector signed int __a, __vector signed int __b) {
+ return (__vector __bool int)(__a > __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmpgt(vector unsigned int __a, vector unsigned int __b) {
- return (vector bool int)(__a > __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpgt(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector __bool int)(__a > __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpgt(vector signed long long __a, vector signed long long __b) {
- return (vector bool long long)(__a > __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpgt(__vector signed long long __a, __vector signed long long __b) {
+ return (__vector __bool long long)(__a > __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) {
- return (vector bool long long)(__a > __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpgt(__vector unsigned long long __a, __vector unsigned long long __b) {
+ return (__vector __bool long long)(__a > __b);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool int
-vec_cmpgt(vector float __a, vector float __b) {
- return (vector bool int)(__a > __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpgt(__vector float __a, __vector float __b) {
+ return (__vector __bool int)(__a > __b);
}
#endif
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpgt(vector double __a, vector double __b) {
- return (vector bool long long)(__a > __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpgt(__vector double __a, __vector double __b) {
+ return (__vector __bool long long)(__a > __b);
}
/*-- vec_cmple --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_cmple(vector signed char __a, vector signed char __b) {
- return (vector bool char)(__a <= __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmple(__vector signed char __a, __vector signed char __b) {
+ return (__vector __bool char)(__a <= __b);
}
-static inline __ATTRS_o_ai vector bool char
-vec_cmple(vector unsigned char __a, vector unsigned char __b) {
- return (vector bool char)(__a <= __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmple(__vector unsigned char __a, __vector unsigned char __b) {
+ return (__vector __bool char)(__a <= __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmple(vector signed short __a, vector signed short __b) {
- return (vector bool short)(__a <= __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmple(__vector signed short __a, __vector signed short __b) {
+ return (__vector __bool short)(__a <= __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmple(vector unsigned short __a, vector unsigned short __b) {
- return (vector bool short)(__a <= __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmple(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector __bool short)(__a <= __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmple(vector signed int __a, vector signed int __b) {
- return (vector bool int)(__a <= __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmple(__vector signed int __a, __vector signed int __b) {
+ return (__vector __bool int)(__a <= __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmple(vector unsigned int __a, vector unsigned int __b) {
- return (vector bool int)(__a <= __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmple(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector __bool int)(__a <= __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_cmple(vector signed long long __a, vector signed long long __b) {
- return (vector bool long long)(__a <= __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmple(__vector signed long long __a, __vector signed long long __b) {
+ return (__vector __bool long long)(__a <= __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_cmple(vector unsigned long long __a, vector unsigned long long __b) {
- return (vector bool long long)(__a <= __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmple(__vector unsigned long long __a, __vector unsigned long long __b) {
+ return (__vector __bool long long)(__a <= __b);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool int
-vec_cmple(vector float __a, vector float __b) {
- return (vector bool int)(__a <= __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmple(__vector float __a, __vector float __b) {
+ return (__vector __bool int)(__a <= __b);
}
#endif
-static inline __ATTRS_o_ai vector bool long long
-vec_cmple(vector double __a, vector double __b) {
- return (vector bool long long)(__a <= __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmple(__vector double __a, __vector double __b) {
+ return (__vector __bool long long)(__a <= __b);
}
/*-- vec_cmplt --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_cmplt(vector signed char __a, vector signed char __b) {
- return (vector bool char)(__a < __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmplt(__vector signed char __a, __vector signed char __b) {
+ return (__vector __bool char)(__a < __b);
}
-static inline __ATTRS_o_ai vector bool char
-vec_cmplt(vector unsigned char __a, vector unsigned char __b) {
- return (vector bool char)(__a < __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmplt(__vector unsigned char __a, __vector unsigned char __b) {
+ return (__vector __bool char)(__a < __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmplt(vector signed short __a, vector signed short __b) {
- return (vector bool short)(__a < __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmplt(__vector signed short __a, __vector signed short __b) {
+ return (__vector __bool short)(__a < __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmplt(vector unsigned short __a, vector unsigned short __b) {
- return (vector bool short)(__a < __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmplt(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector __bool short)(__a < __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmplt(vector signed int __a, vector signed int __b) {
- return (vector bool int)(__a < __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmplt(__vector signed int __a, __vector signed int __b) {
+ return (__vector __bool int)(__a < __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmplt(vector unsigned int __a, vector unsigned int __b) {
- return (vector bool int)(__a < __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmplt(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector __bool int)(__a < __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_cmplt(vector signed long long __a, vector signed long long __b) {
- return (vector bool long long)(__a < __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmplt(__vector signed long long __a, __vector signed long long __b) {
+ return (__vector __bool long long)(__a < __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) {
- return (vector bool long long)(__a < __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmplt(__vector unsigned long long __a, __vector unsigned long long __b) {
+ return (__vector __bool long long)(__a < __b);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool int
-vec_cmplt(vector float __a, vector float __b) {
- return (vector bool int)(__a < __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmplt(__vector float __a, __vector float __b) {
+ return (__vector __bool int)(__a < __b);
}
#endif
-static inline __ATTRS_o_ai vector bool long long
-vec_cmplt(vector double __a, vector double __b) {
- return (vector bool long long)(__a < __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmplt(__vector double __a, __vector double __b) {
+ return (__vector __bool long long)(__a < __b);
}
/*-- vec_all_eq -------------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_all_eq(vector signed char __a, vector signed char __b) {
+vec_all_eq(__vector signed char __a, __vector signed char __b) {
int __cc;
__builtin_s390_vceqbs(__a, __b, &__cc);
return __cc == 0;
@@ -2533,56 +2601,56 @@ vec_all_eq(vector signed char __a, vector signed char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector signed char __a, vector bool char __b) {
+vec_all_eq(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs(__a, (__vector signed char)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector bool char __a, vector signed char __b) {
+vec_all_eq(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a, __b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned char __a, vector unsigned char __b) {
+vec_all_eq(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned char __a, vector bool char __b) {
+vec_all_eq(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector bool char __a, vector unsigned char __b) {
+vec_all_eq(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_eq(vector bool char __a, vector bool char __b) {
+vec_all_eq(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_eq(vector signed short __a, vector signed short __b) {
+vec_all_eq(__vector signed short __a, __vector signed short __b) {
int __cc;
__builtin_s390_vceqhs(__a, __b, &__cc);
return __cc == 0;
@@ -2590,56 +2658,56 @@ vec_all_eq(vector signed short __a, vector signed short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector signed short __a, vector bool short __b) {
+vec_all_eq(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs(__a, (__vector signed short)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector bool short __a, vector signed short __b) {
+vec_all_eq(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a, __b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned short __a, vector unsigned short __b) {
+vec_all_eq(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned short __a, vector bool short __b) {
+vec_all_eq(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector bool short __a, vector unsigned short __b) {
+vec_all_eq(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_eq(vector bool short __a, vector bool short __b) {
+vec_all_eq(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_eq(vector signed int __a, vector signed int __b) {
+vec_all_eq(__vector signed int __a, __vector signed int __b) {
int __cc;
__builtin_s390_vceqfs(__a, __b, &__cc);
return __cc == 0;
@@ -2647,56 +2715,56 @@ vec_all_eq(vector signed int __a, vector signed int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector signed int __a, vector bool int __b) {
+vec_all_eq(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs(__a, (__vector signed int)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector bool int __a, vector signed int __b) {
+vec_all_eq(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a, __b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned int __a, vector unsigned int __b) {
+vec_all_eq(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned int __a, vector bool int __b) {
+vec_all_eq(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector bool int __a, vector unsigned int __b) {
+vec_all_eq(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_eq(vector bool int __a, vector bool int __b) {
+vec_all_eq(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_eq(vector signed long long __a, vector signed long long __b) {
+vec_all_eq(__vector signed long long __a, __vector signed long long __b) {
int __cc;
__builtin_s390_vceqgs(__a, __b, &__cc);
return __cc == 0;
@@ -2704,57 +2772,57 @@ vec_all_eq(vector signed long long __a, vector signed long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector signed long long __a, vector bool long long __b) {
+vec_all_eq(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs(__a, (__vector signed long long)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector bool long long __a, vector signed long long __b) {
+vec_all_eq(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a, __b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned long long __a, vector unsigned long long __b) {
+vec_all_eq(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned long long __a, vector bool long long __b) {
+vec_all_eq(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_eq(vector bool long long __a, vector unsigned long long __b) {
+vec_all_eq(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_eq(vector bool long long __a, vector bool long long __b) {
+vec_all_eq(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc == 0;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_all_eq(vector float __a, vector float __b) {
+vec_all_eq(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfcesbs(__a, __b, &__cc);
return __cc == 0;
@@ -2762,7 +2830,7 @@ vec_all_eq(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_all_eq(vector double __a, vector double __b) {
+vec_all_eq(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfcedbs(__a, __b, &__cc);
return __cc == 0;
@@ -2771,7 +2839,7 @@ vec_all_eq(vector double __a, vector double __b) {
/*-- vec_all_ne -------------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_all_ne(vector signed char __a, vector signed char __b) {
+vec_all_ne(__vector signed char __a, __vector signed char __b) {
int __cc;
__builtin_s390_vceqbs(__a, __b, &__cc);
return __cc == 3;
@@ -2779,56 +2847,56 @@ vec_all_ne(vector signed char __a, vector signed char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector signed char __a, vector bool char __b) {
+vec_all_ne(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs(__a, (__vector signed char)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector bool char __a, vector signed char __b) {
+vec_all_ne(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a, __b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned char __a, vector unsigned char __b) {
+vec_all_ne(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned char __a, vector bool char __b) {
+vec_all_ne(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector bool char __a, vector unsigned char __b) {
+vec_all_ne(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ne(vector bool char __a, vector bool char __b) {
+vec_all_ne(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ne(vector signed short __a, vector signed short __b) {
+vec_all_ne(__vector signed short __a, __vector signed short __b) {
int __cc;
__builtin_s390_vceqhs(__a, __b, &__cc);
return __cc == 3;
@@ -2836,56 +2904,56 @@ vec_all_ne(vector signed short __a, vector signed short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector signed short __a, vector bool short __b) {
+vec_all_ne(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs(__a, (__vector signed short)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector bool short __a, vector signed short __b) {
+vec_all_ne(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a, __b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned short __a, vector unsigned short __b) {
+vec_all_ne(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned short __a, vector bool short __b) {
+vec_all_ne(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector bool short __a, vector unsigned short __b) {
+vec_all_ne(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ne(vector bool short __a, vector bool short __b) {
+vec_all_ne(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ne(vector signed int __a, vector signed int __b) {
+vec_all_ne(__vector signed int __a, __vector signed int __b) {
int __cc;
__builtin_s390_vceqfs(__a, __b, &__cc);
return __cc == 3;
@@ -2893,56 +2961,56 @@ vec_all_ne(vector signed int __a, vector signed int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector signed int __a, vector bool int __b) {
+vec_all_ne(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs(__a, (__vector signed int)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector bool int __a, vector signed int __b) {
+vec_all_ne(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a, __b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned int __a, vector unsigned int __b) {
+vec_all_ne(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned int __a, vector bool int __b) {
+vec_all_ne(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector bool int __a, vector unsigned int __b) {
+vec_all_ne(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ne(vector bool int __a, vector bool int __b) {
+vec_all_ne(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ne(vector signed long long __a, vector signed long long __b) {
+vec_all_ne(__vector signed long long __a, __vector signed long long __b) {
int __cc;
__builtin_s390_vceqgs(__a, __b, &__cc);
return __cc == 3;
@@ -2950,57 +3018,57 @@ vec_all_ne(vector signed long long __a, vector signed long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector signed long long __a, vector bool long long __b) {
+vec_all_ne(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs(__a, (__vector signed long long)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector bool long long __a, vector signed long long __b) {
+vec_all_ne(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a, __b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned long long __a, vector unsigned long long __b) {
+vec_all_ne(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned long long __a, vector bool long long __b) {
+vec_all_ne(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ne(vector bool long long __a, vector unsigned long long __b) {
+vec_all_ne(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ne(vector bool long long __a, vector bool long long __b) {
+vec_all_ne(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc == 3;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_all_ne(vector float __a, vector float __b) {
+vec_all_ne(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfcesbs(__a, __b, &__cc);
return __cc == 3;
@@ -3008,7 +3076,7 @@ vec_all_ne(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_all_ne(vector double __a, vector double __b) {
+vec_all_ne(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfcedbs(__a, __b, &__cc);
return __cc == 3;
@@ -3017,7 +3085,7 @@ vec_all_ne(vector double __a, vector double __b) {
/*-- vec_all_ge -------------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_all_ge(vector signed char __a, vector signed char __b) {
+vec_all_ge(__vector signed char __a, __vector signed char __b) {
int __cc;
__builtin_s390_vchbs(__b, __a, &__cc);
return __cc == 3;
@@ -3025,22 +3093,22 @@ vec_all_ge(vector signed char __a, vector signed char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector signed char __a, vector bool char __b) {
+vec_all_ge(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
+ __builtin_s390_vchbs((__vector signed char)__b, __a, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector bool char __a, vector signed char __b) {
+vec_all_ge(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
+ __builtin_s390_vchbs(__b, (__vector signed char)__a, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned char __a, vector unsigned char __b) {
+vec_all_ge(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
__builtin_s390_vchlbs(__b, __a, &__cc);
return __cc == 3;
@@ -3048,31 +3116,31 @@ vec_all_ge(vector unsigned char __a, vector unsigned char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned char __a, vector bool char __b) {
+vec_all_ge(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__b, __a, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector bool char __a, vector unsigned char __b) {
+vec_all_ge(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
+ __builtin_s390_vchlbs(__b, (__vector unsigned char)__a, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector bool char __a, vector bool char __b) {
+vec_all_ge(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__b,
- (vector unsigned char)__a, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__b,
+ (__vector unsigned char)__a, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ge(vector signed short __a, vector signed short __b) {
+vec_all_ge(__vector signed short __a, __vector signed short __b) {
int __cc;
__builtin_s390_vchhs(__b, __a, &__cc);
return __cc == 3;
@@ -3080,22 +3148,22 @@ vec_all_ge(vector signed short __a, vector signed short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector signed short __a, vector bool short __b) {
+vec_all_ge(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
+ __builtin_s390_vchhs((__vector signed short)__b, __a, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector bool short __a, vector signed short __b) {
+vec_all_ge(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
+ __builtin_s390_vchhs(__b, (__vector signed short)__a, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned short __a, vector unsigned short __b) {
+vec_all_ge(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
__builtin_s390_vchlhs(__b, __a, &__cc);
return __cc == 3;
@@ -3103,31 +3171,31 @@ vec_all_ge(vector unsigned short __a, vector unsigned short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned short __a, vector bool short __b) {
+vec_all_ge(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__b, __a, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector bool short __a, vector unsigned short __b) {
+vec_all_ge(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
+ __builtin_s390_vchlhs(__b, (__vector unsigned short)__a, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector bool short __a, vector bool short __b) {
+vec_all_ge(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__b,
- (vector unsigned short)__a, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__b,
+ (__vector unsigned short)__a, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ge(vector signed int __a, vector signed int __b) {
+vec_all_ge(__vector signed int __a, __vector signed int __b) {
int __cc;
__builtin_s390_vchfs(__b, __a, &__cc);
return __cc == 3;
@@ -3135,22 +3203,22 @@ vec_all_ge(vector signed int __a, vector signed int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector signed int __a, vector bool int __b) {
+vec_all_ge(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
+ __builtin_s390_vchfs((__vector signed int)__b, __a, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector bool int __a, vector signed int __b) {
+vec_all_ge(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
+ __builtin_s390_vchfs(__b, (__vector signed int)__a, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned int __a, vector unsigned int __b) {
+vec_all_ge(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
__builtin_s390_vchlfs(__b, __a, &__cc);
return __cc == 3;
@@ -3158,31 +3226,31 @@ vec_all_ge(vector unsigned int __a, vector unsigned int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned int __a, vector bool int __b) {
+vec_all_ge(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__b, __a, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector bool int __a, vector unsigned int __b) {
+vec_all_ge(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
+ __builtin_s390_vchlfs(__b, (__vector unsigned int)__a, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector bool int __a, vector bool int __b) {
+vec_all_ge(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__b,
- (vector unsigned int)__a, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__b,
+ (__vector unsigned int)__a, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ge(vector signed long long __a, vector signed long long __b) {
+vec_all_ge(__vector signed long long __a, __vector signed long long __b) {
int __cc;
__builtin_s390_vchgs(__b, __a, &__cc);
return __cc == 3;
@@ -3190,22 +3258,22 @@ vec_all_ge(vector signed long long __a, vector signed long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector signed long long __a, vector bool long long __b) {
+vec_all_ge(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
+ __builtin_s390_vchgs((__vector signed long long)__b, __a, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector bool long long __a, vector signed long long __b) {
+vec_all_ge(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
+ __builtin_s390_vchgs(__b, (__vector signed long long)__a, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned long long __a, vector unsigned long long __b) {
+vec_all_ge(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
__builtin_s390_vchlgs(__b, __a, &__cc);
return __cc == 3;
@@ -3213,32 +3281,32 @@ vec_all_ge(vector unsigned long long __a, vector unsigned long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned long long __a, vector bool long long __b) {
+vec_all_ge(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__b, __a, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector bool long long __a, vector unsigned long long __b) {
+vec_all_ge(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
+ __builtin_s390_vchlgs(__b, (__vector unsigned long long)__a, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_ge(vector bool long long __a, vector bool long long __b) {
+vec_all_ge(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__b,
- (vector unsigned long long)__a, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__b,
+ (__vector unsigned long long)__a, &__cc);
return __cc == 3;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_all_ge(vector float __a, vector float __b) {
+vec_all_ge(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchesbs(__a, __b, &__cc);
return __cc == 0;
@@ -3246,7 +3314,7 @@ vec_all_ge(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_all_ge(vector double __a, vector double __b) {
+vec_all_ge(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchedbs(__a, __b, &__cc);
return __cc == 0;
@@ -3255,7 +3323,7 @@ vec_all_ge(vector double __a, vector double __b) {
/*-- vec_all_gt -------------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_all_gt(vector signed char __a, vector signed char __b) {
+vec_all_gt(__vector signed char __a, __vector signed char __b) {
int __cc;
__builtin_s390_vchbs(__a, __b, &__cc);
return __cc == 0;
@@ -3263,22 +3331,22 @@ vec_all_gt(vector signed char __a, vector signed char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector signed char __a, vector bool char __b) {
+vec_all_gt(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
+ __builtin_s390_vchbs(__a, (__vector signed char)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector bool char __a, vector signed char __b) {
+vec_all_gt(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
+ __builtin_s390_vchbs((__vector signed char)__a, __b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned char __a, vector unsigned char __b) {
+vec_all_gt(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
__builtin_s390_vchlbs(__a, __b, &__cc);
return __cc == 0;
@@ -3286,31 +3354,31 @@ vec_all_gt(vector unsigned char __a, vector unsigned char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned char __a, vector bool char __b) {
+vec_all_gt(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
+ __builtin_s390_vchlbs(__a, (__vector unsigned char)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector bool char __a, vector unsigned char __b) {
+vec_all_gt(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__a, __b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector bool char __a, vector bool char __b) {
+vec_all_gt(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__a,
- (vector unsigned char)__b, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_gt(vector signed short __a, vector signed short __b) {
+vec_all_gt(__vector signed short __a, __vector signed short __b) {
int __cc;
__builtin_s390_vchhs(__a, __b, &__cc);
return __cc == 0;
@@ -3318,22 +3386,22 @@ vec_all_gt(vector signed short __a, vector signed short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector signed short __a, vector bool short __b) {
+vec_all_gt(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
+ __builtin_s390_vchhs(__a, (__vector signed short)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector bool short __a, vector signed short __b) {
+vec_all_gt(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
+ __builtin_s390_vchhs((__vector signed short)__a, __b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned short __a, vector unsigned short __b) {
+vec_all_gt(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
__builtin_s390_vchlhs(__a, __b, &__cc);
return __cc == 0;
@@ -3341,31 +3409,31 @@ vec_all_gt(vector unsigned short __a, vector unsigned short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned short __a, vector bool short __b) {
+vec_all_gt(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
+ __builtin_s390_vchlhs(__a, (__vector unsigned short)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector bool short __a, vector unsigned short __b) {
+vec_all_gt(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__a, __b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector bool short __a, vector bool short __b) {
+vec_all_gt(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__a,
- (vector unsigned short)__b, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_gt(vector signed int __a, vector signed int __b) {
+vec_all_gt(__vector signed int __a, __vector signed int __b) {
int __cc;
__builtin_s390_vchfs(__a, __b, &__cc);
return __cc == 0;
@@ -3373,22 +3441,22 @@ vec_all_gt(vector signed int __a, vector signed int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector signed int __a, vector bool int __b) {
+vec_all_gt(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
+ __builtin_s390_vchfs(__a, (__vector signed int)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector bool int __a, vector signed int __b) {
+vec_all_gt(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
+ __builtin_s390_vchfs((__vector signed int)__a, __b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned int __a, vector unsigned int __b) {
+vec_all_gt(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
__builtin_s390_vchlfs(__a, __b, &__cc);
return __cc == 0;
@@ -3396,31 +3464,31 @@ vec_all_gt(vector unsigned int __a, vector unsigned int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned int __a, vector bool int __b) {
+vec_all_gt(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
+ __builtin_s390_vchlfs(__a, (__vector unsigned int)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector bool int __a, vector unsigned int __b) {
+vec_all_gt(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__a, __b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector bool int __a, vector bool int __b) {
+vec_all_gt(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__a,
- (vector unsigned int)__b, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_gt(vector signed long long __a, vector signed long long __b) {
+vec_all_gt(__vector signed long long __a, __vector signed long long __b) {
int __cc;
__builtin_s390_vchgs(__a, __b, &__cc);
return __cc == 0;
@@ -3428,22 +3496,22 @@ vec_all_gt(vector signed long long __a, vector signed long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector signed long long __a, vector bool long long __b) {
+vec_all_gt(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
+ __builtin_s390_vchgs(__a, (__vector signed long long)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector bool long long __a, vector signed long long __b) {
+vec_all_gt(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
+ __builtin_s390_vchgs((__vector signed long long)__a, __b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned long long __a, vector unsigned long long __b) {
+vec_all_gt(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
__builtin_s390_vchlgs(__a, __b, &__cc);
return __cc == 0;
@@ -3451,32 +3519,32 @@ vec_all_gt(vector unsigned long long __a, vector unsigned long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned long long __a, vector bool long long __b) {
+vec_all_gt(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
+ __builtin_s390_vchlgs(__a, (__vector unsigned long long)__b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector bool long long __a, vector unsigned long long __b) {
+vec_all_gt(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__a, __b, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_gt(vector bool long long __a, vector bool long long __b) {
+vec_all_gt(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__a,
- (vector unsigned long long)__b, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__a,
+ (__vector unsigned long long)__b, &__cc);
return __cc == 0;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_all_gt(vector float __a, vector float __b) {
+vec_all_gt(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchsbs(__a, __b, &__cc);
return __cc == 0;
@@ -3484,7 +3552,7 @@ vec_all_gt(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_all_gt(vector double __a, vector double __b) {
+vec_all_gt(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchdbs(__a, __b, &__cc);
return __cc == 0;
@@ -3493,7 +3561,7 @@ vec_all_gt(vector double __a, vector double __b) {
/*-- vec_all_le -------------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_all_le(vector signed char __a, vector signed char __b) {
+vec_all_le(__vector signed char __a, __vector signed char __b) {
int __cc;
__builtin_s390_vchbs(__a, __b, &__cc);
return __cc == 3;
@@ -3501,22 +3569,22 @@ vec_all_le(vector signed char __a, vector signed char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector signed char __a, vector bool char __b) {
+vec_all_le(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
+ __builtin_s390_vchbs(__a, (__vector signed char)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector bool char __a, vector signed char __b) {
+vec_all_le(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
+ __builtin_s390_vchbs((__vector signed char)__a, __b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned char __a, vector unsigned char __b) {
+vec_all_le(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
__builtin_s390_vchlbs(__a, __b, &__cc);
return __cc == 3;
@@ -3524,31 +3592,31 @@ vec_all_le(vector unsigned char __a, vector unsigned char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned char __a, vector bool char __b) {
+vec_all_le(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
+ __builtin_s390_vchlbs(__a, (__vector unsigned char)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector bool char __a, vector unsigned char __b) {
+vec_all_le(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__a, __b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector bool char __a, vector bool char __b) {
+vec_all_le(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__a,
- (vector unsigned char)__b, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_le(vector signed short __a, vector signed short __b) {
+vec_all_le(__vector signed short __a, __vector signed short __b) {
int __cc;
__builtin_s390_vchhs(__a, __b, &__cc);
return __cc == 3;
@@ -3556,22 +3624,22 @@ vec_all_le(vector signed short __a, vector signed short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector signed short __a, vector bool short __b) {
+vec_all_le(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
+ __builtin_s390_vchhs(__a, (__vector signed short)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector bool short __a, vector signed short __b) {
+vec_all_le(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
+ __builtin_s390_vchhs((__vector signed short)__a, __b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned short __a, vector unsigned short __b) {
+vec_all_le(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
__builtin_s390_vchlhs(__a, __b, &__cc);
return __cc == 3;
@@ -3579,31 +3647,31 @@ vec_all_le(vector unsigned short __a, vector unsigned short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned short __a, vector bool short __b) {
+vec_all_le(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
+ __builtin_s390_vchlhs(__a, (__vector unsigned short)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector bool short __a, vector unsigned short __b) {
+vec_all_le(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__a, __b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector bool short __a, vector bool short __b) {
+vec_all_le(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__a,
- (vector unsigned short)__b, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_le(vector signed int __a, vector signed int __b) {
+vec_all_le(__vector signed int __a, __vector signed int __b) {
int __cc;
__builtin_s390_vchfs(__a, __b, &__cc);
return __cc == 3;
@@ -3611,22 +3679,22 @@ vec_all_le(vector signed int __a, vector signed int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector signed int __a, vector bool int __b) {
+vec_all_le(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
+ __builtin_s390_vchfs(__a, (__vector signed int)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector bool int __a, vector signed int __b) {
+vec_all_le(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
+ __builtin_s390_vchfs((__vector signed int)__a, __b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned int __a, vector unsigned int __b) {
+vec_all_le(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
__builtin_s390_vchlfs(__a, __b, &__cc);
return __cc == 3;
@@ -3634,31 +3702,31 @@ vec_all_le(vector unsigned int __a, vector unsigned int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned int __a, vector bool int __b) {
+vec_all_le(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
+ __builtin_s390_vchlfs(__a, (__vector unsigned int)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector bool int __a, vector unsigned int __b) {
+vec_all_le(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__a, __b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector bool int __a, vector bool int __b) {
+vec_all_le(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__a,
- (vector unsigned int)__b, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_le(vector signed long long __a, vector signed long long __b) {
+vec_all_le(__vector signed long long __a, __vector signed long long __b) {
int __cc;
__builtin_s390_vchgs(__a, __b, &__cc);
return __cc == 3;
@@ -3666,22 +3734,22 @@ vec_all_le(vector signed long long __a, vector signed long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector signed long long __a, vector bool long long __b) {
+vec_all_le(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
+ __builtin_s390_vchgs(__a, (__vector signed long long)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector bool long long __a, vector signed long long __b) {
+vec_all_le(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
+ __builtin_s390_vchgs((__vector signed long long)__a, __b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned long long __a, vector unsigned long long __b) {
+vec_all_le(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
__builtin_s390_vchlgs(__a, __b, &__cc);
return __cc == 3;
@@ -3689,32 +3757,32 @@ vec_all_le(vector unsigned long long __a, vector unsigned long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned long long __a, vector bool long long __b) {
+vec_all_le(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
+ __builtin_s390_vchlgs(__a, (__vector unsigned long long)__b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector bool long long __a, vector unsigned long long __b) {
+vec_all_le(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__a, __b, &__cc);
return __cc == 3;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_le(vector bool long long __a, vector bool long long __b) {
+vec_all_le(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__a,
- (vector unsigned long long)__b, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__a,
+ (__vector unsigned long long)__b, &__cc);
return __cc == 3;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_all_le(vector float __a, vector float __b) {
+vec_all_le(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchesbs(__b, __a, &__cc);
return __cc == 0;
@@ -3722,7 +3790,7 @@ vec_all_le(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_all_le(vector double __a, vector double __b) {
+vec_all_le(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchedbs(__b, __a, &__cc);
return __cc == 0;
@@ -3731,7 +3799,7 @@ vec_all_le(vector double __a, vector double __b) {
/*-- vec_all_lt -------------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_all_lt(vector signed char __a, vector signed char __b) {
+vec_all_lt(__vector signed char __a, __vector signed char __b) {
int __cc;
__builtin_s390_vchbs(__b, __a, &__cc);
return __cc == 0;
@@ -3739,22 +3807,22 @@ vec_all_lt(vector signed char __a, vector signed char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector signed char __a, vector bool char __b) {
+vec_all_lt(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
+ __builtin_s390_vchbs((__vector signed char)__b, __a, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector bool char __a, vector signed char __b) {
+vec_all_lt(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
+ __builtin_s390_vchbs(__b, (__vector signed char)__a, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned char __a, vector unsigned char __b) {
+vec_all_lt(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
__builtin_s390_vchlbs(__b, __a, &__cc);
return __cc == 0;
@@ -3762,31 +3830,31 @@ vec_all_lt(vector unsigned char __a, vector unsigned char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned char __a, vector bool char __b) {
+vec_all_lt(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__b, __a, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector bool char __a, vector unsigned char __b) {
+vec_all_lt(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
+ __builtin_s390_vchlbs(__b, (__vector unsigned char)__a, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector bool char __a, vector bool char __b) {
+vec_all_lt(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__b,
- (vector unsigned char)__a, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__b,
+ (__vector unsigned char)__a, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_lt(vector signed short __a, vector signed short __b) {
+vec_all_lt(__vector signed short __a, __vector signed short __b) {
int __cc;
__builtin_s390_vchhs(__b, __a, &__cc);
return __cc == 0;
@@ -3794,22 +3862,22 @@ vec_all_lt(vector signed short __a, vector signed short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector signed short __a, vector bool short __b) {
+vec_all_lt(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
+ __builtin_s390_vchhs((__vector signed short)__b, __a, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector bool short __a, vector signed short __b) {
+vec_all_lt(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
+ __builtin_s390_vchhs(__b, (__vector signed short)__a, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned short __a, vector unsigned short __b) {
+vec_all_lt(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
__builtin_s390_vchlhs(__b, __a, &__cc);
return __cc == 0;
@@ -3817,31 +3885,31 @@ vec_all_lt(vector unsigned short __a, vector unsigned short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned short __a, vector bool short __b) {
+vec_all_lt(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__b, __a, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector bool short __a, vector unsigned short __b) {
+vec_all_lt(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
+ __builtin_s390_vchlhs(__b, (__vector unsigned short)__a, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector bool short __a, vector bool short __b) {
+vec_all_lt(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__b,
- (vector unsigned short)__a, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__b,
+ (__vector unsigned short)__a, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_lt(vector signed int __a, vector signed int __b) {
+vec_all_lt(__vector signed int __a, __vector signed int __b) {
int __cc;
__builtin_s390_vchfs(__b, __a, &__cc);
return __cc == 0;
@@ -3849,22 +3917,22 @@ vec_all_lt(vector signed int __a, vector signed int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector signed int __a, vector bool int __b) {
+vec_all_lt(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
+ __builtin_s390_vchfs((__vector signed int)__b, __a, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector bool int __a, vector signed int __b) {
+vec_all_lt(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
+ __builtin_s390_vchfs(__b, (__vector signed int)__a, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned int __a, vector unsigned int __b) {
+vec_all_lt(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
__builtin_s390_vchlfs(__b, __a, &__cc);
return __cc == 0;
@@ -3872,31 +3940,31 @@ vec_all_lt(vector unsigned int __a, vector unsigned int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned int __a, vector bool int __b) {
+vec_all_lt(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__b, __a, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector bool int __a, vector unsigned int __b) {
+vec_all_lt(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
+ __builtin_s390_vchlfs(__b, (__vector unsigned int)__a, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector bool int __a, vector bool int __b) {
+vec_all_lt(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__b,
- (vector unsigned int)__a, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__b,
+ (__vector unsigned int)__a, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_lt(vector signed long long __a, vector signed long long __b) {
+vec_all_lt(__vector signed long long __a, __vector signed long long __b) {
int __cc;
__builtin_s390_vchgs(__b, __a, &__cc);
return __cc == 0;
@@ -3904,22 +3972,22 @@ vec_all_lt(vector signed long long __a, vector signed long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector signed long long __a, vector bool long long __b) {
+vec_all_lt(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
+ __builtin_s390_vchgs((__vector signed long long)__b, __a, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector bool long long __a, vector signed long long __b) {
+vec_all_lt(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
+ __builtin_s390_vchgs(__b, (__vector signed long long)__a, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned long long __a, vector unsigned long long __b) {
+vec_all_lt(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
__builtin_s390_vchlgs(__b, __a, &__cc);
return __cc == 0;
@@ -3927,32 +3995,32 @@ vec_all_lt(vector unsigned long long __a, vector unsigned long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned long long __a, vector bool long long __b) {
+vec_all_lt(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__b, __a, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector bool long long __a, vector unsigned long long __b) {
+vec_all_lt(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
+ __builtin_s390_vchlgs(__b, (__vector unsigned long long)__a, &__cc);
return __cc == 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_all_lt(vector bool long long __a, vector bool long long __b) {
+vec_all_lt(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__b,
- (vector unsigned long long)__a, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__b,
+ (__vector unsigned long long)__a, &__cc);
return __cc == 0;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_all_lt(vector float __a, vector float __b) {
+vec_all_lt(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchsbs(__b, __a, &__cc);
return __cc == 0;
@@ -3960,7 +4028,7 @@ vec_all_lt(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_all_lt(vector double __a, vector double __b) {
+vec_all_lt(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchdbs(__b, __a, &__cc);
return __cc == 0;
@@ -3970,7 +4038,7 @@ vec_all_lt(vector double __a, vector double __b) {
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_all_nge(vector float __a, vector float __b) {
+vec_all_nge(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchesbs(__a, __b, &__cc);
return __cc == 3;
@@ -3978,7 +4046,7 @@ vec_all_nge(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_all_nge(vector double __a, vector double __b) {
+vec_all_nge(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchedbs(__a, __b, &__cc);
return __cc == 3;
@@ -3988,7 +4056,7 @@ vec_all_nge(vector double __a, vector double __b) {
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_all_ngt(vector float __a, vector float __b) {
+vec_all_ngt(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchsbs(__a, __b, &__cc);
return __cc == 3;
@@ -3996,7 +4064,7 @@ vec_all_ngt(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_all_ngt(vector double __a, vector double __b) {
+vec_all_ngt(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchdbs(__a, __b, &__cc);
return __cc == 3;
@@ -4006,7 +4074,7 @@ vec_all_ngt(vector double __a, vector double __b) {
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_all_nle(vector float __a, vector float __b) {
+vec_all_nle(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchesbs(__b, __a, &__cc);
return __cc == 3;
@@ -4014,7 +4082,7 @@ vec_all_nle(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_all_nle(vector double __a, vector double __b) {
+vec_all_nle(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchedbs(__b, __a, &__cc);
return __cc == 3;
@@ -4024,7 +4092,7 @@ vec_all_nle(vector double __a, vector double __b) {
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_all_nlt(vector float __a, vector float __b) {
+vec_all_nlt(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchsbs(__b, __a, &__cc);
return __cc == 3;
@@ -4032,7 +4100,7 @@ vec_all_nlt(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_all_nlt(vector double __a, vector double __b) {
+vec_all_nlt(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchdbs(__b, __a, &__cc);
return __cc == 3;
@@ -4042,7 +4110,7 @@ vec_all_nlt(vector double __a, vector double __b) {
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_all_nan(vector float __a) {
+vec_all_nan(__vector float __a) {
int __cc;
__builtin_s390_vftcisb(__a, 15, &__cc);
return __cc == 0;
@@ -4050,7 +4118,7 @@ vec_all_nan(vector float __a) {
#endif
static inline __ATTRS_o_ai int
-vec_all_nan(vector double __a) {
+vec_all_nan(__vector double __a) {
int __cc;
__builtin_s390_vftcidb(__a, 15, &__cc);
return __cc == 0;
@@ -4060,7 +4128,7 @@ vec_all_nan(vector double __a) {
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_all_numeric(vector float __a) {
+vec_all_numeric(__vector float __a) {
int __cc;
__builtin_s390_vftcisb(__a, 15, &__cc);
return __cc == 3;
@@ -4068,7 +4136,7 @@ vec_all_numeric(vector float __a) {
#endif
static inline __ATTRS_o_ai int
-vec_all_numeric(vector double __a) {
+vec_all_numeric(__vector double __a) {
int __cc;
__builtin_s390_vftcidb(__a, 15, &__cc);
return __cc == 3;
@@ -4077,7 +4145,7 @@ vec_all_numeric(vector double __a) {
/*-- vec_any_eq -------------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_any_eq(vector signed char __a, vector signed char __b) {
+vec_any_eq(__vector signed char __a, __vector signed char __b) {
int __cc;
__builtin_s390_vceqbs(__a, __b, &__cc);
return __cc <= 1;
@@ -4085,56 +4153,56 @@ vec_any_eq(vector signed char __a, vector signed char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector signed char __a, vector bool char __b) {
+vec_any_eq(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs(__a, (__vector signed char)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector bool char __a, vector signed char __b) {
+vec_any_eq(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a, __b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned char __a, vector unsigned char __b) {
+vec_any_eq(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned char __a, vector bool char __b) {
+vec_any_eq(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector bool char __a, vector unsigned char __b) {
+vec_any_eq(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_eq(vector bool char __a, vector bool char __b) {
+vec_any_eq(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_eq(vector signed short __a, vector signed short __b) {
+vec_any_eq(__vector signed short __a, __vector signed short __b) {
int __cc;
__builtin_s390_vceqhs(__a, __b, &__cc);
return __cc <= 1;
@@ -4142,56 +4210,56 @@ vec_any_eq(vector signed short __a, vector signed short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector signed short __a, vector bool short __b) {
+vec_any_eq(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs(__a, (__vector signed short)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector bool short __a, vector signed short __b) {
+vec_any_eq(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a, __b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned short __a, vector unsigned short __b) {
+vec_any_eq(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned short __a, vector bool short __b) {
+vec_any_eq(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector bool short __a, vector unsigned short __b) {
+vec_any_eq(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_eq(vector bool short __a, vector bool short __b) {
+vec_any_eq(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_eq(vector signed int __a, vector signed int __b) {
+vec_any_eq(__vector signed int __a, __vector signed int __b) {
int __cc;
__builtin_s390_vceqfs(__a, __b, &__cc);
return __cc <= 1;
@@ -4199,56 +4267,56 @@ vec_any_eq(vector signed int __a, vector signed int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector signed int __a, vector bool int __b) {
+vec_any_eq(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs(__a, (__vector signed int)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector bool int __a, vector signed int __b) {
+vec_any_eq(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a, __b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned int __a, vector unsigned int __b) {
+vec_any_eq(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned int __a, vector bool int __b) {
+vec_any_eq(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector bool int __a, vector unsigned int __b) {
+vec_any_eq(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_eq(vector bool int __a, vector bool int __b) {
+vec_any_eq(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_eq(vector signed long long __a, vector signed long long __b) {
+vec_any_eq(__vector signed long long __a, __vector signed long long __b) {
int __cc;
__builtin_s390_vceqgs(__a, __b, &__cc);
return __cc <= 1;
@@ -4256,57 +4324,57 @@ vec_any_eq(vector signed long long __a, vector signed long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector signed long long __a, vector bool long long __b) {
+vec_any_eq(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs(__a, (__vector signed long long)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector bool long long __a, vector signed long long __b) {
+vec_any_eq(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a, __b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned long long __a, vector unsigned long long __b) {
+vec_any_eq(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned long long __a, vector bool long long __b) {
+vec_any_eq(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_eq(vector bool long long __a, vector unsigned long long __b) {
+vec_any_eq(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_eq(vector bool long long __a, vector bool long long __b) {
+vec_any_eq(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc <= 1;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_any_eq(vector float __a, vector float __b) {
+vec_any_eq(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfcesbs(__a, __b, &__cc);
return __cc <= 1;
@@ -4314,7 +4382,7 @@ vec_any_eq(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_any_eq(vector double __a, vector double __b) {
+vec_any_eq(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfcedbs(__a, __b, &__cc);
return __cc <= 1;
@@ -4323,7 +4391,7 @@ vec_any_eq(vector double __a, vector double __b) {
/*-- vec_any_ne -------------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_any_ne(vector signed char __a, vector signed char __b) {
+vec_any_ne(__vector signed char __a, __vector signed char __b) {
int __cc;
__builtin_s390_vceqbs(__a, __b, &__cc);
return __cc != 0;
@@ -4331,56 +4399,56 @@ vec_any_ne(vector signed char __a, vector signed char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector signed char __a, vector bool char __b) {
+vec_any_ne(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs(__a, (__vector signed char)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector bool char __a, vector signed char __b) {
+vec_any_ne(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a, __b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned char __a, vector unsigned char __b) {
+vec_any_ne(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned char __a, vector bool char __b) {
+vec_any_ne(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector bool char __a, vector unsigned char __b) {
+vec_any_ne(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ne(vector bool char __a, vector bool char __b) {
+vec_any_ne(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs((vector signed char)__a,
- (vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector signed char)__a,
+ (__vector signed char)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ne(vector signed short __a, vector signed short __b) {
+vec_any_ne(__vector signed short __a, __vector signed short __b) {
int __cc;
__builtin_s390_vceqhs(__a, __b, &__cc);
return __cc != 0;
@@ -4388,56 +4456,56 @@ vec_any_ne(vector signed short __a, vector signed short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector signed short __a, vector bool short __b) {
+vec_any_ne(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs(__a, (__vector signed short)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector bool short __a, vector signed short __b) {
+vec_any_ne(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a, __b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned short __a, vector unsigned short __b) {
+vec_any_ne(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned short __a, vector bool short __b) {
+vec_any_ne(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector bool short __a, vector unsigned short __b) {
+vec_any_ne(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ne(vector bool short __a, vector bool short __b) {
+vec_any_ne(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs((vector signed short)__a,
- (vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector signed short)__a,
+ (__vector signed short)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ne(vector signed int __a, vector signed int __b) {
+vec_any_ne(__vector signed int __a, __vector signed int __b) {
int __cc;
__builtin_s390_vceqfs(__a, __b, &__cc);
return __cc != 0;
@@ -4445,56 +4513,56 @@ vec_any_ne(vector signed int __a, vector signed int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector signed int __a, vector bool int __b) {
+vec_any_ne(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs(__a, (__vector signed int)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector bool int __a, vector signed int __b) {
+vec_any_ne(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a, __b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned int __a, vector unsigned int __b) {
+vec_any_ne(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned int __a, vector bool int __b) {
+vec_any_ne(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector bool int __a, vector unsigned int __b) {
+vec_any_ne(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ne(vector bool int __a, vector bool int __b) {
+vec_any_ne(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs((vector signed int)__a,
- (vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector signed int)__a,
+ (__vector signed int)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ne(vector signed long long __a, vector signed long long __b) {
+vec_any_ne(__vector signed long long __a, __vector signed long long __b) {
int __cc;
__builtin_s390_vceqgs(__a, __b, &__cc);
return __cc != 0;
@@ -4502,57 +4570,57 @@ vec_any_ne(vector signed long long __a, vector signed long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector signed long long __a, vector bool long long __b) {
+vec_any_ne(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs(__a, (__vector signed long long)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector bool long long __a, vector signed long long __b) {
+vec_any_ne(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a, __b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned long long __a, vector unsigned long long __b) {
+vec_any_ne(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned long long __a, vector bool long long __b) {
+vec_any_ne(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ne(vector bool long long __a, vector unsigned long long __b) {
+vec_any_ne(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ne(vector bool long long __a, vector bool long long __b) {
+vec_any_ne(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs((vector signed long long)__a,
- (vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector signed long long)__a,
+ (__vector signed long long)__b, &__cc);
return __cc != 0;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_any_ne(vector float __a, vector float __b) {
+vec_any_ne(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfcesbs(__a, __b, &__cc);
return __cc != 0;
@@ -4560,7 +4628,7 @@ vec_any_ne(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_any_ne(vector double __a, vector double __b) {
+vec_any_ne(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfcedbs(__a, __b, &__cc);
return __cc != 0;
@@ -4569,7 +4637,7 @@ vec_any_ne(vector double __a, vector double __b) {
/*-- vec_any_ge -------------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_any_ge(vector signed char __a, vector signed char __b) {
+vec_any_ge(__vector signed char __a, __vector signed char __b) {
int __cc;
__builtin_s390_vchbs(__b, __a, &__cc);
return __cc != 0;
@@ -4577,22 +4645,22 @@ vec_any_ge(vector signed char __a, vector signed char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector signed char __a, vector bool char __b) {
+vec_any_ge(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
+ __builtin_s390_vchbs((__vector signed char)__b, __a, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector bool char __a, vector signed char __b) {
+vec_any_ge(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
+ __builtin_s390_vchbs(__b, (__vector signed char)__a, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned char __a, vector unsigned char __b) {
+vec_any_ge(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
__builtin_s390_vchlbs(__b, __a, &__cc);
return __cc != 0;
@@ -4600,31 +4668,31 @@ vec_any_ge(vector unsigned char __a, vector unsigned char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned char __a, vector bool char __b) {
+vec_any_ge(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__b, __a, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector bool char __a, vector unsigned char __b) {
+vec_any_ge(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
+ __builtin_s390_vchlbs(__b, (__vector unsigned char)__a, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector bool char __a, vector bool char __b) {
+vec_any_ge(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__b,
- (vector unsigned char)__a, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__b,
+ (__vector unsigned char)__a, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ge(vector signed short __a, vector signed short __b) {
+vec_any_ge(__vector signed short __a, __vector signed short __b) {
int __cc;
__builtin_s390_vchhs(__b, __a, &__cc);
return __cc != 0;
@@ -4632,22 +4700,22 @@ vec_any_ge(vector signed short __a, vector signed short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector signed short __a, vector bool short __b) {
+vec_any_ge(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
+ __builtin_s390_vchhs((__vector signed short)__b, __a, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector bool short __a, vector signed short __b) {
+vec_any_ge(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
+ __builtin_s390_vchhs(__b, (__vector signed short)__a, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned short __a, vector unsigned short __b) {
+vec_any_ge(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
__builtin_s390_vchlhs(__b, __a, &__cc);
return __cc != 0;
@@ -4655,31 +4723,31 @@ vec_any_ge(vector unsigned short __a, vector unsigned short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned short __a, vector bool short __b) {
+vec_any_ge(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__b, __a, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector bool short __a, vector unsigned short __b) {
+vec_any_ge(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
+ __builtin_s390_vchlhs(__b, (__vector unsigned short)__a, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector bool short __a, vector bool short __b) {
+vec_any_ge(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__b,
- (vector unsigned short)__a, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__b,
+ (__vector unsigned short)__a, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ge(vector signed int __a, vector signed int __b) {
+vec_any_ge(__vector signed int __a, __vector signed int __b) {
int __cc;
__builtin_s390_vchfs(__b, __a, &__cc);
return __cc != 0;
@@ -4687,22 +4755,22 @@ vec_any_ge(vector signed int __a, vector signed int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector signed int __a, vector bool int __b) {
+vec_any_ge(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
+ __builtin_s390_vchfs((__vector signed int)__b, __a, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector bool int __a, vector signed int __b) {
+vec_any_ge(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
+ __builtin_s390_vchfs(__b, (__vector signed int)__a, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned int __a, vector unsigned int __b) {
+vec_any_ge(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
__builtin_s390_vchlfs(__b, __a, &__cc);
return __cc != 0;
@@ -4710,31 +4778,31 @@ vec_any_ge(vector unsigned int __a, vector unsigned int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned int __a, vector bool int __b) {
+vec_any_ge(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__b, __a, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector bool int __a, vector unsigned int __b) {
+vec_any_ge(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
+ __builtin_s390_vchlfs(__b, (__vector unsigned int)__a, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector bool int __a, vector bool int __b) {
+vec_any_ge(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__b,
- (vector unsigned int)__a, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__b,
+ (__vector unsigned int)__a, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ge(vector signed long long __a, vector signed long long __b) {
+vec_any_ge(__vector signed long long __a, __vector signed long long __b) {
int __cc;
__builtin_s390_vchgs(__b, __a, &__cc);
return __cc != 0;
@@ -4742,22 +4810,22 @@ vec_any_ge(vector signed long long __a, vector signed long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector signed long long __a, vector bool long long __b) {
+vec_any_ge(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
+ __builtin_s390_vchgs((__vector signed long long)__b, __a, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector bool long long __a, vector signed long long __b) {
+vec_any_ge(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
+ __builtin_s390_vchgs(__b, (__vector signed long long)__a, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned long long __a, vector unsigned long long __b) {
+vec_any_ge(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
__builtin_s390_vchlgs(__b, __a, &__cc);
return __cc != 0;
@@ -4765,32 +4833,32 @@ vec_any_ge(vector unsigned long long __a, vector unsigned long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned long long __a, vector bool long long __b) {
+vec_any_ge(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__b, __a, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector bool long long __a, vector unsigned long long __b) {
+vec_any_ge(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
+ __builtin_s390_vchlgs(__b, (__vector unsigned long long)__a, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_ge(vector bool long long __a, vector bool long long __b) {
+vec_any_ge(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__b,
- (vector unsigned long long)__a, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__b,
+ (__vector unsigned long long)__a, &__cc);
return __cc != 0;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_any_ge(vector float __a, vector float __b) {
+vec_any_ge(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchesbs(__a, __b, &__cc);
return __cc <= 1;
@@ -4798,7 +4866,7 @@ vec_any_ge(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_any_ge(vector double __a, vector double __b) {
+vec_any_ge(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchedbs(__a, __b, &__cc);
return __cc <= 1;
@@ -4807,7 +4875,7 @@ vec_any_ge(vector double __a, vector double __b) {
/*-- vec_any_gt -------------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_any_gt(vector signed char __a, vector signed char __b) {
+vec_any_gt(__vector signed char __a, __vector signed char __b) {
int __cc;
__builtin_s390_vchbs(__a, __b, &__cc);
return __cc <= 1;
@@ -4815,22 +4883,22 @@ vec_any_gt(vector signed char __a, vector signed char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector signed char __a, vector bool char __b) {
+vec_any_gt(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
+ __builtin_s390_vchbs(__a, (__vector signed char)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector bool char __a, vector signed char __b) {
+vec_any_gt(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
+ __builtin_s390_vchbs((__vector signed char)__a, __b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned char __a, vector unsigned char __b) {
+vec_any_gt(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
__builtin_s390_vchlbs(__a, __b, &__cc);
return __cc <= 1;
@@ -4838,31 +4906,31 @@ vec_any_gt(vector unsigned char __a, vector unsigned char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned char __a, vector bool char __b) {
+vec_any_gt(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
+ __builtin_s390_vchlbs(__a, (__vector unsigned char)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector bool char __a, vector unsigned char __b) {
+vec_any_gt(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__a, __b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector bool char __a, vector bool char __b) {
+vec_any_gt(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__a,
- (vector unsigned char)__b, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_gt(vector signed short __a, vector signed short __b) {
+vec_any_gt(__vector signed short __a, __vector signed short __b) {
int __cc;
__builtin_s390_vchhs(__a, __b, &__cc);
return __cc <= 1;
@@ -4870,22 +4938,22 @@ vec_any_gt(vector signed short __a, vector signed short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector signed short __a, vector bool short __b) {
+vec_any_gt(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
+ __builtin_s390_vchhs(__a, (__vector signed short)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector bool short __a, vector signed short __b) {
+vec_any_gt(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
+ __builtin_s390_vchhs((__vector signed short)__a, __b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned short __a, vector unsigned short __b) {
+vec_any_gt(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
__builtin_s390_vchlhs(__a, __b, &__cc);
return __cc <= 1;
@@ -4893,31 +4961,31 @@ vec_any_gt(vector unsigned short __a, vector unsigned short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned short __a, vector bool short __b) {
+vec_any_gt(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
+ __builtin_s390_vchlhs(__a, (__vector unsigned short)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector bool short __a, vector unsigned short __b) {
+vec_any_gt(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__a, __b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector bool short __a, vector bool short __b) {
+vec_any_gt(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__a,
- (vector unsigned short)__b, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_gt(vector signed int __a, vector signed int __b) {
+vec_any_gt(__vector signed int __a, __vector signed int __b) {
int __cc;
__builtin_s390_vchfs(__a, __b, &__cc);
return __cc <= 1;
@@ -4925,22 +4993,22 @@ vec_any_gt(vector signed int __a, vector signed int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector signed int __a, vector bool int __b) {
+vec_any_gt(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
+ __builtin_s390_vchfs(__a, (__vector signed int)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector bool int __a, vector signed int __b) {
+vec_any_gt(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
+ __builtin_s390_vchfs((__vector signed int)__a, __b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned int __a, vector unsigned int __b) {
+vec_any_gt(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
__builtin_s390_vchlfs(__a, __b, &__cc);
return __cc <= 1;
@@ -4948,31 +5016,31 @@ vec_any_gt(vector unsigned int __a, vector unsigned int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned int __a, vector bool int __b) {
+vec_any_gt(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
+ __builtin_s390_vchlfs(__a, (__vector unsigned int)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector bool int __a, vector unsigned int __b) {
+vec_any_gt(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__a, __b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector bool int __a, vector bool int __b) {
+vec_any_gt(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__a,
- (vector unsigned int)__b, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_gt(vector signed long long __a, vector signed long long __b) {
+vec_any_gt(__vector signed long long __a, __vector signed long long __b) {
int __cc;
__builtin_s390_vchgs(__a, __b, &__cc);
return __cc <= 1;
@@ -4980,22 +5048,22 @@ vec_any_gt(vector signed long long __a, vector signed long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector signed long long __a, vector bool long long __b) {
+vec_any_gt(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
+ __builtin_s390_vchgs(__a, (__vector signed long long)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector bool long long __a, vector signed long long __b) {
+vec_any_gt(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
+ __builtin_s390_vchgs((__vector signed long long)__a, __b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned long long __a, vector unsigned long long __b) {
+vec_any_gt(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
__builtin_s390_vchlgs(__a, __b, &__cc);
return __cc <= 1;
@@ -5003,32 +5071,32 @@ vec_any_gt(vector unsigned long long __a, vector unsigned long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned long long __a, vector bool long long __b) {
+vec_any_gt(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
+ __builtin_s390_vchlgs(__a, (__vector unsigned long long)__b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector bool long long __a, vector unsigned long long __b) {
+vec_any_gt(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__a, __b, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_gt(vector bool long long __a, vector bool long long __b) {
+vec_any_gt(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__a,
- (vector unsigned long long)__b, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__a,
+ (__vector unsigned long long)__b, &__cc);
return __cc <= 1;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_any_gt(vector float __a, vector float __b) {
+vec_any_gt(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchsbs(__a, __b, &__cc);
return __cc <= 1;
@@ -5036,7 +5104,7 @@ vec_any_gt(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_any_gt(vector double __a, vector double __b) {
+vec_any_gt(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchdbs(__a, __b, &__cc);
return __cc <= 1;
@@ -5045,7 +5113,7 @@ vec_any_gt(vector double __a, vector double __b) {
/*-- vec_any_le -------------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_any_le(vector signed char __a, vector signed char __b) {
+vec_any_le(__vector signed char __a, __vector signed char __b) {
int __cc;
__builtin_s390_vchbs(__a, __b, &__cc);
return __cc != 0;
@@ -5053,22 +5121,22 @@ vec_any_le(vector signed char __a, vector signed char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector signed char __a, vector bool char __b) {
+vec_any_le(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
+ __builtin_s390_vchbs(__a, (__vector signed char)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector bool char __a, vector signed char __b) {
+vec_any_le(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
+ __builtin_s390_vchbs((__vector signed char)__a, __b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned char __a, vector unsigned char __b) {
+vec_any_le(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
__builtin_s390_vchlbs(__a, __b, &__cc);
return __cc != 0;
@@ -5076,31 +5144,31 @@ vec_any_le(vector unsigned char __a, vector unsigned char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned char __a, vector bool char __b) {
+vec_any_le(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
+ __builtin_s390_vchlbs(__a, (__vector unsigned char)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector bool char __a, vector unsigned char __b) {
+vec_any_le(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__a, __b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector bool char __a, vector bool char __b) {
+vec_any_le(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__a,
- (vector unsigned char)__b, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_le(vector signed short __a, vector signed short __b) {
+vec_any_le(__vector signed short __a, __vector signed short __b) {
int __cc;
__builtin_s390_vchhs(__a, __b, &__cc);
return __cc != 0;
@@ -5108,22 +5176,22 @@ vec_any_le(vector signed short __a, vector signed short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector signed short __a, vector bool short __b) {
+vec_any_le(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
+ __builtin_s390_vchhs(__a, (__vector signed short)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector bool short __a, vector signed short __b) {
+vec_any_le(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
+ __builtin_s390_vchhs((__vector signed short)__a, __b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned short __a, vector unsigned short __b) {
+vec_any_le(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
__builtin_s390_vchlhs(__a, __b, &__cc);
return __cc != 0;
@@ -5131,31 +5199,31 @@ vec_any_le(vector unsigned short __a, vector unsigned short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned short __a, vector bool short __b) {
+vec_any_le(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
+ __builtin_s390_vchlhs(__a, (__vector unsigned short)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector bool short __a, vector unsigned short __b) {
+vec_any_le(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__a, __b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector bool short __a, vector bool short __b) {
+vec_any_le(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__a,
- (vector unsigned short)__b, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_le(vector signed int __a, vector signed int __b) {
+vec_any_le(__vector signed int __a, __vector signed int __b) {
int __cc;
__builtin_s390_vchfs(__a, __b, &__cc);
return __cc != 0;
@@ -5163,22 +5231,22 @@ vec_any_le(vector signed int __a, vector signed int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector signed int __a, vector bool int __b) {
+vec_any_le(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
+ __builtin_s390_vchfs(__a, (__vector signed int)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector bool int __a, vector signed int __b) {
+vec_any_le(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
+ __builtin_s390_vchfs((__vector signed int)__a, __b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned int __a, vector unsigned int __b) {
+vec_any_le(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
__builtin_s390_vchlfs(__a, __b, &__cc);
return __cc != 0;
@@ -5186,31 +5254,31 @@ vec_any_le(vector unsigned int __a, vector unsigned int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned int __a, vector bool int __b) {
+vec_any_le(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
+ __builtin_s390_vchlfs(__a, (__vector unsigned int)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector bool int __a, vector unsigned int __b) {
+vec_any_le(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__a, __b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector bool int __a, vector bool int __b) {
+vec_any_le(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__a,
- (vector unsigned int)__b, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_le(vector signed long long __a, vector signed long long __b) {
+vec_any_le(__vector signed long long __a, __vector signed long long __b) {
int __cc;
__builtin_s390_vchgs(__a, __b, &__cc);
return __cc != 0;
@@ -5218,22 +5286,22 @@ vec_any_le(vector signed long long __a, vector signed long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector signed long long __a, vector bool long long __b) {
+vec_any_le(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
+ __builtin_s390_vchgs(__a, (__vector signed long long)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector bool long long __a, vector signed long long __b) {
+vec_any_le(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
+ __builtin_s390_vchgs((__vector signed long long)__a, __b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned long long __a, vector unsigned long long __b) {
+vec_any_le(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
__builtin_s390_vchlgs(__a, __b, &__cc);
return __cc != 0;
@@ -5241,32 +5309,32 @@ vec_any_le(vector unsigned long long __a, vector unsigned long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned long long __a, vector bool long long __b) {
+vec_any_le(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
+ __builtin_s390_vchlgs(__a, (__vector unsigned long long)__b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector bool long long __a, vector unsigned long long __b) {
+vec_any_le(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__a, __b, &__cc);
return __cc != 0;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_le(vector bool long long __a, vector bool long long __b) {
+vec_any_le(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__a,
- (vector unsigned long long)__b, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__a,
+ (__vector unsigned long long)__b, &__cc);
return __cc != 0;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_any_le(vector float __a, vector float __b) {
+vec_any_le(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchesbs(__b, __a, &__cc);
return __cc <= 1;
@@ -5274,7 +5342,7 @@ vec_any_le(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_any_le(vector double __a, vector double __b) {
+vec_any_le(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchedbs(__b, __a, &__cc);
return __cc <= 1;
@@ -5283,7 +5351,7 @@ vec_any_le(vector double __a, vector double __b) {
/*-- vec_any_lt -------------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_any_lt(vector signed char __a, vector signed char __b) {
+vec_any_lt(__vector signed char __a, __vector signed char __b) {
int __cc;
__builtin_s390_vchbs(__b, __a, &__cc);
return __cc <= 1;
@@ -5291,22 +5359,22 @@ vec_any_lt(vector signed char __a, vector signed char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector signed char __a, vector bool char __b) {
+vec_any_lt(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
+ __builtin_s390_vchbs((__vector signed char)__b, __a, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector bool char __a, vector signed char __b) {
+vec_any_lt(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
+ __builtin_s390_vchbs(__b, (__vector signed char)__a, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned char __a, vector unsigned char __b) {
+vec_any_lt(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
__builtin_s390_vchlbs(__b, __a, &__cc);
return __cc <= 1;
@@ -5314,31 +5382,31 @@ vec_any_lt(vector unsigned char __a, vector unsigned char __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned char __a, vector bool char __b) {
+vec_any_lt(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__b, __a, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector bool char __a, vector unsigned char __b) {
+vec_any_lt(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
+ __builtin_s390_vchlbs(__b, (__vector unsigned char)__a, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector bool char __a, vector bool char __b) {
+vec_any_lt(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vchlbs((vector unsigned char)__b,
- (vector unsigned char)__a, &__cc);
+ __builtin_s390_vchlbs((__vector unsigned char)__b,
+ (__vector unsigned char)__a, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_lt(vector signed short __a, vector signed short __b) {
+vec_any_lt(__vector signed short __a, __vector signed short __b) {
int __cc;
__builtin_s390_vchhs(__b, __a, &__cc);
return __cc <= 1;
@@ -5346,22 +5414,22 @@ vec_any_lt(vector signed short __a, vector signed short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector signed short __a, vector bool short __b) {
+vec_any_lt(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
+ __builtin_s390_vchhs((__vector signed short)__b, __a, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector bool short __a, vector signed short __b) {
+vec_any_lt(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
+ __builtin_s390_vchhs(__b, (__vector signed short)__a, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned short __a, vector unsigned short __b) {
+vec_any_lt(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
__builtin_s390_vchlhs(__b, __a, &__cc);
return __cc <= 1;
@@ -5369,31 +5437,31 @@ vec_any_lt(vector unsigned short __a, vector unsigned short __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned short __a, vector bool short __b) {
+vec_any_lt(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__b, __a, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector bool short __a, vector unsigned short __b) {
+vec_any_lt(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
+ __builtin_s390_vchlhs(__b, (__vector unsigned short)__a, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector bool short __a, vector bool short __b) {
+vec_any_lt(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vchlhs((vector unsigned short)__b,
- (vector unsigned short)__a, &__cc);
+ __builtin_s390_vchlhs((__vector unsigned short)__b,
+ (__vector unsigned short)__a, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_lt(vector signed int __a, vector signed int __b) {
+vec_any_lt(__vector signed int __a, __vector signed int __b) {
int __cc;
__builtin_s390_vchfs(__b, __a, &__cc);
return __cc <= 1;
@@ -5401,22 +5469,22 @@ vec_any_lt(vector signed int __a, vector signed int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector signed int __a, vector bool int __b) {
+vec_any_lt(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
+ __builtin_s390_vchfs((__vector signed int)__b, __a, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector bool int __a, vector signed int __b) {
+vec_any_lt(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
+ __builtin_s390_vchfs(__b, (__vector signed int)__a, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned int __a, vector unsigned int __b) {
+vec_any_lt(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
__builtin_s390_vchlfs(__b, __a, &__cc);
return __cc <= 1;
@@ -5424,31 +5492,31 @@ vec_any_lt(vector unsigned int __a, vector unsigned int __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned int __a, vector bool int __b) {
+vec_any_lt(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__b, __a, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector bool int __a, vector unsigned int __b) {
+vec_any_lt(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
+ __builtin_s390_vchlfs(__b, (__vector unsigned int)__a, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector bool int __a, vector bool int __b) {
+vec_any_lt(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vchlfs((vector unsigned int)__b,
- (vector unsigned int)__a, &__cc);
+ __builtin_s390_vchlfs((__vector unsigned int)__b,
+ (__vector unsigned int)__a, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_lt(vector signed long long __a, vector signed long long __b) {
+vec_any_lt(__vector signed long long __a, __vector signed long long __b) {
int __cc;
__builtin_s390_vchgs(__b, __a, &__cc);
return __cc <= 1;
@@ -5456,22 +5524,22 @@ vec_any_lt(vector signed long long __a, vector signed long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector signed long long __a, vector bool long long __b) {
+vec_any_lt(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
+ __builtin_s390_vchgs((__vector signed long long)__b, __a, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector bool long long __a, vector signed long long __b) {
+vec_any_lt(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
+ __builtin_s390_vchgs(__b, (__vector signed long long)__a, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned long long __a, vector unsigned long long __b) {
+vec_any_lt(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
__builtin_s390_vchlgs(__b, __a, &__cc);
return __cc <= 1;
@@ -5479,32 +5547,32 @@ vec_any_lt(vector unsigned long long __a, vector unsigned long long __b) {
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned long long __a, vector bool long long __b) {
+vec_any_lt(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__b, __a, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector bool long long __a, vector unsigned long long __b) {
+vec_any_lt(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
+ __builtin_s390_vchlgs(__b, (__vector unsigned long long)__a, &__cc);
return __cc <= 1;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai int
-vec_any_lt(vector bool long long __a, vector bool long long __b) {
+vec_any_lt(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vchlgs((vector unsigned long long)__b,
- (vector unsigned long long)__a, &__cc);
+ __builtin_s390_vchlgs((__vector unsigned long long)__b,
+ (__vector unsigned long long)__a, &__cc);
return __cc <= 1;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_any_lt(vector float __a, vector float __b) {
+vec_any_lt(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchsbs(__b, __a, &__cc);
return __cc <= 1;
@@ -5512,7 +5580,7 @@ vec_any_lt(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_any_lt(vector double __a, vector double __b) {
+vec_any_lt(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchdbs(__b, __a, &__cc);
return __cc <= 1;
@@ -5522,7 +5590,7 @@ vec_any_lt(vector double __a, vector double __b) {
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_any_nge(vector float __a, vector float __b) {
+vec_any_nge(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchesbs(__a, __b, &__cc);
return __cc != 0;
@@ -5530,7 +5598,7 @@ vec_any_nge(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_any_nge(vector double __a, vector double __b) {
+vec_any_nge(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchedbs(__a, __b, &__cc);
return __cc != 0;
@@ -5540,7 +5608,7 @@ vec_any_nge(vector double __a, vector double __b) {
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_any_ngt(vector float __a, vector float __b) {
+vec_any_ngt(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchsbs(__a, __b, &__cc);
return __cc != 0;
@@ -5548,7 +5616,7 @@ vec_any_ngt(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_any_ngt(vector double __a, vector double __b) {
+vec_any_ngt(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchdbs(__a, __b, &__cc);
return __cc != 0;
@@ -5558,7 +5626,7 @@ vec_any_ngt(vector double __a, vector double __b) {
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_any_nle(vector float __a, vector float __b) {
+vec_any_nle(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchesbs(__b, __a, &__cc);
return __cc != 0;
@@ -5566,7 +5634,7 @@ vec_any_nle(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_any_nle(vector double __a, vector double __b) {
+vec_any_nle(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchedbs(__b, __a, &__cc);
return __cc != 0;
@@ -5576,7 +5644,7 @@ vec_any_nle(vector double __a, vector double __b) {
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_any_nlt(vector float __a, vector float __b) {
+vec_any_nlt(__vector float __a, __vector float __b) {
int __cc;
__builtin_s390_vfchsbs(__b, __a, &__cc);
return __cc != 0;
@@ -5584,7 +5652,7 @@ vec_any_nlt(vector float __a, vector float __b) {
#endif
static inline __ATTRS_o_ai int
-vec_any_nlt(vector double __a, vector double __b) {
+vec_any_nlt(__vector double __a, __vector double __b) {
int __cc;
__builtin_s390_vfchdbs(__b, __a, &__cc);
return __cc != 0;
@@ -5594,7 +5662,7 @@ vec_any_nlt(vector double __a, vector double __b) {
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_any_nan(vector float __a) {
+vec_any_nan(__vector float __a) {
int __cc;
__builtin_s390_vftcisb(__a, 15, &__cc);
return __cc != 3;
@@ -5602,7 +5670,7 @@ vec_any_nan(vector float __a) {
#endif
static inline __ATTRS_o_ai int
-vec_any_nan(vector double __a) {
+vec_any_nan(__vector double __a) {
int __cc;
__builtin_s390_vftcidb(__a, 15, &__cc);
return __cc != 3;
@@ -5612,7 +5680,7 @@ vec_any_nan(vector double __a) {
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_any_numeric(vector float __a) {
+vec_any_numeric(__vector float __a) {
int __cc;
__builtin_s390_vftcisb(__a, 15, &__cc);
return __cc != 0;
@@ -5620,7 +5688,7 @@ vec_any_numeric(vector float __a) {
#endif
static inline __ATTRS_o_ai int
-vec_any_numeric(vector double __a) {
+vec_any_numeric(__vector double __a) {
int __cc;
__builtin_s390_vftcidb(__a, 15, &__cc);
return __cc != 0;
@@ -5628,2389 +5696,2393 @@ vec_any_numeric(vector double __a) {
/*-- vec_andc ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_andc(vector bool char __a, vector bool char __b) {
+static inline __ATTRS_o_ai __vector __bool char
+vec_andc(__vector __bool char __a, __vector __bool char __b) {
return __a & ~__b;
}
-static inline __ATTRS_o_ai vector signed char
-vec_andc(vector signed char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_andc(__vector signed char __a, __vector signed char __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_andc(vector bool char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_andc(__vector __bool char __a, __vector signed char __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_andc(vector signed char __a, vector bool char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_andc(__vector signed char __a, __vector __bool char __b) {
return __a & ~__b;
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_andc(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_andc(__vector unsigned char __a, __vector unsigned char __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_andc(vector bool char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_andc(__vector __bool char __a, __vector unsigned char __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_andc(vector unsigned char __a, vector bool char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_andc(__vector unsigned char __a, __vector __bool char __b) {
return __a & ~__b;
}
-static inline __ATTRS_o_ai vector bool short
-vec_andc(vector bool short __a, vector bool short __b) {
+static inline __ATTRS_o_ai __vector __bool short
+vec_andc(__vector __bool short __a, __vector __bool short __b) {
return __a & ~__b;
}
-static inline __ATTRS_o_ai vector signed short
-vec_andc(vector signed short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_andc(__vector signed short __a, __vector signed short __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_andc(vector bool short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_andc(__vector __bool short __a, __vector signed short __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_andc(vector signed short __a, vector bool short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_andc(__vector signed short __a, __vector __bool short __b) {
return __a & ~__b;
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_andc(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_andc(__vector unsigned short __a, __vector unsigned short __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_andc(vector bool short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_andc(__vector __bool short __a, __vector unsigned short __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_andc(vector unsigned short __a, vector bool short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_andc(__vector unsigned short __a, __vector __bool short __b) {
return __a & ~__b;
}
-static inline __ATTRS_o_ai vector bool int
-vec_andc(vector bool int __a, vector bool int __b) {
+static inline __ATTRS_o_ai __vector __bool int
+vec_andc(__vector __bool int __a, __vector __bool int __b) {
return __a & ~__b;
}
-static inline __ATTRS_o_ai vector signed int
-vec_andc(vector signed int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_andc(__vector signed int __a, __vector signed int __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_andc(vector bool int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_andc(__vector __bool int __a, __vector signed int __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_andc(vector signed int __a, vector bool int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_andc(__vector signed int __a, __vector __bool int __b) {
return __a & ~__b;
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_andc(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_andc(__vector unsigned int __a, __vector unsigned int __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_andc(vector bool int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_andc(__vector __bool int __a, __vector unsigned int __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_andc(vector unsigned int __a, vector bool int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_andc(__vector unsigned int __a, __vector __bool int __b) {
return __a & ~__b;
}
-static inline __ATTRS_o_ai vector bool long long
-vec_andc(vector bool long long __a, vector bool long long __b) {
+static inline __ATTRS_o_ai __vector __bool long long
+vec_andc(__vector __bool long long __a, __vector __bool long long __b) {
return __a & ~__b;
}
-static inline __ATTRS_o_ai vector signed long long
-vec_andc(vector signed long long __a, vector signed long long __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_andc(__vector signed long long __a, __vector signed long long __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_andc(vector bool long long __a, vector signed long long __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_andc(__vector __bool long long __a, __vector signed long long __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_andc(vector signed long long __a, vector bool long long __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_andc(__vector signed long long __a, __vector __bool long long __b) {
return __a & ~__b;
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_andc(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_andc(__vector unsigned long long __a, __vector unsigned long long __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_andc(vector bool long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_andc(__vector __bool long long __a, __vector unsigned long long __b) {
return __a & ~__b;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_andc(vector unsigned long long __a, vector bool long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_andc(__vector unsigned long long __a, __vector __bool long long __b) {
return __a & ~__b;
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_andc(vector float __a, vector float __b) {
- return (vector float)((vector unsigned int)__a &
- ~(vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector float
+vec_andc(__vector float __a, __vector float __b) {
+ return (__vector float)((__vector unsigned int)__a &
+ ~(__vector unsigned int)__b);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_andc(vector double __a, vector double __b) {
- return (vector double)((vector unsigned long long)__a &
- ~(vector unsigned long long)__b);
+static inline __ATTRS_o_ai __vector double
+vec_andc(__vector double __a, __vector double __b) {
+ return (__vector double)((__vector unsigned long long)__a &
+ ~(__vector unsigned long long)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_andc(vector bool long long __a, vector double __b) {
- return (vector double)((vector unsigned long long)__a &
- ~(vector unsigned long long)__b);
+static inline __ATTRS_o_ai __vector double
+vec_andc(__vector __bool long long __a, __vector double __b) {
+ return (__vector double)((__vector unsigned long long)__a &
+ ~(__vector unsigned long long)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_andc(vector double __a, vector bool long long __b) {
- return (vector double)((vector unsigned long long)__a &
- ~(vector unsigned long long)__b);
+static inline __ATTRS_o_ai __vector double
+vec_andc(__vector double __a, __vector __bool long long __b) {
+ return (__vector double)((__vector unsigned long long)__a &
+ ~(__vector unsigned long long)__b);
}
/*-- vec_nor ----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_nor(vector bool char __a, vector bool char __b) {
+static inline __ATTRS_o_ai __vector __bool char
+vec_nor(__vector __bool char __a, __vector __bool char __b) {
return ~(__a | __b);
}
-static inline __ATTRS_o_ai vector signed char
-vec_nor(vector signed char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_nor(__vector signed char __a, __vector signed char __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_nor(vector bool char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_nor(__vector __bool char __a, __vector signed char __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_nor(vector signed char __a, vector bool char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_nor(__vector signed char __a, __vector __bool char __b) {
return ~(__a | __b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_nor(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_nor(__vector unsigned char __a, __vector unsigned char __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_nor(vector bool char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_nor(__vector __bool char __a, __vector unsigned char __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_nor(vector unsigned char __a, vector bool char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_nor(__vector unsigned char __a, __vector __bool char __b) {
return ~(__a | __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_nor(vector bool short __a, vector bool short __b) {
+static inline __ATTRS_o_ai __vector __bool short
+vec_nor(__vector __bool short __a, __vector __bool short __b) {
return ~(__a | __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_nor(vector signed short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_nor(__vector signed short __a, __vector signed short __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_nor(vector bool short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_nor(__vector __bool short __a, __vector signed short __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_nor(vector signed short __a, vector bool short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_nor(__vector signed short __a, __vector __bool short __b) {
return ~(__a | __b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_nor(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_nor(__vector unsigned short __a, __vector unsigned short __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_nor(vector bool short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_nor(__vector __bool short __a, __vector unsigned short __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_nor(vector unsigned short __a, vector bool short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_nor(__vector unsigned short __a, __vector __bool short __b) {
return ~(__a | __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_nor(vector bool int __a, vector bool int __b) {
+static inline __ATTRS_o_ai __vector __bool int
+vec_nor(__vector __bool int __a, __vector __bool int __b) {
return ~(__a | __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_nor(vector signed int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_nor(__vector signed int __a, __vector signed int __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_nor(vector bool int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_nor(__vector __bool int __a, __vector signed int __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_nor(vector signed int __a, vector bool int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_nor(__vector signed int __a, __vector __bool int __b) {
return ~(__a | __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_nor(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_nor(__vector unsigned int __a, __vector unsigned int __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_nor(vector bool int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_nor(__vector __bool int __a, __vector unsigned int __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_nor(vector unsigned int __a, vector bool int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_nor(__vector unsigned int __a, __vector __bool int __b) {
return ~(__a | __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_nor(vector bool long long __a, vector bool long long __b) {
+static inline __ATTRS_o_ai __vector __bool long long
+vec_nor(__vector __bool long long __a, __vector __bool long long __b) {
return ~(__a | __b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_nor(vector signed long long __a, vector signed long long __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_nor(__vector signed long long __a, __vector signed long long __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_nor(vector bool long long __a, vector signed long long __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_nor(__vector __bool long long __a, __vector signed long long __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_nor(vector signed long long __a, vector bool long long __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_nor(__vector signed long long __a, __vector __bool long long __b) {
return ~(__a | __b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_nor(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_nor(__vector unsigned long long __a, __vector unsigned long long __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_nor(vector bool long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_nor(__vector __bool long long __a, __vector unsigned long long __b) {
return ~(__a | __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_nor(vector unsigned long long __a, vector bool long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_nor(__vector unsigned long long __a, __vector __bool long long __b) {
return ~(__a | __b);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_nor(vector float __a, vector float __b) {
- return (vector float)~((vector unsigned int)__a |
- (vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector float
+vec_nor(__vector float __a, __vector float __b) {
+ return (__vector float)~((__vector unsigned int)__a |
+ (__vector unsigned int)__b);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_nor(vector double __a, vector double __b) {
- return (vector double)~((vector unsigned long long)__a |
- (vector unsigned long long)__b);
+static inline __ATTRS_o_ai __vector double
+vec_nor(__vector double __a, __vector double __b) {
+ return (__vector double)~((__vector unsigned long long)__a |
+ (__vector unsigned long long)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_nor(vector bool long long __a, vector double __b) {
- return (vector double)~((vector unsigned long long)__a |
- (vector unsigned long long)__b);
+static inline __ATTRS_o_ai __vector double
+vec_nor(__vector __bool long long __a, __vector double __b) {
+ return (__vector double)~((__vector unsigned long long)__a |
+ (__vector unsigned long long)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_nor(vector double __a, vector bool long long __b) {
- return (vector double)~((vector unsigned long long)__a |
- (vector unsigned long long)__b);
+static inline __ATTRS_o_ai __vector double
+vec_nor(__vector double __a, __vector __bool long long __b) {
+ return (__vector double)~((__vector unsigned long long)__a |
+ (__vector unsigned long long)__b);
}
/*-- vec_orc ----------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool char
-vec_orc(vector bool char __a, vector bool char __b) {
+static inline __ATTRS_o_ai __vector __bool char
+vec_orc(__vector __bool char __a, __vector __bool char __b) {
return __a | ~__b;
}
-static inline __ATTRS_o_ai vector signed char
-vec_orc(vector signed char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_orc(__vector signed char __a, __vector signed char __b) {
return __a | ~__b;
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_orc(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_orc(__vector unsigned char __a, __vector unsigned char __b) {
return __a | ~__b;
}
-static inline __ATTRS_o_ai vector bool short
-vec_orc(vector bool short __a, vector bool short __b) {
+static inline __ATTRS_o_ai __vector __bool short
+vec_orc(__vector __bool short __a, __vector __bool short __b) {
return __a | ~__b;
}
-static inline __ATTRS_o_ai vector signed short
-vec_orc(vector signed short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_orc(__vector signed short __a, __vector signed short __b) {
return __a | ~__b;
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_orc(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_orc(__vector unsigned short __a, __vector unsigned short __b) {
return __a | ~__b;
}
-static inline __ATTRS_o_ai vector bool int
-vec_orc(vector bool int __a, vector bool int __b) {
+static inline __ATTRS_o_ai __vector __bool int
+vec_orc(__vector __bool int __a, __vector __bool int __b) {
return __a | ~__b;
}
-static inline __ATTRS_o_ai vector signed int
-vec_orc(vector signed int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_orc(__vector signed int __a, __vector signed int __b) {
return __a | ~__b;
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_orc(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_orc(__vector unsigned int __a, __vector unsigned int __b) {
return __a | ~__b;
}
-static inline __ATTRS_o_ai vector bool long long
-vec_orc(vector bool long long __a, vector bool long long __b) {
+static inline __ATTRS_o_ai __vector __bool long long
+vec_orc(__vector __bool long long __a, __vector __bool long long __b) {
return __a | ~__b;
}
-static inline __ATTRS_o_ai vector signed long long
-vec_orc(vector signed long long __a, vector signed long long __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_orc(__vector signed long long __a, __vector signed long long __b) {
return __a | ~__b;
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_orc(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_orc(__vector unsigned long long __a, __vector unsigned long long __b) {
return __a | ~__b;
}
-static inline __ATTRS_o_ai vector float
-vec_orc(vector float __a, vector float __b) {
- return (vector float)((vector unsigned int)__a |
- ~(vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector float
+vec_orc(__vector float __a, __vector float __b) {
+ return (__vector float)((__vector unsigned int)__a |
+ ~(__vector unsigned int)__b);
}
-static inline __ATTRS_o_ai vector double
-vec_orc(vector double __a, vector double __b) {
- return (vector double)((vector unsigned long long)__a |
- ~(vector unsigned long long)__b);
+static inline __ATTRS_o_ai __vector double
+vec_orc(__vector double __a, __vector double __b) {
+ return (__vector double)((__vector unsigned long long)__a |
+ ~(__vector unsigned long long)__b);
}
#endif
/*-- vec_nand ---------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool char
-vec_nand(vector bool char __a, vector bool char __b) {
+static inline __ATTRS_o_ai __vector __bool char
+vec_nand(__vector __bool char __a, __vector __bool char __b) {
return ~(__a & __b);
}
-static inline __ATTRS_o_ai vector signed char
-vec_nand(vector signed char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_nand(__vector signed char __a, __vector signed char __b) {
return ~(__a & __b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_nand(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_nand(__vector unsigned char __a, __vector unsigned char __b) {
return ~(__a & __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_nand(vector bool short __a, vector bool short __b) {
+static inline __ATTRS_o_ai __vector __bool short
+vec_nand(__vector __bool short __a, __vector __bool short __b) {
return ~(__a & __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_nand(vector signed short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_nand(__vector signed short __a, __vector signed short __b) {
return ~(__a & __b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_nand(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_nand(__vector unsigned short __a, __vector unsigned short __b) {
return ~(__a & __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_nand(vector bool int __a, vector bool int __b) {
+static inline __ATTRS_o_ai __vector __bool int
+vec_nand(__vector __bool int __a, __vector __bool int __b) {
return ~(__a & __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_nand(vector signed int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_nand(__vector signed int __a, __vector signed int __b) {
return ~(__a & __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_nand(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_nand(__vector unsigned int __a, __vector unsigned int __b) {
return ~(__a & __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_nand(vector bool long long __a, vector bool long long __b) {
+static inline __ATTRS_o_ai __vector __bool long long
+vec_nand(__vector __bool long long __a, __vector __bool long long __b) {
return ~(__a & __b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_nand(vector signed long long __a, vector signed long long __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_nand(__vector signed long long __a, __vector signed long long __b) {
return ~(__a & __b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_nand(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_nand(__vector unsigned long long __a, __vector unsigned long long __b) {
return ~(__a & __b);
}
-static inline __ATTRS_o_ai vector float
-vec_nand(vector float __a, vector float __b) {
- return (vector float)~((vector unsigned int)__a &
- (vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector float
+vec_nand(__vector float __a, __vector float __b) {
+ return (__vector float)~((__vector unsigned int)__a &
+ (__vector unsigned int)__b);
}
-static inline __ATTRS_o_ai vector double
-vec_nand(vector double __a, vector double __b) {
- return (vector double)~((vector unsigned long long)__a &
- (vector unsigned long long)__b);
+static inline __ATTRS_o_ai __vector double
+vec_nand(__vector double __a, __vector double __b) {
+ return (__vector double)~((__vector unsigned long long)__a &
+ (__vector unsigned long long)__b);
}
#endif
/*-- vec_eqv ----------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool char
-vec_eqv(vector bool char __a, vector bool char __b) {
+static inline __ATTRS_o_ai __vector __bool char
+vec_eqv(__vector __bool char __a, __vector __bool char __b) {
return ~(__a ^ __b);
}
-static inline __ATTRS_o_ai vector signed char
-vec_eqv(vector signed char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_eqv(__vector signed char __a, __vector signed char __b) {
return ~(__a ^ __b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_eqv(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_eqv(__vector unsigned char __a, __vector unsigned char __b) {
return ~(__a ^ __b);
}
-static inline __ATTRS_o_ai vector bool short
-vec_eqv(vector bool short __a, vector bool short __b) {
+static inline __ATTRS_o_ai __vector __bool short
+vec_eqv(__vector __bool short __a, __vector __bool short __b) {
return ~(__a ^ __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_eqv(vector signed short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_eqv(__vector signed short __a, __vector signed short __b) {
return ~(__a ^ __b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_eqv(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_eqv(__vector unsigned short __a, __vector unsigned short __b) {
return ~(__a ^ __b);
}
-static inline __ATTRS_o_ai vector bool int
-vec_eqv(vector bool int __a, vector bool int __b) {
+static inline __ATTRS_o_ai __vector __bool int
+vec_eqv(__vector __bool int __a, __vector __bool int __b) {
return ~(__a ^ __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_eqv(vector signed int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_eqv(__vector signed int __a, __vector signed int __b) {
return ~(__a ^ __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_eqv(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_eqv(__vector unsigned int __a, __vector unsigned int __b) {
return ~(__a ^ __b);
}
-static inline __ATTRS_o_ai vector bool long long
-vec_eqv(vector bool long long __a, vector bool long long __b) {
+static inline __ATTRS_o_ai __vector __bool long long
+vec_eqv(__vector __bool long long __a, __vector __bool long long __b) {
return ~(__a ^ __b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_eqv(vector signed long long __a, vector signed long long __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_eqv(__vector signed long long __a, __vector signed long long __b) {
return ~(__a ^ __b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_eqv(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_eqv(__vector unsigned long long __a, __vector unsigned long long __b) {
return ~(__a ^ __b);
}
-static inline __ATTRS_o_ai vector float
-vec_eqv(vector float __a, vector float __b) {
- return (vector float)~((vector unsigned int)__a ^
- (vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector float
+vec_eqv(__vector float __a, __vector float __b) {
+ return (__vector float)~((__vector unsigned int)__a ^
+ (__vector unsigned int)__b);
}
-static inline __ATTRS_o_ai vector double
-vec_eqv(vector double __a, vector double __b) {
- return (vector double)~((vector unsigned long long)__a ^
- (vector unsigned long long)__b);
+static inline __ATTRS_o_ai __vector double
+vec_eqv(__vector double __a, __vector double __b) {
+ return (__vector double)~((__vector unsigned long long)__a ^
+ (__vector unsigned long long)__b);
}
#endif
/*-- vec_cntlz --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_cntlz(vector signed char __a) {
- return __builtin_s390_vclzb((vector unsigned char)__a);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cntlz(__vector signed char __a) {
+ return __builtin_s390_vclzb((__vector unsigned char)__a);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cntlz(vector unsigned char __a) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cntlz(__vector unsigned char __a) {
return __builtin_s390_vclzb(__a);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cntlz(vector signed short __a) {
- return __builtin_s390_vclzh((vector unsigned short)__a);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cntlz(__vector signed short __a) {
+ return __builtin_s390_vclzh((__vector unsigned short)__a);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cntlz(vector unsigned short __a) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cntlz(__vector unsigned short __a) {
return __builtin_s390_vclzh(__a);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cntlz(vector signed int __a) {
- return __builtin_s390_vclzf((vector unsigned int)__a);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cntlz(__vector signed int __a) {
+ return __builtin_s390_vclzf((__vector unsigned int)__a);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cntlz(vector unsigned int __a) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cntlz(__vector unsigned int __a) {
return __builtin_s390_vclzf(__a);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_cntlz(vector signed long long __a) {
- return __builtin_s390_vclzg((vector unsigned long long)__a);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_cntlz(__vector signed long long __a) {
+ return __builtin_s390_vclzg((__vector unsigned long long)__a);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_cntlz(vector unsigned long long __a) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_cntlz(__vector unsigned long long __a) {
return __builtin_s390_vclzg(__a);
}
/*-- vec_cnttz --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_cnttz(vector signed char __a) {
- return __builtin_s390_vctzb((vector unsigned char)__a);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cnttz(__vector signed char __a) {
+ return __builtin_s390_vctzb((__vector unsigned char)__a);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cnttz(vector unsigned char __a) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cnttz(__vector unsigned char __a) {
return __builtin_s390_vctzb(__a);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cnttz(vector signed short __a) {
- return __builtin_s390_vctzh((vector unsigned short)__a);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cnttz(__vector signed short __a) {
+ return __builtin_s390_vctzh((__vector unsigned short)__a);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cnttz(vector unsigned short __a) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cnttz(__vector unsigned short __a) {
return __builtin_s390_vctzh(__a);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cnttz(vector signed int __a) {
- return __builtin_s390_vctzf((vector unsigned int)__a);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cnttz(__vector signed int __a) {
+ return __builtin_s390_vctzf((__vector unsigned int)__a);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cnttz(vector unsigned int __a) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cnttz(__vector unsigned int __a) {
return __builtin_s390_vctzf(__a);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_cnttz(vector signed long long __a) {
- return __builtin_s390_vctzg((vector unsigned long long)__a);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_cnttz(__vector signed long long __a) {
+ return __builtin_s390_vctzg((__vector unsigned long long)__a);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_cnttz(vector unsigned long long __a) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_cnttz(__vector unsigned long long __a) {
return __builtin_s390_vctzg(__a);
}
/*-- vec_popcnt -------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_popcnt(vector signed char __a) {
- return __builtin_s390_vpopctb((vector unsigned char)__a);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_popcnt(__vector signed char __a) {
+ return __builtin_s390_vpopctb((__vector unsigned char)__a);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_popcnt(vector unsigned char __a) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_popcnt(__vector unsigned char __a) {
return __builtin_s390_vpopctb(__a);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_popcnt(vector signed short __a) {
- return __builtin_s390_vpopcth((vector unsigned short)__a);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_popcnt(__vector signed short __a) {
+ return __builtin_s390_vpopcth((__vector unsigned short)__a);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_popcnt(vector unsigned short __a) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_popcnt(__vector unsigned short __a) {
return __builtin_s390_vpopcth(__a);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_popcnt(vector signed int __a) {
- return __builtin_s390_vpopctf((vector unsigned int)__a);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_popcnt(__vector signed int __a) {
+ return __builtin_s390_vpopctf((__vector unsigned int)__a);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_popcnt(vector unsigned int __a) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_popcnt(__vector unsigned int __a) {
return __builtin_s390_vpopctf(__a);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_popcnt(vector signed long long __a) {
- return __builtin_s390_vpopctg((vector unsigned long long)__a);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_popcnt(__vector signed long long __a) {
+ return __builtin_s390_vpopctg((__vector unsigned long long)__a);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_popcnt(vector unsigned long long __a) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_popcnt(__vector unsigned long long __a) {
return __builtin_s390_vpopctg(__a);
}
/*-- vec_rl -----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_rl(vector signed char __a, vector unsigned char __b) {
- return (vector signed char)__builtin_s390_verllvb(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed char
+vec_rl(__vector signed char __a, __vector unsigned char __b) {
+ return (__vector signed char)__builtin_s390_verllvb(
+ (__vector unsigned char)__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_rl(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_rl(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_verllvb(__a, __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_rl(vector signed short __a, vector unsigned short __b) {
- return (vector signed short)__builtin_s390_verllvh(
- (vector unsigned short)__a, __b);
+static inline __ATTRS_o_ai __vector signed short
+vec_rl(__vector signed short __a, __vector unsigned short __b) {
+ return (__vector signed short)__builtin_s390_verllvh(
+ (__vector unsigned short)__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_rl(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_rl(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_verllvh(__a, __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_rl(vector signed int __a, vector unsigned int __b) {
- return (vector signed int)__builtin_s390_verllvf(
- (vector unsigned int)__a, __b);
+static inline __ATTRS_o_ai __vector signed int
+vec_rl(__vector signed int __a, __vector unsigned int __b) {
+ return (__vector signed int)__builtin_s390_verllvf(
+ (__vector unsigned int)__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_rl(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_rl(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_verllvf(__a, __b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_rl(vector signed long long __a, vector unsigned long long __b) {
- return (vector signed long long)__builtin_s390_verllvg(
- (vector unsigned long long)__a, __b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_rl(__vector signed long long __a, __vector unsigned long long __b) {
+ return (__vector signed long long)__builtin_s390_verllvg(
+ (__vector unsigned long long)__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_rl(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_rl(__vector unsigned long long __a, __vector unsigned long long __b) {
return __builtin_s390_verllvg(__a, __b);
}
/*-- vec_rli ----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_rli(vector signed char __a, unsigned long __b) {
- return (vector signed char)__builtin_s390_verllb(
- (vector unsigned char)__a, (int)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_rli(__vector signed char __a, unsigned long __b) {
+ return (__vector signed char)__builtin_s390_verllb(
+ (__vector unsigned char)__a, (int)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_rli(vector unsigned char __a, unsigned long __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_rli(__vector unsigned char __a, unsigned long __b) {
return __builtin_s390_verllb(__a, (int)__b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_rli(vector signed short __a, unsigned long __b) {
- return (vector signed short)__builtin_s390_verllh(
- (vector unsigned short)__a, (int)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_rli(__vector signed short __a, unsigned long __b) {
+ return (__vector signed short)__builtin_s390_verllh(
+ (__vector unsigned short)__a, (int)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_rli(vector unsigned short __a, unsigned long __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_rli(__vector unsigned short __a, unsigned long __b) {
return __builtin_s390_verllh(__a, (int)__b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_rli(vector signed int __a, unsigned long __b) {
- return (vector signed int)__builtin_s390_verllf(
- (vector unsigned int)__a, (int)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_rli(__vector signed int __a, unsigned long __b) {
+ return (__vector signed int)__builtin_s390_verllf(
+ (__vector unsigned int)__a, (int)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_rli(vector unsigned int __a, unsigned long __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_rli(__vector unsigned int __a, unsigned long __b) {
return __builtin_s390_verllf(__a, (int)__b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_rli(vector signed long long __a, unsigned long __b) {
- return (vector signed long long)__builtin_s390_verllg(
- (vector unsigned long long)__a, (int)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_rli(__vector signed long long __a, unsigned long __b) {
+ return (__vector signed long long)__builtin_s390_verllg(
+ (__vector unsigned long long)__a, (int)__b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_rli(vector unsigned long long __a, unsigned long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_rli(__vector unsigned long long __a, unsigned long __b) {
return __builtin_s390_verllg(__a, (int)__b);
}
/*-- vec_rl_mask ------------------------------------------------------------*/
-extern __ATTRS_o vector signed char
-vec_rl_mask(vector signed char __a, vector unsigned char __b,
+extern __ATTRS_o __vector signed char
+vec_rl_mask(__vector signed char __a, __vector unsigned char __b,
unsigned char __c) __constant(__c);
-extern __ATTRS_o vector unsigned char
-vec_rl_mask(vector unsigned char __a, vector unsigned char __b,
+extern __ATTRS_o __vector unsigned char
+vec_rl_mask(__vector unsigned char __a, __vector unsigned char __b,
unsigned char __c) __constant(__c);
-extern __ATTRS_o vector signed short
-vec_rl_mask(vector signed short __a, vector unsigned short __b,
+extern __ATTRS_o __vector signed short
+vec_rl_mask(__vector signed short __a, __vector unsigned short __b,
unsigned char __c) __constant(__c);
-extern __ATTRS_o vector unsigned short
-vec_rl_mask(vector unsigned short __a, vector unsigned short __b,
+extern __ATTRS_o __vector unsigned short
+vec_rl_mask(__vector unsigned short __a, __vector unsigned short __b,
unsigned char __c) __constant(__c);
-extern __ATTRS_o vector signed int
-vec_rl_mask(vector signed int __a, vector unsigned int __b,
+extern __ATTRS_o __vector signed int
+vec_rl_mask(__vector signed int __a, __vector unsigned int __b,
unsigned char __c) __constant(__c);
-extern __ATTRS_o vector unsigned int
-vec_rl_mask(vector unsigned int __a, vector unsigned int __b,
+extern __ATTRS_o __vector unsigned int
+vec_rl_mask(__vector unsigned int __a, __vector unsigned int __b,
unsigned char __c) __constant(__c);
-extern __ATTRS_o vector signed long long
-vec_rl_mask(vector signed long long __a, vector unsigned long long __b,
+extern __ATTRS_o __vector signed long long
+vec_rl_mask(__vector signed long long __a, __vector unsigned long long __b,
unsigned char __c) __constant(__c);
-extern __ATTRS_o vector unsigned long long
-vec_rl_mask(vector unsigned long long __a, vector unsigned long long __b,
+extern __ATTRS_o __vector unsigned long long
+vec_rl_mask(__vector unsigned long long __a, __vector unsigned long long __b,
unsigned char __c) __constant(__c);
#define vec_rl_mask(X, Y, Z) ((__typeof__((vec_rl_mask)((X), (Y), (Z)))) \
__extension__ ({ \
- vector unsigned char __res; \
- vector unsigned char __x = (vector unsigned char)(X); \
- vector unsigned char __y = (vector unsigned char)(Y); \
+ __vector unsigned char __res; \
+ __vector unsigned char __x = (__vector unsigned char)(X); \
+ __vector unsigned char __y = (__vector unsigned char)(Y); \
switch (sizeof ((X)[0])) { \
- case 1: __res = (vector unsigned char) __builtin_s390_verimb( \
- (vector unsigned char)__x, (vector unsigned char)__x, \
- (vector unsigned char)__y, (Z)); break; \
- case 2: __res = (vector unsigned char) __builtin_s390_verimh( \
- (vector unsigned short)__x, (vector unsigned short)__x, \
- (vector unsigned short)__y, (Z)); break; \
- case 4: __res = (vector unsigned char) __builtin_s390_verimf( \
- (vector unsigned int)__x, (vector unsigned int)__x, \
- (vector unsigned int)__y, (Z)); break; \
- default: __res = (vector unsigned char) __builtin_s390_verimg( \
- (vector unsigned long long)__x, (vector unsigned long long)__x, \
- (vector unsigned long long)__y, (Z)); break; \
+ case 1: __res = (__vector unsigned char) __builtin_s390_verimb( \
+ (__vector unsigned char)__x, (__vector unsigned char)__x, \
+ (__vector unsigned char)__y, (Z)); break; \
+ case 2: __res = (__vector unsigned char) __builtin_s390_verimh( \
+ (__vector unsigned short)__x, (__vector unsigned short)__x, \
+ (__vector unsigned short)__y, (Z)); break; \
+ case 4: __res = (__vector unsigned char) __builtin_s390_verimf( \
+ (__vector unsigned int)__x, (__vector unsigned int)__x, \
+ (__vector unsigned int)__y, (Z)); break; \
+ default: __res = (__vector unsigned char) __builtin_s390_verimg( \
+ (__vector unsigned long long)__x, (__vector unsigned long long)__x, \
+ (__vector unsigned long long)__y, (Z)); break; \
} __res; }))
/*-- vec_sll ----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_sll(vector signed char __a, vector unsigned char __b) {
- return (vector signed char)__builtin_s390_vsl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed char
+vec_sll(__vector signed char __a, __vector unsigned char __b) {
+ return (__vector signed char)__builtin_s390_vsl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_sll(vector signed char __a, vector unsigned short __b) {
- return (vector signed char)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_sll(__vector signed char __a, __vector unsigned short __b) {
+ return (__vector signed char)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_sll(vector signed char __a, vector unsigned int __b) {
- return (vector signed char)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_sll(__vector signed char __a, __vector unsigned int __b) {
+ return (__vector signed char)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_sll(vector bool char __a, vector unsigned char __b) {
- return (vector bool char)__builtin_s390_vsl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_sll(__vector __bool char __a, __vector unsigned char __b) {
+ return (__vector __bool char)__builtin_s390_vsl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_sll(vector bool char __a, vector unsigned short __b) {
- return (vector bool char)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_sll(__vector __bool char __a, __vector unsigned short __b) {
+ return (__vector __bool char)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_sll(vector bool char __a, vector unsigned int __b) {
- return (vector bool char)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_sll(__vector __bool char __a, __vector unsigned int __b) {
+ return (__vector __bool char)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_sll(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sll(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vsl(__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_sll(vector unsigned char __a, vector unsigned short __b) {
- return __builtin_s390_vsl(__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sll(__vector unsigned char __a, __vector unsigned short __b) {
+ return __builtin_s390_vsl(__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_sll(vector unsigned char __a, vector unsigned int __b) {
- return __builtin_s390_vsl(__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sll(__vector unsigned char __a, __vector unsigned int __b) {
+ return __builtin_s390_vsl(__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_sll(vector signed short __a, vector unsigned char __b) {
- return (vector signed short)__builtin_s390_vsl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed short
+vec_sll(__vector signed short __a, __vector unsigned char __b) {
+ return (__vector signed short)__builtin_s390_vsl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_sll(vector signed short __a, vector unsigned short __b) {
- return (vector signed short)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_sll(__vector signed short __a, __vector unsigned short __b) {
+ return (__vector signed short)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_sll(vector signed short __a, vector unsigned int __b) {
- return (vector signed short)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_sll(__vector signed short __a, __vector unsigned int __b) {
+ return (__vector signed short)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_sll(vector bool short __a, vector unsigned char __b) {
- return (vector bool short)__builtin_s390_vsl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_sll(__vector __bool short __a, __vector unsigned char __b) {
+ return (__vector __bool short)__builtin_s390_vsl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_sll(vector bool short __a, vector unsigned short __b) {
- return (vector bool short)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_sll(__vector __bool short __a, __vector unsigned short __b) {
+ return (__vector __bool short)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_sll(vector bool short __a, vector unsigned int __b) {
- return (vector bool short)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_sll(__vector __bool short __a, __vector unsigned int __b) {
+ return (__vector __bool short)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_sll(vector unsigned short __a, vector unsigned char __b) {
- return (vector unsigned short)__builtin_s390_vsl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sll(__vector unsigned short __a, __vector unsigned char __b) {
+ return (__vector unsigned short)__builtin_s390_vsl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_sll(vector unsigned short __a, vector unsigned short __b) {
- return (vector unsigned short)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sll(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector unsigned short)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_sll(vector unsigned short __a, vector unsigned int __b) {
- return (vector unsigned short)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sll(__vector unsigned short __a, __vector unsigned int __b) {
+ return (__vector unsigned short)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_sll(vector signed int __a, vector unsigned char __b) {
- return (vector signed int)__builtin_s390_vsl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed int
+vec_sll(__vector signed int __a, __vector unsigned char __b) {
+ return (__vector signed int)__builtin_s390_vsl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_sll(vector signed int __a, vector unsigned short __b) {
- return (vector signed int)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_sll(__vector signed int __a, __vector unsigned short __b) {
+ return (__vector signed int)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_sll(vector signed int __a, vector unsigned int __b) {
- return (vector signed int)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_sll(__vector signed int __a, __vector unsigned int __b) {
+ return (__vector signed int)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_sll(vector bool int __a, vector unsigned char __b) {
- return (vector bool int)__builtin_s390_vsl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_sll(__vector __bool int __a, __vector unsigned char __b) {
+ return (__vector __bool int)__builtin_s390_vsl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_sll(vector bool int __a, vector unsigned short __b) {
- return (vector bool int)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_sll(__vector __bool int __a, __vector unsigned short __b) {
+ return (__vector __bool int)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_sll(vector bool int __a, vector unsigned int __b) {
- return (vector bool int)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_sll(__vector __bool int __a, __vector unsigned int __b) {
+ return (__vector __bool int)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_sll(vector unsigned int __a, vector unsigned char __b) {
- return (vector unsigned int)__builtin_s390_vsl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sll(__vector unsigned int __a, __vector unsigned char __b) {
+ return (__vector unsigned int)__builtin_s390_vsl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_sll(vector unsigned int __a, vector unsigned short __b) {
- return (vector unsigned int)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sll(__vector unsigned int __a, __vector unsigned short __b) {
+ return (__vector unsigned int)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_sll(vector unsigned int __a, vector unsigned int __b) {
- return (vector unsigned int)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sll(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector unsigned int)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_sll(vector signed long long __a, vector unsigned char __b) {
- return (vector signed long long)__builtin_s390_vsl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_sll(__vector signed long long __a, __vector unsigned char __b) {
+ return (__vector signed long long)__builtin_s390_vsl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_sll(vector signed long long __a, vector unsigned short __b) {
- return (vector signed long long)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_sll(__vector signed long long __a, __vector unsigned short __b) {
+ return (__vector signed long long)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_sll(vector signed long long __a, vector unsigned int __b) {
- return (vector signed long long)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_sll(__vector signed long long __a, __vector unsigned int __b) {
+ return (__vector signed long long)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_sll(vector bool long long __a, vector unsigned char __b) {
- return (vector bool long long)__builtin_s390_vsl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sll(__vector __bool long long __a, __vector unsigned char __b) {
+ return (__vector __bool long long)__builtin_s390_vsl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_sll(vector bool long long __a, vector unsigned short __b) {
- return (vector bool long long)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sll(__vector __bool long long __a, __vector unsigned short __b) {
+ return (__vector __bool long long)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_sll(vector bool long long __a, vector unsigned int __b) {
- return (vector bool long long)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sll(__vector __bool long long __a, __vector unsigned int __b) {
+ return (__vector __bool long long)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sll(vector unsigned long long __a, vector unsigned char __b) {
- return (vector unsigned long long)__builtin_s390_vsl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sll(__vector unsigned long long __a, __vector unsigned char __b) {
+ return (__vector unsigned long long)__builtin_s390_vsl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sll(vector unsigned long long __a, vector unsigned short __b) {
- return (vector unsigned long long)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sll(__vector unsigned long long __a, __vector unsigned short __b) {
+ return (__vector unsigned long long)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sll(vector unsigned long long __a, vector unsigned int __b) {
- return (vector unsigned long long)__builtin_s390_vsl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sll(__vector unsigned long long __a, __vector unsigned int __b) {
+ return (__vector unsigned long long)__builtin_s390_vsl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
/*-- vec_slb ----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_slb(vector signed char __a, vector signed char __b) {
- return (vector signed char)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_slb(__vector signed char __a, __vector signed char __b) {
+ return (__vector signed char)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed char
-vec_slb(vector signed char __a, vector unsigned char __b) {
- return (vector signed char)__builtin_s390_vslb(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed char
+vec_slb(__vector signed char __a, __vector unsigned char __b) {
+ return (__vector signed char)__builtin_s390_vslb(
+ (__vector unsigned char)__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_slb(vector unsigned char __a, vector signed char __b) {
- return __builtin_s390_vslb(__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_slb(__vector unsigned char __a, __vector signed char __b) {
+ return __builtin_s390_vslb(__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_slb(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_slb(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vslb(__a, __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_slb(vector signed short __a, vector signed short __b) {
- return (vector signed short)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_slb(__vector signed short __a, __vector signed short __b) {
+ return (__vector signed short)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_slb(vector signed short __a, vector unsigned short __b) {
- return (vector signed short)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_slb(__vector signed short __a, __vector unsigned short __b) {
+ return (__vector signed short)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_slb(vector unsigned short __a, vector signed short __b) {
- return (vector unsigned short)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_slb(__vector unsigned short __a, __vector signed short __b) {
+ return (__vector unsigned short)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_slb(vector unsigned short __a, vector unsigned short __b) {
- return (vector unsigned short)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_slb(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector unsigned short)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_slb(vector signed int __a, vector signed int __b) {
- return (vector signed int)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_slb(__vector signed int __a, __vector signed int __b) {
+ return (__vector signed int)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_slb(vector signed int __a, vector unsigned int __b) {
- return (vector signed int)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_slb(__vector signed int __a, __vector unsigned int __b) {
+ return (__vector signed int)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_slb(vector unsigned int __a, vector signed int __b) {
- return (vector unsigned int)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_slb(__vector unsigned int __a, __vector signed int __b) {
+ return (__vector unsigned int)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_slb(vector unsigned int __a, vector unsigned int __b) {
- return (vector unsigned int)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_slb(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector unsigned int)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_slb(vector signed long long __a, vector signed long long __b) {
- return (vector signed long long)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_slb(__vector signed long long __a, __vector signed long long __b) {
+ return (__vector signed long long)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_slb(vector signed long long __a, vector unsigned long long __b) {
- return (vector signed long long)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_slb(__vector signed long long __a, __vector unsigned long long __b) {
+ return (__vector signed long long)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_slb(vector unsigned long long __a, vector signed long long __b) {
- return (vector unsigned long long)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_slb(__vector unsigned long long __a, __vector signed long long __b) {
+ return (__vector unsigned long long)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_slb(vector unsigned long long __a, vector unsigned long long __b) {
- return (vector unsigned long long)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_slb(__vector unsigned long long __a, __vector unsigned long long __b) {
+ return (__vector unsigned long long)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_slb(vector float __a, vector signed int __b) {
- return (vector float)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector float
+vec_slb(__vector float __a, __vector signed int __b) {
+ return (__vector float)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector float
-vec_slb(vector float __a, vector unsigned int __b) {
- return (vector float)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector float
+vec_slb(__vector float __a, __vector unsigned int __b) {
+ return (__vector float)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_slb(vector double __a, vector signed long long __b) {
- return (vector double)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector double
+vec_slb(__vector double __a, __vector signed long long __b) {
+ return (__vector double)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector double
-vec_slb(vector double __a, vector unsigned long long __b) {
- return (vector double)__builtin_s390_vslb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector double
+vec_slb(__vector double __a, __vector unsigned long long __b) {
+ return (__vector double)__builtin_s390_vslb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
/*-- vec_sld ----------------------------------------------------------------*/
-extern __ATTRS_o vector signed char
-vec_sld(vector signed char __a, vector signed char __b, int __c)
+extern __ATTRS_o __vector signed char
+vec_sld(__vector signed char __a, __vector signed char __b, int __c)
__constant_range(__c, 0, 15);
-extern __ATTRS_o vector bool char
-vec_sld(vector bool char __a, vector bool char __b, int __c)
+extern __ATTRS_o __vector __bool char
+vec_sld(__vector __bool char __a, __vector __bool char __b, int __c)
__constant_range(__c, 0, 15);
-extern __ATTRS_o vector unsigned char
-vec_sld(vector unsigned char __a, vector unsigned char __b, int __c)
+extern __ATTRS_o __vector unsigned char
+vec_sld(__vector unsigned char __a, __vector unsigned char __b, int __c)
__constant_range(__c, 0, 15);
-extern __ATTRS_o vector signed short
-vec_sld(vector signed short __a, vector signed short __b, int __c)
+extern __ATTRS_o __vector signed short
+vec_sld(__vector signed short __a, __vector signed short __b, int __c)
__constant_range(__c, 0, 15);
-extern __ATTRS_o vector bool short
-vec_sld(vector bool short __a, vector bool short __b, int __c)
+extern __ATTRS_o __vector __bool short
+vec_sld(__vector __bool short __a, __vector __bool short __b, int __c)
__constant_range(__c, 0, 15);
-extern __ATTRS_o vector unsigned short
-vec_sld(vector unsigned short __a, vector unsigned short __b, int __c)
+extern __ATTRS_o __vector unsigned short
+vec_sld(__vector unsigned short __a, __vector unsigned short __b, int __c)
__constant_range(__c, 0, 15);
-extern __ATTRS_o vector signed int
-vec_sld(vector signed int __a, vector signed int __b, int __c)
+extern __ATTRS_o __vector signed int
+vec_sld(__vector signed int __a, __vector signed int __b, int __c)
__constant_range(__c, 0, 15);
-extern __ATTRS_o vector bool int
-vec_sld(vector bool int __a, vector bool int __b, int __c)
+extern __ATTRS_o __vector __bool int
+vec_sld(__vector __bool int __a, __vector __bool int __b, int __c)
__constant_range(__c, 0, 15);
-extern __ATTRS_o vector unsigned int
-vec_sld(vector unsigned int __a, vector unsigned int __b, int __c)
+extern __ATTRS_o __vector unsigned int
+vec_sld(__vector unsigned int __a, __vector unsigned int __b, int __c)
__constant_range(__c, 0, 15);
-extern __ATTRS_o vector signed long long
-vec_sld(vector signed long long __a, vector signed long long __b, int __c)
+extern __ATTRS_o __vector signed long long
+vec_sld(__vector signed long long __a, __vector signed long long __b, int __c)
__constant_range(__c, 0, 15);
-extern __ATTRS_o vector bool long long
-vec_sld(vector bool long long __a, vector bool long long __b, int __c)
+extern __ATTRS_o __vector __bool long long
+vec_sld(__vector __bool long long __a, __vector __bool long long __b, int __c)
__constant_range(__c, 0, 15);
-extern __ATTRS_o vector unsigned long long
-vec_sld(vector unsigned long long __a, vector unsigned long long __b, int __c)
+extern __ATTRS_o __vector unsigned long long
+vec_sld(__vector unsigned long long __a, __vector unsigned long long __b,
+ int __c)
__constant_range(__c, 0, 15);
#if __ARCH__ >= 12
-extern __ATTRS_o vector float
-vec_sld(vector float __a, vector float __b, int __c)
+extern __ATTRS_o __vector float
+vec_sld(__vector float __a, __vector float __b, int __c)
__constant_range(__c, 0, 15);
#endif
-extern __ATTRS_o vector double
-vec_sld(vector double __a, vector double __b, int __c)
+extern __ATTRS_o __vector double
+vec_sld(__vector double __a, __vector double __b, int __c)
__constant_range(__c, 0, 15);
#define vec_sld(X, Y, Z) ((__typeof__((vec_sld)((X), (Y), (Z)))) \
- __builtin_s390_vsldb((vector unsigned char)(X), \
- (vector unsigned char)(Y), (Z)))
+ __builtin_s390_vsldb((__vector unsigned char)(X), \
+ (__vector unsigned char)(Y), (Z)))
/*-- vec_sldw ---------------------------------------------------------------*/
-extern __ATTRS_o vector signed char
-vec_sldw(vector signed char __a, vector signed char __b, int __c)
+extern __ATTRS_o __vector signed char
+vec_sldw(__vector signed char __a, __vector signed char __b, int __c)
__constant_range(__c, 0, 3);
-extern __ATTRS_o vector unsigned char
-vec_sldw(vector unsigned char __a, vector unsigned char __b, int __c)
+extern __ATTRS_o __vector unsigned char
+vec_sldw(__vector unsigned char __a, __vector unsigned char __b, int __c)
__constant_range(__c, 0, 3);
-extern __ATTRS_o vector signed short
-vec_sldw(vector signed short __a, vector signed short __b, int __c)
+extern __ATTRS_o __vector signed short
+vec_sldw(__vector signed short __a, __vector signed short __b, int __c)
__constant_range(__c, 0, 3);
-extern __ATTRS_o vector unsigned short
-vec_sldw(vector unsigned short __a, vector unsigned short __b, int __c)
+extern __ATTRS_o __vector unsigned short
+vec_sldw(__vector unsigned short __a, __vector unsigned short __b, int __c)
__constant_range(__c, 0, 3);
-extern __ATTRS_o vector signed int
-vec_sldw(vector signed int __a, vector signed int __b, int __c)
+extern __ATTRS_o __vector signed int
+vec_sldw(__vector signed int __a, __vector signed int __b, int __c)
__constant_range(__c, 0, 3);
-extern __ATTRS_o vector unsigned int
-vec_sldw(vector unsigned int __a, vector unsigned int __b, int __c)
+extern __ATTRS_o __vector unsigned int
+vec_sldw(__vector unsigned int __a, __vector unsigned int __b, int __c)
__constant_range(__c, 0, 3);
-extern __ATTRS_o vector signed long long
-vec_sldw(vector signed long long __a, vector signed long long __b, int __c)
+extern __ATTRS_o __vector signed long long
+vec_sldw(__vector signed long long __a, __vector signed long long __b, int __c)
__constant_range(__c, 0, 3);
-extern __ATTRS_o vector unsigned long long
-vec_sldw(vector unsigned long long __a, vector unsigned long long __b, int __c)
+extern __ATTRS_o __vector unsigned long long
+vec_sldw(__vector unsigned long long __a, __vector unsigned long long __b,
+ int __c)
__constant_range(__c, 0, 3);
// This prototype is deprecated.
-extern __ATTRS_o vector double
-vec_sldw(vector double __a, vector double __b, int __c)
+extern __ATTRS_o __vector double
+vec_sldw(__vector double __a, __vector double __b, int __c)
__constant_range(__c, 0, 3);
#define vec_sldw(X, Y, Z) ((__typeof__((vec_sldw)((X), (Y), (Z)))) \
- __builtin_s390_vsldb((vector unsigned char)(X), \
- (vector unsigned char)(Y), (Z) * 4))
+ __builtin_s390_vsldb((__vector unsigned char)(X), \
+ (__vector unsigned char)(Y), (Z) * 4))
/*-- vec_sldb ---------------------------------------------------------------*/
#if __ARCH__ >= 13
-extern __ATTRS_o vector signed char
-vec_sldb(vector signed char __a, vector signed char __b, int __c)
+extern __ATTRS_o __vector signed char
+vec_sldb(__vector signed char __a, __vector signed char __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector unsigned char
-vec_sldb(vector unsigned char __a, vector unsigned char __b, int __c)
+extern __ATTRS_o __vector unsigned char
+vec_sldb(__vector unsigned char __a, __vector unsigned char __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector signed short
-vec_sldb(vector signed short __a, vector signed short __b, int __c)
+extern __ATTRS_o __vector signed short
+vec_sldb(__vector signed short __a, __vector signed short __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector unsigned short
-vec_sldb(vector unsigned short __a, vector unsigned short __b, int __c)
+extern __ATTRS_o __vector unsigned short
+vec_sldb(__vector unsigned short __a, __vector unsigned short __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector signed int
-vec_sldb(vector signed int __a, vector signed int __b, int __c)
+extern __ATTRS_o __vector signed int
+vec_sldb(__vector signed int __a, __vector signed int __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector unsigned int
-vec_sldb(vector unsigned int __a, vector unsigned int __b, int __c)
+extern __ATTRS_o __vector unsigned int
+vec_sldb(__vector unsigned int __a, __vector unsigned int __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector signed long long
-vec_sldb(vector signed long long __a, vector signed long long __b, int __c)
+extern __ATTRS_o __vector signed long long
+vec_sldb(__vector signed long long __a, __vector signed long long __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector unsigned long long
-vec_sldb(vector unsigned long long __a, vector unsigned long long __b, int __c)
+extern __ATTRS_o __vector unsigned long long
+vec_sldb(__vector unsigned long long __a, __vector unsigned long long __b,
+ int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector float
-vec_sldb(vector float __a, vector float __b, int __c)
+extern __ATTRS_o __vector float
+vec_sldb(__vector float __a, __vector float __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector double
-vec_sldb(vector double __a, vector double __b, int __c)
+extern __ATTRS_o __vector double
+vec_sldb(__vector double __a, __vector double __b, int __c)
__constant_range(__c, 0, 7);
#define vec_sldb(X, Y, Z) ((__typeof__((vec_sldb)((X), (Y), (Z)))) \
- __builtin_s390_vsld((vector unsigned char)(X), \
- (vector unsigned char)(Y), (Z)))
+ __builtin_s390_vsld((__vector unsigned char)(X), \
+ (__vector unsigned char)(Y), (Z)))
#endif
/*-- vec_sral ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_sral(vector signed char __a, vector unsigned char __b) {
- return (vector signed char)__builtin_s390_vsra(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed char
+vec_sral(__vector signed char __a, __vector unsigned char __b) {
+ return (__vector signed char)__builtin_s390_vsra(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_sral(vector signed char __a, vector unsigned short __b) {
- return (vector signed char)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_sral(__vector signed char __a, __vector unsigned short __b) {
+ return (__vector signed char)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_sral(vector signed char __a, vector unsigned int __b) {
- return (vector signed char)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_sral(__vector signed char __a, __vector unsigned int __b) {
+ return (__vector signed char)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_sral(vector bool char __a, vector unsigned char __b) {
- return (vector bool char)__builtin_s390_vsra(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_sral(__vector __bool char __a, __vector unsigned char __b) {
+ return (__vector __bool char)__builtin_s390_vsra(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_sral(vector bool char __a, vector unsigned short __b) {
- return (vector bool char)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_sral(__vector __bool char __a, __vector unsigned short __b) {
+ return (__vector __bool char)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_sral(vector bool char __a, vector unsigned int __b) {
- return (vector bool char)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_sral(__vector __bool char __a, __vector unsigned int __b) {
+ return (__vector __bool char)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_sral(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sral(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vsra(__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_sral(vector unsigned char __a, vector unsigned short __b) {
- return __builtin_s390_vsra(__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sral(__vector unsigned char __a, __vector unsigned short __b) {
+ return __builtin_s390_vsra(__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_sral(vector unsigned char __a, vector unsigned int __b) {
- return __builtin_s390_vsra(__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sral(__vector unsigned char __a, __vector unsigned int __b) {
+ return __builtin_s390_vsra(__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_sral(vector signed short __a, vector unsigned char __b) {
- return (vector signed short)__builtin_s390_vsra(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed short
+vec_sral(__vector signed short __a, __vector unsigned char __b) {
+ return (__vector signed short)__builtin_s390_vsra(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_sral(vector signed short __a, vector unsigned short __b) {
- return (vector signed short)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_sral(__vector signed short __a, __vector unsigned short __b) {
+ return (__vector signed short)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_sral(vector signed short __a, vector unsigned int __b) {
- return (vector signed short)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_sral(__vector signed short __a, __vector unsigned int __b) {
+ return (__vector signed short)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_sral(vector bool short __a, vector unsigned char __b) {
- return (vector bool short)__builtin_s390_vsra(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_sral(__vector __bool short __a, __vector unsigned char __b) {
+ return (__vector __bool short)__builtin_s390_vsra(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_sral(vector bool short __a, vector unsigned short __b) {
- return (vector bool short)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_sral(__vector __bool short __a, __vector unsigned short __b) {
+ return (__vector __bool short)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_sral(vector bool short __a, vector unsigned int __b) {
- return (vector bool short)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_sral(__vector __bool short __a, __vector unsigned int __b) {
+ return (__vector __bool short)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_sral(vector unsigned short __a, vector unsigned char __b) {
- return (vector unsigned short)__builtin_s390_vsra(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sral(__vector unsigned short __a, __vector unsigned char __b) {
+ return (__vector unsigned short)__builtin_s390_vsra(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_sral(vector unsigned short __a, vector unsigned short __b) {
- return (vector unsigned short)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sral(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector unsigned short)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_sral(vector unsigned short __a, vector unsigned int __b) {
- return (vector unsigned short)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sral(__vector unsigned short __a, __vector unsigned int __b) {
+ return (__vector unsigned short)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_sral(vector signed int __a, vector unsigned char __b) {
- return (vector signed int)__builtin_s390_vsra(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed int
+vec_sral(__vector signed int __a, __vector unsigned char __b) {
+ return (__vector signed int)__builtin_s390_vsra(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_sral(vector signed int __a, vector unsigned short __b) {
- return (vector signed int)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_sral(__vector signed int __a, __vector unsigned short __b) {
+ return (__vector signed int)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_sral(vector signed int __a, vector unsigned int __b) {
- return (vector signed int)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_sral(__vector signed int __a, __vector unsigned int __b) {
+ return (__vector signed int)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_sral(vector bool int __a, vector unsigned char __b) {
- return (vector bool int)__builtin_s390_vsra(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_sral(__vector __bool int __a, __vector unsigned char __b) {
+ return (__vector __bool int)__builtin_s390_vsra(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_sral(vector bool int __a, vector unsigned short __b) {
- return (vector bool int)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_sral(__vector __bool int __a, __vector unsigned short __b) {
+ return (__vector __bool int)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_sral(vector bool int __a, vector unsigned int __b) {
- return (vector bool int)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_sral(__vector __bool int __a, __vector unsigned int __b) {
+ return (__vector __bool int)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_sral(vector unsigned int __a, vector unsigned char __b) {
- return (vector unsigned int)__builtin_s390_vsra(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sral(__vector unsigned int __a, __vector unsigned char __b) {
+ return (__vector unsigned int)__builtin_s390_vsra(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_sral(vector unsigned int __a, vector unsigned short __b) {
- return (vector unsigned int)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sral(__vector unsigned int __a, __vector unsigned short __b) {
+ return (__vector unsigned int)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_sral(vector unsigned int __a, vector unsigned int __b) {
- return (vector unsigned int)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sral(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector unsigned int)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_sral(vector signed long long __a, vector unsigned char __b) {
- return (vector signed long long)__builtin_s390_vsra(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_sral(__vector signed long long __a, __vector unsigned char __b) {
+ return (__vector signed long long)__builtin_s390_vsra(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_sral(vector signed long long __a, vector unsigned short __b) {
- return (vector signed long long)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_sral(__vector signed long long __a, __vector unsigned short __b) {
+ return (__vector signed long long)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_sral(vector signed long long __a, vector unsigned int __b) {
- return (vector signed long long)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_sral(__vector signed long long __a, __vector unsigned int __b) {
+ return (__vector signed long long)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_sral(vector bool long long __a, vector unsigned char __b) {
- return (vector bool long long)__builtin_s390_vsra(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sral(__vector __bool long long __a, __vector unsigned char __b) {
+ return (__vector __bool long long)__builtin_s390_vsra(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_sral(vector bool long long __a, vector unsigned short __b) {
- return (vector bool long long)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sral(__vector __bool long long __a, __vector unsigned short __b) {
+ return (__vector __bool long long)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_sral(vector bool long long __a, vector unsigned int __b) {
- return (vector bool long long)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sral(__vector __bool long long __a, __vector unsigned int __b) {
+ return (__vector __bool long long)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sral(vector unsigned long long __a, vector unsigned char __b) {
- return (vector unsigned long long)__builtin_s390_vsra(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sral(__vector unsigned long long __a, __vector unsigned char __b) {
+ return (__vector unsigned long long)__builtin_s390_vsra(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sral(vector unsigned long long __a, vector unsigned short __b) {
- return (vector unsigned long long)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sral(__vector unsigned long long __a, __vector unsigned short __b) {
+ return (__vector unsigned long long)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sral(vector unsigned long long __a, vector unsigned int __b) {
- return (vector unsigned long long)__builtin_s390_vsra(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sral(__vector unsigned long long __a, __vector unsigned int __b) {
+ return (__vector unsigned long long)__builtin_s390_vsra(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
/*-- vec_srab ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_srab(vector signed char __a, vector signed char __b) {
- return (vector signed char)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_srab(__vector signed char __a, __vector signed char __b) {
+ return (__vector signed char)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed char
-vec_srab(vector signed char __a, vector unsigned char __b) {
- return (vector signed char)__builtin_s390_vsrab(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed char
+vec_srab(__vector signed char __a, __vector unsigned char __b) {
+ return (__vector signed char)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_srab(vector unsigned char __a, vector signed char __b) {
- return __builtin_s390_vsrab(__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srab(__vector unsigned char __a, __vector signed char __b) {
+ return __builtin_s390_vsrab(__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_srab(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srab(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vsrab(__a, __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_srab(vector signed short __a, vector signed short __b) {
- return (vector signed short)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_srab(__vector signed short __a, __vector signed short __b) {
+ return (__vector signed short)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_srab(vector signed short __a, vector unsigned short __b) {
- return (vector signed short)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_srab(__vector signed short __a, __vector unsigned short __b) {
+ return (__vector signed short)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_srab(vector unsigned short __a, vector signed short __b) {
- return (vector unsigned short)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srab(__vector unsigned short __a, __vector signed short __b) {
+ return (__vector unsigned short)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_srab(vector unsigned short __a, vector unsigned short __b) {
- return (vector unsigned short)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srab(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector unsigned short)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_srab(vector signed int __a, vector signed int __b) {
- return (vector signed int)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_srab(__vector signed int __a, __vector signed int __b) {
+ return (__vector signed int)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_srab(vector signed int __a, vector unsigned int __b) {
- return (vector signed int)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_srab(__vector signed int __a, __vector unsigned int __b) {
+ return (__vector signed int)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_srab(vector unsigned int __a, vector signed int __b) {
- return (vector unsigned int)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srab(__vector unsigned int __a, __vector signed int __b) {
+ return (__vector unsigned int)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_srab(vector unsigned int __a, vector unsigned int __b) {
- return (vector unsigned int)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srab(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector unsigned int)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_srab(vector signed long long __a, vector signed long long __b) {
- return (vector signed long long)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_srab(__vector signed long long __a, __vector signed long long __b) {
+ return (__vector signed long long)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_srab(vector signed long long __a, vector unsigned long long __b) {
- return (vector signed long long)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_srab(__vector signed long long __a, __vector unsigned long long __b) {
+ return (__vector signed long long)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srab(vector unsigned long long __a, vector signed long long __b) {
- return (vector unsigned long long)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srab(__vector unsigned long long __a, __vector signed long long __b) {
+ return (__vector unsigned long long)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srab(vector unsigned long long __a, vector unsigned long long __b) {
- return (vector unsigned long long)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srab(__vector unsigned long long __a, __vector unsigned long long __b) {
+ return (__vector unsigned long long)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_srab(vector float __a, vector signed int __b) {
- return (vector float)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector float
+vec_srab(__vector float __a, __vector signed int __b) {
+ return (__vector float)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector float
-vec_srab(vector float __a, vector unsigned int __b) {
- return (vector float)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector float
+vec_srab(__vector float __a, __vector unsigned int __b) {
+ return (__vector float)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_srab(vector double __a, vector signed long long __b) {
- return (vector double)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector double
+vec_srab(__vector double __a, __vector signed long long __b) {
+ return (__vector double)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector double
-vec_srab(vector double __a, vector unsigned long long __b) {
- return (vector double)__builtin_s390_vsrab(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector double
+vec_srab(__vector double __a, __vector unsigned long long __b) {
+ return (__vector double)__builtin_s390_vsrab(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
/*-- vec_srl ----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_srl(vector signed char __a, vector unsigned char __b) {
- return (vector signed char)__builtin_s390_vsrl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed char
+vec_srl(__vector signed char __a, __vector unsigned char __b) {
+ return (__vector signed char)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_srl(vector signed char __a, vector unsigned short __b) {
- return (vector signed char)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_srl(__vector signed char __a, __vector unsigned short __b) {
+ return (__vector signed char)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_srl(vector signed char __a, vector unsigned int __b) {
- return (vector signed char)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_srl(__vector signed char __a, __vector unsigned int __b) {
+ return (__vector signed char)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_srl(vector bool char __a, vector unsigned char __b) {
- return (vector bool char)__builtin_s390_vsrl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_srl(__vector __bool char __a, __vector unsigned char __b) {
+ return (__vector __bool char)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_srl(vector bool char __a, vector unsigned short __b) {
- return (vector bool char)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_srl(__vector __bool char __a, __vector unsigned short __b) {
+ return (__vector __bool char)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_srl(vector bool char __a, vector unsigned int __b) {
- return (vector bool char)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool char
+vec_srl(__vector __bool char __a, __vector unsigned int __b) {
+ return (__vector __bool char)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_srl(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srl(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vsrl(__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_srl(vector unsigned char __a, vector unsigned short __b) {
- return __builtin_s390_vsrl(__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srl(__vector unsigned char __a, __vector unsigned short __b) {
+ return __builtin_s390_vsrl(__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_srl(vector unsigned char __a, vector unsigned int __b) {
- return __builtin_s390_vsrl(__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srl(__vector unsigned char __a, __vector unsigned int __b) {
+ return __builtin_s390_vsrl(__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_srl(vector signed short __a, vector unsigned char __b) {
- return (vector signed short)__builtin_s390_vsrl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed short
+vec_srl(__vector signed short __a, __vector unsigned char __b) {
+ return (__vector signed short)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_srl(vector signed short __a, vector unsigned short __b) {
- return (vector signed short)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_srl(__vector signed short __a, __vector unsigned short __b) {
+ return (__vector signed short)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_srl(vector signed short __a, vector unsigned int __b) {
- return (vector signed short)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_srl(__vector signed short __a, __vector unsigned int __b) {
+ return (__vector signed short)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_srl(vector bool short __a, vector unsigned char __b) {
- return (vector bool short)__builtin_s390_vsrl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_srl(__vector __bool short __a, __vector unsigned char __b) {
+ return (__vector __bool short)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_srl(vector bool short __a, vector unsigned short __b) {
- return (vector bool short)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_srl(__vector __bool short __a, __vector unsigned short __b) {
+ return (__vector __bool short)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_srl(vector bool short __a, vector unsigned int __b) {
- return (vector bool short)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool short
+vec_srl(__vector __bool short __a, __vector unsigned int __b) {
+ return (__vector __bool short)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_srl(vector unsigned short __a, vector unsigned char __b) {
- return (vector unsigned short)__builtin_s390_vsrl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srl(__vector unsigned short __a, __vector unsigned char __b) {
+ return (__vector unsigned short)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_srl(vector unsigned short __a, vector unsigned short __b) {
- return (vector unsigned short)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srl(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector unsigned short)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_srl(vector unsigned short __a, vector unsigned int __b) {
- return (vector unsigned short)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srl(__vector unsigned short __a, __vector unsigned int __b) {
+ return (__vector unsigned short)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_srl(vector signed int __a, vector unsigned char __b) {
- return (vector signed int)__builtin_s390_vsrl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed int
+vec_srl(__vector signed int __a, __vector unsigned char __b) {
+ return (__vector signed int)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_srl(vector signed int __a, vector unsigned short __b) {
- return (vector signed int)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_srl(__vector signed int __a, __vector unsigned short __b) {
+ return (__vector signed int)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_srl(vector signed int __a, vector unsigned int __b) {
- return (vector signed int)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_srl(__vector signed int __a, __vector unsigned int __b) {
+ return (__vector signed int)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_srl(vector bool int __a, vector unsigned char __b) {
- return (vector bool int)__builtin_s390_vsrl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_srl(__vector __bool int __a, __vector unsigned char __b) {
+ return (__vector __bool int)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_srl(vector bool int __a, vector unsigned short __b) {
- return (vector bool int)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_srl(__vector __bool int __a, __vector unsigned short __b) {
+ return (__vector __bool int)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_srl(vector bool int __a, vector unsigned int __b) {
- return (vector bool int)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool int
+vec_srl(__vector __bool int __a, __vector unsigned int __b) {
+ return (__vector __bool int)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_srl(vector unsigned int __a, vector unsigned char __b) {
- return (vector unsigned int)__builtin_s390_vsrl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srl(__vector unsigned int __a, __vector unsigned char __b) {
+ return (__vector unsigned int)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_srl(vector unsigned int __a, vector unsigned short __b) {
- return (vector unsigned int)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srl(__vector unsigned int __a, __vector unsigned short __b) {
+ return (__vector unsigned int)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_srl(vector unsigned int __a, vector unsigned int __b) {
- return (vector unsigned int)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srl(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector unsigned int)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_srl(vector signed long long __a, vector unsigned char __b) {
- return (vector signed long long)__builtin_s390_vsrl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_srl(__vector signed long long __a, __vector unsigned char __b) {
+ return (__vector signed long long)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_srl(vector signed long long __a, vector unsigned short __b) {
- return (vector signed long long)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_srl(__vector signed long long __a, __vector unsigned short __b) {
+ return (__vector signed long long)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_srl(vector signed long long __a, vector unsigned int __b) {
- return (vector signed long long)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_srl(__vector signed long long __a, __vector unsigned int __b) {
+ return (__vector signed long long)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_srl(vector bool long long __a, vector unsigned char __b) {
- return (vector bool long long)__builtin_s390_vsrl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_srl(__vector __bool long long __a, __vector unsigned char __b) {
+ return (__vector __bool long long)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_srl(vector bool long long __a, vector unsigned short __b) {
- return (vector bool long long)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_srl(__vector __bool long long __a, __vector unsigned short __b) {
+ return (__vector __bool long long)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_srl(vector bool long long __a, vector unsigned int __b) {
- return (vector bool long long)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector __bool long long
+vec_srl(__vector __bool long long __a, __vector unsigned int __b) {
+ return (__vector __bool long long)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srl(vector unsigned long long __a, vector unsigned char __b) {
- return (vector unsigned long long)__builtin_s390_vsrl(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srl(__vector unsigned long long __a, __vector unsigned char __b) {
+ return (__vector unsigned long long)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, __b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srl(vector unsigned long long __a, vector unsigned short __b) {
- return (vector unsigned long long)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srl(__vector unsigned long long __a, __vector unsigned short __b) {
+ return (__vector unsigned long long)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srl(vector unsigned long long __a, vector unsigned int __b) {
- return (vector unsigned long long)__builtin_s390_vsrl(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srl(__vector unsigned long long __a, __vector unsigned int __b) {
+ return (__vector unsigned long long)__builtin_s390_vsrl(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
/*-- vec_srb ----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_srb(vector signed char __a, vector signed char __b) {
- return (vector signed char)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_srb(__vector signed char __a, __vector signed char __b) {
+ return (__vector signed char)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed char
-vec_srb(vector signed char __a, vector unsigned char __b) {
- return (vector signed char)__builtin_s390_vsrlb(
- (vector unsigned char)__a, __b);
+static inline __ATTRS_o_ai __vector signed char
+vec_srb(__vector signed char __a, __vector unsigned char __b) {
+ return (__vector signed char)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_srb(vector unsigned char __a, vector signed char __b) {
- return __builtin_s390_vsrlb(__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srb(__vector unsigned char __a, __vector signed char __b) {
+ return __builtin_s390_vsrlb(__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_srb(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srb(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vsrlb(__a, __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_srb(vector signed short __a, vector signed short __b) {
- return (vector signed short)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_srb(__vector signed short __a, __vector signed short __b) {
+ return (__vector signed short)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_srb(vector signed short __a, vector unsigned short __b) {
- return (vector signed short)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_srb(__vector signed short __a, __vector unsigned short __b) {
+ return (__vector signed short)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_srb(vector unsigned short __a, vector signed short __b) {
- return (vector unsigned short)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srb(__vector unsigned short __a, __vector signed short __b) {
+ return (__vector unsigned short)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_srb(vector unsigned short __a, vector unsigned short __b) {
- return (vector unsigned short)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srb(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector unsigned short)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_srb(vector signed int __a, vector signed int __b) {
- return (vector signed int)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_srb(__vector signed int __a, __vector signed int __b) {
+ return (__vector signed int)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_srb(vector signed int __a, vector unsigned int __b) {
- return (vector signed int)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_srb(__vector signed int __a, __vector unsigned int __b) {
+ return (__vector signed int)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_srb(vector unsigned int __a, vector signed int __b) {
- return (vector unsigned int)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srb(__vector unsigned int __a, __vector signed int __b) {
+ return (__vector unsigned int)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_srb(vector unsigned int __a, vector unsigned int __b) {
- return (vector unsigned int)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srb(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector unsigned int)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_srb(vector signed long long __a, vector signed long long __b) {
- return (vector signed long long)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_srb(__vector signed long long __a, __vector signed long long __b) {
+ return (__vector signed long long)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_srb(vector signed long long __a, vector unsigned long long __b) {
- return (vector signed long long)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed long long
+vec_srb(__vector signed long long __a, __vector unsigned long long __b) {
+ return (__vector signed long long)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srb(vector unsigned long long __a, vector signed long long __b) {
- return (vector unsigned long long)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srb(__vector unsigned long long __a, __vector signed long long __b) {
+ return (__vector unsigned long long)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srb(vector unsigned long long __a, vector unsigned long long __b) {
- return (vector unsigned long long)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srb(__vector unsigned long long __a, __vector unsigned long long __b) {
+ return (__vector unsigned long long)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_srb(vector float __a, vector signed int __b) {
- return (vector float)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector float
+vec_srb(__vector float __a, __vector signed int __b) {
+ return (__vector float)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector float
-vec_srb(vector float __a, vector unsigned int __b) {
- return (vector float)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector float
+vec_srb(__vector float __a, __vector unsigned int __b) {
+ return (__vector float)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_srb(vector double __a, vector signed long long __b) {
- return (vector double)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector double
+vec_srb(__vector double __a, __vector signed long long __b) {
+ return (__vector double)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector double
-vec_srb(vector double __a, vector unsigned long long __b) {
- return (vector double)__builtin_s390_vsrlb(
- (vector unsigned char)__a, (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector double
+vec_srb(__vector double __a, __vector unsigned long long __b) {
+ return (__vector double)__builtin_s390_vsrlb(
+ (__vector unsigned char)__a, (__vector unsigned char)__b);
}
/*-- vec_srdb ---------------------------------------------------------------*/
#if __ARCH__ >= 13
-extern __ATTRS_o vector signed char
-vec_srdb(vector signed char __a, vector signed char __b, int __c)
+extern __ATTRS_o __vector signed char
+vec_srdb(__vector signed char __a, __vector signed char __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector unsigned char
-vec_srdb(vector unsigned char __a, vector unsigned char __b, int __c)
+extern __ATTRS_o __vector unsigned char
+vec_srdb(__vector unsigned char __a, __vector unsigned char __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector signed short
-vec_srdb(vector signed short __a, vector signed short __b, int __c)
+extern __ATTRS_o __vector signed short
+vec_srdb(__vector signed short __a, __vector signed short __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector unsigned short
-vec_srdb(vector unsigned short __a, vector unsigned short __b, int __c)
+extern __ATTRS_o __vector unsigned short
+vec_srdb(__vector unsigned short __a, __vector unsigned short __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector signed int
-vec_srdb(vector signed int __a, vector signed int __b, int __c)
+extern __ATTRS_o __vector signed int
+vec_srdb(__vector signed int __a, __vector signed int __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector unsigned int
-vec_srdb(vector unsigned int __a, vector unsigned int __b, int __c)
+extern __ATTRS_o __vector unsigned int
+vec_srdb(__vector unsigned int __a, __vector unsigned int __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector signed long long
-vec_srdb(vector signed long long __a, vector signed long long __b, int __c)
+extern __ATTRS_o __vector signed long long
+vec_srdb(__vector signed long long __a, __vector signed long long __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector unsigned long long
-vec_srdb(vector unsigned long long __a, vector unsigned long long __b, int __c)
+extern __ATTRS_o __vector unsigned long long
+vec_srdb(__vector unsigned long long __a, __vector unsigned long long __b,
+ int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector float
-vec_srdb(vector float __a, vector float __b, int __c)
+extern __ATTRS_o __vector float
+vec_srdb(__vector float __a, __vector float __b, int __c)
__constant_range(__c, 0, 7);
-extern __ATTRS_o vector double
-vec_srdb(vector double __a, vector double __b, int __c)
+extern __ATTRS_o __vector double
+vec_srdb(__vector double __a, __vector double __b, int __c)
__constant_range(__c, 0, 7);
#define vec_srdb(X, Y, Z) ((__typeof__((vec_srdb)((X), (Y), (Z)))) \
- __builtin_s390_vsrd((vector unsigned char)(X), \
- (vector unsigned char)(Y), (Z)))
+ __builtin_s390_vsrd((__vector unsigned char)(X), \
+ (__vector unsigned char)(Y), (Z)))
#endif
/*-- vec_abs ----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_abs(vector signed char __a) {
- return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed char)0));
+static inline __ATTRS_o_ai __vector signed char
+vec_abs(__vector signed char __a) {
+ return vec_sel(__a, -__a, vec_cmplt(__a, (__vector signed char)0));
}
-static inline __ATTRS_o_ai vector signed short
-vec_abs(vector signed short __a) {
- return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed short)0));
+static inline __ATTRS_o_ai __vector signed short
+vec_abs(__vector signed short __a) {
+ return vec_sel(__a, -__a, vec_cmplt(__a, (__vector signed short)0));
}
-static inline __ATTRS_o_ai vector signed int
-vec_abs(vector signed int __a) {
- return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed int)0));
+static inline __ATTRS_o_ai __vector signed int
+vec_abs(__vector signed int __a) {
+ return vec_sel(__a, -__a, vec_cmplt(__a, (__vector signed int)0));
}
-static inline __ATTRS_o_ai vector signed long long
-vec_abs(vector signed long long __a) {
- return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed long long)0));
+static inline __ATTRS_o_ai __vector signed long long
+vec_abs(__vector signed long long __a) {
+ return vec_sel(__a, -__a, vec_cmplt(__a, (__vector signed long long)0));
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_abs(vector float __a) {
+static inline __ATTRS_o_ai __vector float
+vec_abs(__vector float __a) {
return __builtin_s390_vflpsb(__a);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_abs(vector double __a) {
+static inline __ATTRS_o_ai __vector double
+vec_abs(__vector double __a) {
return __builtin_s390_vflpdb(__a);
}
/*-- vec_nabs ---------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_nabs(vector float __a) {
+static inline __ATTRS_o_ai __vector float
+vec_nabs(__vector float __a) {
return __builtin_s390_vflnsb(__a);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_nabs(vector double __a) {
+static inline __ATTRS_o_ai __vector double
+vec_nabs(__vector double __a) {
return __builtin_s390_vflndb(__a);
}
/*-- vec_max ----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_max(vector signed char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_max(__vector signed char __a, __vector signed char __b) {
return vec_sel(__b, __a, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_max(vector signed char __a, vector bool char __b) {
- vector signed char __bc = (vector signed char)__b;
+static inline __ATTRS_o_ai __vector signed char
+vec_max(__vector signed char __a, __vector __bool char __b) {
+ __vector signed char __bc = (__vector signed char)__b;
return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_max(vector bool char __a, vector signed char __b) {
- vector signed char __ac = (vector signed char)__a;
+static inline __ATTRS_o_ai __vector signed char
+vec_max(__vector __bool char __a, __vector signed char __b) {
+ __vector signed char __ac = (__vector signed char)__a;
return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_max(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_max(__vector unsigned char __a, __vector unsigned char __b) {
return vec_sel(__b, __a, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_max(vector unsigned char __a, vector bool char __b) {
- vector unsigned char __bc = (vector unsigned char)__b;
+static inline __ATTRS_o_ai __vector unsigned char
+vec_max(__vector unsigned char __a, __vector __bool char __b) {
+ __vector unsigned char __bc = (__vector unsigned char)__b;
return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_max(vector bool char __a, vector unsigned char __b) {
- vector unsigned char __ac = (vector unsigned char)__a;
+static inline __ATTRS_o_ai __vector unsigned char
+vec_max(__vector __bool char __a, __vector unsigned char __b) {
+ __vector unsigned char __ac = (__vector unsigned char)__a;
return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector signed short
-vec_max(vector signed short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_max(__vector signed short __a, __vector signed short __b) {
return vec_sel(__b, __a, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_max(vector signed short __a, vector bool short __b) {
- vector signed short __bc = (vector signed short)__b;
+static inline __ATTRS_o_ai __vector signed short
+vec_max(__vector signed short __a, __vector __bool short __b) {
+ __vector signed short __bc = (__vector signed short)__b;
return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_max(vector bool short __a, vector signed short __b) {
- vector signed short __ac = (vector signed short)__a;
+static inline __ATTRS_o_ai __vector signed short
+vec_max(__vector __bool short __a, __vector signed short __b) {
+ __vector signed short __ac = (__vector signed short)__a;
return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_max(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_max(__vector unsigned short __a, __vector unsigned short __b) {
return vec_sel(__b, __a, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_max(vector unsigned short __a, vector bool short __b) {
- vector unsigned short __bc = (vector unsigned short)__b;
+static inline __ATTRS_o_ai __vector unsigned short
+vec_max(__vector unsigned short __a, __vector __bool short __b) {
+ __vector unsigned short __bc = (__vector unsigned short)__b;
return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_max(vector bool short __a, vector unsigned short __b) {
- vector unsigned short __ac = (vector unsigned short)__a;
+static inline __ATTRS_o_ai __vector unsigned short
+vec_max(__vector __bool short __a, __vector unsigned short __b) {
+ __vector unsigned short __ac = (__vector unsigned short)__a;
return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector signed int
-vec_max(vector signed int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_max(__vector signed int __a, __vector signed int __b) {
return vec_sel(__b, __a, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_max(vector signed int __a, vector bool int __b) {
- vector signed int __bc = (vector signed int)__b;
+static inline __ATTRS_o_ai __vector signed int
+vec_max(__vector signed int __a, __vector __bool int __b) {
+ __vector signed int __bc = (__vector signed int)__b;
return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_max(vector bool int __a, vector signed int __b) {
- vector signed int __ac = (vector signed int)__a;
+static inline __ATTRS_o_ai __vector signed int
+vec_max(__vector __bool int __a, __vector signed int __b) {
+ __vector signed int __ac = (__vector signed int)__a;
return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_max(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_max(__vector unsigned int __a, __vector unsigned int __b) {
return vec_sel(__b, __a, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_max(vector unsigned int __a, vector bool int __b) {
- vector unsigned int __bc = (vector unsigned int)__b;
+static inline __ATTRS_o_ai __vector unsigned int
+vec_max(__vector unsigned int __a, __vector __bool int __b) {
+ __vector unsigned int __bc = (__vector unsigned int)__b;
return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_max(vector bool int __a, vector unsigned int __b) {
- vector unsigned int __ac = (vector unsigned int)__a;
+static inline __ATTRS_o_ai __vector unsigned int
+vec_max(__vector __bool int __a, __vector unsigned int __b) {
+ __vector unsigned int __ac = (__vector unsigned int)__a;
return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector signed long long
-vec_max(vector signed long long __a, vector signed long long __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_max(__vector signed long long __a, __vector signed long long __b) {
return vec_sel(__b, __a, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_max(vector signed long long __a, vector bool long long __b) {
- vector signed long long __bc = (vector signed long long)__b;
+static inline __ATTRS_o_ai __vector signed long long
+vec_max(__vector signed long long __a, __vector __bool long long __b) {
+ __vector signed long long __bc = (__vector signed long long)__b;
return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_max(vector bool long long __a, vector signed long long __b) {
- vector signed long long __ac = (vector signed long long)__a;
+static inline __ATTRS_o_ai __vector signed long long
+vec_max(__vector __bool long long __a, __vector signed long long __b) {
+ __vector signed long long __ac = (__vector signed long long)__a;
return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_max(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_max(__vector unsigned long long __a, __vector unsigned long long __b) {
return vec_sel(__b, __a, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_max(vector unsigned long long __a, vector bool long long __b) {
- vector unsigned long long __bc = (vector unsigned long long)__b;
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_max(__vector unsigned long long __a, __vector __bool long long __b) {
+ __vector unsigned long long __bc = (__vector unsigned long long)__b;
return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_max(vector bool long long __a, vector unsigned long long __b) {
- vector unsigned long long __ac = (vector unsigned long long)__a;
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_max(__vector __bool long long __a, __vector unsigned long long __b) {
+ __vector unsigned long long __ac = (__vector unsigned long long)__a;
return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_max(vector float __a, vector float __b) {
+static inline __ATTRS_o_ai __vector float
+vec_max(__vector float __a, __vector float __b) {
return __builtin_s390_vfmaxsb(__a, __b, 0);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_max(vector double __a, vector double __b) {
+static inline __ATTRS_o_ai __vector double
+vec_max(__vector double __a, __vector double __b) {
#if __ARCH__ >= 12
return __builtin_s390_vfmaxdb(__a, __b, 0);
#else
@@ -8020,167 +8092,167 @@ vec_max(vector double __a, vector double __b) {
/*-- vec_min ----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_min(vector signed char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_min(__vector signed char __a, __vector signed char __b) {
return vec_sel(__a, __b, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_min(vector signed char __a, vector bool char __b) {
- vector signed char __bc = (vector signed char)__b;
+static inline __ATTRS_o_ai __vector signed char
+vec_min(__vector signed char __a, __vector __bool char __b) {
+ __vector signed char __bc = (__vector signed char)__b;
return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_min(vector bool char __a, vector signed char __b) {
- vector signed char __ac = (vector signed char)__a;
+static inline __ATTRS_o_ai __vector signed char
+vec_min(__vector __bool char __a, __vector signed char __b) {
+ __vector signed char __ac = (__vector signed char)__a;
return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_min(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_min(__vector unsigned char __a, __vector unsigned char __b) {
return vec_sel(__a, __b, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_min(vector unsigned char __a, vector bool char __b) {
- vector unsigned char __bc = (vector unsigned char)__b;
+static inline __ATTRS_o_ai __vector unsigned char
+vec_min(__vector unsigned char __a, __vector __bool char __b) {
+ __vector unsigned char __bc = (__vector unsigned char)__b;
return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_min(vector bool char __a, vector unsigned char __b) {
- vector unsigned char __ac = (vector unsigned char)__a;
+static inline __ATTRS_o_ai __vector unsigned char
+vec_min(__vector __bool char __a, __vector unsigned char __b) {
+ __vector unsigned char __ac = (__vector unsigned char)__a;
return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector signed short
-vec_min(vector signed short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_min(__vector signed short __a, __vector signed short __b) {
return vec_sel(__a, __b, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_min(vector signed short __a, vector bool short __b) {
- vector signed short __bc = (vector signed short)__b;
+static inline __ATTRS_o_ai __vector signed short
+vec_min(__vector signed short __a, __vector __bool short __b) {
+ __vector signed short __bc = (__vector signed short)__b;
return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_min(vector bool short __a, vector signed short __b) {
- vector signed short __ac = (vector signed short)__a;
+static inline __ATTRS_o_ai __vector signed short
+vec_min(__vector __bool short __a, __vector signed short __b) {
+ __vector signed short __ac = (__vector signed short)__a;
return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_min(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_min(__vector unsigned short __a, __vector unsigned short __b) {
return vec_sel(__a, __b, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_min(vector unsigned short __a, vector bool short __b) {
- vector unsigned short __bc = (vector unsigned short)__b;
+static inline __ATTRS_o_ai __vector unsigned short
+vec_min(__vector unsigned short __a, __vector __bool short __b) {
+ __vector unsigned short __bc = (__vector unsigned short)__b;
return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_min(vector bool short __a, vector unsigned short __b) {
- vector unsigned short __ac = (vector unsigned short)__a;
+static inline __ATTRS_o_ai __vector unsigned short
+vec_min(__vector __bool short __a, __vector unsigned short __b) {
+ __vector unsigned short __ac = (__vector unsigned short)__a;
return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector signed int
-vec_min(vector signed int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_min(__vector signed int __a, __vector signed int __b) {
return vec_sel(__a, __b, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_min(vector signed int __a, vector bool int __b) {
- vector signed int __bc = (vector signed int)__b;
+static inline __ATTRS_o_ai __vector signed int
+vec_min(__vector signed int __a, __vector __bool int __b) {
+ __vector signed int __bc = (__vector signed int)__b;
return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_min(vector bool int __a, vector signed int __b) {
- vector signed int __ac = (vector signed int)__a;
+static inline __ATTRS_o_ai __vector signed int
+vec_min(__vector __bool int __a, __vector signed int __b) {
+ __vector signed int __ac = (__vector signed int)__a;
return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_min(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_min(__vector unsigned int __a, __vector unsigned int __b) {
return vec_sel(__a, __b, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_min(vector unsigned int __a, vector bool int __b) {
- vector unsigned int __bc = (vector unsigned int)__b;
+static inline __ATTRS_o_ai __vector unsigned int
+vec_min(__vector unsigned int __a, __vector __bool int __b) {
+ __vector unsigned int __bc = (__vector unsigned int)__b;
return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_min(vector bool int __a, vector unsigned int __b) {
- vector unsigned int __ac = (vector unsigned int)__a;
+static inline __ATTRS_o_ai __vector unsigned int
+vec_min(__vector __bool int __a, __vector unsigned int __b) {
+ __vector unsigned int __ac = (__vector unsigned int)__a;
return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector signed long long
-vec_min(vector signed long long __a, vector signed long long __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_min(__vector signed long long __a, __vector signed long long __b) {
return vec_sel(__a, __b, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_min(vector signed long long __a, vector bool long long __b) {
- vector signed long long __bc = (vector signed long long)__b;
+static inline __ATTRS_o_ai __vector signed long long
+vec_min(__vector signed long long __a, __vector __bool long long __b) {
+ __vector signed long long __bc = (__vector signed long long)__b;
return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_min(vector bool long long __a, vector signed long long __b) {
- vector signed long long __ac = (vector signed long long)__a;
+static inline __ATTRS_o_ai __vector signed long long
+vec_min(__vector __bool long long __a, __vector signed long long __b) {
+ __vector signed long long __ac = (__vector signed long long)__a;
return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_min(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_min(__vector unsigned long long __a, __vector unsigned long long __b) {
return vec_sel(__a, __b, vec_cmpgt(__a, __b));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_min(vector unsigned long long __a, vector bool long long __b) {
- vector unsigned long long __bc = (vector unsigned long long)__b;
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_min(__vector unsigned long long __a, __vector __bool long long __b) {
+ __vector unsigned long long __bc = (__vector unsigned long long)__b;
return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_min(vector bool long long __a, vector unsigned long long __b) {
- vector unsigned long long __ac = (vector unsigned long long)__a;
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_min(__vector __bool long long __a, __vector unsigned long long __b) {
+ __vector unsigned long long __ac = (__vector unsigned long long)__a;
return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
}
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_min(vector float __a, vector float __b) {
+static inline __ATTRS_o_ai __vector float
+vec_min(__vector float __a, __vector float __b) {
return __builtin_s390_vfminsb(__a, __b, 0);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_min(vector double __a, vector double __b) {
+static inline __ATTRS_o_ai __vector double
+vec_min(__vector double __a, __vector double __b) {
#if __ARCH__ >= 12
return __builtin_s390_vfmindb(__a, __b, 0);
#else
@@ -8190,439 +8262,440 @@ vec_min(vector double __a, vector double __b) {
/*-- vec_add_u128 -----------------------------------------------------------*/
-static inline __ATTRS_ai vector unsigned char
-vec_add_u128(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_ai __vector unsigned char
+vec_add_u128(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vaq(__a, __b);
}
/*-- vec_addc ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_addc(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_addc(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vaccb(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_addc(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_addc(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vacch(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_addc(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_addc(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vaccf(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_addc(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_addc(__vector unsigned long long __a, __vector unsigned long long __b) {
return __builtin_s390_vaccg(__a, __b);
}
/*-- vec_addc_u128 ----------------------------------------------------------*/
-static inline __ATTRS_ai vector unsigned char
-vec_addc_u128(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_ai __vector unsigned char
+vec_addc_u128(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vaccq(__a, __b);
}
/*-- vec_adde_u128 ----------------------------------------------------------*/
-static inline __ATTRS_ai vector unsigned char
-vec_adde_u128(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
+static inline __ATTRS_ai __vector unsigned char
+vec_adde_u128(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
return __builtin_s390_vacq(__a, __b, __c);
}
/*-- vec_addec_u128 ---------------------------------------------------------*/
-static inline __ATTRS_ai vector unsigned char
-vec_addec_u128(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
+static inline __ATTRS_ai __vector unsigned char
+vec_addec_u128(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
return __builtin_s390_vacccq(__a, __b, __c);
}
/*-- vec_avg ----------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_avg(vector signed char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_avg(__vector signed char __a, __vector signed char __b) {
return __builtin_s390_vavgb(__a, __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_avg(vector signed short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_avg(__vector signed short __a, __vector signed short __b) {
return __builtin_s390_vavgh(__a, __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_avg(vector signed int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_avg(__vector signed int __a, __vector signed int __b) {
return __builtin_s390_vavgf(__a, __b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_avg(vector signed long long __a, vector signed long long __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_avg(__vector signed long long __a, __vector signed long long __b) {
return __builtin_s390_vavgg(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_avg(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_avg(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vavglb(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_avg(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_avg(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vavglh(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_avg(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_avg(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vavglf(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_avg(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_avg(__vector unsigned long long __a, __vector unsigned long long __b) {
return __builtin_s390_vavglg(__a, __b);
}
/*-- vec_checksum -----------------------------------------------------------*/
-static inline __ATTRS_ai vector unsigned int
-vec_checksum(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_ai __vector unsigned int
+vec_checksum(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vcksm(__a, __b);
}
/*-- vec_gfmsum -------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned short
-vec_gfmsum(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_gfmsum(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vgfmb(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_gfmsum(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_gfmsum(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vgfmh(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_gfmsum(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_gfmsum(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vgfmf(__a, __b);
}
/*-- vec_gfmsum_128 ---------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_gfmsum_128(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_gfmsum_128(__vector unsigned long long __a,
+ __vector unsigned long long __b) {
return __builtin_s390_vgfmg(__a, __b);
}
/*-- vec_gfmsum_accum -------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned short
-vec_gfmsum_accum(vector unsigned char __a, vector unsigned char __b,
- vector unsigned short __c) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_gfmsum_accum(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned short __c) {
return __builtin_s390_vgfmab(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_gfmsum_accum(vector unsigned short __a, vector unsigned short __b,
- vector unsigned int __c) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_gfmsum_accum(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned int __c) {
return __builtin_s390_vgfmah(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_gfmsum_accum(vector unsigned int __a, vector unsigned int __b,
- vector unsigned long long __c) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_gfmsum_accum(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned long long __c) {
return __builtin_s390_vgfmaf(__a, __b, __c);
}
/*-- vec_gfmsum_accum_128 ---------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_gfmsum_accum_128(vector unsigned long long __a,
- vector unsigned long long __b,
- vector unsigned char __c) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_gfmsum_accum_128(__vector unsigned long long __a,
+ __vector unsigned long long __b,
+ __vector unsigned char __c) {
return __builtin_s390_vgfmag(__a, __b, __c);
}
/*-- vec_mladd --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_mladd(vector signed char __a, vector signed char __b,
- vector signed char __c) {
+static inline __ATTRS_o_ai __vector signed char
+vec_mladd(__vector signed char __a, __vector signed char __b,
+ __vector signed char __c) {
return __a * __b + __c;
}
-static inline __ATTRS_o_ai vector signed char
-vec_mladd(vector unsigned char __a, vector signed char __b,
- vector signed char __c) {
- return (vector signed char)__a * __b + __c;
+static inline __ATTRS_o_ai __vector signed char
+vec_mladd(__vector unsigned char __a, __vector signed char __b,
+ __vector signed char __c) {
+ return (__vector signed char)__a * __b + __c;
}
-static inline __ATTRS_o_ai vector signed char
-vec_mladd(vector signed char __a, vector unsigned char __b,
- vector unsigned char __c) {
- return __a * (vector signed char)__b + (vector signed char)__c;
+static inline __ATTRS_o_ai __vector signed char
+vec_mladd(__vector signed char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
+ return __a * (__vector signed char)__b + (__vector signed char)__c;
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_mladd(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_mladd(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
return __a * __b + __c;
}
-static inline __ATTRS_o_ai vector signed short
-vec_mladd(vector signed short __a, vector signed short __b,
- vector signed short __c) {
+static inline __ATTRS_o_ai __vector signed short
+vec_mladd(__vector signed short __a, __vector signed short __b,
+ __vector signed short __c) {
return __a * __b + __c;
}
-static inline __ATTRS_o_ai vector signed short
-vec_mladd(vector unsigned short __a, vector signed short __b,
- vector signed short __c) {
- return (vector signed short)__a * __b + __c;
+static inline __ATTRS_o_ai __vector signed short
+vec_mladd(__vector unsigned short __a, __vector signed short __b,
+ __vector signed short __c) {
+ return (__vector signed short)__a * __b + __c;
}
-static inline __ATTRS_o_ai vector signed short
-vec_mladd(vector signed short __a, vector unsigned short __b,
- vector unsigned short __c) {
- return __a * (vector signed short)__b + (vector signed short)__c;
+static inline __ATTRS_o_ai __vector signed short
+vec_mladd(__vector signed short __a, __vector unsigned short __b,
+ __vector unsigned short __c) {
+ return __a * (__vector signed short)__b + (__vector signed short)__c;
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_mladd(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mladd(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c) {
return __a * __b + __c;
}
-static inline __ATTRS_o_ai vector signed int
-vec_mladd(vector signed int __a, vector signed int __b,
- vector signed int __c) {
+static inline __ATTRS_o_ai __vector signed int
+vec_mladd(__vector signed int __a, __vector signed int __b,
+ __vector signed int __c) {
return __a * __b + __c;
}
-static inline __ATTRS_o_ai vector signed int
-vec_mladd(vector unsigned int __a, vector signed int __b,
- vector signed int __c) {
- return (vector signed int)__a * __b + __c;
+static inline __ATTRS_o_ai __vector signed int
+vec_mladd(__vector unsigned int __a, __vector signed int __b,
+ __vector signed int __c) {
+ return (__vector signed int)__a * __b + __c;
}
-static inline __ATTRS_o_ai vector signed int
-vec_mladd(vector signed int __a, vector unsigned int __b,
- vector unsigned int __c) {
- return __a * (vector signed int)__b + (vector signed int)__c;
+static inline __ATTRS_o_ai __vector signed int
+vec_mladd(__vector signed int __a, __vector unsigned int __b,
+ __vector unsigned int __c) {
+ return __a * (__vector signed int)__b + (__vector signed int)__c;
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_mladd(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mladd(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c) {
return __a * __b + __c;
}
/*-- vec_mhadd --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_mhadd(vector signed char __a, vector signed char __b,
- vector signed char __c) {
+static inline __ATTRS_o_ai __vector signed char
+vec_mhadd(__vector signed char __a, __vector signed char __b,
+ __vector signed char __c) {
return __builtin_s390_vmahb(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_mhadd(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_mhadd(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
return __builtin_s390_vmalhb(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector signed short
-vec_mhadd(vector signed short __a, vector signed short __b,
- vector signed short __c) {
+static inline __ATTRS_o_ai __vector signed short
+vec_mhadd(__vector signed short __a, __vector signed short __b,
+ __vector signed short __c) {
return __builtin_s390_vmahh(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_mhadd(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mhadd(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c) {
return __builtin_s390_vmalhh(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector signed int
-vec_mhadd(vector signed int __a, vector signed int __b,
- vector signed int __c) {
+static inline __ATTRS_o_ai __vector signed int
+vec_mhadd(__vector signed int __a, __vector signed int __b,
+ __vector signed int __c) {
return __builtin_s390_vmahf(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_mhadd(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mhadd(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c) {
return __builtin_s390_vmalhf(__a, __b, __c);
}
/*-- vec_meadd --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed short
-vec_meadd(vector signed char __a, vector signed char __b,
- vector signed short __c) {
+static inline __ATTRS_o_ai __vector signed short
+vec_meadd(__vector signed char __a, __vector signed char __b,
+ __vector signed short __c) {
return __builtin_s390_vmaeb(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_meadd(vector unsigned char __a, vector unsigned char __b,
- vector unsigned short __c) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_meadd(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned short __c) {
return __builtin_s390_vmaleb(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector signed int
-vec_meadd(vector signed short __a, vector signed short __b,
- vector signed int __c) {
+static inline __ATTRS_o_ai __vector signed int
+vec_meadd(__vector signed short __a, __vector signed short __b,
+ __vector signed int __c) {
return __builtin_s390_vmaeh(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_meadd(vector unsigned short __a, vector unsigned short __b,
- vector unsigned int __c) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_meadd(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned int __c) {
return __builtin_s390_vmaleh(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_meadd(vector signed int __a, vector signed int __b,
- vector signed long long __c) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_meadd(__vector signed int __a, __vector signed int __b,
+ __vector signed long long __c) {
return __builtin_s390_vmaef(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_meadd(vector unsigned int __a, vector unsigned int __b,
- vector unsigned long long __c) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_meadd(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned long long __c) {
return __builtin_s390_vmalef(__a, __b, __c);
}
/*-- vec_moadd --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed short
-vec_moadd(vector signed char __a, vector signed char __b,
- vector signed short __c) {
+static inline __ATTRS_o_ai __vector signed short
+vec_moadd(__vector signed char __a, __vector signed char __b,
+ __vector signed short __c) {
return __builtin_s390_vmaob(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_moadd(vector unsigned char __a, vector unsigned char __b,
- vector unsigned short __c) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_moadd(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned short __c) {
return __builtin_s390_vmalob(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector signed int
-vec_moadd(vector signed short __a, vector signed short __b,
- vector signed int __c) {
+static inline __ATTRS_o_ai __vector signed int
+vec_moadd(__vector signed short __a, __vector signed short __b,
+ __vector signed int __c) {
return __builtin_s390_vmaoh(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_moadd(vector unsigned short __a, vector unsigned short __b,
- vector unsigned int __c) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_moadd(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned int __c) {
return __builtin_s390_vmaloh(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_moadd(vector signed int __a, vector signed int __b,
- vector signed long long __c) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_moadd(__vector signed int __a, __vector signed int __b,
+ __vector signed long long __c) {
return __builtin_s390_vmaof(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_moadd(vector unsigned int __a, vector unsigned int __b,
- vector unsigned long long __c) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_moadd(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned long long __c) {
return __builtin_s390_vmalof(__a, __b, __c);
}
/*-- vec_mulh ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_mulh(vector signed char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed char
+vec_mulh(__vector signed char __a, __vector signed char __b) {
return __builtin_s390_vmhb(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_mulh(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_mulh(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vmlhb(__a, __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_mulh(vector signed short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_mulh(__vector signed short __a, __vector signed short __b) {
return __builtin_s390_vmhh(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_mulh(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mulh(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vmlhh(__a, __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_mulh(vector signed int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_mulh(__vector signed int __a, __vector signed int __b) {
return __builtin_s390_vmhf(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_mulh(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mulh(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vmlhf(__a, __b);
}
/*-- vec_mule ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed short
-vec_mule(vector signed char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_mule(__vector signed char __a, __vector signed char __b) {
return __builtin_s390_vmeb(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_mule(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mule(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vmleb(__a, __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_mule(vector signed short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_mule(__vector signed short __a, __vector signed short __b) {
return __builtin_s390_vmeh(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_mule(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mule(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vmleh(__a, __b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_mule(vector signed int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_mule(__vector signed int __a, __vector signed int __b) {
return __builtin_s390_vmef(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_mule(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_mule(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vmlef(__a, __b);
}
/*-- vec_mulo ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed short
-vec_mulo(vector signed char __a, vector signed char __b) {
+static inline __ATTRS_o_ai __vector signed short
+vec_mulo(__vector signed char __a, __vector signed char __b) {
return __builtin_s390_vmob(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_mulo(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mulo(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vmlob(__a, __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_mulo(vector signed short __a, vector signed short __b) {
+static inline __ATTRS_o_ai __vector signed int
+vec_mulo(__vector signed short __a, __vector signed short __b) {
return __builtin_s390_vmoh(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_mulo(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mulo(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vmloh(__a, __b);
}
-static inline __ATTRS_o_ai vector signed long long
-vec_mulo(vector signed int __a, vector signed int __b) {
+static inline __ATTRS_o_ai __vector signed long long
+vec_mulo(__vector signed int __a, __vector signed int __b) {
return __builtin_s390_vmof(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_mulo(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_mulo(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vmlof(__a, __b);
}
@@ -8630,198 +8703,199 @@ vec_mulo(vector unsigned int __a, vector unsigned int __b) {
#if __ARCH__ >= 12
#define vec_msum_u128(X, Y, Z, W) \
- ((vector unsigned char)__builtin_s390_vmslg((X), (Y), (Z), (W)));
+ ((__vector unsigned char)__builtin_s390_vmslg((X), (Y), (Z), (W)));
#endif
/*-- vec_sub_u128 -----------------------------------------------------------*/
-static inline __ATTRS_ai vector unsigned char
-vec_sub_u128(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_ai __vector unsigned char
+vec_sub_u128(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vsq(__a, __b);
}
/*-- vec_subc ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_subc(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_subc(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vscbib(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_subc(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_subc(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vscbih(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_subc(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_subc(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vscbif(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_subc(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_subc(__vector unsigned long long __a, __vector unsigned long long __b) {
return __builtin_s390_vscbig(__a, __b);
}
/*-- vec_subc_u128 ----------------------------------------------------------*/
-static inline __ATTRS_ai vector unsigned char
-vec_subc_u128(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_ai __vector unsigned char
+vec_subc_u128(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vscbiq(__a, __b);
}
/*-- vec_sube_u128 ----------------------------------------------------------*/
-static inline __ATTRS_ai vector unsigned char
-vec_sube_u128(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
+static inline __ATTRS_ai __vector unsigned char
+vec_sube_u128(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
return __builtin_s390_vsbiq(__a, __b, __c);
}
/*-- vec_subec_u128 ---------------------------------------------------------*/
-static inline __ATTRS_ai vector unsigned char
-vec_subec_u128(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
+static inline __ATTRS_ai __vector unsigned char
+vec_subec_u128(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
return __builtin_s390_vsbcbiq(__a, __b, __c);
}
/*-- vec_sum2 ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sum2(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sum2(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vsumgh(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sum2(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sum2(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vsumgf(__a, __b);
}
/*-- vec_sum_u128 -----------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_sum_u128(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sum_u128(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vsumqf(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_sum_u128(vector unsigned long long __a, vector unsigned long long __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sum_u128(__vector unsigned long long __a, __vector unsigned long long __b) {
return __builtin_s390_vsumqg(__a, __b);
}
/*-- vec_sum4 ---------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned int
-vec_sum4(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sum4(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vsumb(__a, __b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_sum4(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sum4(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vsumh(__a, __b);
}
/*-- vec_test_mask ----------------------------------------------------------*/
static inline __ATTRS_o_ai int
-vec_test_mask(vector signed char __a, vector unsigned char __b) {
- return __builtin_s390_vtm((vector unsigned char)__a,
- (vector unsigned char)__b);
+vec_test_mask(__vector signed char __a, __vector unsigned char __b) {
+ return __builtin_s390_vtm((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
static inline __ATTRS_o_ai int
-vec_test_mask(vector unsigned char __a, vector unsigned char __b) {
+vec_test_mask(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vtm(__a, __b);
}
static inline __ATTRS_o_ai int
-vec_test_mask(vector signed short __a, vector unsigned short __b) {
- return __builtin_s390_vtm((vector unsigned char)__a,
- (vector unsigned char)__b);
+vec_test_mask(__vector signed short __a, __vector unsigned short __b) {
+ return __builtin_s390_vtm((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
static inline __ATTRS_o_ai int
-vec_test_mask(vector unsigned short __a, vector unsigned short __b) {
- return __builtin_s390_vtm((vector unsigned char)__a,
- (vector unsigned char)__b);
+vec_test_mask(__vector unsigned short __a, __vector unsigned short __b) {
+ return __builtin_s390_vtm((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
static inline __ATTRS_o_ai int
-vec_test_mask(vector signed int __a, vector unsigned int __b) {
- return __builtin_s390_vtm((vector unsigned char)__a,
- (vector unsigned char)__b);
+vec_test_mask(__vector signed int __a, __vector unsigned int __b) {
+ return __builtin_s390_vtm((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
static inline __ATTRS_o_ai int
-vec_test_mask(vector unsigned int __a, vector unsigned int __b) {
- return __builtin_s390_vtm((vector unsigned char)__a,
- (vector unsigned char)__b);
+vec_test_mask(__vector unsigned int __a, __vector unsigned int __b) {
+ return __builtin_s390_vtm((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
static inline __ATTRS_o_ai int
-vec_test_mask(vector signed long long __a, vector unsigned long long __b) {
- return __builtin_s390_vtm((vector unsigned char)__a,
- (vector unsigned char)__b);
+vec_test_mask(__vector signed long long __a, __vector unsigned long long __b) {
+ return __builtin_s390_vtm((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
static inline __ATTRS_o_ai int
-vec_test_mask(vector unsigned long long __a, vector unsigned long long __b) {
- return __builtin_s390_vtm((vector unsigned char)__a,
- (vector unsigned char)__b);
+vec_test_mask(__vector unsigned long long __a,
+ __vector unsigned long long __b) {
+ return __builtin_s390_vtm((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai int
-vec_test_mask(vector float __a, vector unsigned int __b) {
- return __builtin_s390_vtm((vector unsigned char)__a,
- (vector unsigned char)__b);
+vec_test_mask(__vector float __a, __vector unsigned int __b) {
+ return __builtin_s390_vtm((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
#endif
static inline __ATTRS_o_ai int
-vec_test_mask(vector double __a, vector unsigned long long __b) {
- return __builtin_s390_vtm((vector unsigned char)__a,
- (vector unsigned char)__b);
+vec_test_mask(__vector double __a, __vector unsigned long long __b) {
+ return __builtin_s390_vtm((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
/*-- vec_madd ---------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_madd(vector float __a, vector float __b, vector float __c) {
+static inline __ATTRS_o_ai __vector float
+vec_madd(__vector float __a, __vector float __b, __vector float __c) {
return __builtin_s390_vfmasb(__a, __b, __c);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_madd(vector double __a, vector double __b, vector double __c) {
+static inline __ATTRS_o_ai __vector double
+vec_madd(__vector double __a, __vector double __b, __vector double __c) {
return __builtin_s390_vfmadb(__a, __b, __c);
}
/*-- vec_msub ---------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_msub(vector float __a, vector float __b, vector float __c) {
+static inline __ATTRS_o_ai __vector float
+vec_msub(__vector float __a, __vector float __b, __vector float __c) {
return __builtin_s390_vfmssb(__a, __b, __c);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_msub(vector double __a, vector double __b, vector double __c) {
+static inline __ATTRS_o_ai __vector double
+vec_msub(__vector double __a, __vector double __b, __vector double __c) {
return __builtin_s390_vfmsdb(__a, __b, __c);
}
/*-- vec_nmadd ---------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_nmadd(vector float __a, vector float __b, vector float __c) {
+static inline __ATTRS_o_ai __vector float
+vec_nmadd(__vector float __a, __vector float __b, __vector float __c) {
return __builtin_s390_vfnmasb(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector double
-vec_nmadd(vector double __a, vector double __b, vector double __c) {
+static inline __ATTRS_o_ai __vector double
+vec_nmadd(__vector double __a, __vector double __b, __vector double __c) {
return __builtin_s390_vfnmadb(__a, __b, __c);
}
#endif
@@ -8829,13 +8903,13 @@ vec_nmadd(vector double __a, vector double __b, vector double __c) {
/*-- vec_nmsub ---------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_nmsub(vector float __a, vector float __b, vector float __c) {
+static inline __ATTRS_o_ai __vector float
+vec_nmsub(__vector float __a, __vector float __b, __vector float __c) {
return __builtin_s390_vfnmssb(__a, __b, __c);
}
-static inline __ATTRS_o_ai vector double
-vec_nmsub(vector double __a, vector double __b, vector double __c) {
+static inline __ATTRS_o_ai __vector double
+vec_nmsub(__vector double __a, __vector double __b, __vector double __c) {
return __builtin_s390_vfnmsdb(__a, __b, __c);
}
#endif
@@ -8843,31 +8917,31 @@ vec_nmsub(vector double __a, vector double __b, vector double __c) {
/*-- vec_sqrt ---------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_sqrt(vector float __a) {
+static inline __ATTRS_o_ai __vector float
+vec_sqrt(__vector float __a) {
return __builtin_s390_vfsqsb(__a);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_sqrt(vector double __a) {
+static inline __ATTRS_o_ai __vector double
+vec_sqrt(__vector double __a) {
return __builtin_s390_vfsqdb(__a);
}
/*-- vec_ld2f ---------------------------------------------------------------*/
// This prototype is deprecated.
-static inline __ATTRS_ai vector double
+static inline __ATTRS_ai __vector double
vec_ld2f(const float *__ptr) {
typedef float __v2f32 __attribute__((__vector_size__(8)));
- return __builtin_convertvector(*(const __v2f32 *)__ptr, vector double);
+ return __builtin_convertvector(*(const __v2f32 *)__ptr, __vector double);
}
/*-- vec_st2f ---------------------------------------------------------------*/
// This prototype is deprecated.
static inline __ATTRS_ai void
-vec_st2f(vector double __a, float *__ptr) {
+vec_st2f(__vector double __a, float *__ptr) {
typedef float __v2f32 __attribute__((__vector_size__(8)));
*(__v2f32 *)__ptr = __builtin_convertvector(__a, __v2f32);
}
@@ -8875,59 +8949,63 @@ vec_st2f(vector double __a, float *__ptr) {
/*-- vec_ctd ----------------------------------------------------------------*/
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_ctd(vector signed long long __a, int __b)
+static inline __ATTRS_o_ai __vector double
+vec_ctd(__vector signed long long __a, int __b)
__constant_range(__b, 0, 31) {
- vector double __conv = __builtin_convertvector(__a, vector double);
- __conv *= (vector double)(vector unsigned long long)((0x3ffULL - __b) << 52);
+ __vector double __conv = __builtin_convertvector(__a, __vector double);
+ __conv *= ((__vector double)(__vector unsigned long long)
+ ((0x3ffULL - __b) << 52));
return __conv;
}
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_ctd(vector unsigned long long __a, int __b)
+static inline __ATTRS_o_ai __vector double
+vec_ctd(__vector unsigned long long __a, int __b)
__constant_range(__b, 0, 31) {
- vector double __conv = __builtin_convertvector(__a, vector double);
- __conv *= (vector double)(vector unsigned long long)((0x3ffULL - __b) << 52);
+ __vector double __conv = __builtin_convertvector(__a, __vector double);
+ __conv *= ((__vector double)(__vector unsigned long long)
+ ((0x3ffULL - __b) << 52));
return __conv;
}
/*-- vec_ctsl ---------------------------------------------------------------*/
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_ctsl(vector double __a, int __b)
+static inline __ATTRS_o_ai __vector signed long long
+vec_ctsl(__vector double __a, int __b)
__constant_range(__b, 0, 31) {
- __a *= (vector double)(vector unsigned long long)((0x3ffULL + __b) << 52);
- return __builtin_convertvector(__a, vector signed long long);
+ __a *= ((__vector double)(__vector unsigned long long)
+ ((0x3ffULL + __b) << 52));
+ return __builtin_convertvector(__a, __vector signed long long);
}
/*-- vec_ctul ---------------------------------------------------------------*/
// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_ctul(vector double __a, int __b)
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_ctul(__vector double __a, int __b)
__constant_range(__b, 0, 31) {
- __a *= (vector double)(vector unsigned long long)((0x3ffULL + __b) << 52);
- return __builtin_convertvector(__a, vector unsigned long long);
+ __a *= ((__vector double)(__vector unsigned long long)
+ ((0x3ffULL + __b) << 52));
+ return __builtin_convertvector(__a, __vector unsigned long long);
}
/*-- vec_doublee ------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_ai vector double
-vec_doublee(vector float __a) {
+static inline __ATTRS_ai __vector double
+vec_doublee(__vector float __a) {
typedef float __v2f32 __attribute__((__vector_size__(8)));
__v2f32 __pack = __builtin_shufflevector(__a, __a, 0, 2);
- return __builtin_convertvector(__pack, vector double);
+ return __builtin_convertvector(__pack, __vector double);
}
#endif
/*-- vec_floate -------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_ai vector float
-vec_floate(vector double __a) {
+static inline __ATTRS_ai __vector float
+vec_floate(__vector double __a) {
typedef float __v2f32 __attribute__((__vector_size__(8)));
__v2f32 __pack = __builtin_convertvector(__a, __v2f32);
return __builtin_shufflevector(__pack, __pack, 0, -1, 1, -1);
@@ -8936,86 +9014,86 @@ vec_floate(vector double __a) {
/*-- vec_double -------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector double
-vec_double(vector signed long long __a) {
- return __builtin_convertvector(__a, vector double);
+static inline __ATTRS_o_ai __vector double
+vec_double(__vector signed long long __a) {
+ return __builtin_convertvector(__a, __vector double);
}
-static inline __ATTRS_o_ai vector double
-vec_double(vector unsigned long long __a) {
- return __builtin_convertvector(__a, vector double);
+static inline __ATTRS_o_ai __vector double
+vec_double(__vector unsigned long long __a) {
+ return __builtin_convertvector(__a, __vector double);
}
/*-- vec_float --------------------------------------------------------------*/
#if __ARCH__ >= 13
-static inline __ATTRS_o_ai vector float
-vec_float(vector signed int __a) {
- return __builtin_convertvector(__a, vector float);
+static inline __ATTRS_o_ai __vector float
+vec_float(__vector signed int __a) {
+ return __builtin_convertvector(__a, __vector float);
}
-static inline __ATTRS_o_ai vector float
-vec_float(vector unsigned int __a) {
- return __builtin_convertvector(__a, vector float);
+static inline __ATTRS_o_ai __vector float
+vec_float(__vector unsigned int __a) {
+ return __builtin_convertvector(__a, __vector float);
}
#endif
/*-- vec_signed -------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed long long
-vec_signed(vector double __a) {
- return __builtin_convertvector(__a, vector signed long long);
+static inline __ATTRS_o_ai __vector signed long long
+vec_signed(__vector double __a) {
+ return __builtin_convertvector(__a, __vector signed long long);
}
#if __ARCH__ >= 13
-static inline __ATTRS_o_ai vector signed int
-vec_signed(vector float __a) {
- return __builtin_convertvector(__a, vector signed int);
+static inline __ATTRS_o_ai __vector signed int
+vec_signed(__vector float __a) {
+ return __builtin_convertvector(__a, __vector signed int);
}
#endif
/*-- vec_unsigned -----------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned long long
-vec_unsigned(vector double __a) {
- return __builtin_convertvector(__a, vector unsigned long long);
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_unsigned(__vector double __a) {
+ return __builtin_convertvector(__a, __vector unsigned long long);
}
#if __ARCH__ >= 13
-static inline __ATTRS_o_ai vector unsigned int
-vec_unsigned(vector float __a) {
- return __builtin_convertvector(__a, vector unsigned int);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_unsigned(__vector float __a) {
+ return __builtin_convertvector(__a, __vector unsigned int);
}
#endif
/*-- vec_roundp -------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_roundp(vector float __a) {
+static inline __ATTRS_o_ai __vector float
+vec_roundp(__vector float __a) {
return __builtin_s390_vfisb(__a, 4, 6);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_roundp(vector double __a) {
+static inline __ATTRS_o_ai __vector double
+vec_roundp(__vector double __a) {
return __builtin_s390_vfidb(__a, 4, 6);
}
/*-- vec_ceil ---------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_ceil(vector float __a) {
+static inline __ATTRS_o_ai __vector float
+vec_ceil(__vector float __a) {
// On this platform, vec_ceil never triggers the IEEE-inexact exception.
return __builtin_s390_vfisb(__a, 4, 6);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_ceil(vector double __a) {
+static inline __ATTRS_o_ai __vector double
+vec_ceil(__vector double __a) {
// On this platform, vec_ceil never triggers the IEEE-inexact exception.
return __builtin_s390_vfidb(__a, 4, 6);
}
@@ -9023,29 +9101,29 @@ vec_ceil(vector double __a) {
/*-- vec_roundm -------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_roundm(vector float __a) {
+static inline __ATTRS_o_ai __vector float
+vec_roundm(__vector float __a) {
return __builtin_s390_vfisb(__a, 4, 7);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_roundm(vector double __a) {
+static inline __ATTRS_o_ai __vector double
+vec_roundm(__vector double __a) {
return __builtin_s390_vfidb(__a, 4, 7);
}
/*-- vec_floor --------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_floor(vector float __a) {
+static inline __ATTRS_o_ai __vector float
+vec_floor(__vector float __a) {
// On this platform, vec_floor never triggers the IEEE-inexact exception.
return __builtin_s390_vfisb(__a, 4, 7);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_floor(vector double __a) {
+static inline __ATTRS_o_ai __vector double
+vec_floor(__vector double __a) {
// On this platform, vec_floor never triggers the IEEE-inexact exception.
return __builtin_s390_vfidb(__a, 4, 7);
}
@@ -9053,29 +9131,29 @@ vec_floor(vector double __a) {
/*-- vec_roundz -------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_roundz(vector float __a) {
+static inline __ATTRS_o_ai __vector float
+vec_roundz(__vector float __a) {
return __builtin_s390_vfisb(__a, 4, 5);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_roundz(vector double __a) {
+static inline __ATTRS_o_ai __vector double
+vec_roundz(__vector double __a) {
return __builtin_s390_vfidb(__a, 4, 5);
}
/*-- vec_trunc --------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_trunc(vector float __a) {
+static inline __ATTRS_o_ai __vector float
+vec_trunc(__vector float __a) {
// On this platform, vec_trunc never triggers the IEEE-inexact exception.
return __builtin_s390_vfisb(__a, 4, 5);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_trunc(vector double __a) {
+static inline __ATTRS_o_ai __vector double
+vec_trunc(__vector double __a) {
// On this platform, vec_trunc never triggers the IEEE-inexact exception.
return __builtin_s390_vfidb(__a, 4, 5);
}
@@ -9083,29 +9161,29 @@ vec_trunc(vector double __a) {
/*-- vec_roundc -------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_roundc(vector float __a) {
+static inline __ATTRS_o_ai __vector float
+vec_roundc(__vector float __a) {
return __builtin_s390_vfisb(__a, 4, 0);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_roundc(vector double __a) {
+static inline __ATTRS_o_ai __vector double
+vec_roundc(__vector double __a) {
return __builtin_s390_vfidb(__a, 4, 0);
}
/*-- vec_rint ---------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_rint(vector float __a) {
+static inline __ATTRS_o_ai __vector float
+vec_rint(__vector float __a) {
// vec_rint may trigger the IEEE-inexact exception.
return __builtin_s390_vfisb(__a, 0, 0);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_rint(vector double __a) {
+static inline __ATTRS_o_ai __vector double
+vec_rint(__vector double __a) {
// vec_rint may trigger the IEEE-inexact exception.
return __builtin_s390_vfidb(__a, 0, 0);
}
@@ -9113,45 +9191,45 @@ vec_rint(vector double __a) {
/*-- vec_round --------------------------------------------------------------*/
#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_round(vector float __a) {
+static inline __ATTRS_o_ai __vector float
+vec_round(__vector float __a) {
return __builtin_s390_vfisb(__a, 4, 4);
}
#endif
-static inline __ATTRS_o_ai vector double
-vec_round(vector double __a) {
+static inline __ATTRS_o_ai __vector double
+vec_round(__vector double __a) {
return __builtin_s390_vfidb(__a, 4, 4);
}
/*-- vec_fp_test_data_class -------------------------------------------------*/
#if __ARCH__ >= 12
-extern __ATTRS_o vector bool int
-vec_fp_test_data_class(vector float __a, int __b, int *__c)
+extern __ATTRS_o __vector __bool int
+vec_fp_test_data_class(__vector float __a, int __b, int *__c)
__constant_range(__b, 0, 4095);
-extern __ATTRS_o vector bool long long
-vec_fp_test_data_class(vector double __a, int __b, int *__c)
+extern __ATTRS_o __vector __bool long long
+vec_fp_test_data_class(__vector double __a, int __b, int *__c)
__constant_range(__b, 0, 4095);
#define vec_fp_test_data_class(X, Y, Z) \
((__typeof__((vec_fp_test_data_class)((X), (Y), (Z)))) \
__extension__ ({ \
- vector unsigned char __res; \
- vector unsigned char __x = (vector unsigned char)(X); \
+ __vector unsigned char __res; \
+ __vector unsigned char __x = (__vector unsigned char)(X); \
int *__z = (Z); \
switch (sizeof ((X)[0])) { \
- case 4: __res = (vector unsigned char) \
- __builtin_s390_vftcisb((vector float)__x, (Y), __z); \
+ case 4: __res = (__vector unsigned char) \
+ __builtin_s390_vftcisb((__vector float)__x, (Y), __z); \
break; \
- default: __res = (vector unsigned char) \
- __builtin_s390_vftcidb((vector double)__x, (Y), __z); \
+ default: __res = (__vector unsigned char) \
+ __builtin_s390_vftcidb((__vector double)__x, (Y), __z); \
break; \
} __res; }))
#else
#define vec_fp_test_data_class(X, Y, Z) \
- ((vector bool long long)__builtin_s390_vftcidb((X), (Y), (Z)))
+ ((__vector __bool long long)__builtin_s390_vftcidb((X), (Y), (Z)))
#endif
#define __VEC_CLASS_FP_ZERO_P (1 << 11)
@@ -9183,1527 +9261,1585 @@ vec_fp_test_data_class(vector double __a, int __b, int *__c)
/*-- vec_cp_until_zero ------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_cp_until_zero(vector signed char __a) {
- return (vector signed char)__builtin_s390_vistrb((vector unsigned char)__a);
+static inline __ATTRS_o_ai __vector signed char
+vec_cp_until_zero(__vector signed char __a) {
+ return ((__vector signed char)
+ __builtin_s390_vistrb((__vector unsigned char)__a));
}
-static inline __ATTRS_o_ai vector bool char
-vec_cp_until_zero(vector bool char __a) {
- return (vector bool char)__builtin_s390_vistrb((vector unsigned char)__a);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cp_until_zero(__vector __bool char __a) {
+ return ((__vector __bool char)
+ __builtin_s390_vistrb((__vector unsigned char)__a));
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cp_until_zero(vector unsigned char __a) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cp_until_zero(__vector unsigned char __a) {
return __builtin_s390_vistrb(__a);
}
-static inline __ATTRS_o_ai vector signed short
-vec_cp_until_zero(vector signed short __a) {
- return (vector signed short)__builtin_s390_vistrh((vector unsigned short)__a);
+static inline __ATTRS_o_ai __vector signed short
+vec_cp_until_zero(__vector signed short __a) {
+ return ((__vector signed short)
+ __builtin_s390_vistrh((__vector unsigned short)__a));
}
-static inline __ATTRS_o_ai vector bool short
-vec_cp_until_zero(vector bool short __a) {
- return (vector bool short)__builtin_s390_vistrh((vector unsigned short)__a);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cp_until_zero(__vector __bool short __a) {
+ return ((__vector __bool short)
+ __builtin_s390_vistrh((__vector unsigned short)__a));
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cp_until_zero(vector unsigned short __a) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cp_until_zero(__vector unsigned short __a) {
return __builtin_s390_vistrh(__a);
}
-static inline __ATTRS_o_ai vector signed int
-vec_cp_until_zero(vector signed int __a) {
- return (vector signed int)__builtin_s390_vistrf((vector unsigned int)__a);
+static inline __ATTRS_o_ai __vector signed int
+vec_cp_until_zero(__vector signed int __a) {
+ return ((__vector signed int)
+ __builtin_s390_vistrf((__vector unsigned int)__a));
}
-static inline __ATTRS_o_ai vector bool int
-vec_cp_until_zero(vector bool int __a) {
- return (vector bool int)__builtin_s390_vistrf((vector unsigned int)__a);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cp_until_zero(__vector __bool int __a) {
+ return ((__vector __bool int)
+ __builtin_s390_vistrf((__vector unsigned int)__a));
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cp_until_zero(vector unsigned int __a) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cp_until_zero(__vector unsigned int __a) {
return __builtin_s390_vistrf(__a);
}
/*-- vec_cp_until_zero_cc ---------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_cp_until_zero_cc(vector signed char __a, int *__cc) {
- return (vector signed char)
- __builtin_s390_vistrbs((vector unsigned char)__a, __cc);
+static inline __ATTRS_o_ai __vector signed char
+vec_cp_until_zero_cc(__vector signed char __a, int *__cc) {
+ return (__vector signed char)
+ __builtin_s390_vistrbs((__vector unsigned char)__a, __cc);
}
-static inline __ATTRS_o_ai vector bool char
-vec_cp_until_zero_cc(vector bool char __a, int *__cc) {
- return (vector bool char)
- __builtin_s390_vistrbs((vector unsigned char)__a, __cc);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cp_until_zero_cc(__vector __bool char __a, int *__cc) {
+ return (__vector __bool char)
+ __builtin_s390_vistrbs((__vector unsigned char)__a, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cp_until_zero_cc(vector unsigned char __a, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cp_until_zero_cc(__vector unsigned char __a, int *__cc) {
return __builtin_s390_vistrbs(__a, __cc);
}
-static inline __ATTRS_o_ai vector signed short
-vec_cp_until_zero_cc(vector signed short __a, int *__cc) {
- return (vector signed short)
- __builtin_s390_vistrhs((vector unsigned short)__a, __cc);
+static inline __ATTRS_o_ai __vector signed short
+vec_cp_until_zero_cc(__vector signed short __a, int *__cc) {
+ return (__vector signed short)
+ __builtin_s390_vistrhs((__vector unsigned short)__a, __cc);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cp_until_zero_cc(vector bool short __a, int *__cc) {
- return (vector bool short)
- __builtin_s390_vistrhs((vector unsigned short)__a, __cc);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cp_until_zero_cc(__vector __bool short __a, int *__cc) {
+ return (__vector __bool short)
+ __builtin_s390_vistrhs((__vector unsigned short)__a, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cp_until_zero_cc(vector unsigned short __a, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cp_until_zero_cc(__vector unsigned short __a, int *__cc) {
return __builtin_s390_vistrhs(__a, __cc);
}
-static inline __ATTRS_o_ai vector signed int
-vec_cp_until_zero_cc(vector signed int __a, int *__cc) {
- return (vector signed int)
- __builtin_s390_vistrfs((vector unsigned int)__a, __cc);
+static inline __ATTRS_o_ai __vector signed int
+vec_cp_until_zero_cc(__vector signed int __a, int *__cc) {
+ return (__vector signed int)
+ __builtin_s390_vistrfs((__vector unsigned int)__a, __cc);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cp_until_zero_cc(vector bool int __a, int *__cc) {
- return (vector bool int)__builtin_s390_vistrfs((vector unsigned int)__a,
- __cc);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cp_until_zero_cc(__vector __bool int __a, int *__cc) {
+ return (__vector __bool int)
+ __builtin_s390_vistrfs((__vector unsigned int)__a, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cp_until_zero_cc(vector unsigned int __a, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cp_until_zero_cc(__vector unsigned int __a, int *__cc) {
return __builtin_s390_vistrfs(__a, __cc);
}
/*-- vec_cmpeq_idx ----------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_cmpeq_idx(vector signed char __a, vector signed char __b) {
- return (vector signed char)
- __builtin_s390_vfeeb((vector unsigned char)__a,
- (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpeq_idx(__vector signed char __a, __vector signed char __b) {
+ return (__vector signed char)
+ __builtin_s390_vfeeb((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_idx(vector bool char __a, vector bool char __b) {
- return __builtin_s390_vfeeb((vector unsigned char)__a,
- (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_idx(__vector __bool char __a, __vector __bool char __b) {
+ return __builtin_s390_vfeeb((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_idx(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_idx(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vfeeb(__a, __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_cmpeq_idx(vector signed short __a, vector signed short __b) {
- return (vector signed short)
- __builtin_s390_vfeeh((vector unsigned short)__a,
- (vector unsigned short)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpeq_idx(__vector signed short __a, __vector signed short __b) {
+ return (__vector signed short)
+ __builtin_s390_vfeeh((__vector unsigned short)__a,
+ (__vector unsigned short)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_idx(vector bool short __a, vector bool short __b) {
- return __builtin_s390_vfeeh((vector unsigned short)__a,
- (vector unsigned short)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_idx(__vector __bool short __a, __vector __bool short __b) {
+ return __builtin_s390_vfeeh((__vector unsigned short)__a,
+ (__vector unsigned short)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_idx(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_idx(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vfeeh(__a, __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_cmpeq_idx(vector signed int __a, vector signed int __b) {
- return (vector signed int)
- __builtin_s390_vfeef((vector unsigned int)__a,
- (vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpeq_idx(__vector signed int __a, __vector signed int __b) {
+ return (__vector signed int)
+ __builtin_s390_vfeef((__vector unsigned int)__a,
+ (__vector unsigned int)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_idx(vector bool int __a, vector bool int __b) {
- return __builtin_s390_vfeef((vector unsigned int)__a,
- (vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_idx(__vector __bool int __a, __vector __bool int __b) {
+ return __builtin_s390_vfeef((__vector unsigned int)__a,
+ (__vector unsigned int)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_idx(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_idx(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vfeef(__a, __b);
}
/*-- vec_cmpeq_idx_cc -------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_cmpeq_idx_cc(vector signed char __a, vector signed char __b, int *__cc) {
- return (vector signed char)
- __builtin_s390_vfeebs((vector unsigned char)__a,
- (vector unsigned char)__b, __cc);
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpeq_idx_cc(__vector signed char __a, __vector signed char __b, int *__cc) {
+ return (__vector signed char)
+ __builtin_s390_vfeebs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
- return __builtin_s390_vfeebs((vector unsigned char)__a,
- (vector unsigned char)__b, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_idx_cc(__vector __bool char __a, __vector __bool char __b, int *__cc) {
+ return __builtin_s390_vfeebs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_idx_cc(vector unsigned char __a, vector unsigned char __b,
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
int *__cc) {
return __builtin_s390_vfeebs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector signed short
-vec_cmpeq_idx_cc(vector signed short __a, vector signed short __b, int *__cc) {
- return (vector signed short)
- __builtin_s390_vfeehs((vector unsigned short)__a,
- (vector unsigned short)__b, __cc);
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpeq_idx_cc(__vector signed short __a, __vector signed short __b,
+ int *__cc) {
+ return (__vector signed short)
+ __builtin_s390_vfeehs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
- return __builtin_s390_vfeehs((vector unsigned short)__a,
- (vector unsigned short)__b, __cc);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_idx_cc(__vector __bool short __a, __vector __bool short __b, int *__cc) {
+ return __builtin_s390_vfeehs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_idx_cc(vector unsigned short __a, vector unsigned short __b,
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
int *__cc) {
return __builtin_s390_vfeehs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector signed int
-vec_cmpeq_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
- return (vector signed int)
- __builtin_s390_vfeefs((vector unsigned int)__a,
- (vector unsigned int)__b, __cc);
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpeq_idx_cc(__vector signed int __a, __vector signed int __b, int *__cc) {
+ return (__vector signed int)
+ __builtin_s390_vfeefs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
- return __builtin_s390_vfeefs((vector unsigned int)__a,
- (vector unsigned int)__b, __cc);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_idx_cc(__vector __bool int __a, __vector __bool int __b, int *__cc) {
+ return __builtin_s390_vfeefs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_idx_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
+ int *__cc) {
return __builtin_s390_vfeefs(__a, __b, __cc);
}
/*-- vec_cmpeq_or_0_idx -----------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_cmpeq_or_0_idx(vector signed char __a, vector signed char __b) {
- return (vector signed char)
- __builtin_s390_vfeezb((vector unsigned char)__a,
- (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpeq_or_0_idx(__vector signed char __a, __vector signed char __b) {
+ return (__vector signed char)
+ __builtin_s390_vfeezb((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_or_0_idx(vector bool char __a, vector bool char __b) {
- return __builtin_s390_vfeezb((vector unsigned char)__a,
- (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_or_0_idx(__vector __bool char __a, __vector __bool char __b) {
+ return __builtin_s390_vfeezb((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_or_0_idx(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vfeezb(__a, __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_cmpeq_or_0_idx(vector signed short __a, vector signed short __b) {
- return (vector signed short)
- __builtin_s390_vfeezh((vector unsigned short)__a,
- (vector unsigned short)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpeq_or_0_idx(__vector signed short __a, __vector signed short __b) {
+ return (__vector signed short)
+ __builtin_s390_vfeezh((__vector unsigned short)__a,
+ (__vector unsigned short)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_or_0_idx(vector bool short __a, vector bool short __b) {
- return __builtin_s390_vfeezh((vector unsigned short)__a,
- (vector unsigned short)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_or_0_idx(__vector __bool short __a, __vector __bool short __b) {
+ return __builtin_s390_vfeezh((__vector unsigned short)__a,
+ (__vector unsigned short)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_or_0_idx(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vfeezh(__a, __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_cmpeq_or_0_idx(vector signed int __a, vector signed int __b) {
- return (vector signed int)
- __builtin_s390_vfeezf((vector unsigned int)__a,
- (vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpeq_or_0_idx(__vector signed int __a, __vector signed int __b) {
+ return (__vector signed int)
+ __builtin_s390_vfeezf((__vector unsigned int)__a,
+ (__vector unsigned int)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_or_0_idx(vector bool int __a, vector bool int __b) {
- return __builtin_s390_vfeezf((vector unsigned int)__a,
- (vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_or_0_idx(__vector __bool int __a, __vector __bool int __b) {
+ return __builtin_s390_vfeezf((__vector unsigned int)__a,
+ (__vector unsigned int)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_or_0_idx(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vfeezf(__a, __b);
}
/*-- vec_cmpeq_or_0_idx_cc --------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_cmpeq_or_0_idx_cc(vector signed char __a, vector signed char __b,
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpeq_or_0_idx_cc(__vector signed char __a, __vector signed char __b,
int *__cc) {
- return (vector signed char)
- __builtin_s390_vfeezbs((vector unsigned char)__a,
- (vector unsigned char)__b, __cc);
+ return (__vector signed char)
+ __builtin_s390_vfeezbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_or_0_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
- return __builtin_s390_vfeezbs((vector unsigned char)__a,
- (vector unsigned char)__b, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_or_0_idx_cc(__vector __bool char __a, __vector __bool char __b,
+ int *__cc) {
+ return __builtin_s390_vfeezbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_or_0_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
int *__cc) {
return __builtin_s390_vfeezbs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector signed short
-vec_cmpeq_or_0_idx_cc(vector signed short __a, vector signed short __b,
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpeq_or_0_idx_cc(__vector signed short __a, __vector signed short __b,
int *__cc) {
- return (vector signed short)
- __builtin_s390_vfeezhs((vector unsigned short)__a,
- (vector unsigned short)__b, __cc);
+ return (__vector signed short)
+ __builtin_s390_vfeezhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_or_0_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
- return __builtin_s390_vfeezhs((vector unsigned short)__a,
- (vector unsigned short)__b, __cc);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_or_0_idx_cc(__vector __bool short __a, __vector __bool short __b,
+ int *__cc) {
+ return __builtin_s390_vfeezhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_or_0_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
int *__cc) {
return __builtin_s390_vfeezhs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector signed int
-vec_cmpeq_or_0_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
- return (vector signed int)
- __builtin_s390_vfeezfs((vector unsigned int)__a,
- (vector unsigned int)__b, __cc);
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpeq_or_0_idx_cc(__vector signed int __a, __vector signed int __b,
+ int *__cc) {
+ return (__vector signed int)
+ __builtin_s390_vfeezfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_or_0_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
- return __builtin_s390_vfeezfs((vector unsigned int)__a,
- (vector unsigned int)__b, __cc);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_or_0_idx_cc(__vector __bool int __a, __vector __bool int __b,
+ int *__cc) {
+ return __builtin_s390_vfeezfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_or_0_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
int *__cc) {
return __builtin_s390_vfeezfs(__a, __b, __cc);
}
/*-- vec_cmpne_idx ----------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_cmpne_idx(vector signed char __a, vector signed char __b) {
- return (vector signed char)
- __builtin_s390_vfeneb((vector unsigned char)__a,
- (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpne_idx(__vector signed char __a, __vector signed char __b) {
+ return (__vector signed char)
+ __builtin_s390_vfeneb((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_idx(vector bool char __a, vector bool char __b) {
- return __builtin_s390_vfeneb((vector unsigned char)__a,
- (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_idx(__vector __bool char __a, __vector __bool char __b) {
+ return __builtin_s390_vfeneb((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_idx(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_idx(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vfeneb(__a, __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_cmpne_idx(vector signed short __a, vector signed short __b) {
- return (vector signed short)
- __builtin_s390_vfeneh((vector unsigned short)__a,
- (vector unsigned short)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpne_idx(__vector signed short __a, __vector signed short __b) {
+ return (__vector signed short)
+ __builtin_s390_vfeneh((__vector unsigned short)__a,
+ (__vector unsigned short)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_idx(vector bool short __a, vector bool short __b) {
- return __builtin_s390_vfeneh((vector unsigned short)__a,
- (vector unsigned short)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_idx(__vector __bool short __a, __vector __bool short __b) {
+ return __builtin_s390_vfeneh((__vector unsigned short)__a,
+ (__vector unsigned short)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_idx(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_idx(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vfeneh(__a, __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_cmpne_idx(vector signed int __a, vector signed int __b) {
- return (vector signed int)
- __builtin_s390_vfenef((vector unsigned int)__a,
- (vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpne_idx(__vector signed int __a, __vector signed int __b) {
+ return (__vector signed int)
+ __builtin_s390_vfenef((__vector unsigned int)__a,
+ (__vector unsigned int)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_idx(vector bool int __a, vector bool int __b) {
- return __builtin_s390_vfenef((vector unsigned int)__a,
- (vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_idx(__vector __bool int __a, __vector __bool int __b) {
+ return __builtin_s390_vfenef((__vector unsigned int)__a,
+ (__vector unsigned int)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_idx(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_idx(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vfenef(__a, __b);
}
/*-- vec_cmpne_idx_cc -------------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_cmpne_idx_cc(vector signed char __a, vector signed char __b, int *__cc) {
- return (vector signed char)
- __builtin_s390_vfenebs((vector unsigned char)__a,
- (vector unsigned char)__b, __cc);
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpne_idx_cc(__vector signed char __a, __vector signed char __b, int *__cc) {
+ return (__vector signed char)
+ __builtin_s390_vfenebs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
- return __builtin_s390_vfenebs((vector unsigned char)__a,
- (vector unsigned char)__b, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_idx_cc(__vector __bool char __a, __vector __bool char __b, int *__cc) {
+ return __builtin_s390_vfenebs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_idx_cc(vector unsigned char __a, vector unsigned char __b,
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
int *__cc) {
return __builtin_s390_vfenebs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector signed short
-vec_cmpne_idx_cc(vector signed short __a, vector signed short __b, int *__cc) {
- return (vector signed short)
- __builtin_s390_vfenehs((vector unsigned short)__a,
- (vector unsigned short)__b, __cc);
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpne_idx_cc(__vector signed short __a, __vector signed short __b,
+ int *__cc) {
+ return (__vector signed short)
+ __builtin_s390_vfenehs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
- return __builtin_s390_vfenehs((vector unsigned short)__a,
- (vector unsigned short)__b, __cc);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_idx_cc(__vector __bool short __a, __vector __bool short __b,
+ int *__cc) {
+ return __builtin_s390_vfenehs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_idx_cc(vector unsigned short __a, vector unsigned short __b,
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
int *__cc) {
return __builtin_s390_vfenehs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector signed int
-vec_cmpne_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
- return (vector signed int)
- __builtin_s390_vfenefs((vector unsigned int)__a,
- (vector unsigned int)__b, __cc);
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpne_idx_cc(__vector signed int __a, __vector signed int __b, int *__cc) {
+ return (__vector signed int)
+ __builtin_s390_vfenefs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
- return __builtin_s390_vfenefs((vector unsigned int)__a,
- (vector unsigned int)__b, __cc);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_idx_cc(__vector __bool int __a, __vector __bool int __b, int *__cc) {
+ return __builtin_s390_vfenefs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_idx_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
+ int *__cc) {
return __builtin_s390_vfenefs(__a, __b, __cc);
}
/*-- vec_cmpne_or_0_idx -----------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_cmpne_or_0_idx(vector signed char __a, vector signed char __b) {
- return (vector signed char)
- __builtin_s390_vfenezb((vector unsigned char)__a,
- (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpne_or_0_idx(__vector signed char __a, __vector signed char __b) {
+ return (__vector signed char)
+ __builtin_s390_vfenezb((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_or_0_idx(vector bool char __a, vector bool char __b) {
- return __builtin_s390_vfenezb((vector unsigned char)__a,
- (vector unsigned char)__b);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_or_0_idx(__vector __bool char __a, __vector __bool char __b) {
+ return __builtin_s390_vfenezb((__vector unsigned char)__a,
+ (__vector unsigned char)__b);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_or_0_idx(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vfenezb(__a, __b);
}
-static inline __ATTRS_o_ai vector signed short
-vec_cmpne_or_0_idx(vector signed short __a, vector signed short __b) {
- return (vector signed short)
- __builtin_s390_vfenezh((vector unsigned short)__a,
- (vector unsigned short)__b);
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpne_or_0_idx(__vector signed short __a, __vector signed short __b) {
+ return (__vector signed short)
+ __builtin_s390_vfenezh((__vector unsigned short)__a,
+ (__vector unsigned short)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_or_0_idx(vector bool short __a, vector bool short __b) {
- return __builtin_s390_vfenezh((vector unsigned short)__a,
- (vector unsigned short)__b);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_or_0_idx(__vector __bool short __a, __vector __bool short __b) {
+ return __builtin_s390_vfenezh((__vector unsigned short)__a,
+ (__vector unsigned short)__b);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_or_0_idx(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vfenezh(__a, __b);
}
-static inline __ATTRS_o_ai vector signed int
-vec_cmpne_or_0_idx(vector signed int __a, vector signed int __b) {
- return (vector signed int)
- __builtin_s390_vfenezf((vector unsigned int)__a,
- (vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpne_or_0_idx(__vector signed int __a, __vector signed int __b) {
+ return (__vector signed int)
+ __builtin_s390_vfenezf((__vector unsigned int)__a,
+ (__vector unsigned int)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_or_0_idx(vector bool int __a, vector bool int __b) {
- return __builtin_s390_vfenezf((vector unsigned int)__a,
- (vector unsigned int)__b);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_or_0_idx(__vector __bool int __a, __vector __bool int __b) {
+ return __builtin_s390_vfenezf((__vector unsigned int)__a,
+ (__vector unsigned int)__b);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_or_0_idx(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vfenezf(__a, __b);
}
/*-- vec_cmpne_or_0_idx_cc --------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_cmpne_or_0_idx_cc(vector signed char __a, vector signed char __b,
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpne_or_0_idx_cc(__vector signed char __a, __vector signed char __b,
int *__cc) {
- return (vector signed char)
- __builtin_s390_vfenezbs((vector unsigned char)__a,
- (vector unsigned char)__b, __cc);
+ return (__vector signed char)
+ __builtin_s390_vfenezbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_or_0_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
- return __builtin_s390_vfenezbs((vector unsigned char)__a,
- (vector unsigned char)__b, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_or_0_idx_cc(__vector __bool char __a, __vector __bool char __b,
+ int *__cc) {
+ return __builtin_s390_vfenezbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_or_0_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
int *__cc) {
return __builtin_s390_vfenezbs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector signed short
-vec_cmpne_or_0_idx_cc(vector signed short __a, vector signed short __b,
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpne_or_0_idx_cc(__vector signed short __a, __vector signed short __b,
int *__cc) {
- return (vector signed short)
- __builtin_s390_vfenezhs((vector unsigned short)__a,
- (vector unsigned short)__b, __cc);
+ return (__vector signed short)
+ __builtin_s390_vfenezhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_or_0_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
- return __builtin_s390_vfenezhs((vector unsigned short)__a,
- (vector unsigned short)__b, __cc);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_or_0_idx_cc(__vector __bool short __a, __vector __bool short __b,
+ int *__cc) {
+ return __builtin_s390_vfenezhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_or_0_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
int *__cc) {
return __builtin_s390_vfenezhs(__a, __b, __cc);
}
-static inline __ATTRS_o_ai vector signed int
-vec_cmpne_or_0_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
- return (vector signed int)
- __builtin_s390_vfenezfs((vector unsigned int)__a,
- (vector unsigned int)__b, __cc);
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpne_or_0_idx_cc(__vector signed int __a, __vector signed int __b,
+ int *__cc) {
+ return (__vector signed int)
+ __builtin_s390_vfenezfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_or_0_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
- return __builtin_s390_vfenezfs((vector unsigned int)__a,
- (vector unsigned int)__b, __cc);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_or_0_idx_cc(__vector __bool int __a, __vector __bool int __b,
+ int *__cc) {
+ return __builtin_s390_vfenezfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_or_0_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
int *__cc) {
return __builtin_s390_vfenezfs(__a, __b, __cc);
}
/*-- vec_cmprg --------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_cmprg(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
- return (vector bool char)__builtin_s390_vstrcb(__a, __b, __c, 4);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmprg(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
+ return (__vector __bool char)__builtin_s390_vstrcb(__a, __b, __c, 4);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmprg(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c) {
- return (vector bool short)__builtin_s390_vstrch(__a, __b, __c, 4);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmprg(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c) {
+ return (__vector __bool short)__builtin_s390_vstrch(__a, __b, __c, 4);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmprg(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c) {
- return (vector bool int)__builtin_s390_vstrcf(__a, __b, __c, 4);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmprg(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c) {
+ return (__vector __bool int)__builtin_s390_vstrcf(__a, __b, __c, 4);
}
/*-- vec_cmprg_cc -----------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_cmprg_cc(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c, int *__cc) {
- return (vector bool char)__builtin_s390_vstrcbs(__a, __b, __c, 4, __cc);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmprg_cc(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c, int *__cc) {
+ return (__vector __bool char)__builtin_s390_vstrcbs(__a, __b, __c, 4, __cc);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmprg_cc(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c, int *__cc) {
- return (vector bool short)__builtin_s390_vstrchs(__a, __b, __c, 4, __cc);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmprg_cc(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c, int *__cc) {
+ return (__vector __bool short)__builtin_s390_vstrchs(__a, __b, __c, 4, __cc);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmprg_cc(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c, int *__cc) {
- return (vector bool int)__builtin_s390_vstrcfs(__a, __b, __c, 4, __cc);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmprg_cc(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c, int *__cc) {
+ return (__vector __bool int)__builtin_s390_vstrcfs(__a, __b, __c, 4, __cc);
}
/*-- vec_cmprg_idx ----------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmprg_idx(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmprg_idx(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
return __builtin_s390_vstrcb(__a, __b, __c, 0);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmprg_idx(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmprg_idx(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c) {
return __builtin_s390_vstrch(__a, __b, __c, 0);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmprg_idx(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmprg_idx(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c) {
return __builtin_s390_vstrcf(__a, __b, __c, 0);
}
/*-- vec_cmprg_idx_cc -------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmprg_idx_cc(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmprg_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c, int *__cc) {
return __builtin_s390_vstrcbs(__a, __b, __c, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmprg_idx_cc(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmprg_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c, int *__cc) {
return __builtin_s390_vstrchs(__a, __b, __c, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmprg_idx_cc(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmprg_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c, int *__cc) {
return __builtin_s390_vstrcfs(__a, __b, __c, 0, __cc);
}
/*-- vec_cmprg_or_0_idx -----------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmprg_or_0_idx(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmprg_or_0_idx(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
return __builtin_s390_vstrczb(__a, __b, __c, 0);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmprg_or_0_idx(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmprg_or_0_idx(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c) {
return __builtin_s390_vstrczh(__a, __b, __c, 0);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmprg_or_0_idx(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmprg_or_0_idx(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c) {
return __builtin_s390_vstrczf(__a, __b, __c, 0);
}
/*-- vec_cmprg_or_0_idx_cc --------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmprg_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmprg_or_0_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c, int *__cc) {
return __builtin_s390_vstrczbs(__a, __b, __c, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmprg_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmprg_or_0_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c, int *__cc) {
return __builtin_s390_vstrczhs(__a, __b, __c, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmprg_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmprg_or_0_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c, int *__cc) {
return __builtin_s390_vstrczfs(__a, __b, __c, 0, __cc);
}
/*-- vec_cmpnrg -------------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_cmpnrg(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
- return (vector bool char)__builtin_s390_vstrcb(__a, __b, __c, 12);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpnrg(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
+ return (__vector __bool char)__builtin_s390_vstrcb(__a, __b, __c, 12);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmpnrg(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c) {
- return (vector bool short)__builtin_s390_vstrch(__a, __b, __c, 12);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpnrg(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c) {
+ return (__vector __bool short)__builtin_s390_vstrch(__a, __b, __c, 12);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmpnrg(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c) {
- return (vector bool int)__builtin_s390_vstrcf(__a, __b, __c, 12);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpnrg(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c) {
+ return (__vector __bool int)__builtin_s390_vstrcf(__a, __b, __c, 12);
}
/*-- vec_cmpnrg_cc ----------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_cmpnrg_cc(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c, int *__cc) {
- return (vector bool char)__builtin_s390_vstrcbs(__a, __b, __c, 12, __cc);
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpnrg_cc(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c, int *__cc) {
+ return (__vector __bool char)
+ __builtin_s390_vstrcbs(__a, __b, __c, 12, __cc);
}
-static inline __ATTRS_o_ai vector bool short
-vec_cmpnrg_cc(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c, int *__cc) {
- return (vector bool short)__builtin_s390_vstrchs(__a, __b, __c, 12, __cc);
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpnrg_cc(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c, int *__cc) {
+ return (__vector __bool short)
+ __builtin_s390_vstrchs(__a, __b, __c, 12, __cc);
}
-static inline __ATTRS_o_ai vector bool int
-vec_cmpnrg_cc(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c, int *__cc) {
- return (vector bool int)__builtin_s390_vstrcfs(__a, __b, __c, 12, __cc);
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpnrg_cc(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c, int *__cc) {
+ return (__vector __bool int)
+ __builtin_s390_vstrcfs(__a, __b, __c, 12, __cc);
}
/*-- vec_cmpnrg_idx ---------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpnrg_idx(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpnrg_idx(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
return __builtin_s390_vstrcb(__a, __b, __c, 8);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpnrg_idx(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpnrg_idx(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c) {
return __builtin_s390_vstrch(__a, __b, __c, 8);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpnrg_idx(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpnrg_idx(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c) {
return __builtin_s390_vstrcf(__a, __b, __c, 8);
}
/*-- vec_cmpnrg_idx_cc ------------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpnrg_idx_cc(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpnrg_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c, int *__cc) {
return __builtin_s390_vstrcbs(__a, __b, __c, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpnrg_idx_cc(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpnrg_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c, int *__cc) {
return __builtin_s390_vstrchs(__a, __b, __c, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpnrg_idx_cc(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpnrg_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c, int *__cc) {
return __builtin_s390_vstrcfs(__a, __b, __c, 8, __cc);
}
/*-- vec_cmpnrg_or_0_idx ----------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpnrg_or_0_idx(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpnrg_or_0_idx(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c) {
return __builtin_s390_vstrczb(__a, __b, __c, 8);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpnrg_or_0_idx(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpnrg_or_0_idx(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned short __c) {
return __builtin_s390_vstrczh(__a, __b, __c, 8);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpnrg_or_0_idx(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpnrg_or_0_idx(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned int __c) {
return __builtin_s390_vstrczf(__a, __b, __c, 8);
}
/*-- vec_cmpnrg_or_0_idx_cc -------------------------------------------------*/
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpnrg_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpnrg_or_0_idx_cc(__vector unsigned char __a,
+ __vector unsigned char __b,
+ __vector unsigned char __c, int *__cc) {
return __builtin_s390_vstrczbs(__a, __b, __c, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpnrg_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
- vector unsigned short __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpnrg_or_0_idx_cc(__vector unsigned short __a,
+ __vector unsigned short __b,
+ __vector unsigned short __c, int *__cc) {
return __builtin_s390_vstrczhs(__a, __b, __c, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpnrg_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
- vector unsigned int __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpnrg_or_0_idx_cc(__vector unsigned int __a,
+ __vector unsigned int __b,
+ __vector unsigned int __c, int *__cc) {
return __builtin_s390_vstrczfs(__a, __b, __c, 8, __cc);
}
/*-- vec_find_any_eq --------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_eq(vector signed char __a, vector signed char __b) {
- return (vector bool char)
- __builtin_s390_vfaeb((vector unsigned char)__a,
- (vector unsigned char)__b, 4);
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_eq(__vector signed char __a, __vector signed char __b) {
+ return (__vector __bool char)
+ __builtin_s390_vfaeb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 4);
}
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_eq(vector bool char __a, vector bool char __b) {
- return (vector bool char)
- __builtin_s390_vfaeb((vector unsigned char)__a,
- (vector unsigned char)__b, 4);
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_eq(__vector __bool char __a, __vector __bool char __b) {
+ return (__vector __bool char)
+ __builtin_s390_vfaeb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 4);
}
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_eq(vector unsigned char __a, vector unsigned char __b) {
- return (vector bool char)__builtin_s390_vfaeb(__a, __b, 4);
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_eq(__vector unsigned char __a, __vector unsigned char __b) {
+ return (__vector __bool char)__builtin_s390_vfaeb(__a, __b, 4);
}
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_eq(vector signed short __a, vector signed short __b) {
- return (vector bool short)
- __builtin_s390_vfaeh((vector unsigned short)__a,
- (vector unsigned short)__b, 4);
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_eq(__vector signed short __a, __vector signed short __b) {
+ return (__vector __bool short)
+ __builtin_s390_vfaeh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 4);
}
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_eq(vector bool short __a, vector bool short __b) {
- return (vector bool short)
- __builtin_s390_vfaeh((vector unsigned short)__a,
- (vector unsigned short)__b, 4);
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_eq(__vector __bool short __a, __vector __bool short __b) {
+ return (__vector __bool short)
+ __builtin_s390_vfaeh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 4);
}
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_eq(vector unsigned short __a, vector unsigned short __b) {
- return (vector bool short)__builtin_s390_vfaeh(__a, __b, 4);
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_eq(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector __bool short)__builtin_s390_vfaeh(__a, __b, 4);
}
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_eq(vector signed int __a, vector signed int __b) {
- return (vector bool int)
- __builtin_s390_vfaef((vector unsigned int)__a,
- (vector unsigned int)__b, 4);
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_eq(__vector signed int __a, __vector signed int __b) {
+ return (__vector __bool int)
+ __builtin_s390_vfaef((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 4);
}
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_eq(vector bool int __a, vector bool int __b) {
- return (vector bool int)
- __builtin_s390_vfaef((vector unsigned int)__a,
- (vector unsigned int)__b, 4);
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_eq(__vector __bool int __a, __vector __bool int __b) {
+ return (__vector __bool int)
+ __builtin_s390_vfaef((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 4);
}
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_eq(vector unsigned int __a, vector unsigned int __b) {
- return (vector bool int)__builtin_s390_vfaef(__a, __b, 4);
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_eq(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector __bool int)__builtin_s390_vfaef(__a, __b, 4);
}
/*-- vec_find_any_eq_cc -----------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_eq_cc(vector signed char __a, vector signed char __b, int *__cc) {
- return (vector bool char)
- __builtin_s390_vfaebs((vector unsigned char)__a,
- (vector unsigned char)__b, 4, __cc);
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_eq_cc(__vector signed char __a, __vector signed char __b,
+ int *__cc) {
+ return (__vector __bool char)
+ __builtin_s390_vfaebs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 4, __cc);
}
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_eq_cc(vector bool char __a, vector bool char __b, int *__cc) {
- return (vector bool char)
- __builtin_s390_vfaebs((vector unsigned char)__a,
- (vector unsigned char)__b, 4, __cc);
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_eq_cc(__vector __bool char __a, __vector __bool char __b,
+ int *__cc) {
+ return (__vector __bool char)
+ __builtin_s390_vfaebs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 4, __cc);
}
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_eq_cc(vector unsigned char __a, vector unsigned char __b,
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_eq_cc(__vector unsigned char __a, __vector unsigned char __b,
int *__cc) {
- return (vector bool char)__builtin_s390_vfaebs(__a, __b, 4, __cc);
+ return (__vector __bool char)__builtin_s390_vfaebs(__a, __b, 4, __cc);
}
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_eq_cc(vector signed short __a, vector signed short __b,
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_eq_cc(__vector signed short __a, __vector signed short __b,
int *__cc) {
- return (vector bool short)
- __builtin_s390_vfaehs((vector unsigned short)__a,
- (vector unsigned short)__b, 4, __cc);
+ return (__vector __bool short)
+ __builtin_s390_vfaehs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 4, __cc);
}
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_eq_cc(vector bool short __a, vector bool short __b, int *__cc) {
- return (vector bool short)
- __builtin_s390_vfaehs((vector unsigned short)__a,
- (vector unsigned short)__b, 4, __cc);
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_eq_cc(__vector __bool short __a, __vector __bool short __b,
+ int *__cc) {
+ return (__vector __bool short)
+ __builtin_s390_vfaehs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 4, __cc);
}
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_eq_cc(vector unsigned short __a, vector unsigned short __b,
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_eq_cc(__vector unsigned short __a, __vector unsigned short __b,
int *__cc) {
- return (vector bool short)__builtin_s390_vfaehs(__a, __b, 4, __cc);
+ return (__vector __bool short)__builtin_s390_vfaehs(__a, __b, 4, __cc);
}
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_eq_cc(vector signed int __a, vector signed int __b, int *__cc) {
- return (vector bool int)
- __builtin_s390_vfaefs((vector unsigned int)__a,
- (vector unsigned int)__b, 4, __cc);
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_eq_cc(__vector signed int __a, __vector signed int __b,
+ int *__cc) {
+ return (__vector __bool int)
+ __builtin_s390_vfaefs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 4, __cc);
}
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_eq_cc(vector bool int __a, vector bool int __b, int *__cc) {
- return (vector bool int)
- __builtin_s390_vfaefs((vector unsigned int)__a,
- (vector unsigned int)__b, 4, __cc);
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_eq_cc(__vector __bool int __a, __vector __bool int __b,
+ int *__cc) {
+ return (__vector __bool int)
+ __builtin_s390_vfaefs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 4, __cc);
}
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_eq_cc(vector unsigned int __a, vector unsigned int __b,
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_eq_cc(__vector unsigned int __a, __vector unsigned int __b,
int *__cc) {
- return (vector bool int)__builtin_s390_vfaefs(__a, __b, 4, __cc);
+ return (__vector __bool int)__builtin_s390_vfaefs(__a, __b, 4, __cc);
}
/*-- vec_find_any_eq_idx ----------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_eq_idx(vector signed char __a, vector signed char __b) {
- return (vector signed char)
- __builtin_s390_vfaeb((vector unsigned char)__a,
- (vector unsigned char)__b, 0);
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_eq_idx(__vector signed char __a, __vector signed char __b) {
+ return (__vector signed char)
+ __builtin_s390_vfaeb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 0);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_idx(vector bool char __a, vector bool char __b) {
- return __builtin_s390_vfaeb((vector unsigned char)__a,
- (vector unsigned char)__b, 0);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_idx(__vector __bool char __a, __vector __bool char __b) {
+ return __builtin_s390_vfaeb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 0);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_idx(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_idx(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vfaeb(__a, __b, 0);
}
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_eq_idx(vector signed short __a, vector signed short __b) {
- return (vector signed short)
- __builtin_s390_vfaeh((vector unsigned short)__a,
- (vector unsigned short)__b, 0);
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_eq_idx(__vector signed short __a, __vector signed short __b) {
+ return (__vector signed short)
+ __builtin_s390_vfaeh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 0);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_idx(vector bool short __a, vector bool short __b) {
- return __builtin_s390_vfaeh((vector unsigned short)__a,
- (vector unsigned short)__b, 0);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_idx(__vector __bool short __a, __vector __bool short __b) {
+ return __builtin_s390_vfaeh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 0);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_idx(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_idx(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vfaeh(__a, __b, 0);
}
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_eq_idx(vector signed int __a, vector signed int __b) {
- return (vector signed int)
- __builtin_s390_vfaef((vector unsigned int)__a,
- (vector unsigned int)__b, 0);
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_eq_idx(__vector signed int __a, __vector signed int __b) {
+ return (__vector signed int)
+ __builtin_s390_vfaef((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 0);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_idx(vector bool int __a, vector bool int __b) {
- return __builtin_s390_vfaef((vector unsigned int)__a,
- (vector unsigned int)__b, 0);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_idx(__vector __bool int __a, __vector __bool int __b) {
+ return __builtin_s390_vfaef((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 0);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_idx(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_idx(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vfaef(__a, __b, 0);
}
/*-- vec_find_any_eq_idx_cc -------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_eq_idx_cc(vector signed char __a, vector signed char __b,
- int *__cc) {
- return (vector signed char)
- __builtin_s390_vfaebs((vector unsigned char)__a,
- (vector unsigned char)__b, 0, __cc);
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_eq_idx_cc(__vector signed char __a,
+ __vector signed char __b, int *__cc) {
+ return (__vector signed char)
+ __builtin_s390_vfaebs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
- return __builtin_s390_vfaebs((vector unsigned char)__a,
- (vector unsigned char)__b, 0, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_idx_cc(__vector __bool char __a,
+ __vector __bool char __b, int *__cc) {
+ return __builtin_s390_vfaebs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_idx_cc(vector unsigned char __a, vector unsigned char __b,
- int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_idx_cc(__vector unsigned char __a,
+ __vector unsigned char __b, int *__cc) {
return __builtin_s390_vfaebs(__a, __b, 0, __cc);
}
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_eq_idx_cc(vector signed short __a, vector signed short __b,
- int *__cc) {
- return (vector signed short)
- __builtin_s390_vfaehs((vector unsigned short)__a,
- (vector unsigned short)__b, 0, __cc);
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_eq_idx_cc(__vector signed short __a,
+ __vector signed short __b, int *__cc) {
+ return (__vector signed short)
+ __builtin_s390_vfaehs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_idx_cc(vector bool short __a, vector bool short __b,
- int *__cc) {
- return __builtin_s390_vfaehs((vector unsigned short)__a,
- (vector unsigned short)__b, 0, __cc);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_idx_cc(__vector __bool short __a,
+ __vector __bool short __b, int *__cc) {
+ return __builtin_s390_vfaehs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_idx_cc(vector unsigned short __a, vector unsigned short __b,
- int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_idx_cc(__vector unsigned short __a,
+ __vector unsigned short __b, int *__cc) {
return __builtin_s390_vfaehs(__a, __b, 0, __cc);
}
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_eq_idx_cc(vector signed int __a, vector signed int __b,
- int *__cc) {
- return (vector signed int)
- __builtin_s390_vfaefs((vector unsigned int)__a,
- (vector unsigned int)__b, 0, __cc);
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_eq_idx_cc(__vector signed int __a,
+ __vector signed int __b, int *__cc) {
+ return (__vector signed int)
+ __builtin_s390_vfaefs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
- return __builtin_s390_vfaefs((vector unsigned int)__a,
- (vector unsigned int)__b, 0, __cc);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_idx_cc(__vector __bool int __a,
+ __vector __bool int __b, int *__cc) {
+ return __builtin_s390_vfaefs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_idx_cc(vector unsigned int __a, vector unsigned int __b,
- int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_idx_cc(__vector unsigned int __a,
+ __vector unsigned int __b, int *__cc) {
return __builtin_s390_vfaefs(__a, __b, 0, __cc);
}
/*-- vec_find_any_eq_or_0_idx -----------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_eq_or_0_idx(vector signed char __a, vector signed char __b) {
- return (vector signed char)
- __builtin_s390_vfaezb((vector unsigned char)__a,
- (vector unsigned char)__b, 0);
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_eq_or_0_idx(__vector signed char __a,
+ __vector signed char __b) {
+ return (__vector signed char)
+ __builtin_s390_vfaezb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 0);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_or_0_idx(vector bool char __a, vector bool char __b) {
- return __builtin_s390_vfaezb((vector unsigned char)__a,
- (vector unsigned char)__b, 0);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_or_0_idx(__vector __bool char __a,
+ __vector __bool char __b) {
+ return __builtin_s390_vfaezb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 0);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_or_0_idx(__vector unsigned char __a,
+ __vector unsigned char __b) {
return __builtin_s390_vfaezb(__a, __b, 0);
}
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_eq_or_0_idx(vector signed short __a, vector signed short __b) {
- return (vector signed short)
- __builtin_s390_vfaezh((vector unsigned short)__a,
- (vector unsigned short)__b, 0);
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_eq_or_0_idx(__vector signed short __a,
+ __vector signed short __b) {
+ return (__vector signed short)
+ __builtin_s390_vfaezh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 0);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_or_0_idx(vector bool short __a, vector bool short __b) {
- return __builtin_s390_vfaezh((vector unsigned short)__a,
- (vector unsigned short)__b, 0);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_or_0_idx(__vector __bool short __a,
+ __vector __bool short __b) {
+ return __builtin_s390_vfaezh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 0);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_or_0_idx(__vector unsigned short __a,
+ __vector unsigned short __b) {
return __builtin_s390_vfaezh(__a, __b, 0);
}
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_eq_or_0_idx(vector signed int __a, vector signed int __b) {
- return (vector signed int)
- __builtin_s390_vfaezf((vector unsigned int)__a,
- (vector unsigned int)__b, 0);
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_eq_or_0_idx(__vector signed int __a,
+ __vector signed int __b) {
+ return (__vector signed int)
+ __builtin_s390_vfaezf((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 0);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_or_0_idx(vector bool int __a, vector bool int __b) {
- return __builtin_s390_vfaezf((vector unsigned int)__a,
- (vector unsigned int)__b, 0);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_or_0_idx(__vector __bool int __a,
+ __vector __bool int __b) {
+ return __builtin_s390_vfaezf((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 0);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_or_0_idx(__vector unsigned int __a,
+ __vector unsigned int __b) {
return __builtin_s390_vfaezf(__a, __b, 0);
}
/*-- vec_find_any_eq_or_0_idx_cc --------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_eq_or_0_idx_cc(vector signed char __a, vector signed char __b,
- int *__cc) {
- return (vector signed char)
- __builtin_s390_vfaezbs((vector unsigned char)__a,
- (vector unsigned char)__b, 0, __cc);
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_eq_or_0_idx_cc(__vector signed char __a,
+ __vector signed char __b, int *__cc) {
+ return (__vector signed char)
+ __builtin_s390_vfaezbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_or_0_idx_cc(vector bool char __a, vector bool char __b,
- int *__cc) {
- return __builtin_s390_vfaezbs((vector unsigned char)__a,
- (vector unsigned char)__b, 0, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_or_0_idx_cc(__vector __bool char __a,
+ __vector __bool char __b, int *__cc) {
+ return __builtin_s390_vfaezbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
- int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_or_0_idx_cc(__vector unsigned char __a,
+ __vector unsigned char __b, int *__cc) {
return __builtin_s390_vfaezbs(__a, __b, 0, __cc);
}
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_eq_or_0_idx_cc(vector signed short __a, vector signed short __b,
- int *__cc) {
- return (vector signed short)
- __builtin_s390_vfaezhs((vector unsigned short)__a,
- (vector unsigned short)__b, 0, __cc);
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_eq_or_0_idx_cc(__vector signed short __a,
+ __vector signed short __b, int *__cc) {
+ return (__vector signed short)
+ __builtin_s390_vfaezhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_or_0_idx_cc(vector bool short __a, vector bool short __b,
- int *__cc) {
- return __builtin_s390_vfaezhs((vector unsigned short)__a,
- (vector unsigned short)__b, 0, __cc);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_or_0_idx_cc(__vector __bool short __a,
+ __vector __bool short __b, int *__cc) {
+ return __builtin_s390_vfaezhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_or_0_idx_cc(vector unsigned short __a,
- vector unsigned short __b, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_or_0_idx_cc(__vector unsigned short __a,
+ __vector unsigned short __b, int *__cc) {
return __builtin_s390_vfaezhs(__a, __b, 0, __cc);
}
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_eq_or_0_idx_cc(vector signed int __a, vector signed int __b,
- int *__cc) {
- return (vector signed int)
- __builtin_s390_vfaezfs((vector unsigned int)__a,
- (vector unsigned int)__b, 0, __cc);
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_eq_or_0_idx_cc(__vector signed int __a,
+ __vector signed int __b, int *__cc) {
+ return (__vector signed int)
+ __builtin_s390_vfaezfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_or_0_idx_cc(vector bool int __a, vector bool int __b,
- int *__cc) {
- return __builtin_s390_vfaezfs((vector unsigned int)__a,
- (vector unsigned int)__b, 0, __cc);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_or_0_idx_cc(__vector __bool int __a,
+ __vector __bool int __b, int *__cc) {
+ return __builtin_s390_vfaezfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 0, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
- int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_or_0_idx_cc(__vector unsigned int __a,
+ __vector unsigned int __b, int *__cc) {
return __builtin_s390_vfaezfs(__a, __b, 0, __cc);
}
/*-- vec_find_any_ne --------------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_ne(vector signed char __a, vector signed char __b) {
- return (vector bool char)
- __builtin_s390_vfaeb((vector unsigned char)__a,
- (vector unsigned char)__b, 12);
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_ne(__vector signed char __a, __vector signed char __b) {
+ return (__vector __bool char)
+ __builtin_s390_vfaeb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 12);
}
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_ne(vector bool char __a, vector bool char __b) {
- return (vector bool char)
- __builtin_s390_vfaeb((vector unsigned char)__a,
- (vector unsigned char)__b, 12);
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_ne(__vector __bool char __a, __vector __bool char __b) {
+ return (__vector __bool char)
+ __builtin_s390_vfaeb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 12);
}
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_ne(vector unsigned char __a, vector unsigned char __b) {
- return (vector bool char)__builtin_s390_vfaeb(__a, __b, 12);
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_ne(__vector unsigned char __a, __vector unsigned char __b) {
+ return (__vector __bool char)__builtin_s390_vfaeb(__a, __b, 12);
}
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_ne(vector signed short __a, vector signed short __b) {
- return (vector bool short)
- __builtin_s390_vfaeh((vector unsigned short)__a,
- (vector unsigned short)__b, 12);
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_ne(__vector signed short __a, __vector signed short __b) {
+ return (__vector __bool short)
+ __builtin_s390_vfaeh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 12);
}
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_ne(vector bool short __a, vector bool short __b) {
- return (vector bool short)
- __builtin_s390_vfaeh((vector unsigned short)__a,
- (vector unsigned short)__b, 12);
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_ne(__vector __bool short __a, __vector __bool short __b) {
+ return (__vector __bool short)
+ __builtin_s390_vfaeh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 12);
}
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_ne(vector unsigned short __a, vector unsigned short __b) {
- return (vector bool short)__builtin_s390_vfaeh(__a, __b, 12);
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_ne(__vector unsigned short __a, __vector unsigned short __b) {
+ return (__vector __bool short)__builtin_s390_vfaeh(__a, __b, 12);
}
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_ne(vector signed int __a, vector signed int __b) {
- return (vector bool int)
- __builtin_s390_vfaef((vector unsigned int)__a,
- (vector unsigned int)__b, 12);
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_ne(__vector signed int __a, __vector signed int __b) {
+ return (__vector __bool int)
+ __builtin_s390_vfaef((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 12);
}
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_ne(vector bool int __a, vector bool int __b) {
- return (vector bool int)
- __builtin_s390_vfaef((vector unsigned int)__a,
- (vector unsigned int)__b, 12);
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_ne(__vector __bool int __a, __vector __bool int __b) {
+ return (__vector __bool int)
+ __builtin_s390_vfaef((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 12);
}
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_ne(vector unsigned int __a, vector unsigned int __b) {
- return (vector bool int)__builtin_s390_vfaef(__a, __b, 12);
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_ne(__vector unsigned int __a, __vector unsigned int __b) {
+ return (__vector __bool int)__builtin_s390_vfaef(__a, __b, 12);
}
/*-- vec_find_any_ne_cc -----------------------------------------------------*/
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_ne_cc(vector signed char __a, vector signed char __b, int *__cc) {
- return (vector bool char)
- __builtin_s390_vfaebs((vector unsigned char)__a,
- (vector unsigned char)__b, 12, __cc);
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_ne_cc(__vector signed char __a,
+ __vector signed char __b, int *__cc) {
+ return (__vector __bool char)
+ __builtin_s390_vfaebs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 12, __cc);
}
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_ne_cc(vector bool char __a, vector bool char __b, int *__cc) {
- return (vector bool char)
- __builtin_s390_vfaebs((vector unsigned char)__a,
- (vector unsigned char)__b, 12, __cc);
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_ne_cc(__vector __bool char __a,
+ __vector __bool char __b, int *__cc) {
+ return (__vector __bool char)
+ __builtin_s390_vfaebs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 12, __cc);
}
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_ne_cc(vector unsigned char __a, vector unsigned char __b,
- int *__cc) {
- return (vector bool char)__builtin_s390_vfaebs(__a, __b, 12, __cc);
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_ne_cc(__vector unsigned char __a,
+ __vector unsigned char __b, int *__cc) {
+ return (__vector __bool char)__builtin_s390_vfaebs(__a, __b, 12, __cc);
}
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_ne_cc(vector signed short __a, vector signed short __b,
- int *__cc) {
- return (vector bool short)
- __builtin_s390_vfaehs((vector unsigned short)__a,
- (vector unsigned short)__b, 12, __cc);
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_ne_cc(__vector signed short __a,
+ __vector signed short __b, int *__cc) {
+ return (__vector __bool short)
+ __builtin_s390_vfaehs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 12, __cc);
}
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_ne_cc(vector bool short __a, vector bool short __b, int *__cc) {
- return (vector bool short)
- __builtin_s390_vfaehs((vector unsigned short)__a,
- (vector unsigned short)__b, 12, __cc);
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_ne_cc(__vector __bool short __a,
+ __vector __bool short __b, int *__cc) {
+ return (__vector __bool short)
+ __builtin_s390_vfaehs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 12, __cc);
}
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_ne_cc(vector unsigned short __a, vector unsigned short __b,
- int *__cc) {
- return (vector bool short)__builtin_s390_vfaehs(__a, __b, 12, __cc);
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_ne_cc(__vector unsigned short __a,
+ __vector unsigned short __b, int *__cc) {
+ return (__vector __bool short)__builtin_s390_vfaehs(__a, __b, 12, __cc);
}
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_ne_cc(vector signed int __a, vector signed int __b, int *__cc) {
- return (vector bool int)
- __builtin_s390_vfaefs((vector unsigned int)__a,
- (vector unsigned int)__b, 12, __cc);
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_ne_cc(__vector signed int __a,
+ __vector signed int __b, int *__cc) {
+ return (__vector __bool int)
+ __builtin_s390_vfaefs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 12, __cc);
}
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_ne_cc(vector bool int __a, vector bool int __b, int *__cc) {
- return (vector bool int)
- __builtin_s390_vfaefs((vector unsigned int)__a,
- (vector unsigned int)__b, 12, __cc);
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_ne_cc(__vector __bool int __a,
+ __vector __bool int __b, int *__cc) {
+ return (__vector __bool int)
+ __builtin_s390_vfaefs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 12, __cc);
}
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_ne_cc(vector unsigned int __a, vector unsigned int __b,
- int *__cc) {
- return (vector bool int)__builtin_s390_vfaefs(__a, __b, 12, __cc);
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_ne_cc(__vector unsigned int __a,
+ __vector unsigned int __b, int *__cc) {
+ return (__vector __bool int)__builtin_s390_vfaefs(__a, __b, 12, __cc);
}
/*-- vec_find_any_ne_idx ----------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_ne_idx(vector signed char __a, vector signed char __b) {
- return (vector signed char)
- __builtin_s390_vfaeb((vector unsigned char)__a,
- (vector unsigned char)__b, 8);
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_ne_idx(__vector signed char __a, __vector signed char __b) {
+ return (__vector signed char)
+ __builtin_s390_vfaeb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 8);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_idx(vector bool char __a, vector bool char __b) {
- return __builtin_s390_vfaeb((vector unsigned char)__a,
- (vector unsigned char)__b, 8);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_idx(__vector __bool char __a, __vector __bool char __b) {
+ return __builtin_s390_vfaeb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 8);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_idx(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_idx(__vector unsigned char __a, __vector unsigned char __b) {
return __builtin_s390_vfaeb(__a, __b, 8);
}
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_ne_idx(vector signed short __a, vector signed short __b) {
- return (vector signed short)
- __builtin_s390_vfaeh((vector unsigned short)__a,
- (vector unsigned short)__b, 8);
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_ne_idx(__vector signed short __a, __vector signed short __b) {
+ return (__vector signed short)
+ __builtin_s390_vfaeh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 8);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_idx(vector bool short __a, vector bool short __b) {
- return __builtin_s390_vfaeh((vector unsigned short)__a,
- (vector unsigned short)__b, 8);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_idx(__vector __bool short __a, __vector __bool short __b) {
+ return __builtin_s390_vfaeh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 8);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_idx(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_idx(__vector unsigned short __a, __vector unsigned short __b) {
return __builtin_s390_vfaeh(__a, __b, 8);
}
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_ne_idx(vector signed int __a, vector signed int __b) {
- return (vector signed int)
- __builtin_s390_vfaef((vector unsigned int)__a,
- (vector unsigned int)__b, 8);
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_ne_idx(__vector signed int __a, __vector signed int __b) {
+ return (__vector signed int)
+ __builtin_s390_vfaef((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 8);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_idx(vector bool int __a, vector bool int __b) {
- return __builtin_s390_vfaef((vector unsigned int)__a,
- (vector unsigned int)__b, 8);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_idx(__vector __bool int __a, __vector __bool int __b) {
+ return __builtin_s390_vfaef((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 8);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_idx(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_idx(__vector unsigned int __a, __vector unsigned int __b) {
return __builtin_s390_vfaef(__a, __b, 8);
}
/*-- vec_find_any_ne_idx_cc -------------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_ne_idx_cc(vector signed char __a, vector signed char __b,
- int *__cc) {
- return (vector signed char)
- __builtin_s390_vfaebs((vector unsigned char)__a,
- (vector unsigned char)__b, 8, __cc);
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_ne_idx_cc(__vector signed char __a,
+ __vector signed char __b, int *__cc) {
+ return (__vector signed char)
+ __builtin_s390_vfaebs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
- return __builtin_s390_vfaebs((vector unsigned char)__a,
- (vector unsigned char)__b, 8, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_idx_cc(__vector __bool char __a,
+ __vector __bool char __b, int *__cc) {
+ return __builtin_s390_vfaebs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_idx_cc(vector unsigned char __a, vector unsigned char __b,
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_idx_cc(__vector unsigned char __a,
+ __vector unsigned char __b,
int *__cc) {
return __builtin_s390_vfaebs(__a, __b, 8, __cc);
}
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_ne_idx_cc(vector signed short __a, vector signed short __b,
- int *__cc) {
- return (vector signed short)
- __builtin_s390_vfaehs((vector unsigned short)__a,
- (vector unsigned short)__b, 8, __cc);
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_ne_idx_cc(__vector signed short __a,
+ __vector signed short __b, int *__cc) {
+ return (__vector signed short)
+ __builtin_s390_vfaehs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_idx_cc(vector bool short __a, vector bool short __b,
- int *__cc) {
- return __builtin_s390_vfaehs((vector unsigned short)__a,
- (vector unsigned short)__b, 8, __cc);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_idx_cc(__vector __bool short __a,
+ __vector __bool short __b, int *__cc) {
+ return __builtin_s390_vfaehs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_idx_cc(vector unsigned short __a, vector unsigned short __b,
- int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_idx_cc(__vector unsigned short __a,
+ __vector unsigned short __b, int *__cc) {
return __builtin_s390_vfaehs(__a, __b, 8, __cc);
}
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_ne_idx_cc(vector signed int __a, vector signed int __b,
- int *__cc) {
- return (vector signed int)
- __builtin_s390_vfaefs((vector unsigned int)__a,
- (vector unsigned int)__b, 8, __cc);
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_ne_idx_cc(__vector signed int __a,
+ __vector signed int __b, int *__cc) {
+ return (__vector signed int)
+ __builtin_s390_vfaefs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
- return __builtin_s390_vfaefs((vector unsigned int)__a,
- (vector unsigned int)__b, 8, __cc);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_idx_cc(__vector __bool int __a,
+ __vector __bool int __b, int *__cc) {
+ return __builtin_s390_vfaefs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_idx_cc(vector unsigned int __a, vector unsigned int __b,
- int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_idx_cc(__vector unsigned int __a,
+ __vector unsigned int __b, int *__cc) {
return __builtin_s390_vfaefs(__a, __b, 8, __cc);
}
/*-- vec_find_any_ne_or_0_idx -----------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_ne_or_0_idx(vector signed char __a, vector signed char __b) {
- return (vector signed char)
- __builtin_s390_vfaezb((vector unsigned char)__a,
- (vector unsigned char)__b, 8);
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_ne_or_0_idx(__vector signed char __a,
+ __vector signed char __b) {
+ return (__vector signed char)
+ __builtin_s390_vfaezb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 8);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_or_0_idx(vector bool char __a, vector bool char __b) {
- return __builtin_s390_vfaezb((vector unsigned char)__a,
- (vector unsigned char)__b, 8);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_or_0_idx(__vector __bool char __a,
+ __vector __bool char __b) {
+ return __builtin_s390_vfaezb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 8);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_or_0_idx(__vector unsigned char __a,
+ __vector unsigned char __b) {
return __builtin_s390_vfaezb(__a, __b, 8);
}
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_ne_or_0_idx(vector signed short __a, vector signed short __b) {
- return (vector signed short)
- __builtin_s390_vfaezh((vector unsigned short)__a,
- (vector unsigned short)__b, 8);
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_ne_or_0_idx(__vector signed short __a,
+ __vector signed short __b) {
+ return (__vector signed short)
+ __builtin_s390_vfaezh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 8);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_or_0_idx(vector bool short __a, vector bool short __b) {
- return __builtin_s390_vfaezh((vector unsigned short)__a,
- (vector unsigned short)__b, 8);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_or_0_idx(__vector __bool short __a,
+ __vector __bool short __b) {
+ return __builtin_s390_vfaezh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 8);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_or_0_idx(__vector unsigned short __a,
+ __vector unsigned short __b) {
return __builtin_s390_vfaezh(__a, __b, 8);
}
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_ne_or_0_idx(vector signed int __a, vector signed int __b) {
- return (vector signed int)
- __builtin_s390_vfaezf((vector unsigned int)__a,
- (vector unsigned int)__b, 8);
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_ne_or_0_idx(__vector signed int __a,
+ __vector signed int __b) {
+ return (__vector signed int)
+ __builtin_s390_vfaezf((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 8);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_or_0_idx(vector bool int __a, vector bool int __b) {
- return __builtin_s390_vfaezf((vector unsigned int)__a,
- (vector unsigned int)__b, 8);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_or_0_idx(__vector __bool int __a,
+ __vector __bool int __b) {
+ return __builtin_s390_vfaezf((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 8);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_or_0_idx(__vector unsigned int __a,
+ __vector unsigned int __b) {
return __builtin_s390_vfaezf(__a, __b, 8);
}
/*-- vec_find_any_ne_or_0_idx_cc --------------------------------------------*/
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_ne_or_0_idx_cc(vector signed char __a, vector signed char __b,
- int *__cc) {
- return (vector signed char)
- __builtin_s390_vfaezbs((vector unsigned char)__a,
- (vector unsigned char)__b, 8, __cc);
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_ne_or_0_idx_cc(__vector signed char __a,
+ __vector signed char __b, int *__cc) {
+ return (__vector signed char)
+ __builtin_s390_vfaezbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_or_0_idx_cc(vector bool char __a, vector bool char __b,
- int *__cc) {
- return __builtin_s390_vfaezbs((vector unsigned char)__a,
- (vector unsigned char)__b, 8, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_or_0_idx_cc(__vector __bool char __a,
+ __vector __bool char __b, int *__cc) {
+ return __builtin_s390_vfaezbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
- int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_or_0_idx_cc(__vector unsigned char __a,
+ __vector unsigned char __b, int *__cc) {
return __builtin_s390_vfaezbs(__a, __b, 8, __cc);
}
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_ne_or_0_idx_cc(vector signed short __a, vector signed short __b,
- int *__cc) {
- return (vector signed short)
- __builtin_s390_vfaezhs((vector unsigned short)__a,
- (vector unsigned short)__b, 8, __cc);
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_ne_or_0_idx_cc(__vector signed short __a,
+ __vector signed short __b, int *__cc) {
+ return (__vector signed short)
+ __builtin_s390_vfaezhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_or_0_idx_cc(vector bool short __a, vector bool short __b,
- int *__cc) {
- return __builtin_s390_vfaezhs((vector unsigned short)__a,
- (vector unsigned short)__b, 8, __cc);
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_or_0_idx_cc(__vector __bool short __a,
+ __vector __bool short __b, int *__cc) {
+ return __builtin_s390_vfaezhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_or_0_idx_cc(vector unsigned short __a,
- vector unsigned short __b, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_or_0_idx_cc(__vector unsigned short __a,
+ __vector unsigned short __b, int *__cc) {
return __builtin_s390_vfaezhs(__a, __b, 8, __cc);
}
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_ne_or_0_idx_cc(vector signed int __a, vector signed int __b,
- int *__cc) {
- return (vector signed int)
- __builtin_s390_vfaezfs((vector unsigned int)__a,
- (vector unsigned int)__b, 8, __cc);
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_ne_or_0_idx_cc(__vector signed int __a,
+ __vector signed int __b, int *__cc) {
+ return (__vector signed int)
+ __builtin_s390_vfaezfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_or_0_idx_cc(vector bool int __a, vector bool int __b,
- int *__cc) {
- return __builtin_s390_vfaezfs((vector unsigned int)__a,
- (vector unsigned int)__b, 8, __cc);
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_or_0_idx_cc(__vector __bool int __a,
+ __vector __bool int __b, int *__cc) {
+ return __builtin_s390_vfaezfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, 8, __cc);
}
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
- int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_or_0_idx_cc(__vector unsigned int __a,
+ __vector unsigned int __b, int *__cc) {
return __builtin_s390_vfaezfs(__a, __b, 8, __cc);
}
@@ -10711,63 +10847,63 @@ vec_find_any_ne_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
#if __ARCH__ >= 13
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector signed char __a, vector signed char __b,
- vector unsigned char __c, int *__cc) {
- return __builtin_s390_vstrsb((vector unsigned char)__a,
- (vector unsigned char)__b, __c, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector signed char __a, __vector signed char __b,
+ __vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector bool char __a, vector bool char __b,
- vector unsigned char __c, int *__cc) {
- return __builtin_s390_vstrsb((vector unsigned char)__a,
- (vector unsigned char)__b, __c, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector __bool char __a, __vector __bool char __b,
+ __vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector unsigned char __a, vector unsigned char __b,
- vector unsigned char __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector unsigned char __a, __vector unsigned char __b,
+ __vector unsigned char __c, int *__cc) {
return __builtin_s390_vstrsb(__a, __b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector signed short __a, vector signed short __b,
- vector unsigned char __c, int *__cc) {
- return __builtin_s390_vstrsh((vector unsigned short)__a,
- (vector unsigned short)__b, __c, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector signed short __a, __vector signed short __b,
+ __vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector bool short __a, vector bool short __b,
- vector unsigned char __c, int *__cc) {
- return __builtin_s390_vstrsh((vector unsigned short)__a,
- (vector unsigned short)__b, __c, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector __bool short __a, __vector __bool short __b,
+ __vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector unsigned short __a, vector unsigned short __b,
- vector unsigned char __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector unsigned short __a, __vector unsigned short __b,
+ __vector unsigned char __c, int *__cc) {
return __builtin_s390_vstrsh(__a, __b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector signed int __a, vector signed int __b,
- vector unsigned char __c, int *__cc) {
- return __builtin_s390_vstrsf((vector unsigned int)__a,
- (vector unsigned int)__b, __c, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector signed int __a, __vector signed int __b,
+ __vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsf((__vector unsigned int)__a,
+ (__vector unsigned int)__b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector bool int __a, vector bool int __b,
- vector unsigned char __c, int *__cc) {
- return __builtin_s390_vstrsf((vector unsigned int)__a,
- (vector unsigned int)__b, __c, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector __bool int __a, __vector __bool int __b,
+ __vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrsf((__vector unsigned int)__a,
+ (__vector unsigned int)__b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector unsigned int __a, vector unsigned int __b,
- vector unsigned char __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector unsigned int __a, __vector unsigned int __b,
+ __vector unsigned char __c, int *__cc) {
return __builtin_s390_vstrsf(__a, __b, __c, __cc);
}
@@ -10777,72 +10913,72 @@ vec_search_string_cc(vector unsigned int __a, vector unsigned int __b,
#if __ARCH__ >= 13
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector signed char __a,
- vector signed char __b,
- vector unsigned char __c, int *__cc) {
- return __builtin_s390_vstrszb((vector unsigned char)__a,
- (vector unsigned char)__b, __c, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector signed char __a,
+ __vector signed char __b,
+ __vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector bool char __a,
- vector bool char __b,
- vector unsigned char __c, int *__cc) {
- return __builtin_s390_vstrszb((vector unsigned char)__a,
- (vector unsigned char)__b, __c, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector __bool char __a,
+ __vector __bool char __b,
+ __vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszb((__vector unsigned char)__a,
+ (__vector unsigned char)__b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector unsigned char __a,
- vector unsigned char __b,
- vector unsigned char __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector unsigned char __a,
+ __vector unsigned char __b,
+ __vector unsigned char __c, int *__cc) {
return __builtin_s390_vstrszb(__a, __b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector signed short __a,
- vector signed short __b,
- vector unsigned char __c, int *__cc) {
- return __builtin_s390_vstrszh((vector unsigned short)__a,
- (vector unsigned short)__b, __c, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector signed short __a,
+ __vector signed short __b,
+ __vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector bool short __a,
- vector bool short __b,
- vector unsigned char __c, int *__cc) {
- return __builtin_s390_vstrszh((vector unsigned short)__a,
- (vector unsigned short)__b, __c, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector __bool short __a,
+ __vector __bool short __b,
+ __vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszh((__vector unsigned short)__a,
+ (__vector unsigned short)__b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector unsigned short __a,
- vector unsigned short __b,
- vector unsigned char __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector unsigned short __a,
+ __vector unsigned short __b,
+ __vector unsigned char __c, int *__cc) {
return __builtin_s390_vstrszh(__a, __b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector signed int __a,
- vector signed int __b,
- vector unsigned char __c, int *__cc) {
- return __builtin_s390_vstrszf((vector unsigned int)__a,
- (vector unsigned int)__b, __c, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector signed int __a,
+ __vector signed int __b,
+ __vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszf((__vector unsigned int)__a,
+ (__vector unsigned int)__b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector bool int __a,
- vector bool int __b,
- vector unsigned char __c, int *__cc) {
- return __builtin_s390_vstrszf((vector unsigned int)__a,
- (vector unsigned int)__b, __c, __cc);
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector __bool int __a,
+ __vector __bool int __b,
+ __vector unsigned char __c, int *__cc) {
+ return __builtin_s390_vstrszf((__vector unsigned int)__a,
+ (__vector unsigned int)__b, __c, __cc);
}
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector unsigned int __a,
- vector unsigned int __b,
- vector unsigned char __c, int *__cc) {
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector unsigned int __a,
+ __vector unsigned int __b,
+ __vector unsigned char __c, int *__cc) {
return __builtin_s390_vstrszf(__a, __b, __c, __cc);
}
diff --git a/clang/lib/Headers/wasm_simd128.h b/clang/lib/Headers/wasm_simd128.h
new file mode 100644
index 000000000000..b78123834b64
--- /dev/null
+++ b/clang/lib/Headers/wasm_simd128.h
@@ -0,0 +1,1133 @@
+/*===---- wasm_simd128.h - WebAssembly portable SIMD intrinsics ------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __WASM_SIMD128_H
+#define __WASM_SIMD128_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+// User-facing type
+typedef int32_t v128_t __attribute__((__vector_size__(16), __aligned__(16)));
+
+// Internal types determined by clang builtin definitions
+typedef int32_t __v128_u __attribute__((__vector_size__(16), __aligned__(1)));
+typedef char __i8x16 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef signed char __s8x16
+ __attribute__((__vector_size__(16), __aligned__(16)));
+typedef unsigned char __u8x16
+ __attribute__((__vector_size__(16), __aligned__(16)));
+typedef short __i16x8 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef unsigned short __u16x8
+ __attribute__((__vector_size__(16), __aligned__(16)));
+typedef int __i32x4 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef unsigned int __u32x4
+ __attribute__((__vector_size__(16), __aligned__(16)));
+typedef long long __i64x2 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef unsigned long long __u64x2
+ __attribute__((__vector_size__(16), __aligned__(16)));
+typedef float __f32x4 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef double __f64x2 __attribute__((__vector_size__(16), __aligned__(16)));
+
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("simd128"), \
+ __min_vector_width__(128)))
+
+#define __REQUIRE_CONSTANT(e) \
+ _Static_assert(__builtin_constant_p(e), "Expected constant")
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load(const void *__mem) {
+ // UB-free unaligned access copied from xmmintrin.h
+ struct __wasm_v128_load_struct {
+ __v128_u __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((const struct __wasm_v128_load_struct *)__mem)->__v;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_v8x16_load_splat(const void *__mem) {
+ struct __wasm_v8x16_load_splat_struct {
+ uint8_t __v;
+ } __attribute__((__packed__, __may_alias__));
+ uint8_t __v = ((const struct __wasm_v8x16_load_splat_struct *)__mem)->__v;
+ return (v128_t)(__u8x16){__v, __v, __v, __v, __v, __v, __v, __v,
+ __v, __v, __v, __v, __v, __v, __v, __v};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_v16x8_load_splat(const void *__mem) {
+ struct __wasm_v16x8_load_splat_struct {
+ uint16_t __v;
+ } __attribute__((__packed__, __may_alias__));
+ uint16_t __v = ((const struct __wasm_v16x8_load_splat_struct *)__mem)->__v;
+ return (v128_t)(__u16x8){__v, __v, __v, __v, __v, __v, __v, __v};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_v32x4_load_splat(const void *__mem) {
+ struct __wasm_v32x4_load_splat_struct {
+ uint32_t __v;
+ } __attribute__((__packed__, __may_alias__));
+ uint32_t __v = ((const struct __wasm_v32x4_load_splat_struct *)__mem)->__v;
+ return (v128_t)(__u32x4){__v, __v, __v, __v};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_v64x2_load_splat(const void *__mem) {
+ struct __wasm_v64x2_load_splat_struct {
+ uint64_t __v;
+ } __attribute__((__packed__, __may_alias__));
+ uint64_t __v = ((const struct __wasm_v64x2_load_splat_struct *)__mem)->__v;
+ return (v128_t)(__u64x2){__v, __v};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_load_8x8(const void *__mem) {
+ typedef int8_t __i8x8 __attribute__((__vector_size__(8), __aligned__(8)));
+ struct __wasm_i16x8_load_8x8_struct {
+ __i8x8 __v;
+ } __attribute__((__packed__, __may_alias__));
+ __i8x8 __v = ((const struct __wasm_i16x8_load_8x8_struct *)__mem)->__v;
+ return (v128_t) __builtin_convertvector(__v, __i16x8);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u16x8_load_8x8(const void *__mem) {
+ typedef uint8_t __u8x8 __attribute__((__vector_size__(8), __aligned__(8)));
+ struct __wasm_u16x8_load_8x8_struct {
+ __u8x8 __v;
+ } __attribute__((__packed__, __may_alias__));
+ __u8x8 __v = ((const struct __wasm_u16x8_load_8x8_struct *)__mem)->__v;
+ return (v128_t) __builtin_convertvector(__v, __u16x8);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_load_16x4(const void *__mem) {
+ typedef int16_t __i16x4 __attribute__((__vector_size__(8), __aligned__(8)));
+ struct __wasm_i32x4_load_16x4_struct {
+ __i16x4 __v;
+ } __attribute__((__packed__, __may_alias__));
+ __i16x4 __v = ((const struct __wasm_i32x4_load_16x4_struct *)__mem)->__v;
+ return (v128_t) __builtin_convertvector(__v, __i32x4);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u32x4_load_16x4(const void *__mem) {
+ typedef uint16_t __u16x4 __attribute__((__vector_size__(8), __aligned__(8)));
+ struct __wasm_u32x4_load_16x4_struct {
+ __u16x4 __v;
+ } __attribute__((__packed__, __may_alias__));
+ __u16x4 __v = ((const struct __wasm_u32x4_load_16x4_struct *)__mem)->__v;
+ return (v128_t) __builtin_convertvector(__v, __u32x4);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i64x2_load_32x2(const void *__mem) {
+ typedef int32_t __i32x2 __attribute__((__vector_size__(8), __aligned__(8)));
+ struct __wasm_i64x2_load_32x2_struct {
+ __i32x2 __v;
+ } __attribute__((__packed__, __may_alias__));
+ __i32x2 __v = ((const struct __wasm_i64x2_load_32x2_struct *)__mem)->__v;
+ return (v128_t) __builtin_convertvector(__v, __i64x2);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u64x2_load_32x2(const void *__mem) {
+ typedef uint32_t __u32x2 __attribute__((__vector_size__(8), __aligned__(8)));
+ struct __wasm_u64x2_load_32x2_struct {
+ __u32x2 __v;
+ } __attribute__((__packed__, __may_alias__));
+ __u32x2 __v = ((const struct __wasm_u64x2_load_32x2_struct *)__mem)->__v;
+ return (v128_t) __builtin_convertvector(__v, __u64x2);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store(void *__mem,
+ v128_t __a) {
+ // UB-free unaligned access copied from xmmintrin.h
+ struct __wasm_v128_store_struct {
+ __v128_u __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __wasm_v128_store_struct *)__mem)->__v = __a;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i8x16_make(int8_t __c0, int8_t __c1, int8_t __c2, int8_t __c3, int8_t __c4,
+ int8_t __c5, int8_t __c6, int8_t __c7, int8_t __c8, int8_t __c9,
+ int8_t __c10, int8_t __c11, int8_t __c12, int8_t __c13,
+ int8_t __c14, int8_t __c15) {
+ return (v128_t)(__i8x16){__c0, __c1, __c2, __c3, __c4, __c5,
+ __c6, __c7, __c8, __c9, __c10, __c11,
+ __c12, __c13, __c14, __c15};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_make(int16_t __c0, int16_t __c1, int16_t __c2, int16_t __c3,
+ int16_t __c4, int16_t __c5, int16_t __c6, int16_t __c7) {
+ return (v128_t)(__i16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_make(int32_t __c0,
+ int32_t __c1,
+ int32_t __c2,
+ int32_t __c3) {
+ return (v128_t)(__i32x4){__c0, __c1, __c2, __c3};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_make(float __c0,
+ float __c1,
+ float __c2,
+ float __c3) {
+ return (v128_t)(__f32x4){__c0, __c1, __c2, __c3};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_make(int64_t __c0,
+ int64_t __c1) {
+ return (v128_t)(__i64x2){__c0, __c1};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_make(double __c0,
+ double __c1) {
+ return (v128_t)(__f64x2){__c0, __c1};
+}
+
+#define wasm_i8x16_const(__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7, __c8, \
+ __c9, __c10, __c11, __c12, __c13, __c14, __c15) \
+ __extension__({ \
+ __REQUIRE_CONSTANT(__c0); \
+ __REQUIRE_CONSTANT(__c1); \
+ __REQUIRE_CONSTANT(__c2); \
+ __REQUIRE_CONSTANT(__c3); \
+ __REQUIRE_CONSTANT(__c4); \
+ __REQUIRE_CONSTANT(__c5); \
+ __REQUIRE_CONSTANT(__c6); \
+ __REQUIRE_CONSTANT(__c7); \
+ __REQUIRE_CONSTANT(__c8); \
+ __REQUIRE_CONSTANT(__c9); \
+ __REQUIRE_CONSTANT(__c10); \
+ __REQUIRE_CONSTANT(__c11); \
+ __REQUIRE_CONSTANT(__c12); \
+ __REQUIRE_CONSTANT(__c13); \
+ __REQUIRE_CONSTANT(__c14); \
+ __REQUIRE_CONSTANT(__c15); \
+ (v128_t)(__i8x16){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7, \
+ __c8, __c9, __c10, __c11, __c12, __c13, __c14, __c15}; \
+ })
+
+#define wasm_i16x8_const(__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7) \
+ __extension__({ \
+ __REQUIRE_CONSTANT(__c0); \
+ __REQUIRE_CONSTANT(__c1); \
+ __REQUIRE_CONSTANT(__c2); \
+ __REQUIRE_CONSTANT(__c3); \
+ __REQUIRE_CONSTANT(__c4); \
+ __REQUIRE_CONSTANT(__c5); \
+ __REQUIRE_CONSTANT(__c6); \
+ __REQUIRE_CONSTANT(__c7); \
+ (v128_t)(__i16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7}; \
+ })
+
+#define wasm_i32x4_const(__c0, __c1, __c2, __c3) \
+ __extension__({ \
+ __REQUIRE_CONSTANT(__c0); \
+ __REQUIRE_CONSTANT(__c1); \
+ __REQUIRE_CONSTANT(__c2); \
+ __REQUIRE_CONSTANT(__c3); \
+ (v128_t)(__i32x4){__c0, __c1, __c2, __c3}; \
+ })
+
+#define wasm_f32x4_const(__c0, __c1, __c2, __c3) \
+ __extension__({ \
+ __REQUIRE_CONSTANT(__c0); \
+ __REQUIRE_CONSTANT(__c1); \
+ __REQUIRE_CONSTANT(__c2); \
+ __REQUIRE_CONSTANT(__c3); \
+ (v128_t)(__f32x4){__c0, __c1, __c2, __c3}; \
+ })
+
+#define wasm_i64x2_const(__c0, __c1) \
+ __extension__({ \
+ __REQUIRE_CONSTANT(__c0); \
+ __REQUIRE_CONSTANT(__c1); \
+ (v128_t)(__i64x2){__c0, __c1}; \
+ })
+
+#define wasm_f64x2_const(__c0, __c1) \
+ __extension__({ \
+ __REQUIRE_CONSTANT(__c0); \
+ __REQUIRE_CONSTANT(__c1); \
+ (v128_t)(__f64x2){__c0, __c1}; \
+ })
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_splat(int8_t __a) {
+ return (v128_t)(__i8x16){__a, __a, __a, __a, __a, __a, __a, __a,
+ __a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+#define wasm_i8x16_extract_lane(__a, __i) \
+ (__builtin_wasm_extract_lane_s_i8x16((__i8x16)(__a), __i))
+
+#define wasm_u8x16_extract_lane(__a, __i) \
+ (__builtin_wasm_extract_lane_u_i8x16((__i8x16)(__a), __i))
+
+#define wasm_i8x16_replace_lane(__a, __i, __b) \
+ ((v128_t)__builtin_wasm_replace_lane_i8x16((__i8x16)(__a), __i, __b))
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_splat(int16_t __a) {
+ return (v128_t)(__i16x8){__a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+#define wasm_i16x8_extract_lane(__a, __i) \
+ (__builtin_wasm_extract_lane_s_i16x8((__i16x8)(__a), __i))
+
+#define wasm_u16x8_extract_lane(__a, __i) \
+ (__builtin_wasm_extract_lane_u_i16x8((__i16x8)(__a), __i))
+
+#define wasm_i16x8_replace_lane(__a, __i, __b) \
+ ((v128_t)__builtin_wasm_replace_lane_i16x8((__i16x8)(__a), __i, __b))
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_splat(int32_t __a) {
+ return (v128_t)(__i32x4){__a, __a, __a, __a};
+}
+
+#define wasm_i32x4_extract_lane(__a, __i) \
+ (__builtin_wasm_extract_lane_i32x4((__i32x4)(__a), __i))
+
+#define wasm_i32x4_replace_lane(__a, __i, __b) \
+ ((v128_t)__builtin_wasm_replace_lane_i32x4((__i32x4)(__a), __i, __b))
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_splat(int64_t __a) {
+ return (v128_t)(__i64x2){__a, __a};
+}
+
+#define wasm_i64x2_extract_lane(__a, __i) \
+ (__builtin_wasm_extract_lane_i64x2((__i64x2)(__a), __i))
+
+#define wasm_i64x2_replace_lane(__a, __i, __b) \
+ ((v128_t)__builtin_wasm_replace_lane_i64x2((__i64x2)(__a), __i, __b))
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_splat(float __a) {
+ return (v128_t)(__f32x4){__a, __a, __a, __a};
+}
+
+#define wasm_f32x4_extract_lane(__a, __i) \
+ (__builtin_wasm_extract_lane_f32x4((__f32x4)(__a), __i))
+
+#define wasm_f32x4_replace_lane(__a, __i, __b) \
+ ((v128_t)__builtin_wasm_replace_lane_f32x4((__f32x4)(__a), __i, __b))
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_splat(double __a) {
+ return (v128_t)(__f64x2){__a, __a};
+}
+
+#define wasm_f64x2_extract_lane(__a, __i) \
+ (__builtin_wasm_extract_lane_f64x2((__f64x2)(__a), __i))
+
+#define wasm_f64x2_replace_lane(__a, __i, __b) \
+ ((v128_t)__builtin_wasm_replace_lane_f64x2((__f64x2)(__a), __i, __b))
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_eq(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__s8x16)__a == (__s8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_ne(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__s8x16)__a != (__s8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_lt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__s8x16)__a < (__s8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_lt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u8x16)__a < (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_gt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__s8x16)__a > (__s8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_gt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u8x16)__a > (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_le(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__s8x16)__a <= (__s8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_le(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u8x16)__a <= (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_ge(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__s8x16)__a >= (__s8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_ge(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u8x16)__a >= (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_eq(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i16x8)__a == (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_ne(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u16x8)__a != (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_lt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i16x8)__a < (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_lt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u16x8)__a < (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_gt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i16x8)__a > (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_gt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u16x8)__a > (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_le(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i16x8)__a <= (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_le(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u16x8)__a <= (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_ge(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i16x8)__a >= (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_ge(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u16x8)__a >= (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_eq(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i32x4)__a == (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_ne(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i32x4)__a != (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_lt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i32x4)__a < (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_lt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u32x4)__a < (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_gt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i32x4)__a > (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_gt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u32x4)__a > (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_le(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i32x4)__a <= (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_le(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u32x4)__a <= (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_ge(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i32x4)__a >= (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_ge(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u32x4)__a >= (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_eq(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f32x4)__a == (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_ne(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f32x4)__a != (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_lt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f32x4)__a < (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_gt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f32x4)__a > (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_le(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f32x4)__a <= (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_ge(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f32x4)__a >= (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_eq(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f64x2)__a == (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_ne(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f64x2)__a != (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_lt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f64x2)__a < (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_gt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f64x2)__a > (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_le(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f64x2)__a <= (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_ge(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f64x2)__a >= (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_not(v128_t __a) {
+ return ~__a;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_and(v128_t __a,
+ v128_t __b) {
+ return __a & __b;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_or(v128_t __a,
+ v128_t __b) {
+ return __a | __b;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_xor(v128_t __a,
+ v128_t __b) {
+ return __a ^ __b;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_andnot(v128_t __a,
+ v128_t __b) {
+ return __a & ~__b;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_bitselect(v128_t __a,
+ v128_t __b,
+ v128_t __mask) {
+ return (v128_t)__builtin_wasm_bitselect((__i32x4)__a, (__i32x4)__b,
+ (__i32x4)__mask);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_abs(v128_t __a) {
+ return (v128_t)__builtin_wasm_abs_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_neg(v128_t __a) {
+ return (v128_t)(-(__u8x16)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i8x16_any_true(v128_t __a) {
+ return __builtin_wasm_any_true_i8x16((__i8x16)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i8x16_all_true(v128_t __a) {
+ return __builtin_wasm_all_true_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shl(v128_t __a,
+ int32_t __b) {
+ return (v128_t)((__i8x16)__a << __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shr(v128_t __a,
+ int32_t __b) {
+ return (v128_t)((__s8x16)__a >> __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_shr(v128_t __a,
+ int32_t __b) {
+ return (v128_t)((__u8x16)__a >> __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_add(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u8x16)__a + (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i8x16_add_saturate(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_add_saturate_s_i8x16((__i8x16)__a,
+ (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u8x16_add_saturate(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_add_saturate_u_i8x16((__i8x16)__a,
+ (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_sub(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u8x16)__a - (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i8x16_sub_saturate(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_sub_saturate_s_i8x16((__i8x16)__a,
+ (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u8x16_sub_saturate(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_sub_saturate_u_i8x16((__i8x16)__a,
+ (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_min(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_min_s_i8x16((__i8x16)__a, (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_min(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_min_u_i8x16((__i8x16)__a, (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_max(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_max_s_i8x16((__i8x16)__a, (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_max(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_max_u_i8x16((__i8x16)__a, (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_avgr(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_avgr_u_i8x16((__i8x16)__a, (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_abs(v128_t __a) {
+ return (v128_t)__builtin_wasm_abs_i16x8((__i16x8)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_neg(v128_t __a) {
+ return (v128_t)(-(__u16x8)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i16x8_any_true(v128_t __a) {
+ return __builtin_wasm_any_true_i16x8((__i16x8)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i16x8_all_true(v128_t __a) {
+ return __builtin_wasm_all_true_i16x8((__i16x8)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shl(v128_t __a,
+ int32_t __b) {
+ return (v128_t)((__i16x8)__a << __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shr(v128_t __a,
+ int32_t __b) {
+ return (v128_t)((__i16x8)__a >> __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_shr(v128_t __a,
+ int32_t __b) {
+ return (v128_t)((__u16x8)__a >> __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_add(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u16x8)__a + (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_add_saturate(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_add_saturate_s_i16x8((__i16x8)__a,
+ (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u16x8_add_saturate(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_add_saturate_u_i16x8((__i16x8)__a,
+ (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_sub(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i16x8)__a - (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_sub_saturate(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_sub_saturate_s_i16x8((__i16x8)__a,
+ (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u16x8_sub_saturate(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_sub_saturate_u_i16x8((__i16x8)__a,
+ (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_mul(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u16x8)__a * (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_min(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_min_s_i16x8((__i16x8)__a, (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_min(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_min_u_i16x8((__i16x8)__a, (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_max(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_max_s_i16x8((__i16x8)__a, (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_max(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_max_u_i16x8((__i16x8)__a, (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_avgr(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_avgr_u_i16x8((__i16x8)__a, (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_abs(v128_t __a) {
+ return (v128_t)__builtin_wasm_abs_i32x4((__i32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_neg(v128_t __a) {
+ return (v128_t)(-(__u32x4)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i32x4_any_true(v128_t __a) {
+ return __builtin_wasm_any_true_i32x4((__i32x4)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i32x4_all_true(v128_t __a) {
+ return __builtin_wasm_all_true_i32x4((__i32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shl(v128_t __a,
+ int32_t __b) {
+ return (v128_t)((__i32x4)__a << __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shr(v128_t __a,
+ int32_t __b) {
+ return (v128_t)((__i32x4)__a >> __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_shr(v128_t __a,
+ int32_t __b) {
+ return (v128_t)((__u32x4)__a >> __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_add(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u32x4)__a + (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_sub(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u32x4)__a - (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_mul(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u32x4)__a * (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_min(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_min_s_i32x4((__i32x4)__a, (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_min(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_min_u_i32x4((__i32x4)__a, (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_max(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_max_s_i32x4((__i32x4)__a, (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_max(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_max_u_i32x4((__i32x4)__a, (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_neg(v128_t __a) {
+ return (v128_t)(-(__u64x2)__a);
+}
+
+#ifdef __wasm_unimplemented_simd128__
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i64x2_any_true(v128_t __a) {
+ return __builtin_wasm_any_true_i64x2((__i64x2)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i64x2_all_true(v128_t __a) {
+ return __builtin_wasm_all_true_i64x2((__i64x2)__a);
+}
+
+#endif // __wasm_unimplemented_simd128__
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shl(v128_t __a,
+ int32_t __b) {
+ return (v128_t)((__i64x2)__a << (int64_t)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shr(v128_t __a,
+ int32_t __b) {
+ return (v128_t)((__i64x2)__a >> (int64_t)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_shr(v128_t __a,
+ int32_t __b) {
+ return (v128_t)((__u64x2)__a >> (int64_t)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_add(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u64x2)__a + (__u64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_sub(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u64x2)__a - (__u64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_mul(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__u64x2)__a * (__u64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_abs(v128_t __a) {
+ return (v128_t)__builtin_wasm_abs_f32x4((__f32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_neg(v128_t __a) {
+ return (v128_t)(-(__f32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_sqrt(v128_t __a) {
+ return (v128_t)__builtin_wasm_sqrt_f32x4((__f32x4)__a);
+}
+
+#ifdef __wasm_unimplemented_simd128__
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_qfma(v128_t __a,
+ v128_t __b,
+ v128_t __c) {
+ return (v128_t)__builtin_wasm_qfma_f32x4((__f32x4)__a, (__f32x4)__b,
+ (__f32x4)__c);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_qfms(v128_t __a,
+ v128_t __b,
+ v128_t __c) {
+ return (v128_t)__builtin_wasm_qfms_f32x4((__f32x4)__a, (__f32x4)__b,
+ (__f32x4)__c);
+}
+
+#endif // __wasm_unimplemented_simd128__
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_add(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f32x4)__a + (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_sub(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f32x4)__a - (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_mul(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f32x4)__a * (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_div(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f32x4)__a / (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_min(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_min_f32x4((__f32x4)__a, (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_max(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_max_f32x4((__f32x4)__a, (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_pmin(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_pmin_f32x4((__f32x4)__a, (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_pmax(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_pmax_f32x4((__f32x4)__a, (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_abs(v128_t __a) {
+ return (v128_t)__builtin_wasm_abs_f64x2((__f64x2)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_neg(v128_t __a) {
+ return (v128_t)(-(__f64x2)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_sqrt(v128_t __a) {
+ return (v128_t)__builtin_wasm_sqrt_f64x2((__f64x2)__a);
+}
+
+#ifdef __wasm_unimplemented_simd128__
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_qfma(v128_t __a,
+ v128_t __b,
+ v128_t __c) {
+ return (v128_t)__builtin_wasm_qfma_f64x2((__f64x2)__a, (__f64x2)__b,
+ (__f64x2)__c);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_qfms(v128_t __a,
+ v128_t __b,
+ v128_t __c) {
+ return (v128_t)__builtin_wasm_qfms_f64x2((__f64x2)__a, (__f64x2)__b,
+ (__f64x2)__c);
+}
+
+#endif // __wasm_unimplemented_simd128__
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_add(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f64x2)__a + (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_sub(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f64x2)__a - (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_mul(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f64x2)__a * (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_div(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__f64x2)__a / (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_min(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_min_f64x2((__f64x2)__a, (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_max(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_max_f64x2((__f64x2)__a, (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_pmin(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_pmin_f64x2((__f64x2)__a, (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_pmax(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_pmax_f64x2((__f64x2)__a, (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_trunc_saturate_f32x4(v128_t __a) {
+ return (v128_t)__builtin_wasm_trunc_saturate_s_i32x4_f32x4((__f32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u32x4_trunc_saturate_f32x4(v128_t __a) {
+ return (v128_t)__builtin_wasm_trunc_saturate_u_i32x4_f32x4((__f32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_f32x4_convert_i32x4(v128_t __a) {
+ return (v128_t) __builtin_convertvector((__i32x4)__a, __f32x4);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_f32x4_convert_u32x4(v128_t __a) {
+ return (v128_t) __builtin_convertvector((__u32x4)__a, __f32x4);
+}
+
+#define wasm_v8x16_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
+ __c7, __c8, __c9, __c10, __c11, __c12, __c13, \
+ __c14, __c15) \
+ ((v128_t)__builtin_wasm_shuffle_v8x16( \
+ (__i8x16)(__a), (__i8x16)(__b), __c0, __c1, __c2, __c3, __c4, __c5, \
+ __c6, __c7, __c8, __c9, __c10, __c11, __c12, __c13, __c14, __c15))
+
+#define wasm_v16x8_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
+ __c7) \
+ ((v128_t)__builtin_wasm_shuffle_v8x16( \
+ (__i8x16)(__a), (__i8x16)(__b), (__c0)*2, (__c0)*2 + 1, (__c1)*2, \
+ (__c1)*2 + 1, (__c2)*2, (__c2)*2 + 1, (__c3)*2, (__c3)*2 + 1, (__c4)*2, \
+ (__c4)*2 + 1, (__c5)*2, (__c5)*2 + 1, (__c6)*2, (__c6)*2 + 1, (__c7)*2, \
+ (__c7)*2 + 1))
+
+#define wasm_v32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3) \
+ ((v128_t)__builtin_wasm_shuffle_v8x16( \
+ (__i8x16)(__a), (__i8x16)(__b), (__c0)*4, (__c0)*4 + 1, (__c0)*4 + 2, \
+ (__c0)*4 + 3, (__c1)*4, (__c1)*4 + 1, (__c1)*4 + 2, (__c1)*4 + 3, \
+ (__c2)*4, (__c2)*4 + 1, (__c2)*4 + 2, (__c2)*4 + 3, (__c3)*4, \
+ (__c3)*4 + 1, (__c3)*4 + 2, (__c3)*4 + 3))
+
+#define wasm_v64x2_shuffle(__a, __b, __c0, __c1) \
+ ((v128_t)__builtin_wasm_shuffle_v8x16( \
+ (__i8x16)(__a), (__i8x16)(__b), (__c0)*8, (__c0)*8 + 1, (__c0)*8 + 2, \
+ (__c0)*8 + 3, (__c0)*8 + 4, (__c0)*8 + 5, (__c0)*8 + 6, (__c0)*8 + 7, \
+ (__c1)*8, (__c1)*8 + 1, (__c1)*8 + 2, (__c1)*8 + 3, (__c1)*8 + 4, \
+ (__c1)*8 + 5, (__c1)*8 + 6, (__c1)*8 + 7))
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v8x16_swizzle(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_swizzle_v8x16((__i8x16)__a, (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i8x16_narrow_i16x8(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_narrow_s_i8x16_i16x8((__i16x8)__a,
+ (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u8x16_narrow_i16x8(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_narrow_u_i8x16_i16x8((__i16x8)__a,
+ (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_narrow_i32x4(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_narrow_s_i16x8_i32x4((__i32x4)__a,
+ (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u16x8_narrow_i32x4(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_narrow_u_i16x8_i32x4((__i32x4)__a,
+ (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_widen_low_i8x16(v128_t __a) {
+ return (v128_t)__builtin_wasm_widen_low_s_i16x8_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_widen_high_i8x16(v128_t __a) {
+ return (v128_t)__builtin_wasm_widen_high_s_i16x8_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_widen_low_u8x16(v128_t __a) {
+ return (v128_t)__builtin_wasm_widen_low_u_i16x8_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_widen_high_u8x16(v128_t __a) {
+ return (v128_t)__builtin_wasm_widen_high_u_i16x8_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_widen_low_i16x8(v128_t __a) {
+ return (v128_t)__builtin_wasm_widen_low_s_i32x4_i16x8((__i16x8)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_widen_high_i16x8(v128_t __a) {
+ return (v128_t)__builtin_wasm_widen_high_s_i32x4_i16x8((__i16x8)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_widen_low_u16x8(v128_t __a) {
+ return (v128_t)__builtin_wasm_widen_low_u_i32x4_i16x8((__i16x8)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_widen_high_u16x8(v128_t __a) {
+ return (v128_t)__builtin_wasm_widen_high_u_i32x4_i16x8((__i16x8)__a);
+}
+
+// Undefine helper macros
+#undef __DEFAULT_FN_ATTRS
+
+#endif // __WASM_SIMD128_H
diff --git a/clang/lib/Headers/x86intrin.h b/clang/lib/Headers/x86intrin.h
index a8b36622d410..768d0e56ab05 100644
--- a/clang/lib/Headers/x86intrin.h
+++ b/clang/lib/Headers/x86intrin.h
@@ -14,39 +14,48 @@
#include <immintrin.h>
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__3dNOW__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__3dNOW__)
#include <mm3dnow.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PRFCHW__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__PRFCHW__)
#include <prfchwintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE4A__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__SSE4A__)
#include <ammintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FMA4__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__FMA4__)
#include <fma4intrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XOP__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__XOP__)
#include <xopintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__TBM__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__TBM__)
#include <tbmintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__LWP__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__LWP__)
#include <lwpintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__MWAITX__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__MWAITX__)
#include <mwaitxintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLZERO__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__CLZERO__)
#include <clzerointrin.h>
#endif
diff --git a/clang/lib/Headers/xmmintrin.h b/clang/lib/Headers/xmmintrin.h
index 0e61eab44aeb..f4686691c7ed 100644
--- a/clang/lib/Headers/xmmintrin.h
+++ b/clang/lib/Headers/xmmintrin.h
@@ -2181,7 +2181,7 @@ void _mm_sfence(void);
/// 3: Bits [63:48] are copied to the destination.
/// \returns A 16-bit integer containing the extracted 16 bits of packed data.
#define _mm_extract_pi16(a, n) \
- (int)__builtin_ia32_vec_ext_v4hi((__m64)a, (int)n)
+ (int)__builtin_ia32_vec_ext_v4hi((__v4hi)a, (int)n)
/// Copies data from the 64-bit vector of [4 x i16] to the destination,
/// and inserts the lower 16-bits of an integer operand at the 16-bit offset
@@ -2212,7 +2212,7 @@ void _mm_sfence(void);
/// \returns A 64-bit integer vector containing the copied packed data from the
/// operands.
#define _mm_insert_pi16(a, d, n) \
- (__m64)__builtin_ia32_vec_set_v4hi((__m64)a, (int)d, (int)n)
+ (__m64)__builtin_ia32_vec_set_v4hi((__v4hi)a, (int)d, (int)n)
/// Compares each of the corresponding packed 16-bit integer values of
/// the 64-bit integer vectors, and writes the greater value to the
@@ -2931,31 +2931,31 @@ _mm_movemask_ps(__m128 __a)
#define _MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
-#define _MM_EXCEPT_INVALID (0x0001)
-#define _MM_EXCEPT_DENORM (0x0002)
-#define _MM_EXCEPT_DIV_ZERO (0x0004)
-#define _MM_EXCEPT_OVERFLOW (0x0008)
-#define _MM_EXCEPT_UNDERFLOW (0x0010)
-#define _MM_EXCEPT_INEXACT (0x0020)
-#define _MM_EXCEPT_MASK (0x003f)
-
-#define _MM_MASK_INVALID (0x0080)
-#define _MM_MASK_DENORM (0x0100)
-#define _MM_MASK_DIV_ZERO (0x0200)
-#define _MM_MASK_OVERFLOW (0x0400)
-#define _MM_MASK_UNDERFLOW (0x0800)
-#define _MM_MASK_INEXACT (0x1000)
-#define _MM_MASK_MASK (0x1f80)
-
-#define _MM_ROUND_NEAREST (0x0000)
-#define _MM_ROUND_DOWN (0x2000)
-#define _MM_ROUND_UP (0x4000)
-#define _MM_ROUND_TOWARD_ZERO (0x6000)
-#define _MM_ROUND_MASK (0x6000)
-
-#define _MM_FLUSH_ZERO_MASK (0x8000)
-#define _MM_FLUSH_ZERO_ON (0x8000)
-#define _MM_FLUSH_ZERO_OFF (0x0000)
+#define _MM_EXCEPT_INVALID (0x0001U)
+#define _MM_EXCEPT_DENORM (0x0002U)
+#define _MM_EXCEPT_DIV_ZERO (0x0004U)
+#define _MM_EXCEPT_OVERFLOW (0x0008U)
+#define _MM_EXCEPT_UNDERFLOW (0x0010U)
+#define _MM_EXCEPT_INEXACT (0x0020U)
+#define _MM_EXCEPT_MASK (0x003fU)
+
+#define _MM_MASK_INVALID (0x0080U)
+#define _MM_MASK_DENORM (0x0100U)
+#define _MM_MASK_DIV_ZERO (0x0200U)
+#define _MM_MASK_OVERFLOW (0x0400U)
+#define _MM_MASK_UNDERFLOW (0x0800U)
+#define _MM_MASK_INEXACT (0x1000U)
+#define _MM_MASK_MASK (0x1f80U)
+
+#define _MM_ROUND_NEAREST (0x0000U)
+#define _MM_ROUND_DOWN (0x2000U)
+#define _MM_ROUND_UP (0x4000U)
+#define _MM_ROUND_TOWARD_ZERO (0x6000U)
+#define _MM_ROUND_MASK (0x6000U)
+
+#define _MM_FLUSH_ZERO_MASK (0x8000U)
+#define _MM_FLUSH_ZERO_ON (0x8000U)
+#define _MM_FLUSH_ZERO_OFF (0x0000U)
#define _MM_GET_EXCEPTION_MASK() (_mm_getcsr() & _MM_MASK_MASK)
#define _MM_GET_EXCEPTION_STATE() (_mm_getcsr() & _MM_EXCEPT_MASK)
diff --git a/clang/lib/Index/CommentToXML.cpp b/clang/lib/Index/CommentToXML.cpp
index ce6f9e2b13bd..1cbd14cd326c 100644
--- a/clang/lib/Index/CommentToXML.cpp
+++ b/clang/lib/Index/CommentToXML.cpp
@@ -11,6 +11,8 @@
#include "clang/AST/Attr.h"
#include "clang/AST/Comment.h"
#include "clang/AST/CommentVisitor.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
#include "clang/Index/USRGeneration.h"
#include "llvm/ADT/StringExtras.h"
diff --git a/clang/lib/Index/FileIndexRecord.cpp b/clang/lib/Index/FileIndexRecord.cpp
index c9dcb0f5377d..753bdf2ce21d 100644
--- a/clang/lib/Index/FileIndexRecord.cpp
+++ b/clang/lib/Index/FileIndexRecord.cpp
@@ -10,6 +10,7 @@
#include "FileIndexRecord.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Path.h"
diff --git a/clang/lib/Index/IndexBody.cpp b/clang/lib/Index/IndexBody.cpp
index 07a94f30c883..01cf559d7057 100644
--- a/clang/lib/Index/IndexBody.cpp
+++ b/clang/lib/Index/IndexBody.cpp
@@ -414,7 +414,7 @@ public:
auto visitSyntacticDesignatedInitExpr = [&](DesignatedInitExpr *E) -> bool {
for (DesignatedInitExpr::Designator &D : llvm::reverse(E->designators())) {
- if (D.isFieldDesignator())
+ if (D.isFieldDesignator() && D.getField())
return IndexCtx.handleReference(D.getField(), D.getFieldLoc(),
Parent, ParentDC, SymbolRoleSet(),
{}, E);
diff --git a/clang/lib/Index/IndexDecl.cpp b/clang/lib/Index/IndexDecl.cpp
index c59b1372e399..2ba323e63575 100644
--- a/clang/lib/Index/IndexDecl.cpp
+++ b/clang/lib/Index/IndexDecl.cpp
@@ -80,7 +80,7 @@ public:
!MD->isSynthesizedAccessorStub();
}
-
+
void handleDeclarator(const DeclaratorDecl *D,
const NamedDecl *Parent = nullptr,
bool isIBType = false) {
@@ -90,6 +90,12 @@ public:
Parent->getLexicalDeclContext(),
/*isBase=*/false, isIBType);
IndexCtx.indexNestedNameSpecifierLoc(D->getQualifierLoc(), Parent);
+ auto IndexDefaultParmeterArgument = [&](const ParmVarDecl *Parm,
+ const NamedDecl *Parent) {
+ if (Parm->hasDefaultArg() && !Parm->hasUninstantiatedDefaultArg() &&
+ !Parm->hasUnparsedDefaultArg())
+ IndexCtx.indexBody(Parm->getDefaultArg(), Parent);
+ };
if (IndexCtx.shouldIndexFunctionLocalSymbols()) {
if (const ParmVarDecl *Parm = dyn_cast<ParmVarDecl>(D)) {
auto *DC = Parm->getDeclContext();
@@ -106,7 +112,8 @@ public:
} else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
if (IndexCtx.shouldIndexParametersInDeclarations() ||
FD->isThisDeclarationADefinition()) {
- for (auto PI : FD->parameters()) {
+ for (const auto *PI : FD->parameters()) {
+ IndexDefaultParmeterArgument(PI, D);
IndexCtx.handleDecl(PI);
}
}
@@ -116,9 +123,7 @@ public:
if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
if (FD->isThisDeclarationADefinition()) {
for (const auto *PV : FD->parameters()) {
- if (PV->hasDefaultArg() && !PV->hasUninstantiatedDefaultArg() &&
- !PV->hasUnparsedDefaultArg())
- IndexCtx.indexBody(PV->getDefaultArg(), D);
+ IndexDefaultParmeterArgument(PV, D);
}
}
}
@@ -760,6 +765,9 @@ bool IndexingContext::indexTopLevelDecl(const Decl *D) {
if (isa<ObjCMethodDecl>(D))
return true; // Wait for the objc container.
+ if (IndexOpts.ShouldTraverseDecl && !IndexOpts.ShouldTraverseDecl(D))
+ return true; // skip
+
return indexDecl(D);
}
diff --git a/clang/lib/Index/IndexSymbol.cpp b/clang/lib/Index/IndexSymbol.cpp
index ae9134bf1182..0d2e557cdd36 100644
--- a/clang/lib/Index/IndexSymbol.cpp
+++ b/clang/lib/Index/IndexSymbol.cpp
@@ -357,6 +357,15 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
case Decl::VarTemplate:
llvm_unreachable("variables handled before");
break;
+ case Decl::TemplateTypeParm:
+ Info.Kind = SymbolKind::TemplateTypeParm;
+ break;
+ case Decl::TemplateTemplateParm:
+ Info.Kind = SymbolKind::TemplateTemplateParm;
+ break;
+ case Decl::NonTypeTemplateParm:
+ Info.Kind = SymbolKind::NonTypeTemplateParm;
+ break;
// Other decls get the 'unknown' kind.
default:
break;
@@ -517,6 +526,9 @@ StringRef index::getSymbolKindString(SymbolKind K) {
case SymbolKind::ConversionFunction: return "conversion-func";
case SymbolKind::Parameter: return "param";
case SymbolKind::Using: return "using";
+ case SymbolKind::TemplateTypeParm: return "template-type-param";
+ case SymbolKind::TemplateTemplateParm: return "template-template-param";
+ case SymbolKind::NonTypeTemplateParm: return "non-type-template-param";
}
llvm_unreachable("invalid symbol kind");
}
diff --git a/clang/lib/Index/IndexTypeSourceInfo.cpp b/clang/lib/Index/IndexTypeSourceInfo.cpp
index 959d5f1197fe..b9fc90040cfc 100644
--- a/clang/lib/Index/IndexTypeSourceInfo.cpp
+++ b/clang/lib/Index/IndexTypeSourceInfo.cpp
@@ -170,6 +170,11 @@ public:
return true;
}
+ bool VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
+ return IndexCtx.handleReference(TL.getDecl(), TL.getNameLoc(), Parent,
+ ParentDC, SymbolRoleSet(), Relations);
+ }
+
bool VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
const DependentNameType *DNT = TL.getTypePtr();
const NestedNameSpecifier *NNS = DNT->getQualifier();
diff --git a/clang/lib/Index/IndexingAction.cpp b/clang/lib/Index/IndexingAction.cpp
index 4f402135672c..e698c07133a9 100644
--- a/clang/lib/Index/IndexingAction.cpp
+++ b/clang/lib/Index/IndexingAction.cpp
@@ -131,6 +131,21 @@ std::unique_ptr<ASTConsumer> index::createIndexingASTConsumer(
ShouldSkipFunctionBody);
}
+std::unique_ptr<ASTConsumer> clang::index::createIndexingASTConsumer(
+ std::shared_ptr<IndexDataConsumer> DataConsumer,
+ const IndexingOptions &Opts, std::shared_ptr<Preprocessor> PP) {
+ std::function<bool(const Decl *)> ShouldSkipFunctionBody = [](const Decl *) {
+ return false;
+ };
+ if (Opts.ShouldTraverseDecl)
+ ShouldSkipFunctionBody =
+ [ShouldTraverseDecl(Opts.ShouldTraverseDecl)](const Decl *D) {
+ return !ShouldTraverseDecl(D);
+ };
+ return createIndexingASTConsumer(std::move(DataConsumer), Opts, std::move(PP),
+ std::move(ShouldSkipFunctionBody));
+}
+
std::unique_ptr<FrontendAction>
index::createIndexingAction(std::shared_ptr<IndexDataConsumer> DataConsumer,
const IndexingOptions &Opts) {
diff --git a/clang/lib/Index/IndexingContext.cpp b/clang/lib/Index/IndexingContext.cpp
index a7c37e8528d1..784a6008575b 100644
--- a/clang/lib/Index/IndexingContext.cpp
+++ b/clang/lib/Index/IndexingContext.cpp
@@ -169,6 +169,10 @@ bool IndexingContext::isTemplateImplicitInstantiation(const Decl *D) {
}
switch (TKind) {
case TSK_Undeclared:
+ // Instantiation may not have happened yet when we see a SpecializationDecl,
+ // e.g. when the type doesn't need to be complete, we still treat it as an
+ // instantiation as we'd like to keep the canonicalized result consistent.
+ return isa<ClassTemplateSpecializationDecl>(D);
case TSK_ExplicitSpecialization:
return false;
case TSK_ImplicitInstantiation:
@@ -206,7 +210,12 @@ getDeclContextForTemplateInstationPattern(const Decl *D) {
static const Decl *adjustTemplateImplicitInstantiation(const Decl *D) {
if (const ClassTemplateSpecializationDecl *
SD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
- return SD->getTemplateInstantiationPattern();
+ const auto *Template = SD->getTemplateInstantiationPattern();
+ if (Template)
+ return Template;
+ // Fallback to primary template if no instantiation is available yet (e.g.
+ // the type doesn't need to be complete).
+ return SD->getSpecializedTemplate()->getTemplatedDecl();
} else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
return FD->getTemplateInstantiationPattern();
} else if (auto *VD = dyn_cast<VarDecl>(D)) {
diff --git a/clang/lib/Index/USRGeneration.cpp b/clang/lib/Index/USRGeneration.cpp
index 394daf94c4b2..0d1e81219823 100644
--- a/clang/lib/Index/USRGeneration.cpp
+++ b/clang/lib/Index/USRGeneration.cpp
@@ -11,6 +11,7 @@
#include "clang/AST/Attr.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclVisitor.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Lex/PreprocessingRecord.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
@@ -382,6 +383,14 @@ void USRGenerator::VisitNamespaceAliasDecl(const NamespaceAliasDecl *D) {
Out << "@NA@" << D->getName();
}
+static const ObjCCategoryDecl *getCategoryContext(const NamedDecl *D) {
+ if (auto *CD = dyn_cast<ObjCCategoryDecl>(D->getDeclContext()))
+ return CD;
+ if (auto *ICD = dyn_cast<ObjCCategoryImplDecl>(D->getDeclContext()))
+ return ICD->getCategoryDecl();
+ return nullptr;
+}
+
void USRGenerator::VisitObjCMethodDecl(const ObjCMethodDecl *D) {
const DeclContext *container = D->getDeclContext();
if (const ObjCProtocolDecl *pd = dyn_cast<ObjCProtocolDecl>(container)) {
@@ -395,14 +404,6 @@ void USRGenerator::VisitObjCMethodDecl(const ObjCMethodDecl *D) {
IgnoreResults = true;
return;
}
- auto getCategoryContext = [](const ObjCMethodDecl *D) ->
- const ObjCCategoryDecl * {
- if (auto *CD = dyn_cast<ObjCCategoryDecl>(D->getDeclContext()))
- return CD;
- if (auto *ICD = dyn_cast<ObjCCategoryImplDecl>(D->getDeclContext()))
- return ICD->getCategoryDecl();
- return nullptr;
- };
auto *CD = getCategoryContext(D);
VisitObjCContainerDecl(ID, CD);
}
@@ -475,7 +476,7 @@ void USRGenerator::VisitObjCPropertyDecl(const ObjCPropertyDecl *D) {
// The USR for a property declared in a class extension or category is based
// on the ObjCInterfaceDecl, not the ObjCCategoryDecl.
if (const ObjCInterfaceDecl *ID = Context->getObjContainingInterface(D))
- Visit(ID);
+ VisitObjCContainerDecl(ID, getCategoryContext(D));
else
Visit(cast<Decl>(D->getDeclContext()));
GenObjCProperty(D->getName(), D->isClassProperty());
@@ -752,6 +753,7 @@ void USRGenerator::VisitType(QualType T) {
case BuiltinType::SatUShortFract:
case BuiltinType::SatUFract:
case BuiltinType::SatULongFract:
+ case BuiltinType::BFloat16:
IgnoreResults = true;
return;
case BuiltinType::ObjCId:
diff --git a/clang/lib/Lex/DependencyDirectivesSourceMinimizer.cpp b/clang/lib/Lex/DependencyDirectivesSourceMinimizer.cpp
index 029bfe1cd600..cdb4a79fa11a 100644
--- a/clang/lib/Lex/DependencyDirectivesSourceMinimizer.cpp
+++ b/clang/lib/Lex/DependencyDirectivesSourceMinimizer.cpp
@@ -18,6 +18,7 @@
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Lex/LexDiagnostic.h"
+#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/MemoryBuffer.h"
diff --git a/clang/lib/Lex/HeaderSearch.cpp b/clang/lib/Lex/HeaderSearch.cpp
index f0c5900c8ce4..1df28cc07209 100644
--- a/clang/lib/Lex/HeaderSearch.cpp
+++ b/clang/lib/Lex/HeaderSearch.cpp
@@ -133,7 +133,7 @@ const HeaderMap *HeaderSearch::CreateHeaderMap(const FileEntry *FE) {
void HeaderSearch::getHeaderMapFileNames(
SmallVectorImpl<std::string> &Names) const {
for (auto &HM : HeaderMaps)
- Names.push_back(HM.first->getName());
+ Names.push_back(std::string(HM.first->getName()));
}
std::string HeaderSearch::getCachedModuleFileName(Module *Module) {
@@ -145,7 +145,7 @@ std::string HeaderSearch::getCachedModuleFileName(Module *Module) {
std::string HeaderSearch::getPrebuiltModuleFileName(StringRef ModuleName,
bool FileMapOnly) {
// First check the module name to pcm file map.
- auto i (HSOpts->PrebuiltModuleFiles.find(ModuleName));
+ auto i(HSOpts->PrebuiltModuleFiles.find(ModuleName));
if (i != HSOpts->PrebuiltModuleFiles.end())
return i->second;
@@ -159,7 +159,7 @@ std::string HeaderSearch::getPrebuiltModuleFileName(StringRef ModuleName,
llvm::sys::fs::make_absolute(Result);
llvm::sys::path::append(Result, ModuleName + ".pcm");
if (getFileMgr().getFile(Result.str()))
- return Result.str().str();
+ return std::string(Result);
}
return {};
}
@@ -184,7 +184,8 @@ std::string HeaderSearch::getCachedModuleFileName(StringRef ModuleName,
//
// To avoid false-negatives, we form as canonical a path as we can, and map
// to lower-case in case we're on a case-insensitive file system.
- std::string Parent = llvm::sys::path::parent_path(ModuleMapPath);
+ std::string Parent =
+ std::string(llvm::sys::path::parent_path(ModuleMapPath));
if (Parent.empty())
Parent = ".";
auto Dir = FileMgr.getDirectory(Parent);
@@ -468,7 +469,7 @@ getTopFrameworkDir(FileManager &FileMgr, StringRef DirName,
// If this is a framework directory, then we're a subframework of this
// framework.
if (llvm::sys::path::extension(DirName) == ".framework") {
- SubmodulePath.push_back(llvm::sys::path::stem(DirName));
+ SubmodulePath.push_back(std::string(llvm::sys::path::stem(DirName)));
TopFrameworkDir = *Dir;
}
} while (true);
@@ -1218,9 +1219,11 @@ HeaderSearch::getExistingFileInfo(const FileEntry *FE,
}
bool HeaderSearch::isFileMultipleIncludeGuarded(const FileEntry *File) {
- // Check if we've ever seen this file as a header.
+ // Check if we've entered this file and found an include guard or #pragma
+ // once. Note that we don't check for #import, because that's not a property
+ // of the file itself.
if (auto *HFI = getExistingFileInfo(File))
- return HFI->isPragmaOnce || HFI->isImport || HFI->ControllingMacro ||
+ return HFI->isPragmaOnce || HFI->ControllingMacro ||
HFI->ControllingMacroID;
return false;
}
@@ -1273,14 +1276,12 @@ bool HeaderSearch::ShouldEnterIncludeFile(Preprocessor &PP,
//
// It's common that libc++ and system modules will both define such
// submodules. Make sure cached results for a builtin header won't
- // prevent other builtin modules to potentially enter the builtin header.
- // Note that builtins are header guarded and the decision to actually
- // enter them is postponed to the controlling macros logic below.
+ // prevent other builtin modules from potentially entering the builtin
+ // header. Note that builtins are header guarded and the decision to
+ // actually enter them is postponed to the controlling macros logic below.
bool TryEnterHdr = false;
if (FileInfo.isCompilingModuleHeader && FileInfo.isModuleHeader)
- TryEnterHdr = File->getDir() == ModMap.getBuiltinDir() &&
- ModuleMap::isBuiltinHeader(
- llvm::sys::path::filename(File->getName()));
+ TryEnterHdr = ModMap.isBuiltinHeader(File);
// Textual headers can be #imported from different modules. Since ObjC
// headers find in the wild might rely only on #import and do not contain
@@ -1398,25 +1399,46 @@ HeaderSearch::findModuleForHeader(const FileEntry *File,
return ModMap.findModuleForHeader(File, AllowTextual);
}
+ArrayRef<ModuleMap::KnownHeader>
+HeaderSearch::findAllModulesForHeader(const FileEntry *File) const {
+ if (ExternalSource) {
+ // Make sure the external source has handled header info about this file,
+ // which includes whether the file is part of a module.
+ (void)getExistingFileInfo(File);
+ }
+ return ModMap.findAllModulesForHeader(File);
+}
+
static bool suggestModule(HeaderSearch &HS, const FileEntry *File,
Module *RequestingModule,
ModuleMap::KnownHeader *SuggestedModule) {
ModuleMap::KnownHeader Module =
HS.findModuleForHeader(File, /*AllowTextual*/true);
- if (SuggestedModule)
- *SuggestedModule = (Module.getRole() & ModuleMap::TextualHeader)
- ? ModuleMap::KnownHeader()
- : Module;
// If this module specifies [no_undeclared_includes], we cannot find any
// file that's in a non-dependency module.
if (RequestingModule && Module && RequestingModule->NoUndeclaredIncludes) {
- HS.getModuleMap().resolveUses(RequestingModule, /*Complain*/false);
+ HS.getModuleMap().resolveUses(RequestingModule, /*Complain*/ false);
if (!RequestingModule->directlyUses(Module.getModule())) {
+ // Builtin headers are a special case. Multiple modules can use the same
+ // builtin as a modular header (see also comment in
+ // ShouldEnterIncludeFile()), so the builtin header may have been
+ // "claimed" by an unrelated module. This shouldn't prevent us from
+ // including the builtin header textually in this module.
+ if (HS.getModuleMap().isBuiltinHeader(File)) {
+ if (SuggestedModule)
+ *SuggestedModule = ModuleMap::KnownHeader();
+ return true;
+ }
return false;
}
}
+ if (SuggestedModule)
+ *SuggestedModule = (Module.getRole() & ModuleMap::TextualHeader)
+ ? ModuleMap::KnownHeader()
+ : Module;
+
return true;
}
@@ -1567,6 +1589,16 @@ HeaderSearch::lookupModuleMapFile(const DirectoryEntry *Dir, bool IsFramework) {
llvm::sys::path::append(ModuleMapFileName, "module.map");
if (auto F = FileMgr.getFile(ModuleMapFileName))
return *F;
+
+ // For frameworks, allow a private module map with a preferred
+ // spelling when a public module map is absent.
+ if (IsFramework) {
+ ModuleMapFileName = Dir->getName();
+ llvm::sys::path::append(ModuleMapFileName, "Modules",
+ "module.private.modulemap");
+ if (auto F = FileMgr.getFile(ModuleMapFileName))
+ return *F;
+ }
return nullptr;
}
diff --git a/clang/lib/Lex/Lexer.cpp b/clang/lib/Lex/Lexer.cpp
index 648bda270578..a559ca3eac2b 100644
--- a/clang/lib/Lex/Lexer.cpp
+++ b/clang/lib/Lex/Lexer.cpp
@@ -29,6 +29,7 @@
#include "clang/Basic/TokenKinds.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/StringRef.h"
@@ -253,7 +254,7 @@ template <typename T> static void StringifyImpl(T &Str, char Quote) {
}
std::string Lexer::Stringify(StringRef Str, bool Charify) {
- std::string Result = Str;
+ std::string Result = std::string(Str);
char Quote = Charify ? '\'' : '"';
StringifyImpl(Result, Quote);
return Result;
@@ -1861,7 +1862,7 @@ const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr,
char Next = getCharAndSizeNoWarn(CurPtr + Consumed, NextSize,
getLangOpts());
if (!isIdentifierBody(Next)) {
- // End of suffix. Check whether this is on the whitelist.
+ // End of suffix. Check whether this is on the allowed list.
const StringRef CompleteSuffix(Buffer, Chars);
IsUDSuffix = StringLiteralParser::isValidUDSuffix(getLangOpts(),
CompleteSuffix);
@@ -2092,7 +2093,8 @@ void Lexer::codeCompleteIncludedFile(const char *PathStart,
bool IsAngled) {
// Completion only applies to the filename, after the last slash.
StringRef PartialPath(PathStart, CompletionPoint - PathStart);
- auto Slash = PartialPath.find_last_of(LangOpts.MSVCCompat ? "/\\" : "/");
+ llvm::StringRef SlashChars = LangOpts.MSVCCompat ? "/\\" : "/";
+ auto Slash = PartialPath.find_last_of(SlashChars);
StringRef Dir =
(Slash == StringRef::npos) ? "" : PartialPath.take_front(Slash);
const char *StartOfFilename =
@@ -2100,7 +2102,8 @@ void Lexer::codeCompleteIncludedFile(const char *PathStart,
// Code completion filter range is the filename only, up to completion point.
PP->setCodeCompletionIdentifierInfo(&PP->getIdentifierTable().get(
StringRef(StartOfFilename, CompletionPoint - StartOfFilename)));
- // We should replace the characters up to the closing quote, if any.
+ // We should replace the characters up to the closing quote or closest slash,
+ // if any.
while (CompletionPoint < BufferEnd) {
char Next = *(CompletionPoint + 1);
if (Next == 0 || Next == '\r' || Next == '\n')
@@ -2108,7 +2111,10 @@ void Lexer::codeCompleteIncludedFile(const char *PathStart,
++CompletionPoint;
if (Next == (IsAngled ? '>' : '"'))
break;
+ if (llvm::is_contained(SlashChars, Next))
+ break;
}
+
PP->setCodeCompletionTokenRange(
FileLoc.getLocWithOffset(StartOfFilename - BufferStart),
FileLoc.getLocWithOffset(CompletionPoint - BufferStart));
@@ -2552,8 +2558,8 @@ bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr,
'/', '/', '/', '/', '/', '/', '/', '/',
'/', '/', '/', '/', '/', '/', '/', '/'
};
- while (CurPtr+16 <= BufferEnd &&
- !vec_any_eq(*(const vector unsigned char*)CurPtr, Slashes))
+ while (CurPtr + 16 <= BufferEnd &&
+ !vec_any_eq(*(const __vector unsigned char *)CurPtr, Slashes))
CurPtr += 16;
#else
// Scan for '/' quickly. Many block comments are very large.
@@ -3694,7 +3700,7 @@ LexNextToken:
} else if (Char == '=') {
char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2);
if (After == '>') {
- if (getLangOpts().CPlusPlus2a) {
+ if (getLangOpts().CPlusPlus20) {
if (!isLexingRawMode())
Diag(BufferPtr, diag::warn_cxx17_compat_spaceship);
CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
@@ -3705,7 +3711,7 @@ LexNextToken:
// Suggest adding a space between the '<=' and the '>' to avoid a
// change in semantics if this turns up in C++ <=17 mode.
if (getLangOpts().CPlusPlus && !isLexingRawMode()) {
- Diag(BufferPtr, diag::warn_cxx2a_compat_spaceship)
+ Diag(BufferPtr, diag::warn_cxx20_compat_spaceship)
<< FixItHint::CreateInsertion(
getSourceLocation(CurPtr + SizeTmp, SizeTmp2), " ");
}
diff --git a/clang/lib/Lex/LiteralSupport.cpp b/clang/lib/Lex/LiteralSupport.cpp
index 9a852141c6ee..eb16bc8c7da2 100644
--- a/clang/lib/Lex/LiteralSupport.cpp
+++ b/clang/lib/Lex/LiteralSupport.cpp
@@ -25,6 +25,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
@@ -524,8 +525,12 @@ static void EncodeUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
///
NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
SourceLocation TokLoc,
- Preprocessor &PP)
- : PP(PP), ThisTokBegin(TokSpelling.begin()), ThisTokEnd(TokSpelling.end()) {
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ const TargetInfo &Target,
+ DiagnosticsEngine &Diags)
+ : SM(SM), LangOpts(LangOpts), Diags(Diags),
+ ThisTokBegin(TokSpelling.begin()), ThisTokEnd(TokSpelling.end()) {
// This routine assumes that the range begin/end matches the regex for integer
// and FP constants (specifically, the 'pp-number' regex), and assumes that
@@ -571,7 +576,7 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
checkSeparator(TokLoc, s, CSK_AfterDigits);
// Initial scan to lookahead for fixed point suffix.
- if (PP.getLangOpts().FixedPoint) {
+ if (LangOpts.FixedPoint) {
for (const char *c = s; c != ThisTokEnd; ++c) {
if (*c == 'r' || *c == 'k' || *c == 'R' || *c == 'K') {
saw_fixed_point_suffix = true;
@@ -582,6 +587,7 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
// Parse the suffix. At this point we can classify whether we have an FP or
// integer constant.
+ bool isFixedPointConstant = isFixedPointLiteral();
bool isFPConstant = isFloatingLiteral();
// Loop over all of the characters of the suffix. If we see something bad,
@@ -590,14 +596,16 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
switch (*s) {
case 'R':
case 'r':
- if (!PP.getLangOpts().FixedPoint) break;
+ if (!LangOpts.FixedPoint)
+ break;
if (isFract || isAccum) break;
if (!(saw_period || saw_exponent)) break;
isFract = true;
continue;
case 'K':
case 'k':
- if (!PP.getLangOpts().FixedPoint) break;
+ if (!LangOpts.FixedPoint)
+ break;
if (isFract || isAccum) break;
if (!(saw_period || saw_exponent)) break;
isAccum = true;
@@ -605,7 +613,8 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
case 'h': // FP Suffix for "half".
case 'H':
// OpenCL Extension v1.2 s9.5 - h or H suffix for half type.
- if (!(PP.getLangOpts().Half || PP.getLangOpts().FixedPoint)) break;
+ if (!(LangOpts.Half || LangOpts.FixedPoint))
+ break;
if (isIntegerLiteral()) break; // Error for integer constant.
if (isHalf || isFloat || isLong) break; // HH, FH, LH invalid.
isHalf = true;
@@ -619,8 +628,8 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
// CUDA host and device may have different _Float16 support, therefore
// allows f16 literals to avoid false alarm.
// ToDo: more precise check for CUDA.
- if ((PP.getTargetInfo().hasFloat16Type() || PP.getLangOpts().CUDA) &&
- s + 2 < ThisTokEnd && s[1] == '1' && s[2] == '6') {
+ if ((Target.hasFloat16Type() || LangOpts.CUDA) && s + 2 < ThisTokEnd &&
+ s[1] == '1' && s[2] == '6') {
s += 2; // success, eat up 2 characters.
isFloat16 = true;
continue;
@@ -655,10 +664,10 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
} else {
isLong = true;
}
- continue; // Success.
+ continue; // Success.
case 'i':
case 'I':
- if (PP.getLangOpts().MicrosoftExt) {
+ if (LangOpts.MicrosoftExt) {
if (isLong || isLongLong || MicrosoftInteger)
break;
@@ -711,7 +720,7 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
if (s != ThisTokEnd || isImaginary) {
// FIXME: Don't bother expanding UCNs if !tok.hasUCN().
expandUCNs(UDSuffixBuf, StringRef(SuffixBegin, ThisTokEnd - SuffixBegin));
- if (isValidUDSuffix(PP.getLangOpts(), UDSuffixBuf)) {
+ if (isValidUDSuffix(LangOpts, UDSuffixBuf)) {
if (!isImaginary) {
// Any suffix pieces we might have parsed are actually part of the
// ud-suffix.
@@ -734,9 +743,11 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
if (s != ThisTokEnd) {
// Report an error if there are any.
- PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, SuffixBegin - ThisTokBegin),
- diag::err_invalid_suffix_constant)
- << StringRef(SuffixBegin, ThisTokEnd - SuffixBegin) << isFPConstant;
+ Diags.Report(Lexer::AdvanceToTokenCharacter(
+ TokLoc, SuffixBegin - ThisTokBegin, SM, LangOpts),
+ diag::err_invalid_suffix_constant)
+ << StringRef(SuffixBegin, ThisTokEnd - SuffixBegin)
+ << (isFixedPointConstant ? 2 : isFPConstant);
hadError = true;
}
}
@@ -755,9 +766,11 @@ void NumericLiteralParser::ParseDecimalOrOctalCommon(SourceLocation TokLoc){
// If we have a hex digit other than 'e' (which denotes a FP exponent) then
// the code is using an incorrect base.
if (isHexDigit(*s) && *s != 'e' && *s != 'E' &&
- !isValidUDSuffix(PP.getLangOpts(), StringRef(s, ThisTokEnd - s))) {
- PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-ThisTokBegin),
- diag::err_invalid_digit) << StringRef(s, 1) << (radix == 8 ? 1 : 0);
+ !isValidUDSuffix(LangOpts, StringRef(s, ThisTokEnd - s))) {
+ Diags.Report(
+ Lexer::AdvanceToTokenCharacter(TokLoc, s - ThisTokBegin, SM, LangOpts),
+ diag::err_invalid_digit)
+ << StringRef(s, 1) << (radix == 8 ? 1 : 0);
hadError = true;
return;
}
@@ -783,8 +796,9 @@ void NumericLiteralParser::ParseDecimalOrOctalCommon(SourceLocation TokLoc){
s = first_non_digit;
} else {
if (!hadError) {
- PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Exponent-ThisTokBegin),
- diag::err_exponent_has_no_digits);
+ Diags.Report(Lexer::AdvanceToTokenCharacter(
+ TokLoc, Exponent - ThisTokBegin, SM, LangOpts),
+ diag::err_exponent_has_no_digits);
hadError = true;
}
return;
@@ -815,7 +829,7 @@ bool NumericLiteralParser::isValidUDSuffix(const LangOptions &LangOpts,
.Cases("h", "min", "s", true)
.Cases("ms", "us", "ns", true)
.Cases("il", "i", "if", true)
- .Cases("d", "y", LangOpts.CPlusPlus2a)
+ .Cases("d", "y", LangOpts.CPlusPlus20)
.Default(false);
}
@@ -830,9 +844,10 @@ void NumericLiteralParser::checkSeparator(SourceLocation TokLoc,
return;
if (isDigitSeparator(*Pos)) {
- PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Pos - ThisTokBegin),
- diag::err_digit_separator_not_between_digits)
- << IsAfterDigits;
+ Diags.Report(Lexer::AdvanceToTokenCharacter(TokLoc, Pos - ThisTokBegin, SM,
+ LangOpts),
+ diag::err_digit_separator_not_between_digits)
+ << IsAfterDigits;
hadError = true;
}
}
@@ -870,9 +885,10 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
}
if (!HasSignificandDigits) {
- PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s - ThisTokBegin),
- diag::err_hex_constant_requires)
- << PP.getLangOpts().CPlusPlus << 1;
+ Diags.Report(Lexer::AdvanceToTokenCharacter(TokLoc, s - ThisTokBegin, SM,
+ LangOpts),
+ diag::err_hex_constant_requires)
+ << LangOpts.CPlusPlus << 1;
hadError = true;
return;
}
@@ -888,8 +904,9 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
const char *first_non_digit = SkipDigits(s);
if (!containsDigits(s, first_non_digit)) {
if (!hadError) {
- PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Exponent-ThisTokBegin),
- diag::err_exponent_has_no_digits);
+ Diags.Report(Lexer::AdvanceToTokenCharacter(
+ TokLoc, Exponent - ThisTokBegin, SM, LangOpts),
+ diag::err_exponent_has_no_digits);
hadError = true;
}
return;
@@ -897,16 +914,17 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
checkSeparator(TokLoc, s, CSK_BeforeDigits);
s = first_non_digit;
- if (!PP.getLangOpts().HexFloats)
- PP.Diag(TokLoc, PP.getLangOpts().CPlusPlus
- ? diag::ext_hex_literal_invalid
- : diag::ext_hex_constant_invalid);
- else if (PP.getLangOpts().CPlusPlus17)
- PP.Diag(TokLoc, diag::warn_cxx17_hex_literal);
+ if (!LangOpts.HexFloats)
+ Diags.Report(TokLoc, LangOpts.CPlusPlus
+ ? diag::ext_hex_literal_invalid
+ : diag::ext_hex_constant_invalid);
+ else if (LangOpts.CPlusPlus17)
+ Diags.Report(TokLoc, diag::warn_cxx17_hex_literal);
} else if (saw_period) {
- PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s - ThisTokBegin),
- diag::err_hex_constant_requires)
- << PP.getLangOpts().CPlusPlus << 0;
+ Diags.Report(Lexer::AdvanceToTokenCharacter(TokLoc, s - ThisTokBegin, SM,
+ LangOpts),
+ diag::err_hex_constant_requires)
+ << LangOpts.CPlusPlus << 0;
hadError = true;
}
return;
@@ -915,12 +933,10 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
// Handle simple binary numbers 0b01010
if ((c1 == 'b' || c1 == 'B') && (s[1] == '0' || s[1] == '1')) {
// 0b101010 is a C++1y / GCC extension.
- PP.Diag(TokLoc,
- PP.getLangOpts().CPlusPlus14
- ? diag::warn_cxx11_compat_binary_literal
- : PP.getLangOpts().CPlusPlus
- ? diag::ext_binary_literal_cxx14
- : diag::ext_binary_literal);
+ Diags.Report(TokLoc, LangOpts.CPlusPlus14
+ ? diag::warn_cxx11_compat_binary_literal
+ : LangOpts.CPlusPlus ? diag::ext_binary_literal_cxx14
+ : diag::ext_binary_literal);
++s;
assert(s < ThisTokEnd && "didn't maximally munch?");
radix = 2;
@@ -929,10 +945,11 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
if (s == ThisTokEnd) {
// Done.
} else if (isHexDigit(*s) &&
- !isValidUDSuffix(PP.getLangOpts(),
- StringRef(s, ThisTokEnd - s))) {
- PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-ThisTokBegin),
- diag::err_invalid_digit) << StringRef(s, 1) << 2;
+ !isValidUDSuffix(LangOpts, StringRef(s, ThisTokEnd - s))) {
+ Diags.Report(Lexer::AdvanceToTokenCharacter(TokLoc, s - ThisTokBegin, SM,
+ LangOpts),
+ diag::err_invalid_digit)
+ << StringRef(s, 1) << 2;
hadError = true;
}
// Other suffixes will be diagnosed by the caller.
diff --git a/clang/lib/Lex/ModuleMap.cpp b/clang/lib/Lex/ModuleMap.cpp
index fe20a3507036..bcdc5b8062a0 100644
--- a/clang/lib/Lex/ModuleMap.cpp
+++ b/clang/lib/Lex/ModuleMap.cpp
@@ -262,7 +262,7 @@ void ModuleMap::resolveHeader(Module *Mod,
// Record this umbrella header.
setUmbrellaHeader(Mod, File, RelativePathName.str());
} else {
- Module::Header H = {RelativePathName.str(), File};
+ Module::Header H = {std::string(RelativePathName.str()), File};
if (Header.Kind == Module::HK_Excluded)
excludeHeader(Mod, H);
else
@@ -282,7 +282,7 @@ void ModuleMap::resolveHeader(Module *Mod,
// resolved. (Such a module still can't be built though, except from
// preprocessed source.)
if (!Header.Size && !Header.ModTime)
- Mod->markUnavailable();
+ Mod->markUnavailable(/*Unimportable=*/false);
}
}
@@ -305,7 +305,7 @@ bool ModuleMap::resolveAsBuiltinHeader(
return false;
auto Role = headerKindToRole(Header.Kind);
- Module::Header H = {Path.str(), *File};
+ Module::Header H = {std::string(Path.str()), *File};
addHeader(Mod, H, Role);
return true;
}
@@ -387,13 +387,17 @@ bool ModuleMap::isBuiltinHeader(StringRef FileName) {
.Default(false);
}
+bool ModuleMap::isBuiltinHeader(const FileEntry *File) {
+ return File->getDir() == BuiltinIncludeDir &&
+ ModuleMap::isBuiltinHeader(llvm::sys::path::filename(File->getName()));
+}
+
ModuleMap::HeadersMap::iterator
ModuleMap::findKnownHeader(const FileEntry *File) {
resolveHeaderDirectives(File);
HeadersMap::iterator Known = Headers.find(File);
if (HeaderInfo.getHeaderSearchOpts().ImplicitModuleMaps &&
- Known == Headers.end() && File->getDir() == BuiltinIncludeDir &&
- ModuleMap::isBuiltinHeader(llvm::sys::path::filename(File->getName()))) {
+ Known == Headers.end() && ModuleMap::isBuiltinHeader(File)) {
HeaderInfo.loadTopLevelSystemModules();
return Headers.find(File);
}
@@ -544,6 +548,9 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule,
static bool isBetterKnownHeader(const ModuleMap::KnownHeader &New,
const ModuleMap::KnownHeader &Old) {
// Prefer available modules.
+ // FIXME: Considering whether the module is available rather than merely
+ // importable is non-hermetic and can result in surprising behavior for
+ // prebuilt modules. Consider only checking for importability here.
if (New.getModule()->isAvailable() && !Old.getModule()->isAvailable())
return true;
@@ -659,7 +666,20 @@ ModuleMap::findOrCreateModuleForHeaderInUmbrellaDir(const FileEntry *File) {
}
ArrayRef<ModuleMap::KnownHeader>
-ModuleMap::findAllModulesForHeader(const FileEntry *File) const {
+ModuleMap::findAllModulesForHeader(const FileEntry *File) {
+ HeadersMap::iterator Known = findKnownHeader(File);
+ if (Known != Headers.end())
+ return Known->second;
+
+ if (findOrCreateModuleForHeaderInUmbrellaDir(File))
+ return Headers.find(File)->second;
+
+ return None;
+}
+
+ArrayRef<ModuleMap::KnownHeader>
+ModuleMap::findResolvedModulesForHeader(const FileEntry *File) const {
+ // FIXME: Is this necessary?
resolveHeaderDirectives(File);
auto It = Headers.find(File);
if (It == Headers.end())
@@ -1094,7 +1114,7 @@ Module *ModuleMap::createShadowedModule(StringRef Name, bool IsFramework,
new Module(Name, SourceLocation(), /*Parent=*/nullptr, IsFramework,
/*IsExplicit=*/false, NumCreatedModules++);
Result->ShadowingModule = ShadowingModule;
- Result->IsAvailable = false;
+ Result->markUnavailable(/*Unimportable*/true);
ModuleScopeIDs[Result] = CurrentModuleScopeID;
ShadowModules.push_back(Result);
@@ -1105,6 +1125,7 @@ void ModuleMap::setUmbrellaHeader(Module *Mod, const FileEntry *UmbrellaHeader,
Twine NameAsWritten) {
Headers[UmbrellaHeader].push_back(KnownHeader(Mod, NormalHeader));
Mod->Umbrella = UmbrellaHeader;
+ Mod->HasUmbrellaDir = false;
Mod->UmbrellaAsWritten = NameAsWritten.str();
UmbrellaDirs[UmbrellaHeader->getDir()] = Mod;
@@ -1116,6 +1137,7 @@ void ModuleMap::setUmbrellaHeader(Module *Mod, const FileEntry *UmbrellaHeader,
void ModuleMap::setUmbrellaDir(Module *Mod, const DirectoryEntry *UmbrellaDir,
Twine NameAsWritten) {
Mod->Umbrella = UmbrellaDir;
+ Mod->HasUmbrellaDir = true;
Mod->UmbrellaAsWritten = NameAsWritten.str();
UmbrellaDirs[UmbrellaDir] = Mod;
}
@@ -1242,6 +1264,11 @@ void ModuleMap::setInferredModuleAllowedBy(Module *M, const FileEntry *ModMap) {
InferredModuleAllowedBy[M] = ModMap;
}
+void ModuleMap::addAdditionalModuleMapFile(const Module *M,
+ const FileEntry *ModuleMap) {
+ AdditionalModMaps[M].insert(ModuleMap);
+}
+
LLVM_DUMP_METHOD void ModuleMap::dump() {
llvm::errs() << "Modules:";
for (llvm::StringMap<Module *>::iterator M = Modules.begin(),
@@ -1681,7 +1708,8 @@ bool ModuleMapParser::parseModuleId(ModuleId &Id) {
Id.clear();
do {
if (Tok.is(MMToken::Identifier) || Tok.is(MMToken::StringLiteral)) {
- Id.push_back(std::make_pair(Tok.getString(), Tok.getLocation()));
+ Id.push_back(
+ std::make_pair(std::string(Tok.getString()), Tok.getLocation()));
consumeToken();
} else {
Diags.Report(Tok.getLocation(), diag::err_mmap_expected_module_name);
@@ -2088,9 +2116,9 @@ void ModuleMapParser::parseModuleDecl() {
// If the module meets all requirements but is still unavailable, mark the
// whole tree as unavailable to prevent it from building.
- if (!ActiveModule->IsAvailable && !ActiveModule->IsMissingRequirement &&
+ if (!ActiveModule->IsAvailable && !ActiveModule->IsUnimportable &&
ActiveModule->Parent) {
- ActiveModule->getTopLevelModule()->markUnavailable();
+ ActiveModule->getTopLevelModule()->markUnavailable(/*Unimportable=*/false);
ActiveModule->getTopLevelModule()->MissingHeaders.append(
ActiveModule->MissingHeaders.begin(), ActiveModule->MissingHeaders.end());
}
@@ -2129,7 +2157,7 @@ void ModuleMapParser::parseExternModuleDecl() {
HadError = true;
return;
}
- std::string FileName = Tok.getString();
+ std::string FileName = std::string(Tok.getString());
consumeToken(); // filename
StringRef FileNameRef = FileName;
@@ -2209,7 +2237,7 @@ void ModuleMapParser::parseRequiresDecl() {
}
// Consume the feature name.
- std::string Feature = Tok.getString();
+ std::string Feature = std::string(Tok.getString());
consumeToken();
bool IsRequiresExcludedHack = false;
@@ -2283,7 +2311,7 @@ void ModuleMapParser::parseHeaderDecl(MMToken::TokenKind LeadingToken,
return;
}
Module::UnresolvedHeaderDirective Header;
- Header.FileName = Tok.getString();
+ Header.FileName = std::string(Tok.getString());
Header.FileNameLoc = consumeToken();
Header.IsUmbrella = LeadingToken == MMToken::UmbrellaKeyword;
Header.Kind =
@@ -2380,7 +2408,7 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
return;
}
- std::string DirName = Tok.getString();
+ std::string DirName = std::string(Tok.getString());
SourceLocation DirNameLoc = consumeToken();
// Check whether we already have an umbrella.
@@ -2422,8 +2450,7 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
for (llvm::vfs::recursive_directory_iterator I(FS, Dir->getName(), EC), E;
I != E && !EC; I.increment(EC)) {
if (auto FE = SourceMgr.getFileManager().getFile(I->path())) {
-
- Module::Header Header = {I->path(), *FE};
+ Module::Header Header = {std::string(I->path()), *FE};
Headers.push_back(std::move(Header));
}
}
@@ -2466,8 +2493,8 @@ void ModuleMapParser::parseExportDecl() {
do {
// FIXME: Support string-literal module names here.
if (Tok.is(MMToken::Identifier)) {
- ParsedModuleId.push_back(std::make_pair(Tok.getString(),
- Tok.getLocation()));
+ ParsedModuleId.push_back(
+ std::make_pair(std::string(Tok.getString()), Tok.getLocation()));
consumeToken();
if (Tok.is(MMToken::Period)) {
@@ -2526,7 +2553,7 @@ void ModuleMapParser::parseExportAsDecl() {
}
}
- ActiveModule->ExportAsModule = Tok.getString();
+ ActiveModule->ExportAsModule = std::string(Tok.getString());
Map.addLinkAsDependency(ActiveModule);
consumeToken();
@@ -2572,7 +2599,7 @@ void ModuleMapParser::parseLinkDecl() {
return;
}
- std::string LibraryName = Tok.getString();
+ std::string LibraryName = std::string(Tok.getString());
consumeToken();
ActiveModule->LinkLibraries.push_back(Module::LinkLibrary(LibraryName,
IsFramework));
@@ -2794,8 +2821,8 @@ void ModuleMapParser::parseInferredModuleDecl(bool Framework, bool Explicit) {
break;
}
- Map.InferredDirectories[Directory].ExcludedModules
- .push_back(Tok.getString());
+ Map.InferredDirectories[Directory].ExcludedModules.push_back(
+ std::string(Tok.getString()));
consumeToken();
break;
diff --git a/clang/lib/Lex/PPCallbacks.cpp b/clang/lib/Lex/PPCallbacks.cpp
index cd8b04b20d24..b618071590ba 100644
--- a/clang/lib/Lex/PPCallbacks.cpp
+++ b/clang/lib/Lex/PPCallbacks.cpp
@@ -7,7 +7,24 @@
//===----------------------------------------------------------------------===//
#include "clang/Lex/PPCallbacks.h"
+#include "clang/Basic/FileManager.h"
using namespace clang;
-void PPChainedCallbacks::anchor() { }
+// Out of line key method.
+PPCallbacks::~PPCallbacks() = default;
+
+void PPCallbacks::HasInclude(SourceLocation Loc, StringRef FileName,
+ bool IsAngled, Optional<FileEntryRef> File,
+ SrcMgr::CharacteristicKind FileType) {}
+
+// Out of line key method.
+PPChainedCallbacks::~PPChainedCallbacks() = default;
+
+void PPChainedCallbacks::HasInclude(SourceLocation Loc, StringRef FileName,
+ bool IsAngled, Optional<FileEntryRef> File,
+ SrcMgr::CharacteristicKind FileType) {
+ First->HasInclude(Loc, FileName, IsAngled, File, FileType);
+ Second->HasInclude(Loc, FileName, IsAngled, File, FileType);
+}
+
diff --git a/clang/lib/Lex/PPDirectives.cpp b/clang/lib/Lex/PPDirectives.cpp
index e433b2cf1b95..053ef1d2dd18 100644
--- a/clang/lib/Lex/PPDirectives.cpp
+++ b/clang/lib/Lex/PPDirectives.cpp
@@ -432,6 +432,7 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
// Skip to the next '#endif' / '#else' / '#elif'.
CurLexer->skipOver(*SkipLength);
}
+ SourceLocation endLoc;
while (true) {
CurLexer->Lex(Tok);
@@ -538,7 +539,7 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
// Restore the value of LexingRawMode so that trailing comments
// are handled correctly, if we've reached the outermost block.
CurPPLexer->LexingRawMode = false;
- CheckEndOfDirective("endif");
+ endLoc = CheckEndOfDirective("endif");
CurPPLexer->LexingRawMode = true;
if (Callbacks)
Callbacks->Endif(Tok.getLocation(), CondInfo.IfLoc);
@@ -565,7 +566,7 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
// Restore the value of LexingRawMode so that trailing comments
// are handled correctly.
CurPPLexer->LexingRawMode = false;
- CheckEndOfDirective("else");
+ endLoc = CheckEndOfDirective("else");
CurPPLexer->LexingRawMode = true;
if (Callbacks)
Callbacks->Else(Tok.getLocation(), CondInfo.IfLoc);
@@ -621,7 +622,9 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
// by the end of the preamble; we'll resume parsing after the preamble.
if (Callbacks && (Tok.isNot(tok::eof) || !isRecordingPreamble()))
Callbacks->SourceRangeSkipped(
- SourceRange(HashTokenLoc, CurPPLexer->getSourceLocation()),
+ SourceRange(HashTokenLoc, endLoc.isValid()
+ ? endLoc
+ : CurPPLexer->getSourceLocation()),
Tok.getLocation());
}
@@ -646,24 +649,8 @@ Module *Preprocessor::getModuleForLocation(SourceLocation Loc) {
}
const FileEntry *
-Preprocessor::getModuleHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
- Module *M,
- SourceLocation Loc) {
- assert(M && "no module to include");
-
- // If the context is the global module fragment of some module, we never
- // want to return that file; instead, we want the innermost include-guarded
- // header that it included.
- bool InGlobalModuleFragment = M->Kind == Module::GlobalModuleFragment;
-
- // If we have a module import syntax, we shouldn't include a header to
- // make a particular module visible.
- if ((getLangOpts().ObjC || getLangOpts().CPlusPlusModules ||
- getLangOpts().ModulesTS) &&
- !InGlobalModuleFragment)
- return nullptr;
-
- Module *TopM = M->getTopLevelModule();
+Preprocessor::getHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
+ SourceLocation Loc) {
Module *IncM = getModuleForLocation(IncLoc);
// Walk up through the include stack, looking through textual headers of M
@@ -677,37 +664,50 @@ Preprocessor::getModuleHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
if (!FE)
break;
- if (InGlobalModuleFragment) {
- if (getHeaderSearchInfo().isFileMultipleIncludeGuarded(FE))
- return FE;
- Loc = SM.getIncludeLoc(ID);
- continue;
- }
-
- bool InTextualHeader = false;
- for (auto Header : HeaderInfo.getModuleMap().findAllModulesForHeader(FE)) {
- if (!Header.getModule()->isSubModuleOf(TopM))
- continue;
-
- if (!(Header.getRole() & ModuleMap::TextualHeader)) {
- // If this is an accessible, non-textual header of M's top-level module
- // that transitively includes the given location and makes the
- // corresponding module visible, this is the thing to #include.
- if (Header.isAccessibleFrom(IncM))
- return FE;
+ // We want to find all possible modules that might contain this header, so
+ // search all enclosing directories for module maps and load them.
+ HeaderInfo.hasModuleMap(FE->getName(), /*Root*/ nullptr,
+ SourceMgr.isInSystemHeader(Loc));
+ bool InPrivateHeader = false;
+ for (auto Header : HeaderInfo.findAllModulesForHeader(FE)) {
+ if (!Header.isAccessibleFrom(IncM)) {
// It's in a private header; we can't #include it.
// FIXME: If there's a public header in some module that re-exports it,
// then we could suggest including that, but it's not clear that's the
// expected way to make this entity visible.
+ InPrivateHeader = true;
continue;
}
- InTextualHeader = true;
+ // We'll suggest including textual headers below if they're
+ // include-guarded.
+ if (Header.getRole() & ModuleMap::TextualHeader)
+ continue;
+
+ // If we have a module import syntax, we shouldn't include a header to
+ // make a particular module visible. Let the caller know they should
+ // suggest an import instead.
+ if (getLangOpts().ObjC || getLangOpts().CPlusPlusModules ||
+ getLangOpts().ModulesTS)
+ return nullptr;
+
+ // If this is an accessible, non-textual header of M's top-level module
+ // that transitively includes the given location and makes the
+ // corresponding module visible, this is the thing to #include.
+ return FE;
}
- if (!InTextualHeader)
- break;
+ // FIXME: If we're bailing out due to a private header, we shouldn't suggest
+ // an import either.
+ if (InPrivateHeader)
+ return nullptr;
+
+ // If the header is includable and has an include guard, assume the
+ // intended way to expose its contents is by #include, not by importing a
+ // module that transitively includes it.
+ if (getHeaderSearchInfo().isFileMultipleIncludeGuarded(FE))
+ return FE;
Loc = SM.getIncludeLoc(ID);
}
@@ -1708,15 +1708,22 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
EnterAnnotationToken(SourceRange(HashLoc, EndLoc),
tok::annot_module_include, Action.ModuleForHeader);
break;
+ case ImportAction::Failure:
+ assert(TheModuleLoader.HadFatalFailure &&
+ "This should be an early exit only to a fatal error");
+ TheModuleLoader.HadFatalFailure = true;
+ IncludeTok.setKind(tok::eof);
+ CurLexer->cutOffLexing();
+ return;
}
}
Optional<FileEntryRef> Preprocessor::LookupHeaderIncludeOrImport(
- const DirectoryLookup *&CurDir, StringRef Filename,
+ const DirectoryLookup *&CurDir, StringRef& Filename,
SourceLocation FilenameLoc, CharSourceRange FilenameRange,
const Token &FilenameTok, bool &IsFrameworkFound, bool IsImportDecl,
bool &IsMapped, const DirectoryLookup *LookupFrom,
- const FileEntry *LookupFromFile, StringRef LookupFilename,
+ const FileEntry *LookupFromFile, StringRef& LookupFilename,
SmallVectorImpl<char> &RelativePath, SmallVectorImpl<char> &SearchPath,
ModuleMap::KnownHeader &SuggestedModule, bool isAngled) {
Optional<FileEntryRef> File = LookupFile(
@@ -1785,21 +1792,10 @@ Optional<FileEntryRef> Preprocessor::LookupHeaderIncludeOrImport(
return Filename;
};
StringRef TypoCorrectionName = CorrectTypoFilename(Filename);
-
-#ifndef _WIN32
- // Normalize slashes when compiling with -fms-extensions on non-Windows.
- // This is unnecessary on Windows since the filesystem there handles
- // backslashes.
- SmallString<128> NormalizedTypoCorrectionPath;
- if (LangOpts.MicrosoftExt) {
- NormalizedTypoCorrectionPath = TypoCorrectionName;
- llvm::sys::path::native(NormalizedTypoCorrectionPath);
- TypoCorrectionName = NormalizedTypoCorrectionPath;
- }
-#endif
+ StringRef TypoCorrectionLookupName = CorrectTypoFilename(LookupFilename);
Optional<FileEntryRef> File = LookupFile(
- FilenameLoc, TypoCorrectionName, isAngled, LookupFrom, LookupFromFile,
+ FilenameLoc, TypoCorrectionLookupName, isAngled, LookupFrom, LookupFromFile,
CurDir, Callbacks ? &SearchPath : nullptr,
Callbacks ? &RelativePath : nullptr, &SuggestedModule, &IsMapped,
/*IsFrameworkFound=*/nullptr);
@@ -1814,6 +1810,7 @@ Optional<FileEntryRef> Preprocessor::LookupHeaderIncludeOrImport(
// We found the file, so set the Filename to the name after typo
// correction.
Filename = TypoCorrectionName;
+ LookupFilename = TypoCorrectionLookupName;
return File;
}
}
@@ -1911,14 +1908,18 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
SourceLocation FilenameLoc = FilenameTok.getLocation();
StringRef LookupFilename = Filename;
-#ifndef _WIN32
+#ifdef _WIN32
+ llvm::sys::path::Style BackslashStyle = llvm::sys::path::Style::windows;
+#else
// Normalize slashes when compiling with -fms-extensions on non-Windows. This
// is unnecessary on Windows since the filesystem there handles backslashes.
SmallString<128> NormalizedPath;
+ llvm::sys::path::Style BackslashStyle = llvm::sys::path::Style::posix;
if (LangOpts.MicrosoftExt) {
NormalizedPath = Filename.str();
llvm::sys::path::native(NormalizedPath);
LookupFilename = NormalizedPath;
+ BackslashStyle = llvm::sys::path::Style::windows;
}
#endif
@@ -1933,19 +1934,6 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
return {ImportAction::None};
}
- // Check for circular inclusion of the main file.
- // We can't generate a consistent preamble with regard to the conditional
- // stack if the main file is included again as due to the preamble bounds
- // some directives (e.g. #endif of a header guard) will never be seen.
- // Since this will lead to confusing errors, avoid the inclusion.
- if (File && PreambleConditionalStack.isRecording() &&
- SourceMgr.translateFile(&File->getFileEntry()) ==
- SourceMgr.getMainFileID()) {
- Diag(FilenameTok.getLocation(),
- diag::err_pp_including_mainfile_in_preamble);
- return {ImportAction::None};
- }
-
// Should we enter the source file? Set to Skip if either the source file is
// known to have no effect beyond its effect on module visibility -- that is,
// if it's got an include guard that is already defined, set to Import if it
@@ -2063,6 +2051,18 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
Action = (SuggestedModule && !getLangOpts().CompilingPCH) ? Import : Skip;
}
+ // Check for circular inclusion of the main file.
+ // We can't generate a consistent preamble with regard to the conditional
+ // stack if the main file is included again as due to the preamble bounds
+ // some directives (e.g. #endif of a header guard) will never be seen.
+ // Since this will lead to confusing errors, avoid the inclusion.
+ if (Action == Enter && File && PreambleConditionalStack.isRecording() &&
+ SourceMgr.isMainFile(*File)) {
+ Diag(FilenameTok.getLocation(),
+ diag::err_pp_including_mainfile_in_preamble);
+ return {ImportAction::None};
+ }
+
if (Callbacks && !IsImportDecl) {
// Notify the callback object that we've seen an inclusion directive.
// FIXME: Use a different callback for a pp-import?
@@ -2093,29 +2093,90 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
if (CheckIncludePathPortability) {
StringRef Name = LookupFilename;
+ StringRef NameWithoriginalSlashes = Filename;
+#if defined(_WIN32)
+ // Skip UNC prefix if present. (tryGetRealPathName() always
+ // returns a path with the prefix skipped.)
+ bool NameWasUNC = Name.consume_front("\\\\?\\");
+ NameWithoriginalSlashes.consume_front("\\\\?\\");
+#endif
StringRef RealPathName = File->getFileEntry().tryGetRealPathName();
SmallVector<StringRef, 16> Components(llvm::sys::path::begin(Name),
llvm::sys::path::end(Name));
+#if defined(_WIN32)
+ // -Wnonportable-include-path is designed to diagnose includes using
+ // case even on systems with a case-insensitive file system.
+ // On Windows, RealPathName always starts with an upper-case drive
+ // letter for absolute paths, but Name might start with either
+ // case depending on if `cd c:\foo` or `cd C:\foo` was used in the shell.
+ // ("foo" will always have on-disk case, no matter which case was
+ // used in the cd command). To not emit this warning solely for
+ // the drive letter, whose case is dependent on if `cd` is used
+ // with upper- or lower-case drive letters, always consider the
+ // given drive letter case as correct for the purpose of this warning.
+ SmallString<128> FixedDriveRealPath;
+ if (llvm::sys::path::is_absolute(Name) &&
+ llvm::sys::path::is_absolute(RealPathName) &&
+ toLowercase(Name[0]) == toLowercase(RealPathName[0]) &&
+ isLowercase(Name[0]) != isLowercase(RealPathName[0])) {
+ assert(Components.size() >= 3 && "should have drive, backslash, name");
+ assert(Components[0].size() == 2 && "should start with drive");
+ assert(Components[0][1] == ':' && "should have colon");
+ FixedDriveRealPath = (Name.substr(0, 1) + RealPathName.substr(1)).str();
+ RealPathName = FixedDriveRealPath;
+ }
+#endif
if (trySimplifyPath(Components, RealPathName)) {
SmallString<128> Path;
Path.reserve(Name.size()+2);
Path.push_back(isAngled ? '<' : '"');
- bool isLeadingSeparator = llvm::sys::path::is_absolute(Name);
+
+ const auto IsSep = [BackslashStyle](char c) {
+ return llvm::sys::path::is_separator(c, BackslashStyle);
+ };
+
for (auto Component : Components) {
- if (isLeadingSeparator)
- isLeadingSeparator = false;
- else
+ // On POSIX, Components will contain a single '/' as first element
+ // exactly if Name is an absolute path.
+ // On Windows, it will contain "C:" followed by '\' for absolute paths.
+ // The drive letter is optional for absolute paths on Windows, but
+ // clang currently cannot process absolute paths in #include lines that
+ // don't have a drive.
+ // If the first entry in Components is a directory separator,
+ // then the code at the bottom of this loop that keeps the original
+ // directory separator style copies it. If the second entry is
+ // a directory separator (the C:\ case), then that separator already
+ // got copied when the C: was processed and we want to skip that entry.
+ if (!(Component.size() == 1 && IsSep(Component[0])))
Path.append(Component);
- // Append the separator the user used, or the close quote
- Path.push_back(
- Path.size() <= Filename.size() ? Filename[Path.size()-1] :
- (isAngled ? '>' : '"'));
+ else if (!Path.empty())
+ continue;
+
+ // Append the separator(s) the user used, or the close quote
+ if (Path.size() > NameWithoriginalSlashes.size()) {
+ Path.push_back(isAngled ? '>' : '"');
+ continue;
+ }
+ assert(IsSep(NameWithoriginalSlashes[Path.size()-1]));
+ do
+ Path.push_back(NameWithoriginalSlashes[Path.size()-1]);
+ while (Path.size() <= NameWithoriginalSlashes.size() &&
+ IsSep(NameWithoriginalSlashes[Path.size()-1]));
}
- // For user files and known standard headers, by default we issue a diagnostic.
- // For other system headers, we don't. They can be controlled separately.
- auto DiagId = (FileCharacter == SrcMgr::C_User || warnByDefaultOnWrongCase(Name)) ?
- diag::pp_nonportable_path : diag::pp_nonportable_system_path;
+
+#if defined(_WIN32)
+ // Restore UNC prefix if it was there.
+ if (NameWasUNC)
+ Path = (Path.substr(0, 1) + "\\\\?\\" + Path.substr(1)).str();
+#endif
+
+ // For user files and known standard headers, issue a diagnostic.
+ // For other system headers, don't. They can be controlled separately.
+ auto DiagId =
+ (FileCharacter == SrcMgr::C_User || warnByDefaultOnWrongCase(Name))
+ ? diag::pp_nonportable_path
+ : diag::pp_nonportable_system_path;
Diag(FilenameTok, DiagId) << Path <<
FixItHint::CreateReplacement(FilenameRange, Path);
}
@@ -2165,7 +2226,10 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
if (IncludePos.isMacroID())
IncludePos = SourceMgr.getExpansionRange(IncludePos).getEnd();
FileID FID = SourceMgr.createFileID(*File, IncludePos, FileCharacter);
- assert(FID.isValid() && "Expected valid file ID");
+ if (!FID.isValid()) {
+ TheModuleLoader.HadFatalFailure = true;
+ return ImportAction::Failure;
+ }
// If all is good, enter the new file!
if (EnterSourceFile(FID, CurDir, FilenameTok.getLocation()))
@@ -2792,7 +2856,9 @@ void Preprocessor::HandleDefineDirective(
// warn-because-unused-macro set. If it gets used it will be removed from set.
if (getSourceManager().isInMainFile(MI->getDefinitionLoc()) &&
!Diags->isIgnored(diag::pp_macro_not_used, MI->getDefinitionLoc()) &&
- !MacroExpansionInDirectivesOverride) {
+ !MacroExpansionInDirectivesOverride &&
+ getSourceManager().getFileID(MI->getDefinitionLoc()) !=
+ getPredefinesFileID()) {
MI->setIsWarnIfUnused(true);
WarnUnusedMacroLocs.insert(MI->getDefinitionLoc());
}
diff --git a/clang/lib/Lex/PPExpressions.cpp b/clang/lib/Lex/PPExpressions.cpp
index e5ec2b99f507..8c120c13d7d2 100644
--- a/clang/lib/Lex/PPExpressions.cpp
+++ b/clang/lib/Lex/PPExpressions.cpp
@@ -15,7 +15,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
@@ -26,9 +25,12 @@
#include "clang/Lex/LiteralSupport.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/PPCallbacks.h"
+#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/Token.h"
#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SaveAndRestore.h"
@@ -251,8 +253,24 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
// If this identifier isn't 'defined' or one of the special
// preprocessor keywords and it wasn't macro expanded, it turns
// into a simple 0
- if (ValueLive)
+ if (ValueLive) {
PP.Diag(PeekTok, diag::warn_pp_undef_identifier) << II;
+
+ const DiagnosticsEngine &DiagEngine = PP.getDiagnostics();
+ // If 'Wundef' is enabled, do not emit 'undef-prefix' diagnostics.
+ if (DiagEngine.isIgnored(diag::warn_pp_undef_identifier,
+ PeekTok.getLocation())) {
+ const std::vector<std::string> UndefPrefixes =
+ DiagEngine.getDiagnosticOptions().UndefPrefixes;
+ const StringRef IdentifierName = II->getName();
+ if (llvm::any_of(UndefPrefixes,
+ [&IdentifierName](const std::string &Prefix) {
+ return IdentifierName.startswith(Prefix);
+ }))
+ PP.Diag(PeekTok, diag::warn_pp_undef_prefix)
+ << AddFlagValue{llvm::join(UndefPrefixes, ",")} << II;
+ }
+ }
Result.Val = 0;
Result.Val.setIsUnsigned(false); // "0" is signed intmax_t 0.
Result.setIdentifier(II);
@@ -277,7 +295,9 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
if (NumberInvalid)
return true; // a diagnostic was already reported
- NumericLiteralParser Literal(Spelling, PeekTok.getLocation(), PP);
+ NumericLiteralParser Literal(Spelling, PeekTok.getLocation(),
+ PP.getSourceManager(), PP.getLangOpts(),
+ PP.getTargetInfo(), PP.getDiagnostics());
if (Literal.hadError)
return true; // a diagnostic was already reported.
diff --git a/clang/lib/Lex/PPLexerChange.cpp b/clang/lib/Lex/PPLexerChange.cpp
index 802172693960..b7c7e2693ef1 100644
--- a/clang/lib/Lex/PPLexerChange.cpp
+++ b/clang/lib/Lex/PPLexerChange.cpp
@@ -24,8 +24,6 @@
#include "llvm/Support/Path.h"
using namespace clang;
-PPCallbacks::~PPCallbacks() {}
-
//===----------------------------------------------------------------------===//
// Miscellaneous Methods.
//===----------------------------------------------------------------------===//
@@ -81,7 +79,7 @@ bool Preprocessor::EnterSourceFile(FileID FID, const DirectoryLookup *CurDir,
if (Invalid) {
SourceLocation FileStart = SourceMgr.getLocForStartOfFile(FID);
Diag(Loc, diag::err_pp_error_opening_file)
- << std::string(SourceMgr.getBufferName(FileStart)) << "";
+ << std::string(SourceMgr.getBufferName(FileStart)) << "";
return true;
}
@@ -417,7 +415,10 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
}
if (!isEndOfMacro && CurPPLexer &&
- SourceMgr.getIncludeLoc(CurPPLexer->getFileID()).isValid()) {
+ (SourceMgr.getIncludeLoc(CurPPLexer->getFileID()).isValid() ||
+ // Predefines file doesn't have a valid include location.
+ (PredefinesFileID.isValid() &&
+ CurPPLexer->getFileID() == PredefinesFileID))) {
// Notify SourceManager to record the number of FileIDs that were created
// during lexing of the #include'd file.
unsigned NumFIDs =
diff --git a/clang/lib/Lex/PPMacroExpansion.cpp b/clang/lib/Lex/PPMacroExpansion.cpp
index cf8bb2fbab99..4908594d6081 100644
--- a/clang/lib/Lex/PPMacroExpansion.cpp
+++ b/clang/lib/Lex/PPMacroExpansion.cpp
@@ -1456,10 +1456,8 @@ static void remapMacroPath(
const std::map<std::string, std::string, std::greater<std::string>>
&MacroPrefixMap) {
for (const auto &Entry : MacroPrefixMap)
- if (Path.startswith(Entry.first)) {
- Path = (Twine(Entry.second) + Path.substr(Entry.first.size())).str();
+ if (llvm::sys::path::replace_path_prefix(Path, Entry.first, Entry.second))
break;
- }
}
/// ExpandBuiltinMacro - If an identifier token is read that is to be expanded
@@ -1543,8 +1541,8 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
} else {
FN += PLoc.getFilename();
}
- Lexer::Stringify(FN);
remapMacroPath(FN, PPOpts->MacroPrefixMap);
+ Lexer::Stringify(FN);
OS << '"' << FN << '"';
}
Tok.setKind(tok::string_literal);
diff --git a/clang/lib/Lex/Pragma.cpp b/clang/lib/Lex/Pragma.cpp
index e4636265a72b..b512a547de7d 100644
--- a/clang/lib/Lex/Pragma.cpp
+++ b/clang/lib/Lex/Pragma.cpp
@@ -30,6 +30,7 @@
#include "clang/Lex/PPCallbacks.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorLexer.h"
+#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Lex/Token.h"
#include "clang/Lex/TokenLexer.h"
#include "llvm/ADT/ArrayRef.h"
@@ -39,9 +40,9 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Timer.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
@@ -70,31 +71,36 @@ void EmptyPragmaHandler::HandlePragma(Preprocessor &PP,
// PragmaNamespace Implementation.
//===----------------------------------------------------------------------===//
-PragmaNamespace::~PragmaNamespace() {
- llvm::DeleteContainerSeconds(Handlers);
-}
-
/// FindHandler - Check to see if there is already a handler for the
/// specified name. If not, return the handler for the null identifier if it
/// exists, otherwise return null. If IgnoreNull is true (the default) then
/// the null handler isn't returned on failure to match.
PragmaHandler *PragmaNamespace::FindHandler(StringRef Name,
bool IgnoreNull) const {
- if (PragmaHandler *Handler = Handlers.lookup(Name))
- return Handler;
- return IgnoreNull ? nullptr : Handlers.lookup(StringRef());
+ auto I = Handlers.find(Name);
+ if (I != Handlers.end())
+ return I->getValue().get();
+ if (IgnoreNull)
+ return nullptr;
+ I = Handlers.find(StringRef());
+ if (I != Handlers.end())
+ return I->getValue().get();
+ return nullptr;
}
void PragmaNamespace::AddPragma(PragmaHandler *Handler) {
- assert(!Handlers.lookup(Handler->getName()) &&
+ assert(!Handlers.count(Handler->getName()) &&
"A handler with this name is already registered in this namespace");
- Handlers[Handler->getName()] = Handler;
+ Handlers[Handler->getName()].reset(Handler);
}
void PragmaNamespace::RemovePragmaHandler(PragmaHandler *Handler) {
- assert(Handlers.lookup(Handler->getName()) &&
+ auto I = Handlers.find(Handler->getName());
+ assert(I != Handlers.end() &&
"Handler not registered in this namespace");
- Handlers.erase(Handler->getName());
+ // Release ownership back to the caller.
+ I->getValue().release();
+ Handlers.erase(I);
}
void PragmaNamespace::HandlePragma(Preprocessor &PP,
@@ -1035,15 +1041,21 @@ struct PragmaDebugHandler : public PragmaHandler {
IdentifierInfo *II = Tok.getIdentifierInfo();
if (II->isStr("assert")) {
- llvm_unreachable("This is an assertion!");
+ if (!PP.getPreprocessorOpts().DisablePragmaDebugCrash)
+ llvm_unreachable("This is an assertion!");
} else if (II->isStr("crash")) {
- LLVM_BUILTIN_TRAP;
+ llvm::Timer T("crash", "pragma crash");
+ llvm::TimeRegion R(&T);
+ if (!PP.getPreprocessorOpts().DisablePragmaDebugCrash)
+ LLVM_BUILTIN_TRAP;
} else if (II->isStr("parser_crash")) {
- Token Crasher;
- Crasher.startToken();
- Crasher.setKind(tok::annot_pragma_parser_crash);
- Crasher.setAnnotationRange(SourceRange(Tok.getLocation()));
- PP.EnterToken(Crasher, /*IsReinject*/false);
+ if (!PP.getPreprocessorOpts().DisablePragmaDebugCrash) {
+ Token Crasher;
+ Crasher.startToken();
+ Crasher.setKind(tok::annot_pragma_parser_crash);
+ Crasher.setAnnotationRange(SourceRange(Tok.getLocation()));
+ PP.EnterToken(Crasher, /*IsReinject*/ false);
+ }
} else if (II->isStr("dump")) {
Token Identifier;
PP.LexUnexpandedToken(Identifier);
@@ -1075,9 +1087,11 @@ struct PragmaDebugHandler : public PragmaHandler {
<< II->getName();
}
} else if (II->isStr("llvm_fatal_error")) {
- llvm::report_fatal_error("#pragma clang __debug llvm_fatal_error");
+ if (!PP.getPreprocessorOpts().DisablePragmaDebugCrash)
+ llvm::report_fatal_error("#pragma clang __debug llvm_fatal_error");
} else if (II->isStr("llvm_unreachable")) {
- llvm_unreachable("#pragma clang __debug llvm_unreachable");
+ if (!PP.getPreprocessorOpts().DisablePragmaDebugCrash)
+ llvm_unreachable("#pragma clang __debug llvm_unreachable");
} else if (II->isStr("macro")) {
Token MacroName;
PP.LexUnexpandedToken(MacroName);
@@ -1104,11 +1118,8 @@ struct PragmaDebugHandler : public PragmaHandler {
}
M->dump();
} else if (II->isStr("overflow_stack")) {
- DebugOverflowStack();
- } else if (II->isStr("handle_crash")) {
- llvm::CrashRecoveryContext *CRC =llvm::CrashRecoveryContext::GetCurrent();
- if (CRC)
- CRC->HandleCrash();
+ if (!PP.getPreprocessorOpts().DisablePragmaDebugCrash)
+ DebugOverflowStack();
} else if (II->isStr("captured")) {
HandleCaptured(PP);
} else {
@@ -1896,10 +1907,9 @@ void Preprocessor::RegisterBuiltinPragmas() {
}
// Pragmas added by plugins
- for (PragmaHandlerRegistry::iterator it = PragmaHandlerRegistry::begin(),
- ie = PragmaHandlerRegistry::end();
- it != ie; ++it) {
- AddPragmaHandler(it->instantiate().release());
+ for (const PragmaHandlerRegistry::entry &handler :
+ PragmaHandlerRegistry::entries()) {
+ AddPragmaHandler(handler.instantiate().release());
}
}
diff --git a/clang/lib/Lex/Preprocessor.cpp b/clang/lib/Lex/Preprocessor.cpp
index 0e9be3923630..160e2b6ed884 100644
--- a/clang/lib/Lex/Preprocessor.cpp
+++ b/clang/lib/Lex/Preprocessor.cpp
@@ -119,7 +119,7 @@ Preprocessor::Preprocessor(std::shared_ptr<PreprocessorOptions> PPOpts,
// a macro. They get unpoisoned where it is allowed.
(Ident__VA_ARGS__ = getIdentifierInfo("__VA_ARGS__"))->setIsPoisoned();
SetPoisonReason(Ident__VA_ARGS__,diag::ext_pp_bad_vaargs_use);
- if (getLangOpts().CPlusPlus2a) {
+ if (getLangOpts().CPlusPlus20) {
(Ident__VA_OPT__ = getIdentifierInfo("__VA_OPT__"))->setIsPoisoned();
SetPoisonReason(Ident__VA_OPT__,diag::ext_pp_bad_vaopt_use);
} else {
@@ -166,6 +166,8 @@ Preprocessor::Preprocessor(std::shared_ptr<PreprocessorOptions> PPOpts,
this->PPOpts->ExcludedConditionalDirectiveSkipMappings;
if (ExcludedConditionalDirectiveSkipMappings)
ExcludedConditionalDirectiveSkipMappings->clear();
+
+ MaxTokens = LangOpts.MaxTokens;
}
Preprocessor::~Preprocessor() {
@@ -769,9 +771,13 @@ static diag::kind getFutureCompatDiagKind(const IdentifierInfo &II,
return llvm::StringSwitch<diag::kind>(II.getName())
#define CXX11_KEYWORD(NAME, FLAGS) \
.Case(#NAME, diag::warn_cxx11_keyword)
-#define CXX2A_KEYWORD(NAME, FLAGS) \
- .Case(#NAME, diag::warn_cxx2a_keyword)
+#define CXX20_KEYWORD(NAME, FLAGS) \
+ .Case(#NAME, diag::warn_cxx20_keyword)
#include "clang/Basic/TokenKinds.def"
+ // char8_t is not modeled as a CXX20_KEYWORD because it's not
+ // unconditionally enabled in C++20 mode. (It can be disabled
+ // by -fno-char8_t.)
+ .Case("char8_t", diag::warn_cxx20_keyword)
;
llvm_unreachable(
@@ -906,6 +912,9 @@ void Preprocessor::Lex(Token &Result) {
}
} while (!ReturnedToken);
+ if (Result.is(tok::unknown) && TheModuleLoader.HadFatalFailure)
+ return;
+
if (Result.is(tok::code_completion) && Result.getIdentifierInfo()) {
// Remember the identifier before code completion token.
setCodeCompletionIdentifierInfo(Result.getIdentifierInfo());
@@ -959,8 +968,12 @@ void Preprocessor::Lex(Token &Result) {
LastTokenWasAt = Result.is(tok::at);
--LexLevel;
- if (OnToken && LexLevel == 0 && !Result.getFlag(Token::IsReinjected))
- OnToken(Result);
+
+ if (LexLevel == 0 && !Result.getFlag(Token::IsReinjected)) {
+ ++TokenCount;
+ if (OnToken)
+ OnToken(Result);
+ }
}
/// Lex a header-name token (including one formed from header-name-tokens if
@@ -1200,6 +1213,13 @@ bool Preprocessor::LexAfterModuleImport(Token &Result) {
Suffix[0].setAnnotationValue(Action.ModuleForHeader);
// FIXME: Call the moduleImport callback?
break;
+ case ImportAction::Failure:
+ assert(TheModuleLoader.HadFatalFailure &&
+ "This should be an early exit only to a fatal error");
+ Result.setKind(tok::eof);
+ CurLexer->cutOffLexing();
+ EnterTokens(Suffix);
+ return true;
}
EnterTokens(Suffix);
@@ -1339,7 +1359,7 @@ bool Preprocessor::FinishLexStringLiteral(Token &Result, std::string &String,
return false;
}
- String = Literal.GetString();
+ String = std::string(Literal.GetString());
return true;
}
@@ -1350,7 +1370,9 @@ bool Preprocessor::parseSimpleIntegerLiteral(Token &Tok, uint64_t &Value) {
StringRef Spelling = getSpelling(Tok, IntegerBuffer, &NumberInvalid);
if (NumberInvalid)
return false;
- NumericLiteralParser Literal(Spelling, Tok.getLocation(), *this);
+ NumericLiteralParser Literal(Spelling, Tok.getLocation(), getSourceManager(),
+ getLangOpts(), getTargetInfo(),
+ getDiagnostics());
if (Literal.hadError || !Literal.isIntegerLiteral() || Literal.hasUDSuffix())
return false;
llvm::APInt APVal(64, 0);
diff --git a/clang/lib/Lex/TokenConcatenation.cpp b/clang/lib/Lex/TokenConcatenation.cpp
index e626cfcc927f..f6b005d9e19c 100644
--- a/clang/lib/Lex/TokenConcatenation.cpp
+++ b/clang/lib/Lex/TokenConcatenation.cpp
@@ -103,7 +103,7 @@ TokenConcatenation::TokenConcatenation(const Preprocessor &pp) : PP(pp) {
TokenInfo[tok::utf8_char_constant] |= aci_custom;
// These tokens have custom code in C++2a mode.
- if (PP.getLangOpts().CPlusPlus2a)
+ if (PP.getLangOpts().CPlusPlus20)
TokenInfo[tok::lessequal ] |= aci_custom_firstchar;
// These tokens change behavior if followed by an '='.
@@ -292,6 +292,6 @@ bool TokenConcatenation::AvoidConcat(const Token &PrevPrevTok,
case tok::arrow: // ->*
return PP.getLangOpts().CPlusPlus && FirstChar == '*';
case tok::lessequal: // <=> (C++2a)
- return PP.getLangOpts().CPlusPlus2a && FirstChar == '>';
+ return PP.getLangOpts().CPlusPlus20 && FirstChar == '>';
}
}
diff --git a/clang/lib/Parse/ParseCXXInlineMethods.cpp b/clang/lib/Parse/ParseCXXInlineMethods.cpp
index f8b5fec43800..d05332b5ac5a 100644
--- a/clang/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/clang/lib/Parse/ParseCXXInlineMethods.cpp
@@ -133,7 +133,6 @@ NamedDecl *Parser::ParseCXXInlineMethodDef(
LexedMethod* LM = new LexedMethod(this, FnD);
getCurrentClass().LateParsedDeclarations.push_back(LM);
- LM->TemplateScope = getCurScope()->isTemplateParamScope();
CachedTokens &Toks = LM->Toks;
tok::TokenKind kind = Tok.getKind();
@@ -223,6 +222,7 @@ Parser::LateParsedDeclaration::~LateParsedDeclaration() {}
void Parser::LateParsedDeclaration::ParseLexedMethodDeclarations() {}
void Parser::LateParsedDeclaration::ParseLexedMemberInitializers() {}
void Parser::LateParsedDeclaration::ParseLexedMethodDefs() {}
+void Parser::LateParsedDeclaration::ParseLexedAttributes() {}
void Parser::LateParsedDeclaration::ParseLexedPragmas() {}
Parser::LateParsedClass::LateParsedClass(Parser *P, ParsingClass *C)
@@ -244,6 +244,10 @@ void Parser::LateParsedClass::ParseLexedMethodDefs() {
Self->ParseLexedMethodDefs(*Class);
}
+void Parser::LateParsedClass::ParseLexedAttributes() {
+ Self->ParseLexedAttributes(*Class);
+}
+
void Parser::LateParsedClass::ParseLexedPragmas() {
Self->ParseLexedPragmas(*Class);
}
@@ -260,57 +264,79 @@ void Parser::LateParsedMemberInitializer::ParseLexedMemberInitializers() {
Self->ParseLexedMemberInitializer(*this);
}
+void Parser::LateParsedAttribute::ParseLexedAttributes() {
+ Self->ParseLexedAttribute(*this, true, false);
+}
+
void Parser::LateParsedPragma::ParseLexedPragmas() {
Self->ParseLexedPragma(*this);
}
+/// Utility to re-enter a possibly-templated scope while parsing its
+/// late-parsed components.
+struct Parser::ReenterTemplateScopeRAII {
+ Parser &P;
+ MultiParseScope Scopes;
+ TemplateParameterDepthRAII CurTemplateDepthTracker;
+
+ ReenterTemplateScopeRAII(Parser &P, Decl *MaybeTemplated, bool Enter = true)
+ : P(P), Scopes(P), CurTemplateDepthTracker(P.TemplateParameterDepth) {
+ if (Enter) {
+ CurTemplateDepthTracker.addDepth(
+ P.ReenterTemplateScopes(Scopes, MaybeTemplated));
+ }
+ }
+};
+
+/// Utility to re-enter a class scope while parsing its late-parsed components.
+struct Parser::ReenterClassScopeRAII : ReenterTemplateScopeRAII {
+ ParsingClass &Class;
+
+ ReenterClassScopeRAII(Parser &P, ParsingClass &Class)
+ : ReenterTemplateScopeRAII(P, Class.TagOrTemplate,
+ /*Enter=*/!Class.TopLevelClass),
+ Class(Class) {
+ // If this is the top-level class, we're still within its scope.
+ if (Class.TopLevelClass)
+ return;
+
+ // Re-enter the class scope itself.
+ Scopes.Enter(Scope::ClassScope|Scope::DeclScope);
+ P.Actions.ActOnStartDelayedMemberDeclarations(P.getCurScope(),
+ Class.TagOrTemplate);
+ }
+ ~ReenterClassScopeRAII() {
+ if (Class.TopLevelClass)
+ return;
+
+ P.Actions.ActOnFinishDelayedMemberDeclarations(P.getCurScope(),
+ Class.TagOrTemplate);
+ }
+};
+
/// ParseLexedMethodDeclarations - We finished parsing the member
/// specification of a top (non-nested) C++ class. Now go over the
/// stack of method declarations with some parts for which parsing was
/// delayed (such as default arguments) and parse them.
void Parser::ParseLexedMethodDeclarations(ParsingClass &Class) {
- bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope;
- ParseScope ClassTemplateScope(this, Scope::TemplateParamScope,
- HasTemplateScope);
- TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
- if (HasTemplateScope) {
- Actions.ActOnReenterTemplateScope(getCurScope(), Class.TagOrTemplate);
- ++CurTemplateDepthTracker;
- }
-
- // The current scope is still active if we're the top-level class.
- // Otherwise we'll need to push and enter a new scope.
- bool HasClassScope = !Class.TopLevelClass;
- ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope,
- HasClassScope);
- if (HasClassScope)
- Actions.ActOnStartDelayedMemberDeclarations(getCurScope(),
- Class.TagOrTemplate);
-
- for (size_t i = 0; i < Class.LateParsedDeclarations.size(); ++i) {
- Class.LateParsedDeclarations[i]->ParseLexedMethodDeclarations();
- }
+ ReenterClassScopeRAII InClassScope(*this, Class);
- if (HasClassScope)
- Actions.ActOnFinishDelayedMemberDeclarations(getCurScope(),
- Class.TagOrTemplate);
+ for (LateParsedDeclaration *LateD : Class.LateParsedDeclarations)
+ LateD->ParseLexedMethodDeclarations();
}
void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
// If this is a member template, introduce the template parameter scope.
- ParseScope TemplateScope(this, Scope::TemplateParamScope, LM.TemplateScope);
- TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
- if (LM.TemplateScope) {
- Actions.ActOnReenterTemplateScope(getCurScope(), LM.Method);
- ++CurTemplateDepthTracker;
- }
+ ReenterTemplateScopeRAII InFunctionTemplateScope(*this, LM.Method);
+
// Start the delayed C++ method declaration
Actions.ActOnStartDelayedCXXMethodDeclaration(getCurScope(), LM.Method);
// Introduce the parameters into scope and parse their default
// arguments.
- ParseScope PrototypeScope(this, Scope::FunctionPrototypeScope |
- Scope::FunctionDeclarationScope | Scope::DeclScope);
+ InFunctionTemplateScope.Scopes.Enter(Scope::FunctionPrototypeScope |
+ Scope::FunctionDeclarationScope |
+ Scope::DeclScope);
for (unsigned I = 0, N = LM.DefaultArgs.size(); I != N; ++I) {
auto Param = cast<ParmVarDecl>(LM.DefaultArgs[I].Param);
// Introduce the parameter into scope.
@@ -464,7 +490,7 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
LM.ExceptionSpecTokens = nullptr;
}
- PrototypeScope.Exit();
+ InFunctionTemplateScope.Scopes.Exit();
// Finish the delayed C++ method declaration.
Actions.ActOnFinishDelayedCXXMethodDeclaration(getCurScope(), LM.Method);
@@ -474,30 +500,15 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
/// (non-nested) C++ class. Now go over the stack of lexed methods that were
/// collected during its parsing and parse them all.
void Parser::ParseLexedMethodDefs(ParsingClass &Class) {
- bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope;
- ParseScope ClassTemplateScope(this, Scope::TemplateParamScope, HasTemplateScope);
- TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
- if (HasTemplateScope) {
- Actions.ActOnReenterTemplateScope(getCurScope(), Class.TagOrTemplate);
- ++CurTemplateDepthTracker;
- }
- bool HasClassScope = !Class.TopLevelClass;
- ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope,
- HasClassScope);
+ ReenterClassScopeRAII InClassScope(*this, Class);
- for (size_t i = 0; i < Class.LateParsedDeclarations.size(); ++i) {
- Class.LateParsedDeclarations[i]->ParseLexedMethodDefs();
- }
+ for (LateParsedDeclaration *D : Class.LateParsedDeclarations)
+ D->ParseLexedMethodDefs();
}
void Parser::ParseLexedMethodDef(LexedMethod &LM) {
// If this is a member template, introduce the template parameter scope.
- ParseScope TemplateScope(this, Scope::TemplateParamScope, LM.TemplateScope);
- TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
- if (LM.TemplateScope) {
- Actions.ActOnReenterTemplateScope(getCurScope(), LM.D);
- ++CurTemplateDepthTracker;
- }
+ ReenterTemplateScopeRAII InFunctionTemplateScope(*this, LM.D);
ParenBraceBracketBalancer BalancerRAIIObj(*this);
@@ -578,23 +589,7 @@ void Parser::ParseLexedMethodDef(LexedMethod &LM) {
/// of a top (non-nested) C++ class. Now go over the stack of lexed data member
/// initializers that were collected during its parsing and parse them all.
void Parser::ParseLexedMemberInitializers(ParsingClass &Class) {
- bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope;
- ParseScope ClassTemplateScope(this, Scope::TemplateParamScope,
- HasTemplateScope);
- TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
- if (HasTemplateScope) {
- Actions.ActOnReenterTemplateScope(getCurScope(), Class.TagOrTemplate);
- ++CurTemplateDepthTracker;
- }
- // Set or update the scope flags.
- bool AlreadyHasClassScope = Class.TopLevelClass;
- unsigned ScopeFlags = Scope::ClassScope|Scope::DeclScope;
- ParseScope ClassScope(this, ScopeFlags, !AlreadyHasClassScope);
- ParseScopeFlags ClassScopeFlags(this, ScopeFlags, AlreadyHasClassScope);
-
- if (!AlreadyHasClassScope)
- Actions.ActOnStartDelayedMemberDeclarations(getCurScope(),
- Class.TagOrTemplate);
+ ReenterClassScopeRAII InClassScope(*this, Class);
if (!Class.LateParsedDeclarations.empty()) {
// C++11 [expr.prim.general]p4:
@@ -602,18 +597,14 @@ void Parser::ParseLexedMemberInitializers(ParsingClass &Class) {
// (9.2) of a class X, the expression this is a prvalue of type "pointer
// to X" within the optional brace-or-equal-initializer. It shall not
// appear elsewhere in the member-declarator.
+ // FIXME: This should be done in ParseLexedMemberInitializer, not here.
Sema::CXXThisScopeRAII ThisScope(Actions, Class.TagOrTemplate,
Qualifiers());
- for (size_t i = 0; i < Class.LateParsedDeclarations.size(); ++i) {
- Class.LateParsedDeclarations[i]->ParseLexedMemberInitializers();
- }
+ for (LateParsedDeclaration *D : Class.LateParsedDeclarations)
+ D->ParseLexedMemberInitializers();
}
- if (!AlreadyHasClassScope)
- Actions.ActOnFinishDelayedMemberDeclarations(getCurScope(),
- Class.TagOrTemplate);
-
Actions.ActOnFinishDelayedMemberInitializers(Class.TagOrTemplate);
}
@@ -660,21 +651,115 @@ void Parser::ParseLexedMemberInitializer(LateParsedMemberInitializer &MI) {
ConsumeAnyToken();
}
-void Parser::ParseLexedPragmas(ParsingClass &Class) {
- bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope;
- ParseScope ClassTemplateScope(this, Scope::TemplateParamScope,
- HasTemplateScope);
- TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
- if (HasTemplateScope) {
- Actions.ActOnReenterTemplateScope(getCurScope(), Class.TagOrTemplate);
- ++CurTemplateDepthTracker;
+/// Wrapper class which calls ParseLexedAttribute, after setting up the
+/// scope appropriately.
+void Parser::ParseLexedAttributes(ParsingClass &Class) {
+ ReenterClassScopeRAII InClassScope(*this, Class);
+
+ for (LateParsedDeclaration *LateD : Class.LateParsedDeclarations)
+ LateD->ParseLexedAttributes();
+}
+
+/// Parse all attributes in LAs, and attach them to Decl D.
+void Parser::ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
+ bool EnterScope, bool OnDefinition) {
+ assert(LAs.parseSoon() &&
+ "Attribute list should be marked for immediate parsing.");
+ for (unsigned i = 0, ni = LAs.size(); i < ni; ++i) {
+ if (D)
+ LAs[i]->addDecl(D);
+ ParseLexedAttribute(*LAs[i], EnterScope, OnDefinition);
+ delete LAs[i];
+ }
+ LAs.clear();
+}
+
+/// Finish parsing an attribute for which parsing was delayed.
+/// This will be called at the end of parsing a class declaration
+/// for each LateParsedAttribute. We consume the saved tokens and
+/// create an attribute with the arguments filled in. We add this
+/// to the Attribute list for the decl.
+void Parser::ParseLexedAttribute(LateParsedAttribute &LA,
+ bool EnterScope, bool OnDefinition) {
+ // Create a fake EOF so that attribute parsing won't go off the end of the
+ // attribute.
+ Token AttrEnd;
+ AttrEnd.startToken();
+ AttrEnd.setKind(tok::eof);
+ AttrEnd.setLocation(Tok.getLocation());
+ AttrEnd.setEofData(LA.Toks.data());
+ LA.Toks.push_back(AttrEnd);
+
+ // Append the current token at the end of the new token stream so that it
+ // doesn't get lost.
+ LA.Toks.push_back(Tok);
+ PP.EnterTokenStream(LA.Toks, true, /*IsReinject=*/true);
+ // Consume the previously pushed token.
+ ConsumeAnyToken(/*ConsumeCodeCompletionTok=*/true);
+
+ ParsedAttributes Attrs(AttrFactory);
+ SourceLocation endLoc;
+
+ if (LA.Decls.size() > 0) {
+ Decl *D = LA.Decls[0];
+ NamedDecl *ND = dyn_cast<NamedDecl>(D);
+ RecordDecl *RD = dyn_cast_or_null<RecordDecl>(D->getDeclContext());
+
+ // Allow 'this' within late-parsed attributes.
+ Sema::CXXThisScopeRAII ThisScope(Actions, RD, Qualifiers(),
+ ND && ND->isCXXInstanceMember());
+
+ if (LA.Decls.size() == 1) {
+ // If the Decl is templatized, add template parameters to scope.
+ ReenterTemplateScopeRAII InDeclScope(*this, D, EnterScope);
+
+ // If the Decl is on a function, add function parameters to the scope.
+ bool HasFunScope = EnterScope && D->isFunctionOrFunctionTemplate();
+ if (HasFunScope) {
+ InDeclScope.Scopes.Enter(Scope::FnScope | Scope::DeclScope |
+ Scope::CompoundStmtScope);
+ Actions.ActOnReenterFunctionContext(Actions.CurScope, D);
+ }
+
+ ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc,
+ nullptr, SourceLocation(), ParsedAttr::AS_GNU,
+ nullptr);
+
+ if (HasFunScope)
+ Actions.ActOnExitFunctionContext();
+ } else {
+ // If there are multiple decls, then the decl cannot be within the
+ // function scope.
+ ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc,
+ nullptr, SourceLocation(), ParsedAttr::AS_GNU,
+ nullptr);
+ }
+ } else {
+ Diag(Tok, diag::warn_attribute_no_decl) << LA.AttrName.getName();
}
- bool HasClassScope = !Class.TopLevelClass;
- ParseScope ClassScope(this, Scope::ClassScope | Scope::DeclScope,
- HasClassScope);
- for (LateParsedDeclaration *LPD : Class.LateParsedDeclarations)
- LPD->ParseLexedPragmas();
+ if (OnDefinition && !Attrs.empty() && !Attrs.begin()->isCXX11Attribute() &&
+ Attrs.begin()->isKnownToGCC())
+ Diag(Tok, diag::warn_attribute_on_function_definition)
+ << &LA.AttrName;
+
+ for (unsigned i = 0, ni = LA.Decls.size(); i < ni; ++i)
+ Actions.ActOnFinishDelayedAttribute(getCurScope(), LA.Decls[i], Attrs);
+
+ // Due to a parsing error, we either went over the cached tokens or
+ // there are still cached tokens left, so we skip the leftover tokens.
+ while (Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+
+ if (Tok.is(tok::eof) && Tok.getEofData() == AttrEnd.getEofData())
+ ConsumeAnyToken();
+}
+
+void Parser::ParseLexedPragmas(ParsingClass &Class) {
+ ReenterClassScopeRAII InClassScope(*this, Class);
+
+ for (LateParsedDeclaration *D : Class.LateParsedDeclarations)
+ D->ParseLexedPragmas();
}
void Parser::ParseLexedPragma(LateParsedPragma &LP) {
@@ -1112,17 +1197,14 @@ bool Parser::ConsumeAndStoreInitializer(CachedTokens &Toks,
break;
}
+ // Put the token stream back and undo any annotations we performed
+ // after the comma. They may reflect a different parse than the one
+ // we will actually perform at the end of the class.
+ PA.RevertAnnotations();
+
// If what follows could be a declaration, it is a declaration.
- if (Result != TPResult::False && Result != TPResult::Error) {
- PA.Revert();
+ if (Result != TPResult::False && Result != TPResult::Error)
return true;
- }
-
- // In the uncommon case that we decide the following tokens are part
- // of a template argument, revert any annotations we've performed in
- // those tokens. We're not going to look them up until we've parsed
- // the rest of the class, and that might add more declarations.
- PA.RevertAnnotations();
}
// Keep going. We know we're inside a template argument list now.
diff --git a/clang/lib/Parse/ParseDecl.cpp b/clang/lib/Parse/ParseDecl.cpp
index 69a3ed9cbad7..c87d240a8206 100644
--- a/clang/lib/Parse/ParseDecl.cpp
+++ b/clang/lib/Parse/ParseDecl.cpp
@@ -1409,154 +1409,6 @@ void Parser::ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
Syntax);
}
-// Late Parsed Attributes:
-// See other examples of late parsing in lib/Parse/ParseCXXInlineMethods
-
-void Parser::LateParsedDeclaration::ParseLexedAttributes() {}
-
-void Parser::LateParsedClass::ParseLexedAttributes() {
- Self->ParseLexedAttributes(*Class);
-}
-
-void Parser::LateParsedAttribute::ParseLexedAttributes() {
- Self->ParseLexedAttribute(*this, true, false);
-}
-
-/// Wrapper class which calls ParseLexedAttribute, after setting up the
-/// scope appropriately.
-void Parser::ParseLexedAttributes(ParsingClass &Class) {
- // Deal with templates
- // FIXME: Test cases to make sure this does the right thing for templates.
- bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope;
- ParseScope ClassTemplateScope(this, Scope::TemplateParamScope,
- HasTemplateScope);
- if (HasTemplateScope)
- Actions.ActOnReenterTemplateScope(getCurScope(), Class.TagOrTemplate);
-
- // Set or update the scope flags.
- bool AlreadyHasClassScope = Class.TopLevelClass;
- unsigned ScopeFlags = Scope::ClassScope|Scope::DeclScope;
- ParseScope ClassScope(this, ScopeFlags, !AlreadyHasClassScope);
- ParseScopeFlags ClassScopeFlags(this, ScopeFlags, AlreadyHasClassScope);
-
- // Enter the scope of nested classes
- if (!AlreadyHasClassScope)
- Actions.ActOnStartDelayedMemberDeclarations(getCurScope(),
- Class.TagOrTemplate);
- if (!Class.LateParsedDeclarations.empty()) {
- for (unsigned i = 0, ni = Class.LateParsedDeclarations.size(); i < ni; ++i){
- Class.LateParsedDeclarations[i]->ParseLexedAttributes();
- }
- }
-
- if (!AlreadyHasClassScope)
- Actions.ActOnFinishDelayedMemberDeclarations(getCurScope(),
- Class.TagOrTemplate);
-}
-
-/// Parse all attributes in LAs, and attach them to Decl D.
-void Parser::ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
- bool EnterScope, bool OnDefinition) {
- assert(LAs.parseSoon() &&
- "Attribute list should be marked for immediate parsing.");
- for (unsigned i = 0, ni = LAs.size(); i < ni; ++i) {
- if (D)
- LAs[i]->addDecl(D);
- ParseLexedAttribute(*LAs[i], EnterScope, OnDefinition);
- delete LAs[i];
- }
- LAs.clear();
-}
-
-/// Finish parsing an attribute for which parsing was delayed.
-/// This will be called at the end of parsing a class declaration
-/// for each LateParsedAttribute. We consume the saved tokens and
-/// create an attribute with the arguments filled in. We add this
-/// to the Attribute list for the decl.
-void Parser::ParseLexedAttribute(LateParsedAttribute &LA,
- bool EnterScope, bool OnDefinition) {
- // Create a fake EOF so that attribute parsing won't go off the end of the
- // attribute.
- Token AttrEnd;
- AttrEnd.startToken();
- AttrEnd.setKind(tok::eof);
- AttrEnd.setLocation(Tok.getLocation());
- AttrEnd.setEofData(LA.Toks.data());
- LA.Toks.push_back(AttrEnd);
-
- // Append the current token at the end of the new token stream so that it
- // doesn't get lost.
- LA.Toks.push_back(Tok);
- PP.EnterTokenStream(LA.Toks, true, /*IsReinject=*/true);
- // Consume the previously pushed token.
- ConsumeAnyToken(/*ConsumeCodeCompletionTok=*/true);
-
- ParsedAttributes Attrs(AttrFactory);
- SourceLocation endLoc;
-
- if (LA.Decls.size() > 0) {
- Decl *D = LA.Decls[0];
- NamedDecl *ND = dyn_cast<NamedDecl>(D);
- RecordDecl *RD = dyn_cast_or_null<RecordDecl>(D->getDeclContext());
-
- // Allow 'this' within late-parsed attributes.
- Sema::CXXThisScopeRAII ThisScope(Actions, RD, Qualifiers(),
- ND && ND->isCXXInstanceMember());
-
- if (LA.Decls.size() == 1) {
- // If the Decl is templatized, add template parameters to scope.
- bool HasTemplateScope = EnterScope && D->isTemplateDecl();
- ParseScope TempScope(this, Scope::TemplateParamScope, HasTemplateScope);
- if (HasTemplateScope)
- Actions.ActOnReenterTemplateScope(Actions.CurScope, D);
-
- // If the Decl is on a function, add function parameters to the scope.
- bool HasFunScope = EnterScope && D->isFunctionOrFunctionTemplate();
- ParseScope FnScope(
- this, Scope::FnScope | Scope::DeclScope | Scope::CompoundStmtScope,
- HasFunScope);
- if (HasFunScope)
- Actions.ActOnReenterFunctionContext(Actions.CurScope, D);
-
- ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc,
- nullptr, SourceLocation(), ParsedAttr::AS_GNU,
- nullptr);
-
- if (HasFunScope) {
- Actions.ActOnExitFunctionContext();
- FnScope.Exit(); // Pop scope, and remove Decls from IdResolver
- }
- if (HasTemplateScope) {
- TempScope.Exit();
- }
- } else {
- // If there are multiple decls, then the decl cannot be within the
- // function scope.
- ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc,
- nullptr, SourceLocation(), ParsedAttr::AS_GNU,
- nullptr);
- }
- } else {
- Diag(Tok, diag::warn_attribute_no_decl) << LA.AttrName.getName();
- }
-
- if (OnDefinition && !Attrs.empty() && !Attrs.begin()->isCXX11Attribute() &&
- Attrs.begin()->isKnownToGCC())
- Diag(Tok, diag::warn_attribute_on_function_definition)
- << &LA.AttrName;
-
- for (unsigned i = 0, ni = LA.Decls.size(); i < ni; ++i)
- Actions.ActOnFinishDelayedAttribute(getCurScope(), LA.Decls[i], Attrs);
-
- // Due to a parsing error, we either went over the cached tokens or
- // there are still cached tokens left, so we skip the leftover tokens.
- while (Tok.isNot(tok::eof))
- ConsumeAnyToken();
-
- if (Tok.is(tok::eof) && Tok.getEofData() == AttrEnd.getEofData())
- ConsumeAnyToken();
-}
-
void Parser::ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
@@ -2046,46 +1898,52 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
}
// Check to see if we have a function *definition* which must have a body.
- if (D.isFunctionDeclarator() &&
- // Look at the next token to make sure that this isn't a function
- // declaration. We have to check this because __attribute__ might be the
- // start of a function definition in GCC-extended K&R C.
- !isDeclarationAfterDeclarator()) {
-
- // Function definitions are only allowed at file scope and in C++ classes.
- // The C++ inline method definition case is handled elsewhere, so we only
- // need to handle the file scope definition case.
- if (Context == DeclaratorContext::FileContext) {
- if (isStartOfFunctionDefinition(D)) {
- if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
- Diag(Tok, diag::err_function_declared_typedef);
-
- // Recover by treating the 'typedef' as spurious.
- DS.ClearStorageClassSpecs();
- }
+ if (D.isFunctionDeclarator()) {
+ if (Tok.is(tok::equal) && NextToken().is(tok::code_completion)) {
+ Actions.CodeCompleteAfterFunctionEquals(D);
+ cutOffParsing();
+ return nullptr;
+ }
+ // Look at the next token to make sure that this isn't a function
+ // declaration. We have to check this because __attribute__ might be the
+ // start of a function definition in GCC-extended K&R C.
+ if (!isDeclarationAfterDeclarator()) {
+
+ // Function definitions are only allowed at file scope and in C++ classes.
+ // The C++ inline method definition case is handled elsewhere, so we only
+ // need to handle the file scope definition case.
+ if (Context == DeclaratorContext::FileContext) {
+ if (isStartOfFunctionDefinition(D)) {
+ if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
+ Diag(Tok, diag::err_function_declared_typedef);
+
+ // Recover by treating the 'typedef' as spurious.
+ DS.ClearStorageClassSpecs();
+ }
- Decl *TheDecl =
- ParseFunctionDefinition(D, ParsedTemplateInfo(), &LateParsedAttrs);
- return Actions.ConvertDeclToDeclGroup(TheDecl);
- }
+ Decl *TheDecl = ParseFunctionDefinition(D, ParsedTemplateInfo(),
+ &LateParsedAttrs);
+ return Actions.ConvertDeclToDeclGroup(TheDecl);
+ }
- if (isDeclarationSpecifier()) {
- // If there is an invalid declaration specifier right after the
- // function prototype, then we must be in a missing semicolon case
- // where this isn't actually a body. Just fall through into the code
- // that handles it as a prototype, and let the top-level code handle
- // the erroneous declspec where it would otherwise expect a comma or
- // semicolon.
+ if (isDeclarationSpecifier()) {
+ // If there is an invalid declaration specifier right after the
+ // function prototype, then we must be in a missing semicolon case
+ // where this isn't actually a body. Just fall through into the code
+ // that handles it as a prototype, and let the top-level code handle
+ // the erroneous declspec where it would otherwise expect a comma or
+ // semicolon.
+ } else {
+ Diag(Tok, diag::err_expected_fn_body);
+ SkipUntil(tok::semi);
+ return nullptr;
+ }
} else {
- Diag(Tok, diag::err_expected_fn_body);
- SkipUntil(tok::semi);
- return nullptr;
- }
- } else {
- if (Tok.is(tok::l_brace)) {
- Diag(Tok, diag::err_function_definition_not_allowed);
- SkipMalformedDecl();
- return nullptr;
+ if (Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::err_function_definition_not_allowed);
+ SkipMalformedDecl();
+ return nullptr;
+ }
}
}
}
@@ -2359,7 +2217,7 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
<< 0 /* default */;
else
Diag(ConsumeToken(), diag::err_default_special_members)
- << getLangOpts().CPlusPlus2a;
+ << getLangOpts().CPlusPlus20;
} else {
InitializerScopeRAII InitScope(*this, D, ThisDecl);
@@ -2460,6 +2318,7 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
InitializerScopeRAII InitScope(*this, D, ThisDecl);
+ PreferredType.enterVariableInit(Tok.getLocation(), ThisDecl);
ExprResult Init(ParseBraceInitializer());
InitScope.pop();
@@ -2741,7 +2600,7 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
default:
// This is probably supposed to be a type. This includes cases like:
// int f(itn);
- // struct S { unsinged : 4; };
+ // struct S { unsigned : 4; };
break;
}
}
@@ -2879,6 +2738,25 @@ void Parser::ParseAlignmentSpecifier(ParsedAttributes &Attrs,
ParsedAttr::AS_Keyword, EllipsisLoc);
}
+ExprResult Parser::ParseExtIntegerArgument() {
+ assert(Tok.is(tok::kw__ExtInt) && "Not an extended int type");
+ ConsumeToken();
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume())
+ return ExprError();
+
+ ExprResult ER = ParseConstantExpression();
+ if (ER.isInvalid()) {
+ T.skipToEnd();
+ return ExprError();
+ }
+
+ if(T.consumeClose())
+ return ExprError();
+ return ER;
+}
+
/// Determine whether we're looking at something that might be a declarator
/// in a simple-declaration. If it can't possibly be a declarator, maybe
/// diagnose a missing semicolon after a prior tag definition in the decl
@@ -2962,6 +2840,7 @@ Parser::DiagnoseMissingSemiAfterTagDefinition(DeclSpec &DS, AccessSpecifier AS,
case Sema::NC_ContextIndependentExpr:
case Sema::NC_VarTemplate:
case Sema::NC_FunctionTemplate:
+ case Sema::NC_Concept:
// Might be a redeclaration of a prior entity.
break;
}
@@ -3160,9 +3039,19 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// We are looking for a qualified typename.
Token Next = NextToken();
- if (Next.is(tok::annot_template_id) &&
- static_cast<TemplateIdAnnotation *>(Next.getAnnotationValue())
- ->Kind == TNK_Type_template) {
+
+ TemplateIdAnnotation *TemplateId = Next.is(tok::annot_template_id)
+ ? takeTemplateIdAnnotation(Next)
+ : nullptr;
+ if (TemplateId && TemplateId->hasInvalidName()) {
+ // We found something like 'T::U<Args> x', but U is not a template.
+ // Assume it was supposed to be a type.
+ DS.SetTypeSpecError();
+ ConsumeAnnotationToken();
+ break;
+ }
+
+ if (TemplateId && TemplateId->Kind == TNK_Type_template) {
// We have a qualified template-id, e.g., N::A<int>
// If this would be a valid constructor declaration with template
@@ -3172,12 +3061,11 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
//
// To improve diagnostics for this case, parse the declaration as a
// constructor (and reject the extra template arguments later).
- TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Next);
if ((DSContext == DeclSpecContext::DSC_top_level ||
DSContext == DeclSpecContext::DSC_class) &&
TemplateId->Name &&
Actions.isCurrentClassName(*TemplateId->Name, getCurScope(), &SS) &&
- isConstructorDeclarator(/*Unqualified*/ false)) {
+ isConstructorDeclarator(/*Unqualified=*/false)) {
// The user meant this to be an out-of-line constructor
// definition, but template arguments are not allowed
// there. Just allow this as a constructor; we'll
@@ -3189,23 +3077,29 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
ConsumeAnnotationToken(); // The C++ scope.
assert(Tok.is(tok::annot_template_id) &&
"ParseOptionalCXXScopeSpecifier not working");
- AnnotateTemplateIdTokenAsType();
+ AnnotateTemplateIdTokenAsType(SS);
+ continue;
+ }
+
+ if (TemplateId && TemplateId->Kind == TNK_Concept_template &&
+ GetLookAheadToken(2).isOneOf(tok::kw_auto, tok::kw_decltype)) {
+ DS.getTypeSpecScope() = SS;
+ // This is a qualified placeholder-specifier, e.g., ::C<int> auto ...
+ // Consume the scope annotation and continue to consume the template-id
+ // as a placeholder-specifier.
+ ConsumeAnnotationToken();
continue;
}
if (Next.is(tok::annot_typename)) {
DS.getTypeSpecScope() = SS;
ConsumeAnnotationToken(); // The C++ scope.
- if (Tok.getAnnotationValue()) {
- ParsedType T = getTypeAnnotation(Tok);
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename,
- Tok.getAnnotationEndLoc(),
- PrevSpec, DiagID, T, Policy);
- if (isInvalid)
- break;
- }
- else
- DS.SetTypeSpecError();
+ TypeResult T = getTypeAnnotation(Tok);
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename,
+ Tok.getAnnotationEndLoc(),
+ PrevSpec, DiagID, T, Policy);
+ if (isInvalid)
+ break;
DS.SetRangeEnd(Tok.getAnnotationEndLoc());
ConsumeAnnotationToken(); // The typename
}
@@ -3235,6 +3129,11 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// C++ doesn't have implicit int. Diagnose it as a typo w.r.t. to the
// typename.
if (!TypeRep) {
+ if (TryAnnotateTypeConstraint())
+ goto DoneWithDeclSpec;
+ if (Tok.isNot(tok::annot_cxxscope) ||
+ NextToken().isNot(tok::identifier))
+ continue;
// Eat the scope spec so the identifier is current.
ConsumeAnnotationToken();
ParsedAttributesWithRange Attrs(AttrFactory);
@@ -3268,13 +3167,9 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
if (DS.hasTypeSpecifier() && DS.hasTagDefinition())
goto DoneWithDeclSpec;
- if (Tok.getAnnotationValue()) {
- ParsedType T = getTypeAnnotation(Tok);
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec,
- DiagID, T, Policy);
- } else
- DS.SetTypeSpecError();
-
+ TypeResult T = getTypeAnnotation(Tok);
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec,
+ DiagID, T, Policy);
if (isInvalid)
break;
@@ -3384,6 +3279,10 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// If this is not a typedef name, don't parse it as part of the declspec,
// it must be an implicit int or an error.
if (!TypeRep) {
+ if (TryAnnotateTypeConstraint())
+ goto DoneWithDeclSpec;
+ if (Tok.isNot(tok::identifier))
+ continue;
ParsedAttributesWithRange Attrs(AttrFactory);
if (ParseImplicitInt(DS, nullptr, TemplateInfo, AS, DSContext, Attrs)) {
if (!Attrs.empty()) {
@@ -3433,9 +3332,62 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
continue;
}
- // type-name
+ // type-name or placeholder-specifier
case tok::annot_template_id: {
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+
+ if (TemplateId->hasInvalidName()) {
+ DS.SetTypeSpecError();
+ break;
+ }
+
+ if (TemplateId->Kind == TNK_Concept_template) {
+ // If we've already diagnosed that this type-constraint has invalid
+      // arguments, drop it and just form 'auto' or 'decltype(auto)'.
+ if (TemplateId->hasInvalidArgs())
+ TemplateId = nullptr;
+
+ if (NextToken().is(tok::identifier)) {
+ Diag(Loc, diag::err_placeholder_expected_auto_or_decltype_auto)
+ << FixItHint::CreateInsertion(NextToken().getLocation(), "auto");
+ // Attempt to continue as if 'auto' was placed here.
+ isInvalid = DS.SetTypeSpecType(TST_auto, Loc, PrevSpec, DiagID,
+ TemplateId, Policy);
+ break;
+ }
+ if (!NextToken().isOneOf(tok::kw_auto, tok::kw_decltype))
+ goto DoneWithDeclSpec;
+ ConsumeAnnotationToken();
+ SourceLocation AutoLoc = Tok.getLocation();
+ if (TryConsumeToken(tok::kw_decltype)) {
+ BalancedDelimiterTracker Tracker(*this, tok::l_paren);
+ if (Tracker.consumeOpen()) {
+ // Something like `void foo(Iterator decltype i)`
+ Diag(Tok, diag::err_expected) << tok::l_paren;
+ } else {
+ if (!TryConsumeToken(tok::kw_auto)) {
+ // Something like `void foo(Iterator decltype(int) i)`
+ Tracker.skipToEnd();
+ Diag(Tok, diag::err_placeholder_expected_auto_or_decltype_auto)
+ << FixItHint::CreateReplacement(SourceRange(AutoLoc,
+ Tok.getLocation()),
+ "auto");
+ } else {
+ Tracker.consumeClose();
+ }
+ }
+ ConsumedEnd = Tok.getLocation();
+ // Even if something went wrong above, continue as if we've seen
+ // `decltype(auto)`.
+ isInvalid = DS.SetTypeSpecType(TST_decltype_auto, Loc, PrevSpec,
+ DiagID, TemplateId, Policy);
+ } else {
+ isInvalid = DS.SetTypeSpecType(TST_auto, Loc, PrevSpec, DiagID,
+ TemplateId, Policy);
+ }
+ break;
+ }
+
if (TemplateId->Kind != TNK_Type_template &&
TemplateId->Kind != TNK_Undeclared_template) {
// This template-id does not refer to a type name, so we're
@@ -3448,12 +3400,13 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// constructor declaration.
if (getLangOpts().CPlusPlus && DSContext == DeclSpecContext::DSC_class &&
Actions.isCurrentClassName(*TemplateId->Name, getCurScope()) &&
- isConstructorDeclarator(TemplateId->SS.isEmpty()))
+ isConstructorDeclarator(/*Unqualified=*/true))
goto DoneWithDeclSpec;
// Turn the template-id annotation token into a type annotation
// token, then try again to parse it as a type-specifier.
- AnnotateTemplateIdTokenAsType();
+ CXXScopeSpec SS;
+ AnnotateTemplateIdTokenAsType(SS);
continue;
}
@@ -3617,7 +3570,11 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
ConsumedEnd = ExplicitLoc;
ConsumeToken(); // kw_explicit
if (Tok.is(tok::l_paren)) {
- if (getLangOpts().CPlusPlus2a) {
+ if (getLangOpts().CPlusPlus20 || isExplicitBool() == TPResult::True) {
+ Diag(Tok.getLocation(), getLangOpts().CPlusPlus20
+ ? diag::warn_cxx17_compat_explicit_bool
+ : diag::ext_explicit_bool);
+
ExprResult ExplicitExpr(static_cast<Expr *>(nullptr));
BalancedDelimiterTracker Tracker(*this, tok::l_paren);
Tracker.consumeOpen();
@@ -3630,8 +3587,9 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
Actions.ActOnExplicitBoolSpecifier(ExplicitExpr.get());
} else
Tracker.skipToEnd();
- } else
- Diag(Tok.getLocation(), diag::warn_cxx2a_compat_explicit_bool);
+ } else {
+ Diag(Tok.getLocation(), diag::warn_cxx20_compat_explicit_bool);
+ }
}
isInvalid = DS.setFunctionSpecExplicit(ExplicitLoc, PrevSpec, DiagID,
ExplicitSpec, CloseParenLoc);
@@ -3726,6 +3684,14 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
isInvalid = DS.SetTypeSpecType(DeclSpec::TST_int, Loc, PrevSpec,
DiagID, Policy);
break;
+ case tok::kw__ExtInt: {
+ ExprResult ER = ParseExtIntegerArgument();
+ if (ER.isInvalid())
+ continue;
+ isInvalid = DS.SetExtIntType(Loc, ER.get(), PrevSpec, DiagID, Policy);
+ ConsumedEnd = PrevTokLocation;
+ break;
+ }
case tok::kw___int128:
isInvalid = DS.SetTypeSpecType(DeclSpec::TST_int128, Loc, PrevSpec,
DiagID, Policy);
@@ -3734,6 +3700,10 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
isInvalid = DS.SetTypeSpecType(DeclSpec::TST_half, Loc, PrevSpec,
DiagID, Policy);
break;
+ case tok::kw___bf16:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_BFloat16, Loc, PrevSpec,
+ DiagID, Policy);
+ break;
case tok::kw_float:
isInvalid = DS.SetTypeSpecType(DeclSpec::TST_float, Loc, PrevSpec,
DiagID, Policy);
@@ -4137,7 +4107,7 @@ void Parser::ParseStructDeclaration(
/// [OBC] '@' 'defs' '(' class-name ')'
///
void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
- DeclSpec::TST TagType, Decl *TagDecl) {
+ DeclSpec::TST TagType, RecordDecl *TagDecl) {
PrettyDeclStackTraceEntry CrashInfo(Actions.Context, TagDecl, RecordLoc,
"parsing struct/union body");
assert(!getLangOpts().CPlusPlus && "C++ declarations not supported");
@@ -4149,8 +4119,6 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
ParseScope StructScope(this, Scope::ClassScope|Scope::DeclScope);
Actions.ActOnTagStartDefinition(getCurScope(), TagDecl);
- SmallVector<Decl *, 32> FieldDecls;
-
// While we still have something to read, read the declarations in the struct.
while (!tryParseMisplacedModuleImport() && Tok.isNot(tok::r_brace) &&
Tok.isNot(tok::eof)) {
@@ -4202,7 +4170,6 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
Actions.ActOnField(getCurScope(), TagDecl,
FD.D.getDeclSpec().getSourceRange().getBegin(),
FD.D, FD.BitfieldSize);
- FieldDecls.push_back(Field);
FD.complete(Field);
};
@@ -4226,7 +4193,6 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
SmallVector<Decl *, 16> Fields;
Actions.ActOnDefs(getCurScope(), TagDecl, Tok.getLocation(),
Tok.getIdentifierInfo(), Fields);
- FieldDecls.insert(FieldDecls.end(), Fields.begin(), Fields.end());
ConsumeToken();
ExpectAndConsume(tok::r_paren);
}
@@ -4252,6 +4218,9 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
// If attributes exist after struct contents, parse them.
MaybeParseGNUAttributes(attrs);
+ SmallVector<Decl *, 32> FieldDecls(TagDecl->field_begin(),
+ TagDecl->field_end());
+
Actions.ActOnFields(getCurScope(), RecordLoc, TagDecl, FieldDecls,
T.getOpenLocation(), T.getCloseLocation(), attrs);
StructScope.Exit();
@@ -4286,7 +4255,7 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
/// ':' type-specifier-seq
///
/// [C++] elaborated-type-specifier:
-/// [C++] 'enum' '::'[opt] nested-name-specifier[opt] identifier
+/// [C++] 'enum' nested-name-specifier[opt] identifier
///
void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
@@ -4335,17 +4304,24 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization);
SuppressAccessChecks diagsFromTag(*this, shouldDelayDiagsInTag);
- // Enum definitions should not be parsed in a trailing-return-type.
- bool AllowDeclaration = DSC != DeclSpecContext::DSC_trailing;
+ // Determine whether this declaration is permitted to have an enum-base.
+ AllowDefiningTypeSpec AllowEnumSpecifier =
+ isDefiningTypeSpecifierContext(DSC);
+ bool CanBeOpaqueEnumDeclaration =
+ DS.isEmpty() && isOpaqueEnumDeclarationContext(DSC);
+ bool CanHaveEnumBase = (getLangOpts().CPlusPlus11 || getLangOpts().ObjC ||
+ getLangOpts().MicrosoftExt) &&
+ (AllowEnumSpecifier == AllowDefiningTypeSpec::Yes ||
+ CanBeOpaqueEnumDeclaration);
CXXScopeSpec &SS = DS.getTypeSpecScope();
if (getLangOpts().CPlusPlus) {
- // "enum foo : bar;" is not a potential typo for "enum foo::bar;"
- // if a fixed underlying type is allowed.
- ColonProtectionRAIIObject X(*this, AllowDeclaration);
+ // "enum foo : bar;" is not a potential typo for "enum foo::bar;".
+ ColonProtectionRAIIObject X(*this);
CXXScopeSpec Spec;
- if (ParseOptionalCXXScopeSpecifier(Spec, nullptr,
+ if (ParseOptionalCXXScopeSpecifier(Spec, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
/*EnteringContext=*/true))
return;
@@ -4362,9 +4338,9 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
SS = Spec;
}
- // Must have either 'enum name' or 'enum {...}'.
+ // Must have either 'enum name' or 'enum {...}' or (rarely) 'enum : T { ... }'.
if (Tok.isNot(tok::identifier) && Tok.isNot(tok::l_brace) &&
- !(AllowDeclaration && Tok.is(tok::colon))) {
+ Tok.isNot(tok::colon)) {
Diag(Tok, diag::err_expected_either) << tok::identifier << tok::l_brace;
// Skip the rest of this declarator, up until the comma or semicolon.
@@ -4394,78 +4370,69 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
diagsFromTag.done();
TypeResult BaseType;
+ SourceRange BaseRange;
- // Parse the fixed underlying type.
- bool CanBeBitfield = getCurScope()->getFlags() & Scope::ClassScope;
- if (AllowDeclaration && Tok.is(tok::colon)) {
- bool PossibleBitfield = false;
- if (CanBeBitfield) {
- // If we're in class scope, this can either be an enum declaration with
- // an underlying type, or a declaration of a bitfield member. We try to
- // use a simple disambiguation scheme first to catch the common cases
- // (integer literal, sizeof); if it's still ambiguous, we then consider
- // anything that's a simple-type-specifier followed by '(' as an
- // expression. This suffices because function types are not valid
- // underlying types anyway.
- EnterExpressionEvaluationContext Unevaluated(
- Actions, Sema::ExpressionEvaluationContext::ConstantEvaluated);
- TPResult TPR = isExpressionOrTypeSpecifierSimple(NextToken().getKind());
- // If the next token starts an expression, we know we're parsing a
- // bit-field. This is the common case.
- if (TPR == TPResult::True)
- PossibleBitfield = true;
- // If the next token starts a type-specifier-seq, it may be either a
- // a fixed underlying type or the start of a function-style cast in C++;
- // lookahead one more token to see if it's obvious that we have a
- // fixed underlying type.
- else if (TPR == TPResult::False &&
- GetLookAheadToken(2).getKind() == tok::semi) {
- // Consume the ':'.
- ConsumeToken();
- } else {
- // We have the start of a type-specifier-seq, so we have to perform
- // tentative parsing to determine whether we have an expression or a
- // type.
- TentativeParsingAction TPA(*this);
-
- // Consume the ':'.
- ConsumeToken();
+ bool CanBeBitfield = (getCurScope()->getFlags() & Scope::ClassScope) &&
+ ScopedEnumKWLoc.isInvalid() && Name;
- // If we see a type specifier followed by an open-brace, we have an
- // ambiguity between an underlying type and a C++11 braced
- // function-style cast. Resolve this by always treating it as an
- // underlying type.
- // FIXME: The standard is not entirely clear on how to disambiguate in
- // this case.
- if ((getLangOpts().CPlusPlus &&
- isCXXDeclarationSpecifier(TPResult::True) != TPResult::True) ||
- (!getLangOpts().CPlusPlus && !isDeclarationSpecifier(true))) {
- // We'll parse this as a bitfield later.
- PossibleBitfield = true;
- TPA.Revert();
- } else {
- // We have a type-specifier-seq.
- TPA.Commit();
- }
- }
- } else {
- // Consume the ':'.
- ConsumeToken();
- }
+ // Parse the fixed underlying type.
+ if (Tok.is(tok::colon)) {
+ // This might be an enum-base or part of some unrelated enclosing context.
+ //
+ // 'enum E : base' is permitted in two circumstances:
+ //
+ // 1) As a defining-type-specifier, when followed by '{'.
+ // 2) As the sole constituent of a complete declaration -- when DS is empty
+ // and the next token is ';'.
+ //
+ // The restriction to defining-type-specifiers is important to allow parsing
+ // a ? new enum E : int{}
+ // _Generic(a, enum E : int{})
+ // properly.
+ //
+ // One additional consideration applies:
+ //
+ // C++ [dcl.enum]p1:
+ // A ':' following "enum nested-name-specifier[opt] identifier" within
+ // the decl-specifier-seq of a member-declaration is parsed as part of
+ // an enum-base.
+ //
+ // Other language modes supporting enumerations with fixed underlying types
+ // do not have clear rules on this, so we disambiguate to determine whether
+ // the tokens form a bit-field width or an enum-base.
+
+ if (CanBeBitfield && !isEnumBase(CanBeOpaqueEnumDeclaration)) {
+ // Outside C++11, do not interpret the tokens as an enum-base if they do
+ // not make sense as one. In C++11, it's an error if this happens.
+ if (getLangOpts().CPlusPlus11)
+ Diag(Tok.getLocation(), diag::err_anonymous_enum_bitfield);
+ } else if (CanHaveEnumBase || !ColonIsSacred) {
+ SourceLocation ColonLoc = ConsumeToken();
+
+ // Parse a type-specifier-seq as a type. We can't just ParseTypeName here,
+ // because under -fms-extensions,
+ // enum E : int *p;
+ // declares 'enum E : int; E *p;' not 'enum E : int*; E p;'.
+ DeclSpec DS(AttrFactory);
+ ParseSpecifierQualifierList(DS, AS, DeclSpecContext::DSC_type_specifier);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
+ BaseType = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
- if (!PossibleBitfield) {
- SourceRange Range;
- BaseType = ParseTypeName(&Range);
+ BaseRange = SourceRange(ColonLoc, DeclaratorInfo.getSourceRange().getEnd());
if (!getLangOpts().ObjC) {
if (getLangOpts().CPlusPlus11)
- Diag(StartLoc, diag::warn_cxx98_compat_enum_fixed_underlying_type);
+ Diag(ColonLoc, diag::warn_cxx98_compat_enum_fixed_underlying_type)
+ << BaseRange;
else if (getLangOpts().CPlusPlus)
- Diag(StartLoc, diag::ext_cxx11_enum_fixed_underlying_type);
+ Diag(ColonLoc, diag::ext_cxx11_enum_fixed_underlying_type)
+ << BaseRange;
else if (getLangOpts().MicrosoftExt)
- Diag(StartLoc, diag::ext_ms_c_enum_fixed_underlying_type);
+ Diag(ColonLoc, diag::ext_ms_c_enum_fixed_underlying_type)
+ << BaseRange;
else
- Diag(StartLoc, diag::ext_clang_c_enum_fixed_underlying_type);
+ Diag(ColonLoc, diag::ext_clang_c_enum_fixed_underlying_type)
+ << BaseRange;
}
}
}
@@ -4481,14 +4448,19 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// enum foo {..}; void bar() { enum foo x; } <- use of old foo.
//
Sema::TagUseKind TUK;
- if (!AllowDeclaration) {
+ if (AllowEnumSpecifier == AllowDefiningTypeSpec::No)
TUK = Sema::TUK_Reference;
- } else if (Tok.is(tok::l_brace)) {
+ else if (Tok.is(tok::l_brace)) {
if (DS.isFriendSpecified()) {
Diag(Tok.getLocation(), diag::err_friend_decl_defines_type)
<< SourceRange(DS.getFriendSpecLoc());
ConsumeBrace();
SkipUntil(tok::r_brace, StopAtSemi);
+ // Discard any other definition-only pieces.
+ attrs.clear();
+ ScopedEnumKWLoc = SourceLocation();
+ IsScopedUsingClassTag = false;
+ BaseType = TypeResult();
TUK = Sema::TUK_Friend;
} else {
TUK = Sema::TUK_Definition;
@@ -4497,6 +4469,9 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
(Tok.is(tok::semi) ||
(Tok.isAtStartOfLine() &&
!isValidAfterTypeSpecifier(CanBeBitfield)))) {
+ // An opaque-enum-declaration is required to be standalone (no preceding or
+ // following tokens in the declaration). Sema enforces this separately by
+ // diagnosing anything else in the DeclSpec.
TUK = DS.isFriendSpecified() ? Sema::TUK_Friend : Sema::TUK_Declaration;
if (Tok.isNot(tok::semi)) {
// A semicolon was missing after this declaration. Diagnose and recover.
@@ -4508,8 +4483,11 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
TUK = Sema::TUK_Reference;
}
- // If this is an elaborated type specifier, and we delayed
- // diagnostics before, just merge them into the current pool.
+ bool IsElaboratedTypeSpecifier =
+ TUK == Sema::TUK_Reference || TUK == Sema::TUK_Friend;
+
+ // If this is an elaborated type specifier nested in a larger declaration,
+ // and we delayed diagnostics before, just merge them into the current pool.
if (TUK == Sema::TUK_Reference && shouldDelayDiagsInTag) {
diagsFromTag.redelay();
}
@@ -4536,9 +4514,6 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
TemplateInfo.TemplateParams->size());
}
- if (TUK == Sema::TUK_Reference)
- ProhibitAttributes(attrs);
-
if (!Name && TUK != Sema::TUK_Definition) {
Diag(Tok, diag::err_enumerator_unnamed_no_def);
@@ -4547,6 +4522,25 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
return;
}
+ // An elaborated-type-specifier has a much more constrained grammar:
+ //
+ // 'enum' nested-name-specifier[opt] identifier
+ //
+ // If we parsed any other bits, reject them now.
+ //
+ // MSVC and (for now at least) Objective-C permit a full enum-specifier
+ // or opaque-enum-declaration anywhere.
+ if (IsElaboratedTypeSpecifier && !getLangOpts().MicrosoftExt &&
+ !getLangOpts().ObjC) {
+ ProhibitAttributes(attrs);
+ if (BaseType.isUsable())
+ Diag(BaseRange.getBegin(), diag::ext_enum_base_in_type_specifier)
+ << (AllowEnumSpecifier == AllowDefiningTypeSpec::Yes) << BaseRange;
+ else if (ScopedEnumKWLoc.isValid())
+ Diag(ScopedEnumKWLoc, diag::ext_elaborated_enum_class)
+ << FixItHint::CreateRemoval(ScopedEnumKWLoc) << IsScopedUsingClassTag;
+ }
+
stripTypeAttributesOffDeclSpec(attrs, DS, TUK);
Sema::SkipBodyInfo SkipBody;
@@ -4621,7 +4615,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
return;
}
- if (Tok.is(tok::l_brace) && TUK != Sema::TUK_Reference) {
+ if (Tok.is(tok::l_brace) && TUK == Sema::TUK_Definition) {
Decl *D = SkipBody.CheckSameAsPrevious ? SkipBody.New : TagDecl;
ParseEnumBody(StartLoc, D);
if (SkipBody.CheckSameAsPrevious &&
@@ -4808,6 +4802,8 @@ bool Parser::isKnownToBeTypeSpecifier(const Token &Tok) const {
case tok::kw_char16_t:
case tok::kw_char32_t:
case tok::kw_int:
+ case tok::kw__ExtInt:
+ case tok::kw___bf16:
case tok::kw_half:
case tok::kw_float:
case tok::kw_double:
@@ -4887,7 +4883,9 @@ bool Parser::isTypeSpecifierQualifier() {
case tok::kw_char16_t:
case tok::kw_char32_t:
case tok::kw_int:
+ case tok::kw__ExtInt:
case tok::kw_half:
+ case tok::kw___bf16:
case tok::kw_float:
case tok::kw_double:
case tok::kw__Accum:
@@ -4991,6 +4989,8 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
// recurse to handle whatever we get.
if (TryAnnotateTypeOrScopeToken())
return true;
+ if (TryAnnotateTypeConstraint())
+ return true;
if (Tok.is(tok::identifier))
return false;
@@ -5051,7 +5051,9 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
case tok::kw_char32_t:
case tok::kw_int:
+ case tok::kw__ExtInt:
case tok::kw_half:
+ case tok::kw___bf16:
case tok::kw_float:
case tok::kw_double:
case tok::kw__Accum:
@@ -5124,8 +5126,27 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
// placeholder-type-specifier
case tok::annot_template_id: {
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
- return TemplateId->Kind == TNK_Concept_template &&
- (NextToken().is(tok::kw_auto) || NextToken().is(tok::kw_decltype));
+ if (TemplateId->hasInvalidName())
+ return true;
+ // FIXME: What about type templates that have only been annotated as
+ // annot_template_id, not as annot_typename?
+ return isTypeConstraintAnnotation() &&
+ (NextToken().is(tok::kw_auto) || NextToken().is(tok::kw_decltype));
+ }
+
+ case tok::annot_cxxscope: {
+ TemplateIdAnnotation *TemplateId =
+ NextToken().is(tok::annot_template_id)
+ ? takeTemplateIdAnnotation(NextToken())
+ : nullptr;
+ if (TemplateId && TemplateId->hasInvalidName())
+ return true;
+ // FIXME: What about type templates that have only been annotated as
+ // annot_template_id, not as annot_typename?
+ if (NextToken().is(tok::identifier) && TryAnnotateTypeConstraint())
+ return true;
+ return isTypeConstraintAnnotation() &&
+ GetLookAheadToken(2).isOneOf(tok::kw_auto, tok::kw_decltype);
}
case tok::kw___declspec:
@@ -5173,7 +5194,8 @@ bool Parser::isConstructorDeclarator(bool IsUnqualified, bool DeductionGuide) {
// Parse the C++ scope specifier.
CXXScopeSpec SS;
- if (ParseOptionalCXXScopeSpecifier(SS, nullptr,
+ if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
/*EnteringContext=*/true)) {
TPA.Revert();
return false;
@@ -5553,7 +5575,8 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
D.getContext() == DeclaratorContext::FileContext ||
D.getContext() == DeclaratorContext::MemberContext;
CXXScopeSpec SS;
- ParseOptionalCXXScopeSpecifier(SS, nullptr, EnteringContext);
+ ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false, EnteringContext);
if (SS.isNotEmpty()) {
if (Tok.isNot(tok::star)) {
@@ -5568,8 +5591,8 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
return;
}
- SourceLocation Loc = ConsumeToken();
- D.SetRangeEnd(Loc);
+ SourceLocation StarLoc = ConsumeToken();
+ D.SetRangeEnd(StarLoc);
DeclSpec DS(AttrFactory);
ParseTypeQualifierListOpt(DS);
D.ExtendWithDeclSpec(DS);
@@ -5580,7 +5603,7 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
// Sema will have to catch (syntactically invalid) pointers into global
// scope. It has to catch pointers into namespace scope anyway.
D.AddTypeInfo(DeclaratorChunk::getMemberPointer(
- SS, DS.getTypeQualifiers(), DS.getEndLoc()),
+ SS, DS.getTypeQualifiers(), StarLoc, DS.getEndLoc()),
std::move(DS.getAttributes()),
/* Don't replace range end. */ SourceLocation());
return;
@@ -5776,8 +5799,9 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
bool EnteringContext =
D.getContext() == DeclaratorContext::FileContext ||
D.getContext() == DeclaratorContext::MemberContext;
- ParseOptionalCXXScopeSpecifier(D.getCXXScopeSpec(), nullptr,
- EnteringContext);
+ ParseOptionalCXXScopeSpecifier(
+ D.getCXXScopeSpec(), /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false, EnteringContext);
}
if (D.getCXXScopeSpec().isValid()) {
@@ -5851,10 +5875,11 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
bool HadScope = D.getCXXScopeSpec().isValid();
if (ParseUnqualifiedId(D.getCXXScopeSpec(),
+ /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
/*EnteringContext=*/true,
/*AllowDestructorName=*/true, AllowConstructorName,
- AllowDeductionGuide, nullptr, nullptr,
- D.getName()) ||
+ AllowDeductionGuide, nullptr, D.getName()) ||
// Once we're past the identifier, if the scope was bad, mark the
// whole declarator bad.
D.getCXXScopeSpec().isInvalid()) {
@@ -6021,11 +6046,12 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
while (1) {
if (Tok.is(tok::l_paren)) {
+ bool IsFunctionDeclaration = D.isFunctionDeclaratorAFunctionDeclaration();
// Enter function-declaration scope, limiting any declarators to the
// function prototype scope, including parameter declarators.
ParseScope PrototypeScope(this,
Scope::FunctionPrototypeScope|Scope::DeclScope|
- (D.isFunctionDeclaratorAFunctionDeclaration()
+ (IsFunctionDeclaration
? Scope::FunctionDeclarationScope : 0));
// The paren may be part of a C++ direct initializer, eg. "int x(1);".
@@ -6044,7 +6070,12 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
ParsedAttributes attrs(AttrFactory);
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
+ if (IsFunctionDeclaration)
+ Actions.ActOnStartFunctionDeclarationDeclarator(D,
+ TemplateParameterDepth);
ParseFunctionDeclarator(D, attrs, T, IsAmbiguous);
+ if (IsFunctionDeclaration)
+ Actions.ActOnFinishFunctionDeclarationDeclarator(D);
PrototypeScope.Exit();
} else if (Tok.is(tok::l_square)) {
ParseBracketDeclarator(D);
@@ -6360,7 +6391,7 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
ProhibitAttributes(FnAttrs);
} else {
if (Tok.isNot(tok::r_paren))
- ParseParameterDeclarationClause(D, FirstArgAttrs, ParamInfo,
+ ParseParameterDeclarationClause(D.getContext(), FirstArgAttrs, ParamInfo,
EllipsisLoc);
else if (RequiresArg)
Diag(Tok, diag::err_argument_required_after_attribute);
@@ -6578,9 +6609,9 @@ void Parser::ParseFunctionDeclaratorIdentifierList(
/// after the opening parenthesis. This function will not parse a K&R-style
/// identifier list.
///
-/// D is the declarator being parsed. If FirstArgAttrs is non-null, then the
-/// caller parsed those arguments immediately after the open paren - they should
-/// be considered to be part of the first parameter.
+/// DeclContext is the context of the declarator being parsed. If FirstArgAttrs
+/// is non-null, then the caller parsed those attributes immediately after the
+/// open paren - they should be considered to be part of the first parameter.
///
/// After returning, ParamInfo will hold the parsed parameters. EllipsisLoc will
/// be the location of the ellipsis, if any was parsed.
@@ -6606,7 +6637,7 @@ void Parser::ParseFunctionDeclaratorIdentifierList(
/// [C++11] attribute-specifier-seq parameter-declaration
///
void Parser::ParseParameterDeclarationClause(
- Declarator &D,
+ DeclaratorContext DeclaratorCtx,
ParsedAttributes &FirstArgAttrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc) {
@@ -6655,9 +6686,11 @@ void Parser::ParseParameterDeclarationClause(
// "LambdaExprParameterContext", because we must accept either
// 'declarator' or 'abstract-declarator' here.
Declarator ParmDeclarator(
- DS, D.getContext() == DeclaratorContext::LambdaExprContext
- ? DeclaratorContext::LambdaExprParameterContext
- : DeclaratorContext::PrototypeContext);
+ DS, DeclaratorCtx == DeclaratorContext::RequiresExprContext
+ ? DeclaratorContext::RequiresExprContext
+ : DeclaratorCtx == DeclaratorContext::LambdaExprContext
+ ? DeclaratorContext::LambdaExprParameterContext
+ : DeclaratorContext::PrototypeContext);
ParseDeclarator(ParmDeclarator);
// Parse GNU attributes, if present.
@@ -6700,6 +6733,31 @@ void Parser::ParseParameterDeclarationClause(
Actions.containsUnexpandedParameterPacks(ParmDeclarator))
DiagnoseMisplacedEllipsisInDeclarator(ConsumeToken(), ParmDeclarator);
+ // Now we are at the point where declarator parsing is finished.
+ //
+ // Try to catch keywords in place of the identifier in a declarator, and
+ // in particular the common case where:
+ // 1 identifier comes at the end of the declarator
+ // 2 if the identifier is dropped, the declarator is valid but anonymous
+ // (no identifier)
+ // 3 declarator parsing succeeds, and then we have a trailing keyword,
+ // which is never valid in a param list (e.g. missing a ',')
+ // And we can't handle this in ParseDeclarator because in general keywords
+ // may be allowed to follow the declarator. (And in some cases there'd be
+ // better recovery like inserting punctuation). ParseDeclarator is just
+ // treating this as an anonymous parameter, and fortunately at this point
+ // we've already almost done that.
+ //
+ // We care about case 1) where the declarator type should be known, and
+ // the identifier should be null.
+ if (!ParmDeclarator.isInvalidType() && !ParmDeclarator.hasName()) {
+ if (Tok.getIdentifierInfo() &&
+ Tok.getIdentifierInfo()->isKeyword(getLangOpts())) {
+ Diag(Tok, diag::err_keyword_as_parameter) << PP.getSpelling(Tok);
+ // Consume the keyword.
+ ConsumeToken();
+ }
+ }
// Inform the actions module about the parameter declarator, so it gets
// added to the current scope.
Decl *Param = Actions.ActOnParamDeclarator(getCurScope(), ParmDeclarator);
@@ -6711,7 +6769,7 @@ void Parser::ParseParameterDeclarationClause(
SourceLocation EqualLoc = Tok.getLocation();
// Parse the default argument
- if (D.getContext() == DeclaratorContext::MemberContext) {
+ if (DeclaratorCtx == DeclaratorContext::MemberContext) {
// If we're inside a class definition, cache the tokens
// corresponding to the default argument. We'll actually parse
// them when we see the end of the class definition.
diff --git a/clang/lib/Parse/ParseDeclCXX.cpp b/clang/lib/Parse/ParseDeclCXX.cpp
index 081d4d8b1209..ddcbb5615fee 100644
--- a/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/clang/lib/Parse/ParseDeclCXX.cpp
@@ -155,7 +155,7 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
// Normal namespace definition, not a nested-namespace-definition.
} else if (InlineLoc.isValid()) {
Diag(InlineLoc, diag::err_inline_nested_namespace_definition);
- } else if (getLangOpts().CPlusPlus2a) {
+ } else if (getLangOpts().CPlusPlus20) {
Diag(ExtraNSs[0].NamespaceLoc,
diag::warn_cxx14_compat_nested_namespace_definition);
if (FirstNestedInlineLoc.isValid())
@@ -290,7 +290,9 @@ Decl *Parser::ParseNamespaceAlias(SourceLocation NamespaceLoc,
CXXScopeSpec SS;
// Parse (optional) nested-name-specifier.
- ParseOptionalCXXScopeSpecifier(SS, nullptr, /*EnteringContext=*/false,
+ ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ /*EnteringContext=*/false,
/*MayBePseudoDestructor=*/nullptr,
/*IsTypename=*/false,
/*LastII=*/nullptr,
@@ -530,7 +532,9 @@ Decl *Parser::ParseUsingDirective(DeclaratorContext Context,
CXXScopeSpec SS;
// Parse (optional) nested-name-specifier.
- ParseOptionalCXXScopeSpecifier(SS, nullptr, /*EnteringContext=*/false,
+ ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ /*EnteringContext=*/false,
/*MayBePseudoDestructor=*/nullptr,
/*IsTypename=*/false,
/*LastII=*/nullptr,
@@ -597,7 +601,9 @@ bool Parser::ParseUsingDeclarator(DeclaratorContext Context,
// Parse nested-name-specifier.
IdentifierInfo *LastII = nullptr;
- if (ParseOptionalCXXScopeSpecifier(D.SS, nullptr, /*EnteringContext=*/false,
+ if (ParseOptionalCXXScopeSpecifier(D.SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ /*EnteringContext=*/false,
/*MayBePseudoDtor=*/nullptr,
/*IsTypename=*/false,
/*LastII=*/&LastII,
@@ -632,12 +638,12 @@ bool Parser::ParseUsingDeclarator(DeclaratorContext Context,
D.Name.setConstructorName(Type, IdLoc, IdLoc);
} else {
if (ParseUnqualifiedId(
- D.SS, /*EnteringContext=*/false,
+ D.SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false, /*EnteringContext=*/false,
/*AllowDestructorName=*/true,
- /*AllowConstructorName=*/!(Tok.is(tok::identifier) &&
- NextToken().is(tok::equal)),
- /*AllowDeductionGuide=*/false,
- nullptr, nullptr, D.Name))
+ /*AllowConstructorName=*/
+ !(Tok.is(tok::identifier) && NextToken().is(tok::equal)),
+ /*AllowDeductionGuide=*/false, nullptr, D.Name))
return true;
}
@@ -978,10 +984,10 @@ SourceLocation Parser::ParseDecltypeSpecifier(DeclSpec &DS) {
EnterExpressionEvaluationContext Unevaluated(
Actions, Sema::ExpressionEvaluationContext::Unevaluated, nullptr,
Sema::ExpressionEvaluationContextRecord::EK_Decltype);
- Result =
- Actions.CorrectDelayedTyposInExpr(ParseExpression(), [](Expr *E) {
- return E->hasPlaceholderType() ? ExprError() : E;
- });
+ Result = Actions.CorrectDelayedTyposInExpr(
+ ParseExpression(), /*InitDecl=*/nullptr,
+ /*RecoverUncorrectedTypos=*/false,
+ [](Expr *E) { return E->hasPlaceholderType() ? ExprError() : E; });
if (Result.isInvalid()) {
DS.SetTypeSpecError();
if (SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch)) {
@@ -1115,7 +1121,9 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
// Parse optional nested-name-specifier
CXXScopeSpec SS;
- if (ParseOptionalCXXScopeSpecifier(SS, nullptr, /*EnteringContext=*/false))
+ if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ /*EnteringContext=*/false))
return true;
BaseLoc = Tok.getLocation();
@@ -1139,19 +1147,14 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
// Check whether we have a template-id that names a type.
if (Tok.is(tok::annot_template_id)) {
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
- if (TemplateId->Kind == TNK_Type_template ||
- TemplateId->Kind == TNK_Dependent_template_name ||
- TemplateId->Kind == TNK_Undeclared_template) {
- AnnotateTemplateIdTokenAsType(/*IsClassName*/true);
+ if (TemplateId->mightBeType()) {
+ AnnotateTemplateIdTokenAsType(SS, /*IsClassName*/true);
assert(Tok.is(tok::annot_typename) && "template-id -> type failed");
- ParsedType Type = getTypeAnnotation(Tok);
+ TypeResult Type = getTypeAnnotation(Tok);
EndLocation = Tok.getAnnotationEndLoc();
ConsumeAnnotationToken();
-
- if (Type)
- return Type;
- return true;
+ return Type;
}
// Fall through to produce an error below.
@@ -1168,7 +1171,9 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
if (Tok.is(tok::less)) {
// It looks the user intended to write a template-id here, but the
// template-name was wrong. Try to fix that.
- TemplateNameKind TNK = TNK_Type_template;
+ // FIXME: Invoke ParseOptionalCXXScopeSpecifier in a "'template' is neither
+ // required nor permitted" mode, and do this there.
+ TemplateNameKind TNK = TNK_Non_template;
TemplateTy Template;
if (!Actions.DiagnoseUnknownTemplateName(*Id, IdLoc, getCurScope(),
&SS, Template, TNK)) {
@@ -1176,14 +1181,6 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
<< Id;
}
- if (!Template) {
- TemplateArgList TemplateArgs;
- SourceLocation LAngleLoc, RAngleLoc;
- ParseTemplateIdAfterTemplateName(true, LAngleLoc, TemplateArgs,
- RAngleLoc);
- return true;
- }
-
// Form the template name
UnqualifiedId TemplateName;
TemplateName.setIdentifier(Id, IdLoc);
@@ -1192,8 +1189,9 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
if (AnnotateTemplateIdToken(Template, TNK, SS, SourceLocation(),
TemplateName))
return true;
- if (TNK == TNK_Type_template || TNK == TNK_Dependent_template_name)
- AnnotateTemplateIdTokenAsType(/*IsClassName*/true);
+ if (Tok.is(tok::annot_template_id) &&
+ takeTemplateIdAnnotation(Tok)->mightBeType())
+ AnnotateTemplateIdTokenAsType(SS, /*IsClassName*/true);
// If we didn't end up with a typename token, there's nothing more we
// can do.
@@ -1203,7 +1201,7 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
// Retrieve the type from the annotation token, consume that token, and
// return.
EndLocation = Tok.getAnnotationEndLoc();
- ParsedType Type = getTypeAnnotation(Tok);
+ TypeResult Type = getTypeAnnotation(Tok);
ConsumeAnnotationToken();
return Type;
}
@@ -1285,7 +1283,8 @@ bool Parser::isValidAfterTypeSpecifier(bool CouldBeBitfield) {
case tok::annot_pragma_ms_pointers_to_members:
return true;
case tok::colon:
- return CouldBeBitfield; // enum E { ... } : 2;
+ return CouldBeBitfield || // enum E { ... } : 2;
+ ColonIsSacred; // _Generic(..., enum E : 2);
// Microsoft compatibility
case tok::kw___cdecl: // struct foo {...} __cdecl x;
case tok::kw___fastcall: // struct foo {...} __fastcall x;
@@ -1547,7 +1546,9 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
CXXScopeSpec Spec;
bool HasValidSpec = true;
- if (ParseOptionalCXXScopeSpecifier(Spec, nullptr, EnteringContext)) {
+ if (ParseOptionalCXXScopeSpecifier(Spec, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ EnteringContext)) {
DS.SetTypeSpecError();
HasValidSpec = false;
}
@@ -1620,9 +1621,9 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
NameLoc = ConsumeAnnotationToken();
if (TemplateId->Kind == TNK_Undeclared_template) {
- // Try to resolve the template name to a type template.
- Actions.ActOnUndeclaredTypeTemplateName(getCurScope(), TemplateId->Template,
- TemplateId->Kind, NameLoc, Name);
+ // Try to resolve the template name to a type template. May update Kind.
+ Actions.ActOnUndeclaredTypeTemplateName(
+ getCurScope(), TemplateId->Template, TemplateId->Kind, NameLoc, Name);
if (TemplateId->Kind == TNK_Undeclared_template) {
RecoverFromUndeclaredTemplateName(
Name, NameLoc,
@@ -1631,10 +1632,9 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
}
}
- if (TemplateId && TemplateId->Kind != TNK_Type_template &&
- TemplateId->Kind != TNK_Dependent_template_name) {
+ if (TemplateId && !TemplateId->mightBeType()) {
// The template-name in the simple-template-id refers to
- // something other than a class template. Give an appropriate
+ // something other than a type template. Give an appropriate
// error message and skip to the ';'.
SourceRange Range(NameLoc);
if (SS.isNotEmpty())
@@ -1681,7 +1681,8 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
const PrintingPolicy &Policy = Actions.getASTContext().getPrintingPolicy();
Sema::TagUseKind TUK;
- if (DSC == DeclSpecContext::DSC_trailing)
+ if (isDefiningTypeSpecifierContext(DSC) == AllowDefiningTypeSpec::No ||
+ (getLangOpts().OpenMP && OpenMPDirectiveParsing))
TUK = Sema::TUK_Reference;
else if (Tok.is(tok::l_brace) ||
(getLangOpts().CPlusPlus && Tok.is(tok::colon)) ||
@@ -1806,7 +1807,9 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// or explicit instantiation.
ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
- if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
+ if (TemplateId->isInvalid()) {
+ // Can't build the declaration.
+ } else if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
TUK == Sema::TUK_Declaration) {
// This is an explicit instantiation of a class template.
ProhibitAttributes(attrs);
@@ -1826,7 +1829,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
TemplateInfo.Kind == ParsedTemplateInfo::NonTemplate)) {
ProhibitAttributes(attrs);
TypeResult = Actions.ActOnTagTemplateIdType(TUK, TagType, StartLoc,
- TemplateId->SS,
+ SS,
TemplateId->TemplateKWLoc,
TemplateId->Template,
TemplateId->TemplateNameLoc,
@@ -1876,7 +1879,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// Build the class template specialization.
TagOrTempResult = Actions.ActOnClassTemplateSpecialization(
getCurScope(), TagType, TUK, StartLoc, DS.getModulePrivateSpecLoc(),
- *TemplateId, attrs,
+ SS, *TemplateId, attrs,
MultiTemplateParamsArg(TemplateParams ? &(*TemplateParams)[0]
: nullptr,
TemplateParams ? TemplateParams->size() : 0),
@@ -1962,7 +1965,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
Decl *D =
SkipBody.CheckSameAsPrevious ? SkipBody.New : TagOrTempResult.get();
// Parse the definition body.
- ParseStructUnionBody(StartLoc, TagType, D);
+ ParseStructUnionBody(StartLoc, TagType, cast<RecordDecl>(D));
if (SkipBody.CheckSameAsPrevious &&
!Actions.ActOnDuplicateDefinition(DS, TagOrTempResult.get(),
SkipBody)) {
@@ -2182,7 +2185,6 @@ void Parser::HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
// declarations.
auto LateMethod = new LateParsedMethodDeclaration(this, ThisDecl);
getCurrentClass().LateParsedDeclarations.push_back(LateMethod);
- LateMethod->TemplateScope = getCurScope()->isTemplateParamScope();
// Stash the exception-specification tokens in the late-pased method.
LateMethod->ExceptionSpecTokens = FTI.ExceptionSpecTokens;
@@ -2501,7 +2503,8 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
if (isAccessDecl) {
// Collect the scope specifier token we annotated earlier.
CXXScopeSpec SS;
- ParseOptionalCXXScopeSpecifier(SS, nullptr,
+ ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
/*EnteringContext=*/false);
if (SS.isInvalid()) {
@@ -2512,8 +2515,9 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// Try to parse an unqualified-id.
SourceLocation TemplateKWLoc;
UnqualifiedId Name;
- if (ParseUnqualifiedId(SS, false, true, true, false, nullptr,
- &TemplateKWLoc, Name)) {
+ if (ParseUnqualifiedId(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false, false, true, true,
+ false, &TemplateKWLoc, Name)) {
SkipUntil(tok::semi);
return nullptr;
}
@@ -2642,6 +2646,8 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
}
ParsingDeclarator DeclaratorInfo(*this, DS, DeclaratorContext::MemberContext);
+ if (TemplateInfo.TemplateParams)
+ DeclaratorInfo.setTemplateParameterLists(TemplateParams);
VirtSpecifiers VS;
// Hold late-parsed attributes so we can attach a Decl to them later.
@@ -2656,7 +2662,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
auto &Zero = NextToken();
SmallString<8> Buffer;
- if (Zero.isNot(tok::numeric_constant) || Zero.getLength() != 1 ||
+ if (Zero.isNot(tok::numeric_constant) ||
PP.getSpelling(Zero, Buffer) != "0")
return false;
@@ -2707,6 +2713,11 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
DefinitionKind = FDK_Defaulted;
else if (KW.is(tok::kw_delete))
DefinitionKind = FDK_Deleted;
+ else if (KW.is(tok::code_completion)) {
+ Actions.CodeCompleteAfterFunctionEquals(DeclaratorInfo);
+ cutOffParsing();
+ return nullptr;
+ }
}
}
DeclaratorInfo.setFunctionDefinitionKind(DefinitionKind);
@@ -2714,7 +2725,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// C++11 [dcl.attr.grammar] p4: If an attribute-specifier-seq appertains
// to a friend declaration, that declaration shall be a definition.
if (DeclaratorInfo.isFunctionDeclarator() &&
- DefinitionKind != FDK_Definition && DS.isFriendSpecified()) {
+ DefinitionKind == FDK_Declaration && DS.isFriendSpecified()) {
// Diagnose attributes that appear before decl specifier:
// [[]] friend int foo();
ProhibitAttributes(FnAttrs);
@@ -2782,7 +2793,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
!DS.isFriendSpecified()) {
// It's a default member initializer.
if (BitfieldSize.get())
- Diag(Tok, getLangOpts().CPlusPlus2a
+ Diag(Tok, getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_bitfield_member_init
: diag::ext_bitfield_member_init);
HasInClassInit = Tok.is(tok::equal) ? ICIS_CopyInit : ICIS_ListInit;
@@ -2987,7 +2998,7 @@ ExprResult Parser::ParseCXXMemberInitializer(Decl *D, bool IsFunction,
<< 0 /* default */;
else
Diag(ConsumeToken(), diag::err_default_special_members)
- << getLangOpts().CPlusPlus2a;
+ << getLangOpts().CPlusPlus20;
return ExprError();
}
}
@@ -3334,6 +3345,7 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
// Each iteration of this loop reads one member-declaration.
ParseCXXClassMemberDeclarationWithPragmas(
CurAS, AccessAttrs, static_cast<DeclSpec::TST>(TagType), TagDecl);
+ MaybeDestroyTemplateIds();
}
T.consumeClose();
} else {
@@ -3359,6 +3371,16 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
// are complete and we can parse the delayed portions of method
// declarations and the lexed inline method definitions, along with any
// delayed attributes.
+
+ // Save the state of Sema.FPFeatures, and change the setting
+ // to the levels specified on the command line. Previous level
+ // will be restored when the RAII object is destroyed.
+ Sema::FPFeaturesStateRAII SaveFPFeaturesState(Actions);
+ FPOptionsOverride NewOverrides;
+ Actions.CurFPFeatures = NewOverrides.applyOverrides(getLangOpts());
+ Actions.FpPragmaStack.Act(Tok.getLocation(), Sema::PSK_Reset, StringRef(),
+ 0 /*unused*/);
+
SourceLocation SavedPrevTokLocation = PrevTokLocation;
ParseLexedPragmas(getCurrentClass());
ParseLexedAttributes(getCurrentClass());
@@ -3491,7 +3513,9 @@ void Parser::ParseConstructorInitializer(Decl *ConstructorDecl) {
MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
// parse '::'[opt] nested-name-specifier[opt]
CXXScopeSpec SS;
- if (ParseOptionalCXXScopeSpecifier(SS, nullptr, /*EnteringContext=*/false))
+ if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ /*EnteringContext=*/false))
return true;
// : identifier
@@ -3500,7 +3524,7 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
// : declype(...)
DeclSpec DS(AttrFactory);
// : template_name<...>
- ParsedType TemplateTypeTy;
+ TypeResult TemplateTypeTy;
if (Tok.is(tok::identifier)) {
// Get the identifier. This may be a member name or a class name,
@@ -3517,15 +3541,11 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
TemplateIdAnnotation *TemplateId = Tok.is(tok::annot_template_id)
? takeTemplateIdAnnotation(Tok)
: nullptr;
- if (TemplateId && (TemplateId->Kind == TNK_Type_template ||
- TemplateId->Kind == TNK_Dependent_template_name ||
- TemplateId->Kind == TNK_Undeclared_template)) {
- AnnotateTemplateIdTokenAsType(/*IsClassName*/true);
+ if (TemplateId && TemplateId->mightBeType()) {
+ AnnotateTemplateIdTokenAsType(SS, /*IsClassName*/true);
assert(Tok.is(tok::annot_typename) && "template-id -> type failed");
TemplateTypeTy = getTypeAnnotation(Tok);
ConsumeAnnotationToken();
- if (!TemplateTypeTy)
- return true;
} else {
Diag(Tok, diag::err_expected_member_or_base_name);
return true;
@@ -3544,8 +3564,10 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
SourceLocation EllipsisLoc;
TryConsumeToken(tok::ellipsis, EllipsisLoc);
+ if (TemplateTypeTy.isInvalid())
+ return true;
return Actions.ActOnMemInitializer(ConstructorDecl, getCurScope(), SS, II,
- TemplateTypeTy, DS, IdLoc,
+ TemplateTypeTy.get(), DS, IdLoc,
InitList.get(), EllipsisLoc);
} else if(Tok.is(tok::l_paren)) {
BalancedDelimiterTracker T(*this, tok::l_paren);
@@ -3555,8 +3577,10 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
ExprVector ArgExprs;
CommaLocsTy CommaLocs;
auto RunSignatureHelp = [&] {
+ if (TemplateTypeTy.isInvalid())
+ return QualType();
QualType PreferredType = Actions.ProduceCtorInitMemberSignatureHelp(
- getCurScope(), ConstructorDecl, SS, TemplateTypeTy, ArgExprs, II,
+ getCurScope(), ConstructorDecl, SS, TemplateTypeTy.get(), ArgExprs, II,
T.getOpenLocation());
CalledSignatureHelp = true;
return PreferredType;
@@ -3577,12 +3601,17 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
SourceLocation EllipsisLoc;
TryConsumeToken(tok::ellipsis, EllipsisLoc);
+ if (TemplateTypeTy.isInvalid())
+ return true;
return Actions.ActOnMemInitializer(ConstructorDecl, getCurScope(), SS, II,
- TemplateTypeTy, DS, IdLoc,
+ TemplateTypeTy.get(), DS, IdLoc,
T.getOpenLocation(), ArgExprs,
T.getCloseLocation(), EllipsisLoc);
}
+ if (TemplateTypeTy.isInvalid())
+ return true;
+
if (getLangOpts().CPlusPlus11)
return Diag(Tok, diag::err_expected_either) << tok::l_paren << tok::l_brace;
else
@@ -3908,8 +3937,8 @@ void Parser::PopParsingClass(Sema::ParsingClassState state) {
// after the top-level class is completely defined. Therefore, add
// it to the list of nested classes within its parent.
assert(getCurScope()->isClassScope() && "Nested class outside of class scope?");
- ClassStack.top()->LateParsedDeclarations.push_back(new LateParsedClass(this, Victim));
- Victim->TemplateScope = getCurScope()->getParent()->isTemplateParamScope();
+ ClassStack.top()->LateParsedDeclarations.push_back(
+ new LateParsedClass(this, Victim));
}
/// Try to parse an 'identifier' which appears within an attribute-token.
@@ -4364,7 +4393,7 @@ void Parser::ParseMicrosoftAttributes(ParsedAttributes &attrs,
BalancedDelimiterTracker T(*this, tok::l_square);
T.consumeOpen();
- // Skip most ms attributes except for a whitelist.
+ // Skip most ms attributes except for a specific list.
while (true) {
SkipUntil(tok::r_square, tok::identifier, StopAtSemi | StopBeforeMatch);
if (Tok.isNot(tok::identifier)) // ']', but also eof
diff --git a/clang/lib/Parse/ParseExpr.cpp b/clang/lib/Parse/ParseExpr.cpp
index 1442df046bb9..81e87582c6ad 100644
--- a/clang/lib/Parse/ParseExpr.cpp
+++ b/clang/lib/Parse/ParseExpr.cpp
@@ -234,7 +234,7 @@ ExprResult Parser::ParseCaseExpression(SourceLocation CaseLoc) {
/// \endverbatim
ExprResult Parser::ParseConstraintExpression() {
EnterExpressionEvaluationContext ConstantEvaluated(
- Actions, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+ Actions, Sema::ExpressionEvaluationContext::Unevaluated);
ExprResult LHS(ParseCastExpression(AnyCastExpr));
ExprResult Res(ParseRHSOfBinaryExpression(LHS, prec::LogicalOr));
if (Res.isUsable() && !Actions.CheckConstraintExpression(Res.get())) {
@@ -256,7 +256,7 @@ ExprResult Parser::ParseConstraintExpression() {
ExprResult
Parser::ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause) {
EnterExpressionEvaluationContext ConstantEvaluated(
- Actions, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+ Actions, Sema::ExpressionEvaluationContext::Unevaluated);
bool NotPrimaryExpression = false;
auto ParsePrimary = [&] () {
ExprResult E = ParseCastExpression(PrimaryExprOnly,
@@ -625,13 +625,31 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
SourceRange(Actions.getExprRange(LHS.get()).getBegin(),
Actions.getExprRange(RHS.get()).getEnd()));
- LHS = Actions.ActOnBinOp(getCurScope(), OpToken.getLocation(),
- OpToken.getKind(), LHS.get(), RHS.get());
+ ExprResult BinOp =
+ Actions.ActOnBinOp(getCurScope(), OpToken.getLocation(),
+ OpToken.getKind(), LHS.get(), RHS.get());
+ if (BinOp.isInvalid())
+ BinOp = Actions.CreateRecoveryExpr(LHS.get()->getBeginLoc(),
+ RHS.get()->getEndLoc(),
+ {LHS.get(), RHS.get()});
+ LHS = BinOp;
} else {
- LHS = Actions.ActOnConditionalOp(OpToken.getLocation(), ColonLoc,
- LHS.get(), TernaryMiddle.get(),
- RHS.get());
+ ExprResult CondOp = Actions.ActOnConditionalOp(
+ OpToken.getLocation(), ColonLoc, LHS.get(), TernaryMiddle.get(),
+ RHS.get());
+ if (CondOp.isInvalid()) {
+ std::vector<clang::Expr *> Args;
+ // TernaryMiddle can be null for the GNU conditional expr extension.
+ if (TernaryMiddle.get())
+ Args = {LHS.get(), TernaryMiddle.get(), RHS.get()};
+ else
+ Args = {LHS.get(), RHS.get()};
+ CondOp = Actions.CreateRecoveryExpr(LHS.get()->getBeginLoc(),
+ RHS.get()->getEndLoc(), Args);
+ }
+
+ LHS = CondOp;
}
// In this case, ActOnBinOp or ActOnConditionalOp performed the
// CorrectDelayedTyposInExpr check.
@@ -756,6 +774,7 @@ class CastExpressionIdValidator final : public CorrectionCandidateCallback {
/// [C++11] user-defined-literal
/// '(' expression ')'
/// [C11] generic-selection
+/// [C++2a] requires-expression
/// '__func__' [C99 6.4.2.2]
/// [GNU] '__FUNCTION__'
/// [MS] '__FUNCDNAME__'
@@ -901,6 +920,11 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
auto SavedType = PreferredType;
NotCastExpr = false;
+ // Are postfix-expression suffix operators permitted after this
+ // cast-expression? If not, and we find some, we'll parse them anyway and
+ // diagnose them.
+ bool AllowSuffix = true;
+
// This handles all of cast-expression, unary-expression, postfix-expression,
// and primary-expression. We handle them together like this for efficiency
// and to simplify handling of an expression starting with a '(' token: which
@@ -910,8 +934,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
// If the parsed tokens consist of a primary-expression, the cases below
// break out of the switch; at the end we call ParsePostfixExpressionSuffix
// to handle the postfix expression suffixes. Cases that cannot be followed
- // by postfix exprs should return without invoking
- // ParsePostfixExpressionSuffix.
+ // by postfix exprs should set AllowSuffix to false.
switch (SavedKind) {
case tok::l_paren: {
// If this expression is limited to being a unary-expression, the paren can
@@ -934,8 +957,11 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
Res = ParseParenExpression(ParenExprType, false/*stopIfCastExr*/,
isTypeCast == IsTypeCast, CastTy, RParenLoc);
+ // FIXME: What should we do if a vector literal is followed by a
+ // postfix-expression suffix? Usually postfix operators are permitted on
+ // literals.
if (isVectorLiteral)
- return Res;
+ return Res;
switch (ParenExprType) {
case SimpleExpr: break; // Nothing else to do.
@@ -973,14 +999,31 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
case tok::kw___objc_yes:
case tok::kw___objc_no:
- return ParseObjCBoolLiteral();
+ Res = ParseObjCBoolLiteral();
+ break;
case tok::kw_nullptr:
Diag(Tok, diag::warn_cxx98_compat_nullptr);
- return Actions.ActOnCXXNullPtrLiteral(ConsumeToken());
+ Res = Actions.ActOnCXXNullPtrLiteral(ConsumeToken());
+ break;
+ case tok::annot_uneval_primary_expr:
case tok::annot_primary_expr:
Res = getExprAnnotation(Tok);
+ if (SavedKind == tok::annot_uneval_primary_expr) {
+ if (Expr *E = Res.get()) {
+ if (!E->isTypeDependent() && !E->containsErrors()) {
+ // TransformToPotentiallyEvaluated expects that it will still be in a
+ // (temporary) unevaluated context and then looks through that context
+ // to build it in the surrounding context. So we need to push an
+ // unevaluated context to balance things out.
+ EnterExpressionEvaluationContext Unevaluated(
+ Actions, Sema::ExpressionEvaluationContext::Unevaluated,
+ Sema::ReuseLambdaContextDecl);
+ Res = Actions.TransformToPotentiallyEvaluated(Res.get());
+ }
+ }
+ }
ConsumeAnnotationToken();
if (!Res.isInvalid() && Tok.is(tok::less))
checkPotentialAngleBracket(Res);
@@ -1005,7 +1048,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
assert(Tok.isNot(tok::kw_decltype) && Tok.isNot(tok::kw___super));
return ParseCastExpression(ParseKind, isAddressOfOperand, isTypeCast,
isVectorLiteral, NotPrimaryExpression);
-
+
case tok::identifier: { // primary-expression: identifier
// unqualified-id: identifier
// constant: enumeration-constant
@@ -1261,7 +1304,8 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
Res = ParseGenericSelectionExpression();
break;
case tok::kw___builtin_available:
- return ParseAvailabilityCheckExpr(Tok.getLocation());
+ Res = ParseAvailabilityCheckExpr(Tok.getLocation());
+ break;
case tok::kw___builtin_va_arg:
case tok::kw___builtin_offsetof:
case tok::kw___builtin_choose_expr:
@@ -1273,9 +1317,11 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
case tok::kw___builtin_LINE:
if (NotPrimaryExpression)
*NotPrimaryExpression = true;
+ // This parses the complete suffix; we can return early.
return ParseBuiltinPrimaryExpression();
case tok::kw___null:
- return Actions.ActOnGNUNullExpr(ConsumeToken());
+ Res = Actions.ActOnGNUNullExpr(ConsumeToken());
+ break;
case tok::plusplus: // unary-expression: '++' unary-expression [C99]
case tok::minusminus: { // unary-expression: '--' unary-expression [C99]
@@ -1304,9 +1350,14 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
UnconsumeToken(SavedTok);
return ExprError();
}
- if (!Res.isInvalid())
+ if (!Res.isInvalid()) {
+ Expr *Arg = Res.get();
Res = Actions.ActOnUnaryOp(getCurScope(), SavedTok.getLocation(),
- SavedKind, Res.get());
+ SavedKind, Arg);
+ if (Res.isInvalid())
+ Res = Actions.CreateRecoveryExpr(SavedTok.getLocation(),
+ Arg->getEndLoc(), Arg);
+ }
return Res;
}
case tok::amp: { // unary-expression: '&' cast-expression
@@ -1316,8 +1367,13 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
SourceLocation SavedLoc = ConsumeToken();
PreferredType.enterUnary(Actions, Tok.getLocation(), tok::amp, SavedLoc);
Res = ParseCastExpression(AnyCastExpr, true);
- if (!Res.isInvalid())
- Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Res.get());
+ if (!Res.isInvalid()) {
+ Expr *Arg = Res.get();
+ Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Arg);
+ if (Res.isInvalid())
+ Res = Actions.CreateRecoveryExpr(Tok.getLocation(), Arg->getEndLoc(),
+ Arg);
+ }
return Res;
}
@@ -1333,8 +1389,12 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
SourceLocation SavedLoc = ConsumeToken();
PreferredType.enterUnary(Actions, Tok.getLocation(), SavedKind, SavedLoc);
Res = ParseCastExpression(AnyCastExpr);
- if (!Res.isInvalid())
- Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Res.get());
+ if (!Res.isInvalid()) {
+ Expr *Arg = Res.get();
+ Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Arg);
+ if (Res.isInvalid())
+ Res = Actions.CreateRecoveryExpr(SavedLoc, Arg->getEndLoc(), Arg);
+ }
return Res;
}
@@ -1373,7 +1433,9 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
case tok::kw___builtin_omp_required_simd_align:
if (NotPrimaryExpression)
*NotPrimaryExpression = true;
- return ParseUnaryExprOrTypeTraitExpression();
+ AllowSuffix = false;
+ Res = ParseUnaryExprOrTypeTraitExpression();
+ break;
case tok::ampamp: { // unary-expression: '&&' identifier
if (NotPrimaryExpression)
*NotPrimaryExpression = true;
@@ -1389,12 +1451,14 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
Tok.getLocation());
Res = Actions.ActOnAddrLabel(AmpAmpLoc, Tok.getLocation(), LD);
ConsumeToken();
- return Res;
+ AllowSuffix = false;
+ break;
}
case tok::kw_const_cast:
case tok::kw_dynamic_cast:
case tok::kw_reinterpret_cast:
case tok::kw_static_cast:
+ case tok::kw_addrspace_cast:
if (NotPrimaryExpression)
*NotPrimaryExpression = true;
Res = ParseCXXCasts();
@@ -1417,10 +1481,12 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
case tok::kw_this:
Res = ParseCXXThis();
break;
-
+ case tok::kw___builtin_unique_stable_name:
+ Res = ParseUniqueStableNameExpression();
+ break;
case tok::annot_typename:
if (isStartOfObjCClassMessageMissingOpenBracket()) {
- ParsedType Type = getTypeAnnotation(Tok);
+ TypeResult Type = getTypeAnnotation(Tok);
// Fake up a Declarator to use with ActOnTypeName.
DeclSpec DS(AttrFactory);
@@ -1457,11 +1523,13 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
case tok::kw_long:
case tok::kw___int64:
case tok::kw___int128:
+ case tok::kw__ExtInt:
case tok::kw_signed:
case tok::kw_unsigned:
case tok::kw_half:
case tok::kw_float:
case tok::kw_double:
+ case tok::kw___bf16:
case tok::kw__Float16:
case tok::kw___float128:
case tok::kw_void:
@@ -1528,9 +1596,10 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
// type, translate it into a type and continue parsing as a
// cast expression.
CXXScopeSpec SS;
- ParseOptionalCXXScopeSpecifier(SS, nullptr,
+ ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
/*EnteringContext=*/false);
- AnnotateTemplateIdTokenAsType();
+ AnnotateTemplateIdTokenAsType(SS);
return ParseCastExpression(ParseKind, isAddressOfOperand, NotCastExpr,
isTypeCast, isVectorLiteral,
NotPrimaryExpression);
@@ -1548,7 +1617,8 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
// We have a template-id that we know refers to a type,
// translate it into a type and continue parsing as a cast
// expression.
- AnnotateTemplateIdTokenAsType();
+ CXXScopeSpec SS;
+ AnnotateTemplateIdTokenAsType(SS);
return ParseCastExpression(ParseKind, isAddressOfOperand,
NotCastExpr, isTypeCast, isVectorLiteral,
NotPrimaryExpression);
@@ -1577,12 +1647,16 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
if (Tok.is(tok::kw_new)) {
if (NotPrimaryExpression)
*NotPrimaryExpression = true;
- return ParseCXXNewExpression(true, CCLoc);
+ Res = ParseCXXNewExpression(true, CCLoc);
+ AllowSuffix = false;
+ break;
}
if (Tok.is(tok::kw_delete)) {
if (NotPrimaryExpression)
*NotPrimaryExpression = true;
- return ParseCXXDeleteExpression(true, CCLoc);
+ Res = ParseCXXDeleteExpression(true, CCLoc);
+ AllowSuffix = false;
+ break;
}
// This is not a type name or scope specifier, it is an invalid expression.
@@ -1593,12 +1667,21 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
case tok::kw_new: // [C++] new-expression
if (NotPrimaryExpression)
*NotPrimaryExpression = true;
- return ParseCXXNewExpression(false, Tok.getLocation());
+ Res = ParseCXXNewExpression(false, Tok.getLocation());
+ AllowSuffix = false;
+ break;
case tok::kw_delete: // [C++] delete-expression
if (NotPrimaryExpression)
*NotPrimaryExpression = true;
- return ParseCXXDeleteExpression(false, Tok.getLocation());
+ Res = ParseCXXDeleteExpression(false, Tok.getLocation());
+ AllowSuffix = false;
+ break;
+
+ case tok::kw_requires: // [C++2a] requires-expression
+ Res = ParseRequiresExpression();
+ AllowSuffix = false;
+ break;
case tok::kw_noexcept: { // [C++0x] 'noexcept' '(' expression ')'
if (NotPrimaryExpression)
@@ -1614,32 +1697,36 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
// which is an unevaluated operand, can throw an exception.
EnterExpressionEvaluationContext Unevaluated(
Actions, Sema::ExpressionEvaluationContext::Unevaluated);
- ExprResult Result = ParseExpression();
+ Res = ParseExpression();
T.consumeClose();
- if (!Result.isInvalid())
- Result = Actions.ActOnNoexceptExpr(KeyLoc, T.getOpenLocation(),
- Result.get(), T.getCloseLocation());
- return Result;
+ if (!Res.isInvalid())
+ Res = Actions.ActOnNoexceptExpr(KeyLoc, T.getOpenLocation(), Res.get(),
+ T.getCloseLocation());
+ AllowSuffix = false;
+ break;
}
#define TYPE_TRAIT(N,Spelling,K) \
case tok::kw_##Spelling:
#include "clang/Basic/TokenKinds.def"
- return ParseTypeTrait();
+ Res = ParseTypeTrait();
+ break;
case tok::kw___array_rank:
case tok::kw___array_extent:
if (NotPrimaryExpression)
*NotPrimaryExpression = true;
- return ParseArrayTypeTrait();
+ Res = ParseArrayTypeTrait();
+ break;
case tok::kw___is_lvalue_expr:
case tok::kw___is_rvalue_expr:
if (NotPrimaryExpression)
*NotPrimaryExpression = true;
- return ParseExpressionTrait();
+ Res = ParseExpressionTrait();
+ break;
case tok::at: {
if (NotPrimaryExpression)
@@ -1696,6 +1783,41 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
// parsed.
return Res;
+ if (!AllowSuffix) {
+ // FIXME: Don't parse a primary-expression suffix if we encountered a parse
+ // error already.
+ if (Res.isInvalid())
+ return Res;
+
+ switch (Tok.getKind()) {
+ case tok::l_square:
+ case tok::l_paren:
+ case tok::plusplus:
+ case tok::minusminus:
+ // "expected ';'" or similar is probably the right diagnostic here. Let
+ // the caller decide what to do.
+ if (Tok.isAtStartOfLine())
+ return Res;
+
+ LLVM_FALLTHROUGH;
+ case tok::period:
+ case tok::arrow:
+ break;
+
+ default:
+ return Res;
+ }
+
+ // This was a unary-expression for which a postfix-expression suffix is
+ // not permitted by the grammar (eg, a sizeof expression or
+ // new-expression or similar). Diagnose but parse the suffix anyway.
+ Diag(Tok.getLocation(), diag::err_postfix_after_unary_requires_parens)
+ << Tok.getKind() << Res.get()->getSourceRange()
+ << FixItHint::CreateInsertion(Res.get()->getBeginLoc(), "(")
+ << FixItHint::CreateInsertion(PP.getLocForEndOfToken(PrevTokLocation),
+ ")");
+ }
+
// These can be followed by postfix-expr pieces.
PreferredType = SavedType;
Res = ParsePostfixExpressionSuffix(Res);
@@ -1787,8 +1909,8 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
BalancedDelimiterTracker T(*this, tok::l_square);
T.consumeOpen();
Loc = T.getOpenLocation();
- ExprResult Idx, Length;
- SourceLocation ColonLoc;
+ ExprResult Idx, Length, Stride;
+ SourceLocation ColonLocFirst, ColonLocSecond;
PreferredType.enterSubscript(Actions, Tok.getLocation(), LHS.get());
if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
@@ -1802,10 +1924,22 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
}
if (Tok.is(tok::colon)) {
// Consume ':'
- ColonLoc = ConsumeToken();
- if (Tok.isNot(tok::r_square))
+ ColonLocFirst = ConsumeToken();
+ if (Tok.isNot(tok::r_square) &&
+ (getLangOpts().OpenMP < 50 ||
+ ((Tok.isNot(tok::colon) && getLangOpts().OpenMP >= 50))))
Length = ParseExpression();
}
+ if (getLangOpts().OpenMP >= 50 &&
+ (OMPClauseKind == llvm::omp::Clause::OMPC_to ||
+ OMPClauseKind == llvm::omp::Clause::OMPC_from) &&
+ Tok.is(tok::colon)) {
+ // Consume ':'
+ ColonLocSecond = ConsumeToken();
+ if (Tok.isNot(tok::r_square)) {
+ Stride = ParseExpression();
+ }
+ }
} else
Idx = ParseExpression();
@@ -1815,10 +1949,11 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
Idx = Actions.CorrectDelayedTyposInExpr(Idx);
Length = Actions.CorrectDelayedTyposInExpr(Length);
if (!LHS.isInvalid() && !Idx.isInvalid() && !Length.isInvalid() &&
- Tok.is(tok::r_square)) {
- if (ColonLoc.isValid()) {
- LHS = Actions.ActOnOMPArraySectionExpr(LHS.get(), Loc, Idx.get(),
- ColonLoc, Length.get(), RLoc);
+ !Stride.isInvalid() && Tok.is(tok::r_square)) {
+ if (ColonLocFirst.isValid() || ColonLocSecond.isValid()) {
+ LHS = Actions.ActOnOMPArraySectionExpr(
+ LHS.get(), Loc, Idx.get(), ColonLocFirst, ColonLocSecond,
+ Length.get(), Stride.get(), RLoc);
} else {
LHS = Actions.ActOnArraySubscriptExpr(getCurScope(), LHS.get(), Loc,
Idx.get(), RLoc);
@@ -1935,12 +2070,18 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
PT.consumeClose();
LHS = ExprError();
} else {
- assert((ArgExprs.size() == 0 ||
- ArgExprs.size()-1 == CommaLocs.size())&&
- "Unexpected number of commas!");
- LHS = Actions.ActOnCallExpr(getCurScope(), LHS.get(), Loc,
- ArgExprs, Tok.getLocation(),
+ assert(
+ (ArgExprs.size() == 0 || ArgExprs.size() - 1 == CommaLocs.size()) &&
+ "Unexpected number of commas!");
+ Expr *Fn = LHS.get();
+ SourceLocation RParLoc = Tok.getLocation();
+ LHS = Actions.ActOnCallExpr(getCurScope(), Fn, Loc, ArgExprs, RParLoc,
ExecConfig);
+ if (LHS.isInvalid()) {
+ ArgExprs.insert(ArgExprs.begin(), Fn);
+ LHS =
+ Actions.CreateRecoveryExpr(Fn->getBeginLoc(), RParLoc, ArgExprs);
+ }
PT.consumeClose();
}
@@ -1972,15 +2113,22 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
return ParsePostfixExpressionSuffix(Base);
}
- LHS = Actions.ActOnStartCXXMemberReference(getCurScope(), Base,
- OpLoc, OpKind, ObjectType,
+ LHS = Actions.ActOnStartCXXMemberReference(getCurScope(), Base, OpLoc,
+ OpKind, ObjectType,
MayBePseudoDestructor);
- if (LHS.isInvalid())
+ if (LHS.isInvalid()) {
+ // Clang will try to perform expression based completion as a
+ // fallback, which is confusing in case of member references. So we
+ // stop here without any completions.
+ if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
+ return ExprError();
+ }
break;
-
- ParseOptionalCXXScopeSpecifier(SS, ObjectType,
- /*EnteringContext=*/false,
- &MayBePseudoDestructor);
+ }
+ ParseOptionalCXXScopeSpecifier(
+ SS, ObjectType, LHS.get() && LHS.get()->containsErrors(),
+ /*EnteringContext=*/false, &MayBePseudoDestructor);
if (SS.isNotEmpty())
ObjectType = nullptr;
}
@@ -2040,14 +2188,13 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
IdentifierInfo *Id = Tok.getIdentifierInfo();
SourceLocation Loc = ConsumeToken();
Name.setIdentifier(Id, Loc);
- } else if (ParseUnqualifiedId(SS,
- /*EnteringContext=*/false,
- /*AllowDestructorName=*/true,
- /*AllowConstructorName=*/
- getLangOpts().MicrosoftExt &&
- SS.isNotEmpty(),
- /*AllowDeductionGuide=*/false,
- ObjectType, &TemplateKWLoc, Name)) {
+ } else if (ParseUnqualifiedId(
+ SS, ObjectType, LHS.get() && LHS.get()->containsErrors(),
+ /*EnteringContext=*/false,
+ /*AllowDestructorName=*/true,
+ /*AllowConstructorName=*/
+ getLangOpts().MicrosoftExt && SS.isNotEmpty(),
+ /*AllowDeductionGuide=*/false, &TemplateKWLoc, Name)) {
(void)Actions.CorrectDelayedTyposInExpr(LHS);
LHS = ExprError();
}
@@ -2057,15 +2204,25 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
OpKind, SS, TemplateKWLoc, Name,
CurParsedObjCImpl ? CurParsedObjCImpl->Dcl
: nullptr);
- if (!LHS.isInvalid() && Tok.is(tok::less))
- checkPotentialAngleBracket(LHS);
+ if (!LHS.isInvalid()) {
+ if (Tok.is(tok::less))
+ checkPotentialAngleBracket(LHS);
+ } else if (OrigLHS && Name.isValid()) {
+ // Preserve the LHS if the RHS is an invalid member.
+ LHS = Actions.CreateRecoveryExpr(OrigLHS->getBeginLoc(),
+ Name.getEndLoc(), {OrigLHS});
+ }
break;
}
case tok::plusplus: // postfix-expression: postfix-expression '++'
case tok::minusminus: // postfix-expression: postfix-expression '--'
if (!LHS.isInvalid()) {
+ Expr *Arg = LHS.get();
LHS = Actions.ActOnPostfixUnaryOp(getCurScope(), Tok.getLocation(),
- Tok.getKind(), LHS.get());
+ Tok.getKind(), Arg);
+ if (LHS.isInvalid())
+ LHS = Actions.CreateRecoveryExpr(Arg->getBeginLoc(),
+ Tok.getLocation(), Arg);
}
ConsumeToken();
break;
@@ -2175,6 +2332,43 @@ Parser::ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
}
+ExprResult Parser::ParseUniqueStableNameExpression() {
+ assert(Tok.is(tok::kw___builtin_unique_stable_name) &&
+ "Not __bulitin_unique_stable_name");
+
+ SourceLocation OpLoc = ConsumeToken();
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+
+  // __builtin_unique_stable_name expressions are always parenthesized.
+ if (T.expectAndConsume(diag::err_expected_lparen_after,
+ "__builtin_unique_stable_name"))
+ return ExprError();
+
+ if (isTypeIdInParens()) {
+ TypeResult Ty = ParseTypeName();
+ T.consumeClose();
+
+ if (Ty.isInvalid())
+ return ExprError();
+
+ return Actions.ActOnUniqueStableNameExpr(OpLoc, T.getOpenLocation(),
+ T.getCloseLocation(), Ty.get());
+ }
+
+ EnterExpressionEvaluationContext Unevaluated(
+ Actions, Sema::ExpressionEvaluationContext::Unevaluated);
+ ExprResult Result = ParseExpression();
+
+ if (Result.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return Result;
+ }
+
+ T.consumeClose();
+ return Actions.ActOnUniqueStableNameExpr(OpLoc, T.getOpenLocation(),
+ T.getCloseLocation(), Result.get());
+}
+
/// Parse a sizeof or alignof expression.
///
/// \verbatim
@@ -2556,6 +2750,33 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
return ParsePostfixExpressionSuffix(Res.get());
}
+bool Parser::tryParseOpenMPArrayShapingCastPart() {
+ assert(Tok.is(tok::l_square) && "Expected open bracket");
+ bool ErrorFound = true;
+ TentativeParsingAction TPA(*this);
+ do {
+ if (Tok.isNot(tok::l_square))
+ break;
+ // Consume '['
+ ConsumeBracket();
+ // Skip inner expression.
+ while (!SkipUntil(tok::r_square, tok::annot_pragma_openmp_end,
+ StopAtSemi | StopBeforeMatch))
+ ;
+ if (Tok.isNot(tok::r_square))
+ break;
+ // Consume ']'
+ ConsumeBracket();
+ // Found ')' - done.
+ if (Tok.is(tok::r_paren)) {
+ ErrorFound = false;
+ break;
+ }
+ } while (Tok.isNot(tok::annot_pragma_openmp_end));
+ TPA.Revert();
+ return !ErrorFound;
+}
+
/// ParseParenExpression - This parses the unit that starts with a '(' token,
/// based on what is allowed by ExprType. The actual thing parsed is returned
/// in ExprType. If stopIfCastExpr is true, it will only return the parsed type,
@@ -2580,6 +2801,8 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
/// '(' '...' fold-operator cast-expression ')'
/// '(' cast-expression fold-operator '...'
/// fold-operator cast-expression ')'
+/// [OPENMP] Array shaping operation
+/// '(' '[' expression ']' { '[' expression ']' } cast-expression
/// \endverbatim
ExprResult
Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
@@ -2650,7 +2873,8 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
// If the substmt parsed correctly, build the AST node.
if (!Stmt.isInvalid()) {
- Result = Actions.ActOnStmtExpr(OpenLoc, Stmt.get(), Tok.getLocation());
+ Result = Actions.ActOnStmtExpr(getCurScope(), OpenLoc, Stmt.get(),
+ Tok.getLocation());
} else {
Actions.ActOnStmtExprError();
}
@@ -2685,7 +2909,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
PreferredType.enterTypeCast(Tok.getLocation(), Ty.get().get());
ExprResult SubExpr = ParseCastExpression(AnyCastExpr);
-
+
if (Ty.isInvalid() || SubExpr.isInvalid())
return ExprError();
@@ -2855,6 +3079,38 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
Result = Actions.ActOnParenListExpr(OpenLoc, Tok.getLocation(),
ArgExprs);
}
+ } else if (getLangOpts().OpenMP >= 50 && OpenMPDirectiveParsing &&
+ ExprType == CastExpr && Tok.is(tok::l_square) &&
+ tryParseOpenMPArrayShapingCastPart()) {
+ bool ErrorFound = false;
+ SmallVector<Expr *, 4> OMPDimensions;
+ SmallVector<SourceRange, 4> OMPBracketsRanges;
+ do {
+ BalancedDelimiterTracker TS(*this, tok::l_square);
+ TS.consumeOpen();
+ ExprResult NumElements =
+ Actions.CorrectDelayedTyposInExpr(ParseExpression());
+ if (!NumElements.isUsable()) {
+ ErrorFound = true;
+ while (!SkipUntil(tok::r_square, tok::r_paren,
+ StopAtSemi | StopBeforeMatch))
+ ;
+ }
+ TS.consumeClose();
+ OMPDimensions.push_back(NumElements.get());
+ OMPBracketsRanges.push_back(TS.getRange());
+ } while (Tok.isNot(tok::r_paren));
+ // Match the ')'.
+ T.consumeClose();
+ RParenLoc = T.getCloseLocation();
+ Result = Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression());
+ if (ErrorFound) {
+ Result = ExprError();
+ } else if (!Result.isInvalid()) {
+ Result = Actions.ActOnOMPArrayShapingExpr(
+ Result.get(), OpenLoc, RParenLoc, OMPDimensions, OMPBracketsRanges);
+ }
+ return Result;
} else {
InMessageExpressionRAIIObject InMessage(*this, false);
@@ -3123,6 +3379,16 @@ bool Parser::ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
if (Tok.is(tok::ellipsis))
Expr = Actions.ActOnPackExpansion(Expr.get(), ConsumeToken());
+ else if (Tok.is(tok::code_completion)) {
+ // There's nothing to suggest in here as we parsed a full expression.
+    // Instead fail and propagate the error since the caller might have
+    // something to suggest, e.g. signature help in a function call. Note that this is
+ // performed before pushing the \p Expr, so that signature help can report
+ // current argument correctly.
+ SawError = true;
+ cutOffParsing();
+ break;
+ }
if (Expr.isInvalid()) {
SkipUntil(tok::comma, tok::r_paren, StopBeforeMatch);
SawError = true;
diff --git a/clang/lib/Parse/ParseExprCXX.cpp b/clang/lib/Parse/ParseExprCXX.cpp
index e685d5ea8a9c..aa35200c33b6 100644
--- a/clang/lib/Parse/ParseExprCXX.cpp
+++ b/clang/lib/Parse/ParseExprCXX.cpp
@@ -11,7 +11,9 @@
//===----------------------------------------------------------------------===//
#include "clang/Parse/Parser.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Lex/LiteralSupport.h"
#include "clang/Parse/ParseDiagnostic.h"
@@ -29,10 +31,11 @@ static int SelectDigraphErrorMessage(tok::TokenKind Kind) {
// template name
case tok::unknown: return 0;
// casts
- case tok::kw_const_cast: return 1;
- case tok::kw_dynamic_cast: return 2;
- case tok::kw_reinterpret_cast: return 3;
- case tok::kw_static_cast: return 4;
+ case tok::kw_addrspace_cast: return 1;
+ case tok::kw_const_cast: return 2;
+ case tok::kw_dynamic_cast: return 3;
+ case tok::kw_reinterpret_cast: return 4;
+ case tok::kw_static_cast: return 5;
default:
llvm_unreachable("Unknown type for digraph error message.");
}
@@ -122,13 +125,17 @@ void Parser::CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectType,
/// the "." or "->" of a member access expression, this parameter provides the
/// type of the object whose members are being accessed.
///
+/// \param ObjectHadErrors if this unqualified-id occurs within a member access
+/// expression, indicates whether the original subexpressions had any errors.
+/// When true, diagnostics for missing 'template' keyword will be suppressed.
+///
/// \param EnteringContext whether we will be entering into the context of
/// the nested-name-specifier after parsing it.
///
/// \param MayBePseudoDestructor When non-NULL, points to a flag that
/// indicates whether this nested-name-specifier may be part of a
/// pseudo-destructor name. In this case, the flag will be set false
-/// if we don't actually end up parsing a destructor name. Moreorover,
+/// if we don't actually end up parsing a destructor name. Moreover,
/// if we do end up determining that we are parsing a destructor name,
/// the last component of the nested-name-specifier is not parsed as
/// part of the scope specifier.
@@ -144,14 +151,10 @@ void Parser::CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectType,
///
///
/// \returns true if there was an error parsing a scope specifier
-bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
- ParsedType ObjectType,
- bool EnteringContext,
- bool *MayBePseudoDestructor,
- bool IsTypename,
- IdentifierInfo **LastII,
- bool OnlyNamespace,
- bool InUsingDeclaration) {
+bool Parser::ParseOptionalCXXScopeSpecifier(
+ CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHadErrors,
+ bool EnteringContext, bool *MayBePseudoDestructor, bool IsTypename,
+ IdentifierInfo **LastII, bool OnlyNamespace, bool InUsingDeclaration) {
assert(getLangOpts().CPlusPlus &&
"Call sites of this function should be guarded by checking for C++");
@@ -165,13 +168,6 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
return false;
}
- if (Tok.is(tok::annot_template_id)) {
- // If the current token is an annotated template id, it may already have
- // a scope specifier. Restore it.
- TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
- SS = TemplateId->SS;
- }
-
// Has to happen before any "return false"s in this function.
bool CheckForDestructor = false;
if (MayBePseudoDestructor && *MayBePseudoDestructor) {
@@ -321,13 +317,11 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
// Commit to parsing the template-id.
TPA.Commit();
TemplateTy Template;
- if (TemplateNameKind TNK = Actions.ActOnDependentTemplateName(
- getCurScope(), SS, TemplateKWLoc, TemplateName, ObjectType,
- EnteringContext, Template, /*AllowInjectedClassName*/ true)) {
- if (AnnotateTemplateIdToken(Template, TNK, SS, TemplateKWLoc,
- TemplateName, false))
- return true;
- } else
+ TemplateNameKind TNK = Actions.ActOnTemplateName(
+ getCurScope(), SS, TemplateKWLoc, TemplateName, ObjectType,
+ EnteringContext, Template, /*AllowInjectedClassName*/ true);
+ if (AnnotateTemplateIdToken(Template, TNK, SS, TemplateKWLoc,
+ TemplateName, false))
return true;
continue;
@@ -361,7 +355,8 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
- if (Actions.ActOnCXXNestedNameSpecifier(getCurScope(),
+ if (TemplateId->isInvalid() ||
+ Actions.ActOnCXXNestedNameSpecifier(getCurScope(),
SS,
TemplateId->TemplateKWLoc,
TemplateId->Template,
@@ -423,8 +418,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
}
if (Next.is(tok::coloncolon)) {
- if (CheckForDestructor && GetLookAheadToken(2).is(tok::tilde) &&
- !Actions.isNonTypeNestedNameSpecifier(getCurScope(), SS, IdInfo)) {
+ if (CheckForDestructor && GetLookAheadToken(2).is(tok::tilde)) {
*MayBePseudoDestructor = true;
return false;
}
@@ -517,28 +511,29 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
if (MemberOfUnknownSpecialization && (ObjectType || SS.isSet()) &&
(IsTypename || isTemplateArgumentList(1) == TPResult::True)) {
- // We have something like t::getAs<T>, where getAs is a
- // member of an unknown specialization. However, this will only
- // parse correctly as a template, so suggest the keyword 'template'
- // before 'getAs' and treat this as a dependent template name.
- unsigned DiagID = diag::err_missing_dependent_template_keyword;
- if (getLangOpts().MicrosoftExt)
- DiagID = diag::warn_missing_dependent_template_keyword;
-
- Diag(Tok.getLocation(), DiagID)
- << II.getName()
- << FixItHint::CreateInsertion(Tok.getLocation(), "template ");
-
- if (TemplateNameKind TNK = Actions.ActOnDependentTemplateName(
- getCurScope(), SS, Tok.getLocation(), TemplateName, ObjectType,
- EnteringContext, Template, /*AllowInjectedClassName*/ true)) {
- // Consume the identifier.
- ConsumeToken();
- if (AnnotateTemplateIdToken(Template, TNK, SS, SourceLocation(),
- TemplateName, false))
- return true;
+ // If we had errors before, ObjectType can be dependent even without any
+ // templates. Do not report missing template keyword in that case.
+ if (!ObjectHadErrors) {
+ // We have something like t::getAs<T>, where getAs is a
+ // member of an unknown specialization. However, this will only
+ // parse correctly as a template, so suggest the keyword 'template'
+ // before 'getAs' and treat this as a dependent template name.
+ unsigned DiagID = diag::err_missing_dependent_template_keyword;
+ if (getLangOpts().MicrosoftExt)
+ DiagID = diag::warn_missing_dependent_template_keyword;
+
+ Diag(Tok.getLocation(), DiagID)
+ << II.getName()
+ << FixItHint::CreateInsertion(Tok.getLocation(), "template ");
}
- else
+
+ SourceLocation TemplateNameLoc = ConsumeToken();
+
+ TemplateNameKind TNK = Actions.ActOnTemplateName(
+ getCurScope(), SS, TemplateNameLoc, TemplateName, ObjectType,
+ EnteringContext, Template, /*AllowInjectedClassName*/ true);
+ if (AnnotateTemplateIdToken(Template, TNK, SS, SourceLocation(),
+ TemplateName, false))
return true;
continue;
@@ -553,7 +548,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
// Even if we didn't see any pieces of a nested-name-specifier, we
// still check whether there is a tilde in this position, which
// indicates a potential pseudo-destructor.
- if (CheckForDestructor && Tok.is(tok::tilde))
+ if (CheckForDestructor && !HasScopeSpecifier && Tok.is(tok::tilde))
*MayBePseudoDestructor = true;
return false;
@@ -599,12 +594,12 @@ ExprResult Parser::tryParseCXXIdExpression(CXXScopeSpec &SS,
default:
SourceLocation TemplateKWLoc;
UnqualifiedId Name;
- if (ParseUnqualifiedId(SS,
+ if (ParseUnqualifiedId(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
/*EnteringContext=*/false,
/*AllowDestructorName=*/false,
/*AllowConstructorName=*/false,
- /*AllowDeductionGuide=*/false,
- /*ObjectType=*/nullptr, &TemplateKWLoc, Name))
+ /*AllowDeductionGuide=*/false, &TemplateKWLoc, Name))
return ExprError();
// This is only the direct operand of an & operator if it is not
@@ -672,7 +667,9 @@ ExprResult Parser::ParseCXXIdExpression(bool isAddressOfOperand) {
// '::' unqualified-id
//
CXXScopeSpec SS;
- ParseOptionalCXXScopeSpecifier(SS, nullptr, /*EnteringContext=*/false);
+ ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ /*EnteringContext=*/false);
Token Replacement;
ExprResult Result =
@@ -1261,17 +1258,16 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
};
// FIXME: Consider allowing this as an extension for GCC compatibiblity.
- const bool HasExplicitTemplateParams = Tok.is(tok::less);
- ParseScope TemplateParamScope(this, Scope::TemplateParamScope,
- /*EnteredScope=*/HasExplicitTemplateParams);
- if (HasExplicitTemplateParams) {
- Diag(Tok, getLangOpts().CPlusPlus2a
+ MultiParseScope TemplateParamScope(*this);
+ if (Tok.is(tok::less)) {
+ Diag(Tok, getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_lambda_template_parameter_list
: diag::ext_lambda_template_parameter_list);
SmallVector<NamedDecl*, 4> TemplateParams;
SourceLocation LAngleLoc, RAngleLoc;
- if (ParseTemplateParameters(CurTemplateDepthTracker.getDepth(),
+ if (ParseTemplateParameters(TemplateParamScope,
+ CurTemplateDepthTracker.getDepth(),
TemplateParams, LAngleLoc, RAngleLoc)) {
Actions.ActOnLambdaError(LambdaBeginLoc, getCurScope());
return ExprError();
@@ -1306,8 +1302,8 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
Actions.RecordParsingTemplateParameterDepth(
CurTemplateDepthTracker.getOriginalDepth());
- ParseParameterDeclarationClause(D, Attr, ParamInfo, EllipsisLoc);
-
+ ParseParameterDeclarationClause(D.getContext(), Attr, ParamInfo,
+ EllipsisLoc);
// For a generic lambda, each 'auto' within the parameter declaration
// clause creates a template type parameter, so increment the depth.
// If we've parsed any explicit template parameters, then the depth will
@@ -1516,12 +1512,15 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
/// 'reinterpret_cast' '<' type-name '>' '(' expression ')'
/// 'const_cast' '<' type-name '>' '(' expression ')'
///
+/// C++ for OpenCL s2.3.1 adds:
+/// 'addrspace_cast' '<' type-name '>' '(' expression ')'
ExprResult Parser::ParseCXXCasts() {
tok::TokenKind Kind = Tok.getKind();
const char *CastName = nullptr; // For error messages
switch (Kind) {
default: llvm_unreachable("Unknown C++ cast!");
+ case tok::kw_addrspace_cast: CastName = "addrspace_cast"; break;
case tok::kw_const_cast: CastName = "const_cast"; break;
case tok::kw_dynamic_cast: CastName = "dynamic_cast"; break;
case tok::kw_reinterpret_cast: CastName = "reinterpret_cast"; break;
@@ -1694,31 +1693,42 @@ ExprResult Parser::ParseCXXUuidof() {
/// Parse a C++ pseudo-destructor expression after the base,
/// . or -> operator, and nested-name-specifier have already been
-/// parsed.
+/// parsed. We're handling this fragment of the grammar:
///
-/// postfix-expression: [C++ 5.2]
-/// postfix-expression . pseudo-destructor-name
-/// postfix-expression -> pseudo-destructor-name
+/// postfix-expression: [C++2a expr.post]
+/// postfix-expression . template[opt] id-expression
+/// postfix-expression -> template[opt] id-expression
///
-/// pseudo-destructor-name:
-/// ::[opt] nested-name-specifier[opt] type-name :: ~type-name
-/// ::[opt] nested-name-specifier template simple-template-id ::
-/// ~type-name
-/// ::[opt] nested-name-specifier[opt] ~type-name
+/// id-expression:
+/// qualified-id
+/// unqualified-id
///
+/// qualified-id:
+/// nested-name-specifier template[opt] unqualified-id
+///
+/// nested-name-specifier:
+/// type-name ::
+/// decltype-specifier :: FIXME: not implemented, but probably only
+/// allowed in C++ grammar by accident
+/// nested-name-specifier identifier ::
+/// nested-name-specifier template[opt] simple-template-id ::
+/// [...]
+///
+/// unqualified-id:
+/// ~ type-name
+/// ~ decltype-specifier
+/// [...]
+///
+/// ... where the all but the last component of the nested-name-specifier
+/// has already been parsed, and the base expression is not of a non-dependent
+/// class type.
ExprResult
Parser::ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType) {
- // We're parsing either a pseudo-destructor-name or a dependent
- // member access that has the same form as a
- // pseudo-destructor-name. We parse both in the same way and let
- // the action model sort them out.
- //
- // Note that the ::[opt] nested-name-specifier[opt] has already
- // been parsed, and if there was a simple-template-id, it has
- // been coalesced into a template-id annotation token.
+ // If the last component of the (optional) nested-name-specifier is
+ // template[opt] simple-template-id, it has already been annotated.
UnqualifiedId FirstTypeName;
SourceLocation CCLoc;
if (Tok.is(tok::identifier)) {
@@ -1727,14 +1737,16 @@ Parser::ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
assert(Tok.is(tok::coloncolon) &&"ParseOptionalCXXScopeSpecifier fail");
CCLoc = ConsumeToken();
} else if (Tok.is(tok::annot_template_id)) {
- // FIXME: retrieve TemplateKWLoc from template-id annotation and
- // store it in the pseudo-dtor node (to be used when instantiating it).
- FirstTypeName.setTemplateId(
- (TemplateIdAnnotation *)Tok.getAnnotationValue());
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ // FIXME: Carry on and build an AST representation for tooling.
+ if (TemplateId->isInvalid())
+ return ExprError();
+ FirstTypeName.setTemplateId(TemplateId);
ConsumeAnnotationToken();
assert(Tok.is(tok::coloncolon) &&"ParseOptionalCXXScopeSpecifier fail");
CCLoc = ConsumeToken();
} else {
+ assert(SS.isEmpty() && "missing last component of nested name specifier");
FirstTypeName.setIdentifier(nullptr, SourceLocation());
}
@@ -1742,7 +1754,7 @@ Parser::ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
assert(Tok.is(tok::tilde) && "ParseOptionalCXXScopeSpecifier fail");
SourceLocation TildeLoc = ConsumeToken();
- if (Tok.is(tok::kw_decltype) && !FirstTypeName.isValid() && SS.isEmpty()) {
+ if (Tok.is(tok::kw_decltype) && !FirstTypeName.isValid()) {
DeclSpec DS(AttrFactory);
ParseDecltypeSpecifier(DS);
if (DS.getTypeSpecType() == TST_error)
@@ -1764,11 +1776,17 @@ Parser::ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
// If there is a '<', the second type name is a template-id. Parse
// it as such.
+ //
+ // FIXME: This is not a context in which a '<' is assumed to start a template
+ // argument list. This affects examples such as
+ // void f(auto *p) { p->~X<int>(); }
+ // ... but there's no ambiguity, and nowhere to write 'template' in such an
+ // example, so we accept it anyway.
if (Tok.is(tok::less) &&
- ParseUnqualifiedIdTemplateId(SS, SourceLocation(),
- Name, NameLoc,
- false, ObjectType, SecondTypeName,
- /*AssumeTemplateId=*/true))
+ ParseUnqualifiedIdTemplateId(
+ SS, ObjectType, Base && Base->containsErrors(), SourceLocation(),
+ Name, NameLoc, false, SecondTypeName,
+ /*AssumeTemplateId=*/true))
return ExprError();
return Actions.ActOnPseudoDestructorExpr(getCurScope(), Base, OpLoc, OpKind,
@@ -1862,6 +1880,7 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
&& "Expected '(' or '{'!");
if (Tok.is(tok::l_brace)) {
+ PreferredType.enterTypeCast(Tok.getLocation(), TypeRep.get());
ExprResult Init = ParseBraceInitializer();
if (Init.isInvalid())
return Init;
@@ -2131,12 +2150,8 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
// type-name
case tok::annot_typename: {
- if (getTypeAnnotation(Tok))
- DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec, DiagID,
- getTypeAnnotation(Tok), Policy);
- else
- DS.SetTypeSpecError();
-
+ DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec, DiagID,
+ getTypeAnnotation(Tok), Policy);
DS.SetRangeEnd(Tok.getAnnotationEndLoc());
ConsumeAnnotationToken();
@@ -2144,6 +2159,19 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
return;
}
+ case tok::kw__ExtInt: {
+ ExprResult ER = ParseExtIntegerArgument();
+ if (ER.isInvalid())
+ DS.SetTypeSpecError();
+ else
+ DS.SetExtIntType(Loc, ER.get(), PrevSpec, DiagID, Policy);
+
+ // Do this here because we have already consumed the close paren.
+ DS.SetRangeEnd(PrevTokLocation);
+ DS.Finish(Actions, Policy);
+ return;
+ }
+
// builtin types
case tok::kw_short:
DS.SetTypeSpecWidth(DeclSpec::TSW_short, Loc, PrevSpec, DiagID, Policy);
@@ -2172,6 +2200,9 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
case tok::kw___int128:
DS.SetTypeSpecType(DeclSpec::TST_int128, Loc, PrevSpec, DiagID, Policy);
break;
+ case tok::kw___bf16:
+ DS.SetTypeSpecType(DeclSpec::TST_BFloat16, Loc, PrevSpec, DiagID, Policy);
+ break;
case tok::kw_half:
DS.SetTypeSpecType(DeclSpec::TST_half, Loc, PrevSpec, DiagID, Policy);
break;
@@ -2254,6 +2285,12 @@ bool Parser::ParseCXXTypeSpecifierSeq(DeclSpec &DS) {
/// \param SS the nested-name-specifier that precedes this template-id, if
/// we're actually parsing a qualified-id.
///
+/// \param ObjectType if this unqualified-id occurs within a member access
+/// expression, the type of the base object whose member is being accessed.
+///
+/// \param ObjectHadErrors if this unqualified-id occurs within a member access
+/// expression, indicates whether the original subexpressions had any errors.
+///
/// \param Name for constructor and destructor names, this is the actual
/// identifier that may be a template-name.
///
@@ -2263,9 +2300,6 @@ bool Parser::ParseCXXTypeSpecifierSeq(DeclSpec &DS) {
/// \param EnteringContext whether we're entering the scope of the
/// nested-name-specifier.
///
-/// \param ObjectType if this unqualified-id occurs within a member access
-/// expression, the type of the base object whose member is being accessed.
-///
/// \param Id as input, describes the template-name or operator-function-id
/// that precedes the '<'. If template arguments were parsed successfully,
/// will be updated with the template-id.
@@ -2274,14 +2308,10 @@ bool Parser::ParseCXXTypeSpecifierSeq(DeclSpec &DS) {
/// refers to a template without performing name lookup to verify.
///
/// \returns true if a parse error occurred, false otherwise.
-bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
- SourceLocation TemplateKWLoc,
- IdentifierInfo *Name,
- SourceLocation NameLoc,
- bool EnteringContext,
- ParsedType ObjectType,
- UnqualifiedId &Id,
- bool AssumeTemplateId) {
+bool Parser::ParseUnqualifiedIdTemplateId(
+ CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHadErrors,
+ SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc,
+ bool EnteringContext, UnqualifiedId &Id, bool AssumeTemplateId) {
assert(Tok.is(tok::less) && "Expected '<' to finish parsing a template-id");
TemplateTy Template;
@@ -2293,11 +2323,9 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
if (AssumeTemplateId) {
// We defer the injected-class-name checks until we've found whether
// this template-id is used to form a nested-name-specifier or not.
- TNK = Actions.ActOnDependentTemplateName(
- getCurScope(), SS, TemplateKWLoc, Id, ObjectType, EnteringContext,
- Template, /*AllowInjectedClassName*/ true);
- if (TNK == TNK_Non_template)
- return true;
+ TNK = Actions.ActOnTemplateName(getCurScope(), SS, TemplateKWLoc, Id,
+ ObjectType, EnteringContext, Template,
+ /*AllowInjectedClassName*/ true);
} else {
bool MemberOfUnknownSpecialization;
TNK = Actions.isTemplateName(getCurScope(), SS,
@@ -2313,28 +2341,32 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
if (TNK == TNK_Non_template && MemberOfUnknownSpecialization &&
ObjectType && isTemplateArgumentList(0) == TPResult::True) {
- // We have something like t->getAs<T>(), where getAs is a
- // member of an unknown specialization. However, this will only
- // parse correctly as a template, so suggest the keyword 'template'
- // before 'getAs' and treat this as a dependent template name.
- std::string Name;
- if (Id.getKind() == UnqualifiedIdKind::IK_Identifier)
- Name = Id.Identifier->getName();
- else {
- Name = "operator ";
- if (Id.getKind() == UnqualifiedIdKind::IK_OperatorFunctionId)
- Name += getOperatorSpelling(Id.OperatorFunctionId.Operator);
- else
- Name += Id.Identifier->getName();
+ // If we had errors before, ObjectType can be dependent even without any
+ // templates, do not report missing template keyword in that case.
+ if (!ObjectHadErrors) {
+ // We have something like t->getAs<T>(), where getAs is a
+ // member of an unknown specialization. However, this will only
+ // parse correctly as a template, so suggest the keyword 'template'
+ // before 'getAs' and treat this as a dependent template name.
+ std::string Name;
+ if (Id.getKind() == UnqualifiedIdKind::IK_Identifier)
+ Name = std::string(Id.Identifier->getName());
+ else {
+ Name = "operator ";
+ if (Id.getKind() == UnqualifiedIdKind::IK_OperatorFunctionId)
+ Name += getOperatorSpelling(Id.OperatorFunctionId.Operator);
+ else
+ Name += Id.Identifier->getName();
+ }
+ Diag(Id.StartLocation, diag::err_missing_dependent_template_keyword)
+ << Name
+ << FixItHint::CreateInsertion(Id.StartLocation, "template ");
}
- Diag(Id.StartLocation, diag::err_missing_dependent_template_keyword)
- << Name
- << FixItHint::CreateInsertion(Id.StartLocation, "template ");
- TNK = Actions.ActOnDependentTemplateName(
+ TNK = Actions.ActOnTemplateName(
getCurScope(), SS, TemplateKWLoc, Id, ObjectType, EnteringContext,
Template, /*AllowInjectedClassName*/ true);
- if (TNK == TNK_Non_template)
- return true;
+ } else if (TNK == TNK_Non_template) {
+ return false;
}
}
break;
@@ -2347,6 +2379,8 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
TemplateName, ObjectType,
EnteringContext, Template,
MemberOfUnknownSpecialization);
+ if (TNK == TNK_Non_template)
+ return false;
break;
}
@@ -2355,11 +2389,9 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
bool MemberOfUnknownSpecialization;
TemplateName.setIdentifier(Name, NameLoc);
if (ObjectType) {
- TNK = Actions.ActOnDependentTemplateName(
+ TNK = Actions.ActOnTemplateName(
getCurScope(), SS, TemplateKWLoc, TemplateName, ObjectType,
EnteringContext, Template, /*AllowInjectedClassName*/ true);
- if (TNK == TNK_Non_template)
- return true;
} else {
TNK = Actions.isTemplateName(getCurScope(), SS, TemplateKWLoc.isValid(),
TemplateName, ObjectType,
@@ -2369,7 +2401,7 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
if (TNK == TNK_Non_template && !Id.DestructorName.get()) {
Diag(NameLoc, diag::err_destructor_template_id)
<< Name << SS.getRange();
- return true;
+ // Carry on to parse the template arguments before bailing out.
}
}
break;
@@ -2379,9 +2411,6 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
return false;
}
- if (TNK == TNK_Non_template)
- return false;
-
// Parse the enclosed template argument list.
SourceLocation LAngleLoc, RAngleLoc;
TemplateArgList TemplateArgs;
@@ -2389,6 +2418,10 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
RAngleLoc))
return true;
+ // If this is a non-template, we already issued a diagnostic.
+ if (TNK == TNK_Non_template)
+ return true;
+
if (Id.getKind() == UnqualifiedIdKind::IK_Identifier ||
Id.getKind() == UnqualifiedIdKind::IK_OperatorFunctionId ||
Id.getKind() == UnqualifiedIdKind::IK_LiteralOperatorId) {
@@ -2405,8 +2438,8 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
: Id.OperatorFunctionId.Operator;
TemplateIdAnnotation *TemplateId = TemplateIdAnnotation::Create(
- SS, TemplateKWLoc, Id.StartLocation, TemplateII, OpKind, Template, TNK,
- LAngleLoc, RAngleLoc, TemplateArgs, TemplateIds);
+ TemplateKWLoc, Id.StartLocation, TemplateII, OpKind, Template, TNK,
+ LAngleLoc, RAngleLoc, TemplateArgs, /*ArgsInvalid*/false, TemplateIds);
Id.setTemplateId(TemplateId);
return false;
@@ -2686,6 +2719,13 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
/// \param SS The nested-name-specifier that preceded this unqualified-id. If
/// non-empty, then we are parsing the unqualified-id of a qualified-id.
///
+/// \param ObjectType if this unqualified-id occurs within a member access
+/// expression, the type of the base object whose member is being accessed.
+///
+/// \param ObjectHadErrors if this unqualified-id occurs within a member access
+/// expression, indicates whether the original subexpressions had any errors.
+/// When true, diagnostics for missing 'template' keyword will be supressed.
+///
/// \param EnteringContext whether we are entering the scope of the
/// nested-name-specifier.
///
@@ -2695,17 +2735,14 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
///
/// \param AllowDeductionGuide whether we allow parsing a deduction guide name.
///
-/// \param ObjectType if this unqualified-id occurs within a member access
-/// expression, the type of the base object whose member is being accessed.
-///
/// \param Result on a successful parse, contains the parsed unqualified-id.
///
/// \returns true if parsing fails, false otherwise.
-bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
+bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType,
+ bool ObjectHadErrors, bool EnteringContext,
bool AllowDestructorName,
bool AllowConstructorName,
bool AllowDeductionGuide,
- ParsedType ObjectType,
SourceLocation *TemplateKWLoc,
UnqualifiedId &Result) {
if (TemplateKWLoc)
@@ -2764,10 +2801,11 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
TemplateTy Template;
if (Tok.is(tok::less))
return ParseUnqualifiedIdTemplateId(
- SS, TemplateKWLoc ? *TemplateKWLoc : SourceLocation(), Id, IdLoc,
- EnteringContext, ObjectType, Result, TemplateSpecified);
+ SS, ObjectType, ObjectHadErrors,
+ TemplateKWLoc ? *TemplateKWLoc : SourceLocation(), Id, IdLoc,
+ EnteringContext, Result, TemplateSpecified);
else if (TemplateSpecified &&
- Actions.ActOnDependentTemplateName(
+ Actions.ActOnTemplateName(
getCurScope(), SS, *TemplateKWLoc, Result, ObjectType,
EnteringContext, Template,
/*AllowInjectedClassName*/ true) == TNK_Non_template)
@@ -2781,6 +2819,13 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
if (Tok.is(tok::annot_template_id)) {
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ // FIXME: Consider passing invalid template-ids on to callers; they may
+ // be able to recover better than we can.
+ if (TemplateId->isInvalid()) {
+ ConsumeAnnotationToken();
+ return true;
+ }
+
// If the template-name names the current class, then this is a constructor
if (AllowConstructorName && TemplateId->Name &&
Actions.isCurrentClassName(*TemplateId->Name, getCurScope(), &SS)) {
@@ -2842,11 +2887,11 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
Result.getKind() == UnqualifiedIdKind::IK_LiteralOperatorId) &&
Tok.is(tok::less))
return ParseUnqualifiedIdTemplateId(
- SS, TemplateKWLoc ? *TemplateKWLoc : SourceLocation(), nullptr,
- SourceLocation(), EnteringContext, ObjectType, Result,
- TemplateSpecified);
+ SS, ObjectType, ObjectHadErrors,
+ TemplateKWLoc ? *TemplateKWLoc : SourceLocation(), nullptr,
+ SourceLocation(), EnteringContext, Result, TemplateSpecified);
else if (TemplateSpecified &&
- Actions.ActOnDependentTemplateName(
+ Actions.ActOnTemplateName(
getCurScope(), SS, *TemplateKWLoc, Result, ObjectType,
EnteringContext, Template,
/*AllowInjectedClassName*/ true) == TNK_Non_template)
@@ -2865,6 +2910,22 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
// Parse the '~'.
SourceLocation TildeLoc = ConsumeToken();
+ if (TemplateSpecified) {
+ // C++ [temp.names]p3:
+ // A name prefixed by the keyword template shall be a template-id [...]
+ //
+ // A template-id cannot begin with a '~' token. This would never work
+ // anyway: x.~A<int>() would specify that the destructor is a template,
+ // not that 'A' is a template.
+ //
+ // FIXME: Suggest replacing the attempted destructor name with a correct
+ // destructor name and recover. (This is not trivial if this would become
+ // a pseudo-destructor name).
+ Diag(*TemplateKWLoc, diag::err_unexpected_template_in_destructor_name)
+ << Tok.getLocation();
+ return true;
+ }
+
if (SS.isEmpty() && Tok.is(tok::kw_decltype)) {
DeclSpec DS(AttrFactory);
SourceLocation EndLoc = ParseDecltypeSpecifier(DS);
@@ -2884,7 +2945,7 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
// If the user wrote ~T::T, correct it to T::~T.
DeclaratorScopeObj DeclScopeObj(*this, SS);
- if (!TemplateSpecified && NextToken().is(tok::coloncolon)) {
+ if (NextToken().is(tok::coloncolon)) {
// Don't let ParseOptionalCXXScopeSpecifier() "correct"
// `int A; struct { ~A::A(); };` to `int A; struct { ~A:A(); };`,
// it will confuse this recovery logic.
@@ -2894,7 +2955,8 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
AnnotateScopeToken(SS, /*NewAnnotation*/true);
SS.clear();
}
- if (ParseOptionalCXXScopeSpecifier(SS, ObjectType, EnteringContext))
+ if (ParseOptionalCXXScopeSpecifier(SS, ObjectType, ObjectHadErrors,
+ EnteringContext))
return true;
if (SS.isNotEmpty())
ObjectType = nullptr;
@@ -2921,8 +2983,9 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
if (Tok.is(tok::less)) {
Result.setDestructorName(TildeLoc, nullptr, ClassNameLoc);
return ParseUnqualifiedIdTemplateId(
- SS, TemplateKWLoc ? *TemplateKWLoc : SourceLocation(), ClassName,
- ClassNameLoc, EnteringContext, ObjectType, Result, TemplateSpecified);
+ SS, ObjectType, ObjectHadErrors,
+ TemplateKWLoc ? *TemplateKWLoc : SourceLocation(), ClassName,
+ ClassNameLoc, EnteringContext, Result, TemplateSpecified);
}
// Note that this is a destructor name.
@@ -3057,10 +3120,14 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
auto RunSignatureHelp = [&]() {
ParsedType TypeRep =
Actions.ActOnTypeName(getCurScope(), DeclaratorInfo).get();
- assert(TypeRep && "invalid types should be handled before");
- QualType PreferredType = Actions.ProduceConstructorSignatureHelp(
- getCurScope(), TypeRep.get()->getCanonicalTypeInternal(),
- DeclaratorInfo.getEndLoc(), ConstructorArgs, ConstructorLParen);
+ QualType PreferredType;
+ // ActOnTypeName might adjust DeclaratorInfo and return a null type even
+ // the passing DeclaratorInfo is valid, e.g. running SignatureHelp on
+ // `new decltype(invalid) (^)`.
+ if (TypeRep)
+ PreferredType = Actions.ProduceConstructorSignatureHelp(
+ getCurScope(), TypeRep.get()->getCanonicalTypeInternal(),
+ DeclaratorInfo.getEndLoc(), ConstructorArgs, ConstructorLParen);
CalledSignatureHelp = true;
return PreferredType;
};
@@ -3262,6 +3329,310 @@ Parser::ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start) {
return Actions.ActOnCXXDelete(Start, UseGlobal, ArrayDelete, Operand.get());
}
+/// ParseRequiresExpression - Parse a C++2a requires-expression.
+/// C++2a [expr.prim.req]p1
+/// A requires-expression provides a concise way to express requirements on
+/// template arguments. A requirement is one that can be checked by name
+/// lookup (6.4) or by checking properties of types and expressions.
+///
+/// requires-expression:
+/// 'requires' requirement-parameter-list[opt] requirement-body
+///
+/// requirement-parameter-list:
+/// '(' parameter-declaration-clause[opt] ')'
+///
+/// requirement-body:
+/// '{' requirement-seq '}'
+///
+/// requirement-seq:
+/// requirement
+/// requirement-seq requirement
+///
+/// requirement:
+/// simple-requirement
+/// type-requirement
+/// compound-requirement
+/// nested-requirement
+ExprResult Parser::ParseRequiresExpression() {
+ assert(Tok.is(tok::kw_requires) && "Expected 'requires' keyword");
+ SourceLocation RequiresKWLoc = ConsumeToken(); // Consume 'requires'
+
+ llvm::SmallVector<ParmVarDecl *, 2> LocalParameterDecls;
+ if (Tok.is(tok::l_paren)) {
+ // requirement parameter list is present.
+ ParseScope LocalParametersScope(this, Scope::FunctionPrototypeScope |
+ Scope::DeclScope);
+ BalancedDelimiterTracker Parens(*this, tok::l_paren);
+ Parens.consumeOpen();
+ if (!Tok.is(tok::r_paren)) {
+ ParsedAttributes FirstArgAttrs(getAttrFactory());
+ SourceLocation EllipsisLoc;
+ llvm::SmallVector<DeclaratorChunk::ParamInfo, 2> LocalParameters;
+ ParseParameterDeclarationClause(DeclaratorContext::RequiresExprContext,
+ FirstArgAttrs, LocalParameters,
+ EllipsisLoc);
+ if (EllipsisLoc.isValid())
+ Diag(EllipsisLoc, diag::err_requires_expr_parameter_list_ellipsis);
+ for (auto &ParamInfo : LocalParameters)
+ LocalParameterDecls.push_back(cast<ParmVarDecl>(ParamInfo.Param));
+ }
+ Parens.consumeClose();
+ }
+
+ BalancedDelimiterTracker Braces(*this, tok::l_brace);
+ if (Braces.expectAndConsume())
+ return ExprError();
+
+ // Start of requirement list
+ llvm::SmallVector<concepts::Requirement *, 2> Requirements;
+
+ // C++2a [expr.prim.req]p2
+ // Expressions appearing within a requirement-body are unevaluated operands.
+ EnterExpressionEvaluationContext Ctx(
+ Actions, Sema::ExpressionEvaluationContext::Unevaluated);
+
+ ParseScope BodyScope(this, Scope::DeclScope);
+ RequiresExprBodyDecl *Body = Actions.ActOnStartRequiresExpr(
+ RequiresKWLoc, LocalParameterDecls, getCurScope());
+
+ if (Tok.is(tok::r_brace)) {
+ // Grammar does not allow an empty body.
+ // requirement-body:
+ // { requirement-seq }
+ // requirement-seq:
+ // requirement
+ // requirement-seq requirement
+ Diag(Tok, diag::err_empty_requires_expr);
+ // Continue anyway and produce a requires expr with no requirements.
+ } else {
+ while (!Tok.is(tok::r_brace)) {
+ switch (Tok.getKind()) {
+ case tok::l_brace: {
+ // Compound requirement
+ // C++ [expr.prim.req.compound]
+ // compound-requirement:
+ // '{' expression '}' 'noexcept'[opt]
+ // return-type-requirement[opt] ';'
+ // return-type-requirement:
+ // trailing-return-type
+ // '->' cv-qualifier-seq[opt] constrained-parameter
+ // cv-qualifier-seq[opt] abstract-declarator[opt]
+ BalancedDelimiterTracker ExprBraces(*this, tok::l_brace);
+ ExprBraces.consumeOpen();
+ ExprResult Expression =
+ Actions.CorrectDelayedTyposInExpr(ParseExpression());
+ if (!Expression.isUsable()) {
+ ExprBraces.skipToEnd();
+ SkipUntil(tok::semi, tok::r_brace, SkipUntilFlags::StopBeforeMatch);
+ break;
+ }
+ if (ExprBraces.consumeClose())
+ ExprBraces.skipToEnd();
+
+ concepts::Requirement *Req = nullptr;
+ SourceLocation NoexceptLoc;
+ TryConsumeToken(tok::kw_noexcept, NoexceptLoc);
+ if (Tok.is(tok::semi)) {
+ Req = Actions.ActOnCompoundRequirement(Expression.get(), NoexceptLoc);
+ if (Req)
+ Requirements.push_back(Req);
+ break;
+ }
+ if (!TryConsumeToken(tok::arrow))
+ // User probably forgot the arrow, remind them and try to continue.
+ Diag(Tok, diag::err_requires_expr_missing_arrow)
+ << FixItHint::CreateInsertion(Tok.getLocation(), "->");
+ // Try to parse a 'type-constraint'
+ if (TryAnnotateTypeConstraint()) {
+ SkipUntil(tok::semi, tok::r_brace, SkipUntilFlags::StopBeforeMatch);
+ break;
+ }
+ if (!isTypeConstraintAnnotation()) {
+ Diag(Tok, diag::err_requires_expr_expected_type_constraint);
+ SkipUntil(tok::semi, tok::r_brace, SkipUntilFlags::StopBeforeMatch);
+ break;
+ }
+ CXXScopeSpec SS;
+ if (Tok.is(tok::annot_cxxscope)) {
+ Actions.RestoreNestedNameSpecifierAnnotation(Tok.getAnnotationValue(),
+ Tok.getAnnotationRange(),
+ SS);
+ ConsumeAnnotationToken();
+ }
+
+ Req = Actions.ActOnCompoundRequirement(
+ Expression.get(), NoexceptLoc, SS, takeTemplateIdAnnotation(Tok),
+ TemplateParameterDepth);
+ ConsumeAnnotationToken();
+ if (Req)
+ Requirements.push_back(Req);
+ break;
+ }
+ default: {
+ bool PossibleRequiresExprInSimpleRequirement = false;
+ if (Tok.is(tok::kw_requires)) {
+ auto IsNestedRequirement = [&] {
+ RevertingTentativeParsingAction TPA(*this);
+ ConsumeToken(); // 'requires'
+ if (Tok.is(tok::l_brace))
+ // This is a requires expression
+ // requires (T t) {
+ // requires { t++; };
+ // ... ^
+ // }
+ return false;
+ if (Tok.is(tok::l_paren)) {
+ // This might be the parameter list of a requires expression
+ ConsumeParen();
+ auto Res = TryParseParameterDeclarationClause();
+ if (Res != TPResult::False) {
+ // Skip to the closing parenthesis
+ // FIXME: Don't traverse these tokens twice (here and in
+ // TryParseParameterDeclarationClause).
+ unsigned Depth = 1;
+ while (Depth != 0) {
+ if (Tok.is(tok::l_paren))
+ Depth++;
+ else if (Tok.is(tok::r_paren))
+ Depth--;
+ ConsumeAnyToken();
+ }
+ // requires (T t) {
+ // requires () ?
+ // ... ^
+ // - OR -
+ // requires (int x) ?
+ // ... ^
+ // }
+ if (Tok.is(tok::l_brace))
+ // requires (...) {
+ // ^ - a requires expression as a
+ // simple-requirement.
+ return false;
+ }
+ }
+ return true;
+ };
+ if (IsNestedRequirement()) {
+ ConsumeToken();
+ // Nested requirement
+ // C++ [expr.prim.req.nested]
+ // nested-requirement:
+ // 'requires' constraint-expression ';'
+ ExprResult ConstraintExpr =
+ Actions.CorrectDelayedTyposInExpr(ParseConstraintExpression());
+ if (ConstraintExpr.isInvalid() || !ConstraintExpr.isUsable()) {
+ SkipUntil(tok::semi, tok::r_brace,
+ SkipUntilFlags::StopBeforeMatch);
+ break;
+ }
+ if (auto *Req =
+ Actions.ActOnNestedRequirement(ConstraintExpr.get()))
+ Requirements.push_back(Req);
+ else {
+ SkipUntil(tok::semi, tok::r_brace,
+ SkipUntilFlags::StopBeforeMatch);
+ break;
+ }
+ break;
+ } else
+ PossibleRequiresExprInSimpleRequirement = true;
+ } else if (Tok.is(tok::kw_typename)) {
+ // This might be 'typename T::value_type;' (a type requirement) or
+ // 'typename T::value_type{};' (a simple requirement).
+ TentativeParsingAction TPA(*this);
+
+ // We need to consume the typename to allow 'requires { typename a; }'
+ SourceLocation TypenameKWLoc = ConsumeToken();
+ if (TryAnnotateCXXScopeToken()) {
+ TPA.Commit();
+ SkipUntil(tok::semi, tok::r_brace, SkipUntilFlags::StopBeforeMatch);
+ break;
+ }
+ CXXScopeSpec SS;
+ if (Tok.is(tok::annot_cxxscope)) {
+ Actions.RestoreNestedNameSpecifierAnnotation(
+ Tok.getAnnotationValue(), Tok.getAnnotationRange(), SS);
+ ConsumeAnnotationToken();
+ }
+
+ if (Tok.isOneOf(tok::identifier, tok::annot_template_id) &&
+ !NextToken().isOneOf(tok::l_brace, tok::l_paren)) {
+ TPA.Commit();
+ SourceLocation NameLoc = Tok.getLocation();
+ IdentifierInfo *II = nullptr;
+ TemplateIdAnnotation *TemplateId = nullptr;
+ if (Tok.is(tok::identifier)) {
+ II = Tok.getIdentifierInfo();
+ ConsumeToken();
+ } else {
+ TemplateId = takeTemplateIdAnnotation(Tok);
+ ConsumeAnnotationToken();
+ if (TemplateId->isInvalid())
+ break;
+ }
+
+ if (auto *Req = Actions.ActOnTypeRequirement(TypenameKWLoc, SS,
+ NameLoc, II,
+ TemplateId)) {
+ Requirements.push_back(Req);
+ }
+ break;
+ }
+ TPA.Revert();
+ }
+ // Simple requirement
+ // C++ [expr.prim.req.simple]
+ // simple-requirement:
+ // expression ';'
+ SourceLocation StartLoc = Tok.getLocation();
+ ExprResult Expression =
+ Actions.CorrectDelayedTyposInExpr(ParseExpression());
+ if (!Expression.isUsable()) {
+ SkipUntil(tok::semi, tok::r_brace, SkipUntilFlags::StopBeforeMatch);
+ break;
+ }
+ if (!Expression.isInvalid() && PossibleRequiresExprInSimpleRequirement)
+ Diag(StartLoc, diag::warn_requires_expr_in_simple_requirement)
+ << FixItHint::CreateInsertion(StartLoc, "requires");
+ if (auto *Req = Actions.ActOnSimpleRequirement(Expression.get()))
+ Requirements.push_back(Req);
+ else {
+ SkipUntil(tok::semi, tok::r_brace, SkipUntilFlags::StopBeforeMatch);
+ break;
+ }
+ // User may have tried to put some compound requirement stuff here
+ if (Tok.is(tok::kw_noexcept)) {
+ Diag(Tok, diag::err_requires_expr_simple_requirement_noexcept)
+ << FixItHint::CreateInsertion(StartLoc, "{")
+ << FixItHint::CreateInsertion(Tok.getLocation(), "}");
+ SkipUntil(tok::semi, tok::r_brace, SkipUntilFlags::StopBeforeMatch);
+ break;
+ }
+ break;
+ }
+ }
+ if (ExpectAndConsumeSemi(diag::err_expected_semi_requirement)) {
+ SkipUntil(tok::semi, tok::r_brace, SkipUntilFlags::StopBeforeMatch);
+ TryConsumeToken(tok::semi);
+ break;
+ }
+ }
+ if (Requirements.empty()) {
+ // Don't emit an empty requires expr here to avoid confusing the user with
+ // other diagnostics quoting an empty requires expression they never
+ // wrote.
+ Braces.consumeClose();
+ Actions.ActOnFinishRequiresExpr();
+ return ExprError();
+ }
+ }
+ Braces.consumeClose();
+ Actions.ActOnFinishRequiresExpr();
+ return Actions.ActOnRequiresExpr(RequiresKWLoc, Body, LocalParameterDecls,
+ Requirements, Braces.getCloseLocation());
+}
+
static TypeTrait TypeTraitFromTokKind(tok::TokenKind kind) {
switch (kind) {
default: llvm_unreachable("Not a known type trait");
@@ -3277,18 +3648,24 @@ case tok::kw_ ## Spelling: return BTT_ ## Name;
}
static ArrayTypeTrait ArrayTypeTraitFromTokKind(tok::TokenKind kind) {
- switch(kind) {
- default: llvm_unreachable("Not a known binary type trait");
- case tok::kw___array_rank: return ATT_ArrayRank;
- case tok::kw___array_extent: return ATT_ArrayExtent;
+ switch (kind) {
+ default:
+ llvm_unreachable("Not a known array type trait");
+#define ARRAY_TYPE_TRAIT(Spelling, Name, Key) \
+ case tok::kw_##Spelling: \
+ return ATT_##Name;
+#include "clang/Basic/TokenKinds.def"
}
}
static ExpressionTrait ExpressionTraitFromTokKind(tok::TokenKind kind) {
- switch(kind) {
- default: llvm_unreachable("Not a known unary expression trait.");
- case tok::kw___is_lvalue_expr: return ET_IsLValueExpr;
- case tok::kw___is_rvalue_expr: return ET_IsRValueExpr;
+ switch (kind) {
+ default:
+ llvm_unreachable("Not a known unary expression trait.");
+#define EXPRESSION_TRAIT(Spelling, Name, Key) \
+ case tok::kw_##Spelling: \
+ return ET_##Name;
+#include "clang/Basic/TokenKinds.def"
}
}
diff --git a/clang/lib/Parse/ParseInit.cpp b/clang/lib/Parse/ParseInit.cpp
index 5ab055130dc2..9ac2b2e6f79b 100644
--- a/clang/lib/Parse/ParseInit.cpp
+++ b/clang/lib/Parse/ParseInit.cpp
@@ -10,11 +10,14 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Basic/TokenKinds.h"
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/Designator.h"
+#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
using namespace clang;
@@ -154,7 +157,9 @@ static void CheckArrayDesignatorSyntax(Parser &P, SourceLocation Loc,
/// initializer (because it is an expression). We need to consider this case
/// when parsing array designators.
///
-ExprResult Parser::ParseInitializerWithPotentialDesignator() {
+/// \p CodeCompleteCB is called with Designation parsed so far.
+ExprResult Parser::ParseInitializerWithPotentialDesignator(
+ llvm::function_ref<void(const Designation &)> CodeCompleteCB) {
// If this is the old-style GNU extension:
// designation ::= identifier ':'
@@ -193,6 +198,11 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator() {
// designator: '.' identifier
SourceLocation DotLoc = ConsumeToken();
+ if (Tok.is(tok::code_completion)) {
+ CodeCompleteCB(Desig);
+ cutOffParsing();
+ return ExprError();
+ }
if (Tok.isNot(tok::identifier)) {
Diag(Tok.getLocation(), diag::err_expected_field_designator);
return ExprError();
@@ -407,7 +417,6 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator() {
return ExprError();
}
-
/// ParseBraceInitializer - Called when parsing an initializer that has a
/// leading open brace.
///
@@ -444,6 +453,10 @@ ExprResult Parser::ParseBraceInitializer() {
Actions, EnterExpressionEvaluationContext::InitList);
bool InitExprsOk = true;
+ auto CodeCompleteDesignation = [&](const Designation &D) {
+ Actions.CodeCompleteDesignator(PreferredType.get(T.getOpenLocation()),
+ InitExprs, D);
+ };
while (1) {
// Handle Microsoft __if_exists/if_not_exists if necessary.
@@ -463,7 +476,7 @@ ExprResult Parser::ParseBraceInitializer() {
// initializer directly.
ExprResult SubElt;
if (MayBeDesignationStart())
- SubElt = ParseInitializerWithPotentialDesignator();
+ SubElt = ParseInitializerWithPotentialDesignator(CodeCompleteDesignation);
else
SubElt = ParseInitializer();
@@ -543,13 +556,17 @@ bool Parser::ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
return false;
}
+ auto CodeCompleteDesignation = [&](const Designation &D) {
+ Actions.CodeCompleteDesignator(PreferredType.get(Braces.getOpenLocation()),
+ InitExprs, D);
+ };
while (!isEofOrEom()) {
trailingComma = false;
// If we know that this cannot be a designation, just parse the nested
// initializer directly.
ExprResult SubElt;
if (MayBeDesignationStart())
- SubElt = ParseInitializerWithPotentialDesignator();
+ SubElt = ParseInitializerWithPotentialDesignator(CodeCompleteDesignation);
else
SubElt = ParseInitializer();
diff --git a/clang/lib/Parse/ParseObjc.cpp b/clang/lib/Parse/ParseObjc.cpp
index efcef6d3b123..eaea8666bc10 100644
--- a/clang/lib/Parse/ParseObjc.cpp
+++ b/clang/lib/Parse/ParseObjc.cpp
@@ -10,11 +10,12 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Parse/Parser.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Scope.h"
@@ -522,10 +523,9 @@ ObjCTypeParamList *Parser::parseObjCTypeParamListOrProtocolRefs(
SkipUntil(tok::greater, tok::at, StopBeforeMatch);
if (Tok.is(tok::greater))
ConsumeToken();
- } else if (ParseGreaterThanInTemplateList(rAngleLoc,
+ } else if (ParseGreaterThanInTemplateList(lAngleLoc, rAngleLoc,
/*ConsumeLastToken=*/true,
/*ObjCGenericList=*/true)) {
- Diag(lAngleLoc, diag::note_matching) << "'<'";
SkipUntil({tok::greater, tok::greaterequal, tok::at, tok::minus,
tok::minus, tok::plus, tok::colon, tok::l_paren, tok::l_brace,
tok::comma, tok::semi },
@@ -740,7 +740,8 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
// Map a nullability property attribute to a context-sensitive keyword
// attribute.
- if (OCDS.getPropertyAttributes() & ObjCDeclSpec::DQ_PR_nullability)
+ if (OCDS.getPropertyAttributes() &
+ ObjCPropertyAttribute::kind_nullability)
addContextSensitiveTypeNullability(*this, FD.D, OCDS.getNullability(),
OCDS.getNullabilityLoc(),
addedToDeclSpec);
@@ -860,25 +861,25 @@ void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS) {
SourceLocation AttrName = ConsumeToken(); // consume last attribute name
if (II->isStr("readonly"))
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_readonly);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_readonly);
else if (II->isStr("assign"))
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_assign);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_assign);
else if (II->isStr("unsafe_unretained"))
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_unsafe_unretained);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_unsafe_unretained);
else if (II->isStr("readwrite"))
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_readwrite);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_readwrite);
else if (II->isStr("retain"))
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_retain);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_retain);
else if (II->isStr("strong"))
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_strong);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_strong);
else if (II->isStr("copy"))
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_copy);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_copy);
else if (II->isStr("nonatomic"))
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_nonatomic);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_nonatomic);
else if (II->isStr("atomic"))
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_atomic);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_atomic);
else if (II->isStr("weak"))
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_weak);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_weak);
else if (II->isStr("getter") || II->isStr("setter")) {
bool IsSetter = II->getNameStart()[0] == 's';
@@ -910,7 +911,7 @@ void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS) {
}
if (IsSetter) {
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_setter);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_setter);
DS.setSetterName(SelIdent, SelLoc);
if (ExpectAndConsume(tok::colon,
@@ -919,44 +920,44 @@ void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS) {
return;
}
} else {
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_getter);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_getter);
DS.setGetterName(SelIdent, SelLoc);
}
} else if (II->isStr("nonnull")) {
- if (DS.getPropertyAttributes() & ObjCDeclSpec::DQ_PR_nullability)
+ if (DS.getPropertyAttributes() & ObjCPropertyAttribute::kind_nullability)
diagnoseRedundantPropertyNullability(*this, DS,
NullabilityKind::NonNull,
Tok.getLocation());
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_nullability);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_nullability);
DS.setNullability(Tok.getLocation(), NullabilityKind::NonNull);
} else if (II->isStr("nullable")) {
- if (DS.getPropertyAttributes() & ObjCDeclSpec::DQ_PR_nullability)
+ if (DS.getPropertyAttributes() & ObjCPropertyAttribute::kind_nullability)
diagnoseRedundantPropertyNullability(*this, DS,
NullabilityKind::Nullable,
Tok.getLocation());
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_nullability);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_nullability);
DS.setNullability(Tok.getLocation(), NullabilityKind::Nullable);
} else if (II->isStr("null_unspecified")) {
- if (DS.getPropertyAttributes() & ObjCDeclSpec::DQ_PR_nullability)
+ if (DS.getPropertyAttributes() & ObjCPropertyAttribute::kind_nullability)
diagnoseRedundantPropertyNullability(*this, DS,
NullabilityKind::Unspecified,
Tok.getLocation());
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_nullability);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_nullability);
DS.setNullability(Tok.getLocation(), NullabilityKind::Unspecified);
} else if (II->isStr("null_resettable")) {
- if (DS.getPropertyAttributes() & ObjCDeclSpec::DQ_PR_nullability)
+ if (DS.getPropertyAttributes() & ObjCPropertyAttribute::kind_nullability)
diagnoseRedundantPropertyNullability(*this, DS,
NullabilityKind::Unspecified,
Tok.getLocation());
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_nullability);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_nullability);
DS.setNullability(Tok.getLocation(), NullabilityKind::Unspecified);
// Also set the null_resettable bit.
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_null_resettable);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_null_resettable);
} else if (II->isStr("class")) {
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_class);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_class);
} else if (II->isStr("direct")) {
- DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_direct);
+ DS.setPropertyAttributes(ObjCPropertyAttribute::kind_direct);
} else {
Diag(AttrName, diag::err_objc_expected_property_attr) << II;
SkipUntil(tok::r_paren, StopAtSemi);
@@ -1550,7 +1551,7 @@ ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &Protocols,
}
// Consume the '>'.
- if (ParseGreaterThanInTemplateList(EndLoc, consumeLastToken,
+ if (ParseGreaterThanInTemplateList(LAngleLoc, EndLoc, consumeLastToken,
/*ObjCGenericList=*/false))
return true;
@@ -1648,7 +1649,7 @@ void Parser::parseObjCTypeArgsOrProtocolQualifiers(
if (allSingleIdentifiers) {
// Parse the closing '>'.
SourceLocation rAngleLoc;
- (void)ParseGreaterThanInTemplateList(rAngleLoc, consumeLastToken,
+ (void)ParseGreaterThanInTemplateList(lAngleLoc, rAngleLoc, consumeLastToken,
/*ObjCGenericList=*/true);
// Let Sema figure out what we parsed.
@@ -1754,7 +1755,7 @@ void Parser::parseObjCTypeArgsOrProtocolQualifiers(
// Parse the closing '>'.
SourceLocation rAngleLoc;
- (void)ParseGreaterThanInTemplateList(rAngleLoc, consumeLastToken,
+ (void)ParseGreaterThanInTemplateList(lAngleLoc, rAngleLoc, consumeLastToken,
/*ObjCGenericList=*/true);
if (invalid) {
@@ -2978,7 +2979,7 @@ bool Parser::isStartOfObjCClassMessageMissingOpenBracket() {
InMessageExpression)
return false;
- ParsedType Type;
+ TypeResult Type;
if (Tok.is(tok::annot_typename))
Type = getTypeAnnotation(Tok);
@@ -2988,7 +2989,8 @@ bool Parser::isStartOfObjCClassMessageMissingOpenBracket() {
else
return false;
- if (!Type.get().isNull() && Type.get()->isObjCObjectOrInterfaceType()) {
+ // FIXME: Should not be querying properties of types from the parser.
+ if (Type.isUsable() && Type.get().get()->isObjCObjectOrInterfaceType()) {
const Token &AfterNext = GetLookAheadToken(2);
if (AfterNext.isOneOf(tok::colon, tok::r_square)) {
if (Tok.is(tok::identifier))
diff --git a/clang/lib/Parse/ParseOpenMP.cpp b/clang/lib/Parse/ParseOpenMP.cpp
index 1095919baa7d..5223755c8fdf 100644
--- a/clang/lib/Parse/ParseOpenMP.cpp
+++ b/clang/lib/Parse/ParseOpenMP.cpp
@@ -11,14 +11,18 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
+#include "clang/AST/OpenMPClause.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/Basic/OpenMPKinds.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TokenKinds.h"
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/Scope.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/UniqueVector.h"
+#include "llvm/Frontend/OpenMP/OMPContext.h"
using namespace clang;
using namespace llvm::omp;
@@ -29,7 +33,7 @@ using namespace llvm::omp;
namespace {
enum OpenMPDirectiveKindEx {
- OMPD_cancellation = unsigned(OMPD_unknown) + 1,
+ OMPD_cancellation = llvm::omp::Directive_enumSize + 1,
OMPD_data,
OMPD_declare,
OMPD_end,
@@ -46,6 +50,8 @@ enum OpenMPDirectiveKindEx {
OMPD_target_teams_distribute_parallel,
OMPD_mapper,
OMPD_variant,
+ OMPD_begin,
+ OMPD_begin_declare,
};
// Helper to unify the enum class OpenMPDirectiveKind with its extension
@@ -99,6 +105,7 @@ static unsigned getOpenMPDirectiveKindEx(StringRef S) {
.Case("update", OMPD_update)
.Case("mapper", OMPD_mapper)
.Case("variant", OMPD_variant)
+ .Case("begin", OMPD_begin)
.Default(OMPD_unknown);
}
@@ -107,18 +114,21 @@ static OpenMPDirectiveKindExWrapper parseOpenMPDirectiveKind(Parser &P) {
// E.g.: OMPD_for OMPD_simd ===> OMPD_for_simd
// TODO: add other combined directives in topological order.
static const OpenMPDirectiveKindExWrapper F[][3] = {
+ {OMPD_begin, OMPD_declare, OMPD_begin_declare},
+ {OMPD_end, OMPD_declare, OMPD_end_declare},
{OMPD_cancellation, OMPD_point, OMPD_cancellation_point},
{OMPD_declare, OMPD_reduction, OMPD_declare_reduction},
{OMPD_declare, OMPD_mapper, OMPD_declare_mapper},
{OMPD_declare, OMPD_simd, OMPD_declare_simd},
{OMPD_declare, OMPD_target, OMPD_declare_target},
{OMPD_declare, OMPD_variant, OMPD_declare_variant},
+ {OMPD_begin_declare, OMPD_variant, OMPD_begin_declare_variant},
+ {OMPD_end_declare, OMPD_variant, OMPD_end_declare_variant},
{OMPD_distribute, OMPD_parallel, OMPD_distribute_parallel},
{OMPD_distribute_parallel, OMPD_for, OMPD_distribute_parallel_for},
{OMPD_distribute_parallel_for, OMPD_simd,
OMPD_distribute_parallel_for_simd},
{OMPD_distribute, OMPD_simd, OMPD_distribute_simd},
- {OMPD_end, OMPD_declare, OMPD_end_declare},
{OMPD_end_declare, OMPD_target, OMPD_end_declare_target},
{OMPD_target, OMPD_data, OMPD_target_data},
{OMPD_target, OMPD_enter, OMPD_target_enter},
@@ -184,8 +194,9 @@ static OpenMPDirectiveKindExWrapper parseOpenMPDirectiveKind(Parser &P) {
DKind = F[I][2];
}
}
- return DKind < OMPD_unknown ? static_cast<OpenMPDirectiveKind>(DKind)
- : OMPD_unknown;
+ return unsigned(DKind) < llvm::omp::Directive_enumSize
+ ? static_cast<OpenMPDirectiveKind>(DKind)
+ : OMPD_unknown;
}
static DeclarationName parseOpenMPReductionId(Parser &P) {
@@ -637,16 +648,14 @@ namespace {
class FNContextRAII final {
Parser &P;
Sema::CXXThisScopeRAII *ThisScope;
- Parser::ParseScope *TempScope;
- Parser::ParseScope *FnScope;
- bool HasTemplateScope = false;
+ Parser::MultiParseScope Scopes;
bool HasFunScope = false;
FNContextRAII() = delete;
FNContextRAII(const FNContextRAII &) = delete;
FNContextRAII &operator=(const FNContextRAII &) = delete;
public:
- FNContextRAII(Parser &P, Parser::DeclGroupPtrTy Ptr) : P(P) {
+ FNContextRAII(Parser &P, Parser::DeclGroupPtrTy Ptr) : P(P), Scopes(P) {
Decl *D = *Ptr.get().begin();
NamedDecl *ND = dyn_cast<NamedDecl>(D);
RecordDecl *RD = dyn_cast_or_null<RecordDecl>(D->getDeclContext());
@@ -657,29 +666,20 @@ public:
ND && ND->isCXXInstanceMember());
// If the Decl is templatized, add template parameters to scope.
- HasTemplateScope = D->isTemplateDecl();
- TempScope =
- new Parser::ParseScope(&P, Scope::TemplateParamScope, HasTemplateScope);
- if (HasTemplateScope)
- Actions.ActOnReenterTemplateScope(Actions.getCurScope(), D);
+ // FIXME: Track CurTemplateDepth?
+ P.ReenterTemplateScopes(Scopes, D);
// If the Decl is on a function, add function parameters to the scope.
- HasFunScope = D->isFunctionOrFunctionTemplate();
- FnScope = new Parser::ParseScope(
- &P, Scope::FnScope | Scope::DeclScope | Scope::CompoundStmtScope,
- HasFunScope);
- if (HasFunScope)
+ if (D->isFunctionOrFunctionTemplate()) {
+ HasFunScope = true;
+ Scopes.Enter(Scope::FnScope | Scope::DeclScope |
+ Scope::CompoundStmtScope);
Actions.ActOnReenterFunctionContext(Actions.getCurScope(), D);
+ }
}
~FNContextRAII() {
- if (HasFunScope) {
+ if (HasFunScope)
P.getActions().ActOnExitFunctionContext();
- FnScope->Exit(); // Pop scope, and remove Decls from IdResolver
- }
- if (HasTemplateScope)
- TempScope->Exit();
- delete FnScope;
- delete TempScope;
delete ThisScope;
}
};
@@ -746,18 +746,19 @@ static bool parseDeclareSimdClauses(
getOpenMPClauseKind(ClauseName), *Vars, Data))
IsError = true;
if (CKind == OMPC_aligned) {
- Alignments.append(Aligneds.size() - Alignments.size(), Data.TailExpr);
+ Alignments.append(Aligneds.size() - Alignments.size(),
+ Data.DepModOrTailExpr);
} else if (CKind == OMPC_linear) {
assert(0 <= Data.ExtraModifier &&
Data.ExtraModifier <= OMPC_LINEAR_unknown &&
"Unexpected linear modifier.");
if (P.getActions().CheckOpenMPLinearModifier(
static_cast<OpenMPLinearClauseKind>(Data.ExtraModifier),
- Data.DepLinMapLastLoc))
+ Data.ExtraModifierLoc))
Data.ExtraModifier = OMPC_LINEAR_val;
LinModifiers.append(Linears.size() - LinModifiers.size(),
Data.ExtraModifier);
- Steps.append(Linears.size() - Steps.size(), Data.TailExpr);
+ Steps.append(Linears.size() - Steps.size(), Data.DepModOrTailExpr);
}
} else
// TODO: add parsing of other clauses.
@@ -794,13 +795,7 @@ Parser::ParseOMPDeclareSimdClauses(Parser::DeclGroupPtrTy Ptr,
bool IsError =
parseDeclareSimdClauses(*this, BS, Simdlen, Uniforms, Aligneds,
Alignments, Linears, LinModifiers, Steps);
- // Need to check for extra tokens.
- if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
- << getOpenMPDirectiveName(OMPD_declare_simd);
- while (Tok.isNot(tok::annot_pragma_openmp_end))
- ConsumeAnyToken();
- }
+ skipUntilPragmaOpenMPEnd(OMPD_declare_simd);
// Skip the last annot_pragma_openmp_end.
SourceLocation EndLoc = ConsumeAnnotationToken();
if (IsError)
@@ -810,10 +805,268 @@ Parser::ParseOMPDeclareSimdClauses(Parser::DeclGroupPtrTy Ptr,
LinModifiers, Steps, SourceRange(Loc, EndLoc));
}
+namespace {
+/// Constant used in the diagnostics to distinguish the levels in an OpenMP
+/// contexts: selector-set={selector(trait, ...), ...}, ....
+enum OMPContextLvl {
+ CONTEXT_SELECTOR_SET_LVL = 0,
+ CONTEXT_SELECTOR_LVL = 1,
+ CONTEXT_TRAIT_LVL = 2,
+};
+
+static StringRef stringLiteralParser(Parser &P) {
+ ExprResult Res = P.ParseStringLiteralExpression(true);
+ return Res.isUsable() ? Res.getAs<StringLiteral>()->getString() : "";
+}
+
+static StringRef getNameFromIdOrString(Parser &P, Token &Tok,
+ OMPContextLvl Lvl) {
+ if (Tok.is(tok::identifier)) {
+ llvm::SmallString<16> Buffer;
+ StringRef Name = P.getPreprocessor().getSpelling(Tok, Buffer);
+ (void)P.ConsumeToken();
+ return Name;
+ }
+
+ if (tok::isStringLiteral(Tok.getKind()))
+ return stringLiteralParser(P);
+
+ P.Diag(Tok.getLocation(),
+ diag::warn_omp_declare_variant_string_literal_or_identifier)
+ << Lvl;
+ return "";
+}
+
+static bool checkForDuplicates(Parser &P, StringRef Name,
+ SourceLocation NameLoc,
+ llvm::StringMap<SourceLocation> &Seen,
+ OMPContextLvl Lvl) {
+ auto Res = Seen.try_emplace(Name, NameLoc);
+ if (Res.second)
+ return false;
+
+ // Each trait-set-selector-name, trait-selector-name and trait-name can
+ // only be specified once.
+ P.Diag(NameLoc, diag::warn_omp_declare_variant_ctx_mutiple_use)
+ << Lvl << Name;
+ P.Diag(Res.first->getValue(), diag::note_omp_declare_variant_ctx_used_here)
+ << Lvl << Name;
+ return true;
+}
+} // namespace
+
+void Parser::parseOMPTraitPropertyKind(
+ OMPTraitProperty &TIProperty, llvm::omp::TraitSet Set,
+ llvm::omp::TraitSelector Selector, llvm::StringMap<SourceLocation> &Seen) {
+ TIProperty.Kind = TraitProperty::invalid;
+
+ SourceLocation NameLoc = Tok.getLocation();
+ StringRef Name =
+ getNameFromIdOrString(*this, Tok, CONTEXT_TRAIT_LVL);
+ if (Name.empty()) {
+ Diag(Tok.getLocation(), diag::note_omp_declare_variant_ctx_options)
+ << CONTEXT_TRAIT_LVL << listOpenMPContextTraitProperties(Set, Selector);
+ return;
+ }
+
+ TIProperty.Kind = getOpenMPContextTraitPropertyKind(Set, Name);
+ if (TIProperty.Kind != TraitProperty::invalid) {
+ if (checkForDuplicates(*this, Name, NameLoc, Seen, CONTEXT_TRAIT_LVL))
+ TIProperty.Kind = TraitProperty::invalid;
+ return;
+ }
+
+ // It follows diagnosis and helping notes.
+ // FIXME: We should move the diagnosis string generation into libFrontend.
+ Diag(NameLoc, diag::warn_omp_declare_variant_ctx_not_a_property)
+ << Name << getOpenMPContextTraitSelectorName(Selector)
+ << getOpenMPContextTraitSetName(Set);
+
+ TraitSet SetForName = getOpenMPContextTraitSetKind(Name);
+ if (SetForName != TraitSet::invalid) {
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_is_a)
+ << Name << CONTEXT_SELECTOR_SET_LVL << CONTEXT_TRAIT_LVL;
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_try)
+ << Name << "<selector-name>"
+ << "(<property-name>)";
+ return;
+ }
+ TraitSelector SelectorForName = getOpenMPContextTraitSelectorKind(Name);
+ if (SelectorForName != TraitSelector::invalid) {
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_is_a)
+ << Name << CONTEXT_SELECTOR_LVL << CONTEXT_TRAIT_LVL;
+ bool AllowsTraitScore = false;
+ bool RequiresProperty = false;
+ isValidTraitSelectorForTraitSet(
+ SelectorForName, getOpenMPContextTraitSetForSelector(SelectorForName),
+ AllowsTraitScore, RequiresProperty);
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_try)
+ << getOpenMPContextTraitSetName(
+ getOpenMPContextTraitSetForSelector(SelectorForName))
+ << Name << (RequiresProperty ? "(<property-name>)" : "");
+ return;
+ }
+ for (const auto &PotentialSet :
+ {TraitSet::construct, TraitSet::user, TraitSet::implementation,
+ TraitSet::device}) {
+ TraitProperty PropertyForName =
+ getOpenMPContextTraitPropertyKind(PotentialSet, Name);
+ if (PropertyForName == TraitProperty::invalid)
+ continue;
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_try)
+ << getOpenMPContextTraitSetName(
+ getOpenMPContextTraitSetForProperty(PropertyForName))
+ << getOpenMPContextTraitSelectorName(
+ getOpenMPContextTraitSelectorForProperty(PropertyForName))
+ << ("(" + Name + ")").str();
+ return;
+ }
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_options)
+ << CONTEXT_TRAIT_LVL << listOpenMPContextTraitProperties(Set, Selector);
+}
+
+static bool checkExtensionProperty(Parser &P, SourceLocation Loc,
+ OMPTraitProperty &TIProperty,
+ OMPTraitSelector &TISelector,
+ llvm::StringMap<SourceLocation> &Seen) {
+ assert(TISelector.Kind ==
+ llvm::omp::TraitSelector::implementation_extension &&
+ "Only for extension properties, e.g., "
+ "`implementation={extension(PROPERTY)}`");
+ if (TIProperty.Kind == TraitProperty::invalid)
+ return false;
+
+ auto IsMatchExtension = [](OMPTraitProperty &TP) {
+ return (TP.Kind ==
+ llvm::omp::TraitProperty::implementation_extension_match_all ||
+ TP.Kind ==
+ llvm::omp::TraitProperty::implementation_extension_match_any ||
+ TP.Kind ==
+ llvm::omp::TraitProperty::implementation_extension_match_none);
+ };
+
+ if (IsMatchExtension(TIProperty)) {
+ for (OMPTraitProperty &SeenProp : TISelector.Properties)
+ if (IsMatchExtension(SeenProp)) {
+ P.Diag(Loc, diag::err_omp_variant_ctx_second_match_extension);
+ StringRef SeenName =
+ llvm::omp::getOpenMPContextTraitPropertyName(SeenProp.Kind);
+ SourceLocation SeenLoc = Seen[SeenName];
+ P.Diag(SeenLoc, diag::note_omp_declare_variant_ctx_used_here)
+ << CONTEXT_TRAIT_LVL << SeenName;
+ return false;
+ }
+ return true;
+ }
+
+ llvm_unreachable("Unknown extension property!");
+}
+
+void Parser::parseOMPContextProperty(OMPTraitSelector &TISelector,
+ llvm::omp::TraitSet Set,
+ llvm::StringMap<SourceLocation> &Seen) {
+ assert(TISelector.Kind != TraitSelector::user_condition &&
+ "User conditions are special properties not handled here!");
+
+ SourceLocation PropertyLoc = Tok.getLocation();
+ OMPTraitProperty TIProperty;
+ parseOMPTraitPropertyKind(TIProperty, Set, TISelector.Kind, Seen);
+
+ if (TISelector.Kind == llvm::omp::TraitSelector::implementation_extension)
+ if (!checkExtensionProperty(*this, Tok.getLocation(), TIProperty,
+ TISelector, Seen))
+ TIProperty.Kind = TraitProperty::invalid;
+
+ // If we have an invalid property here we already issued a warning.
+ if (TIProperty.Kind == TraitProperty::invalid) {
+ if (PropertyLoc != Tok.getLocation())
+ Diag(Tok.getLocation(), diag::note_omp_declare_variant_ctx_continue_here)
+ << CONTEXT_TRAIT_LVL;
+ return;
+ }
+
+ if (isValidTraitPropertyForTraitSetAndSelector(TIProperty.Kind,
+ TISelector.Kind, Set)) {
+
+ // If we make it here the property, selector, set, score, condition, ... are
+ // all valid (or have been corrected). Thus we can record the property.
+ TISelector.Properties.push_back(TIProperty);
+ return;
+ }
+
+ Diag(PropertyLoc, diag::warn_omp_ctx_incompatible_property_for_selector)
+ << getOpenMPContextTraitPropertyName(TIProperty.Kind)
+ << getOpenMPContextTraitSelectorName(TISelector.Kind)
+ << getOpenMPContextTraitSetName(Set);
+ Diag(PropertyLoc, diag::note_omp_ctx_compatible_set_and_selector_for_property)
+ << getOpenMPContextTraitPropertyName(TIProperty.Kind)
+ << getOpenMPContextTraitSelectorName(
+ getOpenMPContextTraitSelectorForProperty(TIProperty.Kind))
+ << getOpenMPContextTraitSetName(
+ getOpenMPContextTraitSetForProperty(TIProperty.Kind));
+ Diag(Tok.getLocation(), diag::note_omp_declare_variant_ctx_continue_here)
+ << CONTEXT_TRAIT_LVL;
+}
+
+void Parser::parseOMPTraitSelectorKind(
+ OMPTraitSelector &TISelector, llvm::omp::TraitSet Set,
+ llvm::StringMap<SourceLocation> &Seen) {
+ TISelector.Kind = TraitSelector::invalid;
+
+ SourceLocation NameLoc = Tok.getLocation();
+ StringRef Name = getNameFromIdOrString(*this, Tok, CONTEXT_SELECTOR_LVL
+ );
+ if (Name.empty()) {
+ Diag(Tok.getLocation(), diag::note_omp_declare_variant_ctx_options)
+ << CONTEXT_SELECTOR_LVL << listOpenMPContextTraitSelectors(Set);
+ return;
+ }
+
+ TISelector.Kind = getOpenMPContextTraitSelectorKind(Name);
+ if (TISelector.Kind != TraitSelector::invalid) {
+ if (checkForDuplicates(*this, Name, NameLoc, Seen, CONTEXT_SELECTOR_LVL))
+ TISelector.Kind = TraitSelector::invalid;
+ return;
+ }
+
+ // It follows diagnosis and helping notes.
+ Diag(NameLoc, diag::warn_omp_declare_variant_ctx_not_a_selector)
+ << Name << getOpenMPContextTraitSetName(Set);
+
+ TraitSet SetForName = getOpenMPContextTraitSetKind(Name);
+ if (SetForName != TraitSet::invalid) {
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_is_a)
+ << Name << CONTEXT_SELECTOR_SET_LVL << CONTEXT_SELECTOR_LVL;
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_try)
+ << Name << "<selector-name>"
+ << "<property-name>";
+ return;
+ }
+ for (const auto &PotentialSet :
+ {TraitSet::construct, TraitSet::user, TraitSet::implementation,
+ TraitSet::device}) {
+ TraitProperty PropertyForName =
+ getOpenMPContextTraitPropertyKind(PotentialSet, Name);
+ if (PropertyForName == TraitProperty::invalid)
+ continue;
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_is_a)
+ << Name << CONTEXT_TRAIT_LVL << CONTEXT_SELECTOR_LVL;
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_try)
+ << getOpenMPContextTraitSetName(
+ getOpenMPContextTraitSetForProperty(PropertyForName))
+ << getOpenMPContextTraitSelectorName(
+ getOpenMPContextTraitSelectorForProperty(PropertyForName))
+ << ("(" + Name + ")").str();
+ return;
+ }
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_options)
+ << CONTEXT_SELECTOR_LVL << listOpenMPContextTraitSelectors(Set);
+}
+
/// Parse optional 'score' '(' <expr> ')' ':'.
static ExprResult parseContextScore(Parser &P) {
ExprResult ScoreExpr;
- Sema::OMPCtxStringType Buffer;
+ llvm::SmallString<16> Buffer;
StringRef SelectorName =
P.getPreprocessor().getSpelling(P.getCurToken(), Buffer);
if (!SelectorName.equals("score"))
@@ -825,246 +1078,272 @@ static ExprResult parseContextScore(Parser &P) {
if (P.getCurToken().is(tok::colon))
(void)P.ConsumeAnyToken();
else
- P.Diag(P.getCurToken(), diag::warn_pragma_expected_colon)
- << "context selector score clause";
+ P.Diag(P.getCurToken(), diag::warn_omp_declare_variant_expected)
+ << "':'"
+ << "score expression";
return ScoreExpr;
}
-/// Parse context selector for 'implementation' selector set:
-/// 'vendor' '(' [ 'score' '(' <score _expr> ')' ':' ] <vendor> { ',' <vendor> }
-/// ')'
-static void
-parseImplementationSelector(Parser &P, SourceLocation Loc,
- llvm::StringMap<SourceLocation> &UsedCtx,
- SmallVectorImpl<Sema::OMPCtxSelectorData> &Data) {
- const Token &Tok = P.getCurToken();
- // Parse inner context selector set name, if any.
- if (!Tok.is(tok::identifier)) {
- P.Diag(Tok.getLocation(), diag::warn_omp_declare_variant_cs_name_expected)
- << "implementation";
- // Skip until either '}', ')', or end of directive.
- while (!P.SkipUntil(tok::r_brace, tok::r_paren,
- tok::annot_pragma_openmp_end, Parser::StopBeforeMatch))
- ;
- return;
- }
- Sema::OMPCtxStringType Buffer;
- StringRef CtxSelectorName = P.getPreprocessor().getSpelling(Tok, Buffer);
- auto Res = UsedCtx.try_emplace(CtxSelectorName, Tok.getLocation());
- if (!Res.second) {
- // OpenMP 5.0, 2.3.2 Context Selectors, Restrictions.
- // Each trait-selector-name can only be specified once.
- P.Diag(Tok.getLocation(), diag::err_omp_declare_variant_ctx_mutiple_use)
- << CtxSelectorName << "implementation";
- P.Diag(Res.first->getValue(), diag::note_omp_declare_variant_ctx_used_here)
- << CtxSelectorName;
- }
- OpenMPContextSelectorKind CSKind = getOpenMPContextSelector(CtxSelectorName);
- (void)P.ConsumeToken();
- switch (CSKind) {
- case OMP_CTX_vendor: {
- // Parse '('.
- BalancedDelimiterTracker T(P, tok::l_paren, tok::annot_pragma_openmp_end);
- (void)T.expectAndConsume(diag::err_expected_lparen_after,
- CtxSelectorName.data());
- ExprResult Score = parseContextScore(P);
- llvm::UniqueVector<Sema::OMPCtxStringType> Vendors;
- do {
- // Parse <vendor>.
- StringRef VendorName;
- if (Tok.is(tok::identifier)) {
- Buffer.clear();
- VendorName = P.getPreprocessor().getSpelling(P.getCurToken(), Buffer);
- (void)P.ConsumeToken();
- if (!VendorName.empty())
- Vendors.insert(VendorName);
- } else {
- P.Diag(Tok.getLocation(), diag::err_omp_declare_variant_item_expected)
- << "vendor identifier"
- << "vendor"
- << "implementation";
+/// Parses an OpenMP context selector.
+///
+/// <trait-selector-name> ['('[<trait-score>] <trait-property> [, <t-p>]* ')']
+void Parser::parseOMPContextSelector(
+ OMPTraitSelector &TISelector, llvm::omp::TraitSet Set,
+ llvm::StringMap<SourceLocation> &SeenSelectors) {
+ unsigned short OuterPC = ParenCount;
+
+ // If anything went wrong we issue an error or warning and then skip the rest
+ // of the selector. However, commas are ambiguous so we look for the nesting
+ // of parentheses here as well.
+ auto FinishSelector = [OuterPC, this]() -> void {
+ bool Done = false;
+ while (!Done) {
+ while (!SkipUntil({tok::r_brace, tok::r_paren, tok::comma,
+ tok::annot_pragma_openmp_end},
+ StopBeforeMatch))
+ ;
+ if (Tok.is(tok::r_paren) && OuterPC > ParenCount)
+ (void)ConsumeParen();
+ if (OuterPC <= ParenCount) {
+ Done = true;
+ break;
}
- if (!P.TryConsumeToken(tok::comma) && Tok.isNot(tok::r_paren)) {
- P.Diag(Tok, diag::err_expected_punc)
- << (VendorName.empty() ? "vendor name" : VendorName);
+ if (!Tok.is(tok::comma) && !Tok.is(tok::r_paren)) {
+ Done = true;
+ break;
}
- } while (Tok.is(tok::identifier));
- // Parse ')'.
- (void)T.consumeClose();
- if (!Vendors.empty())
- Data.emplace_back(OMP_CTX_SET_implementation, CSKind, Score, Vendors);
- break;
+ (void)ConsumeAnyToken();
+ }
+ Diag(Tok.getLocation(), diag::note_omp_declare_variant_ctx_continue_here)
+ << CONTEXT_SELECTOR_LVL;
+ };
+
+ SourceLocation SelectorLoc = Tok.getLocation();
+ parseOMPTraitSelectorKind(TISelector, Set, SeenSelectors);
+ if (TISelector.Kind == TraitSelector::invalid)
+ return FinishSelector();
+
+ bool AllowsTraitScore = false;
+ bool RequiresProperty = false;
+ if (!isValidTraitSelectorForTraitSet(TISelector.Kind, Set, AllowsTraitScore,
+ RequiresProperty)) {
+ Diag(SelectorLoc, diag::warn_omp_ctx_incompatible_selector_for_set)
+ << getOpenMPContextTraitSelectorName(TISelector.Kind)
+ << getOpenMPContextTraitSetName(Set);
+ Diag(SelectorLoc, diag::note_omp_ctx_compatible_set_for_selector)
+ << getOpenMPContextTraitSelectorName(TISelector.Kind)
+ << getOpenMPContextTraitSetName(
+ getOpenMPContextTraitSetForSelector(TISelector.Kind))
+ << RequiresProperty;
+ return FinishSelector();
+ }
+
+ if (!RequiresProperty) {
+ TISelector.Properties.push_back(
+ {getOpenMPContextTraitPropertyForSelector(TISelector.Kind)});
+ return;
}
- case OMP_CTX_kind:
- case OMP_CTX_unknown:
- P.Diag(Tok.getLocation(), diag::warn_omp_declare_variant_cs_name_expected)
- << "implementation";
- // Skip until either '}', ')', or end of directive.
- while (!P.SkipUntil(tok::r_brace, tok::r_paren,
- tok::annot_pragma_openmp_end, Parser::StopBeforeMatch))
- ;
+
+ if (!Tok.is(tok::l_paren)) {
+ Diag(SelectorLoc, diag::warn_omp_ctx_selector_without_properties)
+ << getOpenMPContextTraitSelectorName(TISelector.Kind)
+ << getOpenMPContextTraitSetName(Set);
+ return FinishSelector();
+ }
+
+ if (TISelector.Kind == TraitSelector::user_condition) {
+ SourceLocation RLoc;
+ ExprResult Condition = ParseOpenMPParensExpr("user condition", RLoc);
+ if (!Condition.isUsable())
+ return FinishSelector();
+ TISelector.ScoreOrCondition = Condition.get();
+ TISelector.Properties.push_back({TraitProperty::user_condition_unknown});
return;
}
+
+ BalancedDelimiterTracker BDT(*this, tok::l_paren,
+ tok::annot_pragma_openmp_end);
+ // Parse '('.
+ (void)BDT.consumeOpen();
+
+ SourceLocation ScoreLoc = Tok.getLocation();
+ ExprResult Score = parseContextScore(*this);
+
+ if (!AllowsTraitScore && !Score.isUnset()) {
+ if (Score.isUsable()) {
+ Diag(ScoreLoc, diag::warn_omp_ctx_incompatible_score_for_property)
+ << getOpenMPContextTraitSelectorName(TISelector.Kind)
+ << getOpenMPContextTraitSetName(Set) << Score.get();
+ } else {
+ Diag(ScoreLoc, diag::warn_omp_ctx_incompatible_score_for_property)
+ << getOpenMPContextTraitSelectorName(TISelector.Kind)
+ << getOpenMPContextTraitSetName(Set) << "<invalid>";
+ }
+ Score = ExprResult();
+ }
+
+ if (Score.isUsable())
+ TISelector.ScoreOrCondition = Score.get();
+
+ llvm::StringMap<SourceLocation> SeenProperties;
+ do {
+ parseOMPContextProperty(TISelector, Set, SeenProperties);
+ } while (TryConsumeToken(tok::comma));
+
+ // Parse ')'.
+ BDT.consumeClose();
}
-/// Parse context selector for 'device' selector set:
-/// 'kind' '(' <kind> { ',' <kind> } ')'
-static void
-parseDeviceSelector(Parser &P, SourceLocation Loc,
- llvm::StringMap<SourceLocation> &UsedCtx,
- SmallVectorImpl<Sema::OMPCtxSelectorData> &Data) {
- const Token &Tok = P.getCurToken();
- // Parse inner context selector set name, if any.
- if (!Tok.is(tok::identifier)) {
- P.Diag(Tok.getLocation(), diag::warn_omp_declare_variant_cs_name_expected)
- << "device";
- // Skip until either '}', ')', or end of directive.
- while (!P.SkipUntil(tok::r_brace, tok::r_paren,
- tok::annot_pragma_openmp_end, Parser::StopBeforeMatch))
- ;
+void Parser::parseOMPTraitSetKind(OMPTraitSet &TISet,
+ llvm::StringMap<SourceLocation> &Seen) {
+ TISet.Kind = TraitSet::invalid;
+
+ SourceLocation NameLoc = Tok.getLocation();
+ StringRef Name = getNameFromIdOrString(*this, Tok, CONTEXT_SELECTOR_SET_LVL
+ );
+ if (Name.empty()) {
+ Diag(Tok.getLocation(), diag::note_omp_declare_variant_ctx_options)
+ << CONTEXT_SELECTOR_SET_LVL << listOpenMPContextTraitSets();
return;
}
- Sema::OMPCtxStringType Buffer;
- StringRef CtxSelectorName = P.getPreprocessor().getSpelling(Tok, Buffer);
- auto Res = UsedCtx.try_emplace(CtxSelectorName, Tok.getLocation());
- if (!Res.second) {
- // OpenMP 5.0, 2.3.2 Context Selectors, Restrictions.
- // Each trait-selector-name can only be specified once.
- P.Diag(Tok.getLocation(), diag::err_omp_declare_variant_ctx_mutiple_use)
- << CtxSelectorName << "device";
- P.Diag(Res.first->getValue(), diag::note_omp_declare_variant_ctx_used_here)
- << CtxSelectorName;
- }
- OpenMPContextSelectorKind CSKind = getOpenMPContextSelector(CtxSelectorName);
- (void)P.ConsumeToken();
- switch (CSKind) {
- case OMP_CTX_kind: {
- // Parse '('.
- BalancedDelimiterTracker T(P, tok::l_paren, tok::annot_pragma_openmp_end);
- (void)T.expectAndConsume(diag::err_expected_lparen_after,
- CtxSelectorName.data());
- llvm::UniqueVector<Sema::OMPCtxStringType> Kinds;
- do {
- // Parse <kind>.
- StringRef KindName;
- if (Tok.is(tok::identifier)) {
- Buffer.clear();
- KindName = P.getPreprocessor().getSpelling(P.getCurToken(), Buffer);
- SourceLocation SLoc = P.getCurToken().getLocation();
- (void)P.ConsumeToken();
- if (llvm::StringSwitch<bool>(KindName)
- .Case("host", false)
- .Case("nohost", false)
- .Case("cpu", false)
- .Case("gpu", false)
- .Case("fpga", false)
- .Default(true)) {
- P.Diag(SLoc, diag::err_omp_wrong_device_kind_trait) << KindName;
- } else {
- Kinds.insert(KindName);
- }
- } else {
- P.Diag(Tok.getLocation(), diag::err_omp_declare_variant_item_expected)
- << "'host', 'nohost', 'cpu', 'gpu', or 'fpga'"
- << "kind"
- << "device";
+
+ TISet.Kind = getOpenMPContextTraitSetKind(Name);
+ if (TISet.Kind != TraitSet::invalid) {
+ if (checkForDuplicates(*this, Name, NameLoc, Seen,
+ CONTEXT_SELECTOR_SET_LVL))
+ TISet.Kind = TraitSet::invalid;
+ return;
+ }
+
+ // It follows diagnosis and helping notes.
+ Diag(NameLoc, diag::warn_omp_declare_variant_ctx_not_a_set) << Name;
+
+ TraitSelector SelectorForName = getOpenMPContextTraitSelectorKind(Name);
+ if (SelectorForName != TraitSelector::invalid) {
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_is_a)
+ << Name << CONTEXT_SELECTOR_LVL << CONTEXT_SELECTOR_SET_LVL;
+ bool AllowsTraitScore = false;
+ bool RequiresProperty = false;
+ isValidTraitSelectorForTraitSet(
+ SelectorForName, getOpenMPContextTraitSetForSelector(SelectorForName),
+ AllowsTraitScore, RequiresProperty);
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_try)
+ << getOpenMPContextTraitSetName(
+ getOpenMPContextTraitSetForSelector(SelectorForName))
+ << Name << (RequiresProperty ? "(<property-name>)" : "");
+ return;
+ }
+ for (const auto &PotentialSet :
+ {TraitSet::construct, TraitSet::user, TraitSet::implementation,
+ TraitSet::device}) {
+ TraitProperty PropertyForName =
+ getOpenMPContextTraitPropertyKind(PotentialSet, Name);
+ if (PropertyForName == TraitProperty::invalid)
+ continue;
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_is_a)
+ << Name << CONTEXT_TRAIT_LVL << CONTEXT_SELECTOR_SET_LVL;
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_try)
+ << getOpenMPContextTraitSetName(
+ getOpenMPContextTraitSetForProperty(PropertyForName))
+ << getOpenMPContextTraitSelectorName(
+ getOpenMPContextTraitSelectorForProperty(PropertyForName))
+ << ("(" + Name + ")").str();
+ return;
+ }
+ Diag(NameLoc, diag::note_omp_declare_variant_ctx_options)
+ << CONTEXT_SELECTOR_SET_LVL << listOpenMPContextTraitSets();
+}
+
+/// Parses an OpenMP context selector set.
+///
+/// <trait-set-selector-name> '=' '{' <trait-selector> [, <trait-selector>]* '}'
+void Parser::parseOMPContextSelectorSet(
+ OMPTraitSet &TISet,
+ llvm::StringMap<SourceLocation> &SeenSets) {
+ auto OuterBC = BraceCount;
+
+ // If anything went wrong we issue an error or warning and then skip the rest
+ // of the set. However, commas are ambiguous so we look for the nesting
+ // of braces here as well.
+ auto FinishSelectorSet = [this, OuterBC]() -> void {
+ bool Done = false;
+ while (!Done) {
+ while (!SkipUntil({tok::comma, tok::r_brace, tok::r_paren,
+ tok::annot_pragma_openmp_end},
+ StopBeforeMatch))
+ ;
+ if (Tok.is(tok::r_brace) && OuterBC > BraceCount)
+ (void)ConsumeBrace();
+ if (OuterBC <= BraceCount) {
+ Done = true;
+ break;
}
- if (!P.TryConsumeToken(tok::comma) && Tok.isNot(tok::r_paren)) {
- P.Diag(Tok, diag::err_expected_punc)
- << (KindName.empty() ? "kind of device" : KindName);
+ if (!Tok.is(tok::comma) && !Tok.is(tok::r_brace)) {
+ Done = true;
+ break;
}
- } while (Tok.is(tok::identifier));
- // Parse ')'.
- (void)T.consumeClose();
- if (!Kinds.empty())
- Data.emplace_back(OMP_CTX_SET_device, CSKind, ExprResult(), Kinds);
- break;
+ (void)ConsumeAnyToken();
+ }
+ Diag(Tok.getLocation(), diag::note_omp_declare_variant_ctx_continue_here)
+ << CONTEXT_SELECTOR_SET_LVL;
+ };
+
+ parseOMPTraitSetKind(TISet, SeenSets);
+ if (TISet.Kind == TraitSet::invalid)
+ return FinishSelectorSet();
+
+ // Parse '='.
+ if (!TryConsumeToken(tok::equal))
+ Diag(Tok.getLocation(), diag::warn_omp_declare_variant_expected)
+ << "="
+ << ("context set name \"" + getOpenMPContextTraitSetName(TISet.Kind) +
+ "\"")
+ .str();
+
+ // Parse '{'.
+ if (Tok.is(tok::l_brace)) {
+ (void)ConsumeBrace();
+ } else {
+ Diag(Tok.getLocation(), diag::warn_omp_declare_variant_expected)
+ << "{"
+ << ("'=' that follows the context set name \"" +
+ getOpenMPContextTraitSetName(TISet.Kind) + "\"")
+ .str();
}
- case OMP_CTX_vendor:
- case OMP_CTX_unknown:
- P.Diag(Tok.getLocation(), diag::warn_omp_declare_variant_cs_name_expected)
- << "device";
- // Skip until either '}', ')', or end of directive.
- while (!P.SkipUntil(tok::r_brace, tok::r_paren,
- tok::annot_pragma_openmp_end, Parser::StopBeforeMatch))
- ;
- return;
+
+ llvm::StringMap<SourceLocation> SeenSelectors;
+ do {
+ OMPTraitSelector TISelector;
+ parseOMPContextSelector(TISelector, TISet.Kind, SeenSelectors);
+ if (TISelector.Kind != TraitSelector::invalid &&
+ !TISelector.Properties.empty())
+ TISet.Selectors.push_back(TISelector);
+ } while (TryConsumeToken(tok::comma));
+
+ // Parse '}'.
+ if (Tok.is(tok::r_brace)) {
+ (void)ConsumeBrace();
+ } else {
+ Diag(Tok.getLocation(), diag::warn_omp_declare_variant_expected)
+ << "}"
+ << ("context selectors for the context set \"" +
+ getOpenMPContextTraitSetName(TISet.Kind) + "\"")
+ .str();
}
}
-/// Parses clauses for 'declare variant' directive.
-/// clause:
-/// <selector_set_name> '=' '{' <context_selectors> '}'
-/// [ ',' <selector_set_name> '=' '{' <context_selectors> '}' ]
-bool Parser::parseOpenMPContextSelectors(
- SourceLocation Loc, SmallVectorImpl<Sema::OMPCtxSelectorData> &Data) {
- llvm::StringMap<SourceLocation> UsedCtxSets;
+/// Parse OpenMP context selectors:
+///
+/// <trait-set-selector> [, <trait-set-selector>]*
+bool Parser::parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo& TI) {
+ llvm::StringMap<SourceLocation> SeenSets;
do {
- // Parse inner context selector set name.
- if (!Tok.is(tok::identifier)) {
- Diag(Tok.getLocation(), diag::err_omp_declare_variant_no_ctx_selector)
- << getOpenMPClauseName(OMPC_match);
- return true;
- }
- Sema::OMPCtxStringType Buffer;
- StringRef CtxSelectorSetName = PP.getSpelling(Tok, Buffer);
- auto Res = UsedCtxSets.try_emplace(CtxSelectorSetName, Tok.getLocation());
- if (!Res.second) {
- // OpenMP 5.0, 2.3.2 Context Selectors, Restrictions.
- // Each trait-set-selector-name can only be specified once.
- Diag(Tok.getLocation(), diag::err_omp_declare_variant_ctx_set_mutiple_use)
- << CtxSelectorSetName;
- Diag(Res.first->getValue(),
- diag::note_omp_declare_variant_ctx_set_used_here)
- << CtxSelectorSetName;
- }
- // Parse '='.
- (void)ConsumeToken();
- if (Tok.isNot(tok::equal)) {
- Diag(Tok.getLocation(), diag::err_omp_declare_variant_equal_expected)
- << CtxSelectorSetName;
- return true;
- }
- (void)ConsumeToken();
- // TBD: add parsing of known context selectors.
- // Unknown selector - just ignore it completely.
- {
- // Parse '{'.
- BalancedDelimiterTracker TBr(*this, tok::l_brace,
- tok::annot_pragma_openmp_end);
- if (TBr.expectAndConsume(diag::err_expected_lbrace_after, "="))
- return true;
- OpenMPContextSelectorSetKind CSSKind =
- getOpenMPContextSelectorSet(CtxSelectorSetName);
- llvm::StringMap<SourceLocation> UsedCtx;
- do {
- switch (CSSKind) {
- case OMP_CTX_SET_implementation:
- parseImplementationSelector(*this, Loc, UsedCtx, Data);
- break;
- case OMP_CTX_SET_device:
- parseDeviceSelector(*this, Loc, UsedCtx, Data);
- break;
- case OMP_CTX_SET_unknown:
- // Skip until either '}', ')', or end of directive.
- while (!SkipUntil(tok::r_brace, tok::r_paren,
- tok::annot_pragma_openmp_end, StopBeforeMatch))
- ;
- break;
- }
- const Token PrevTok = Tok;
- if (!TryConsumeToken(tok::comma) && Tok.isNot(tok::r_brace))
- Diag(Tok, diag::err_omp_expected_comma_brace)
- << (PrevTok.isAnnotation() ? "context selector trait"
- : PP.getSpelling(PrevTok));
- } while (Tok.is(tok::identifier));
- // Parse '}'.
- (void)TBr.consumeClose();
- }
- // Consume ','
- if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::annot_pragma_openmp_end))
- (void)ExpectAndConsume(tok::comma);
- } while (Tok.isAnyIdentifier());
+ OMPTraitSet TISet;
+ parseOMPContextSelectorSet(TISet, SeenSets);
+ if (TISet.Kind != TraitSet::invalid && !TISet.Selectors.empty())
+ TI.Sets.push_back(TISet);
+ } while (TryConsumeToken(tok::comma));
+
return false;
}
@@ -1102,10 +1381,30 @@ void Parser::ParseOMPDeclareVariantClauses(Parser::DeclGroupPtrTy Ptr,
(void)ConsumeAnnotationToken();
return;
}
+
+ OMPTraitInfo &TI = Actions.getASTContext().getNewOMPTraitInfo();
+ if (parseOMPDeclareVariantMatchClause(Loc, TI))
+ return;
+
Optional<std::pair<FunctionDecl *, Expr *>> DeclVarData =
Actions.checkOpenMPDeclareVariantFunction(
- Ptr, AssociatedFunction.get(), SourceRange(Loc, Tok.getLocation()));
+ Ptr, AssociatedFunction.get(), TI,
+ SourceRange(Loc, Tok.getLocation()));
+ // Skip last tokens.
+ while (Tok.isNot(tok::annot_pragma_openmp_end))
+ ConsumeAnyToken();
+ if (DeclVarData && !TI.Sets.empty())
+ Actions.ActOnOpenMPDeclareVariantDirective(
+ DeclVarData->first, DeclVarData->second, TI,
+ SourceRange(Loc, Tok.getLocation()));
+
+ // Skip the last annot_pragma_openmp_end.
+ (void)ConsumeAnnotationToken();
+}
+
+bool Parser::parseOMPDeclareVariantMatchClause(SourceLocation Loc,
+ OMPTraitInfo &TI) {
// Parse 'match'.
OpenMPClauseKind CKind = Tok.isAnnotation()
? OMPC_unknown
@@ -1117,47 +1416,32 @@ void Parser::ParseOMPDeclareVariantClauses(Parser::DeclGroupPtrTy Ptr,
;
// Skip the last annot_pragma_openmp_end.
(void)ConsumeAnnotationToken();
- return;
+ return true;
}
(void)ConsumeToken();
// Parse '('.
BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
if (T.expectAndConsume(diag::err_expected_lparen_after,
- getOpenMPClauseName(OMPC_match))) {
+ getOpenMPClauseName(OMPC_match).data())) {
while (!SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch))
;
// Skip the last annot_pragma_openmp_end.
(void)ConsumeAnnotationToken();
- return;
+ return true;
}
// Parse inner context selectors.
- SmallVector<Sema::OMPCtxSelectorData, 4> Data;
- if (!parseOpenMPContextSelectors(Loc, Data)) {
- // Parse ')'.
- (void)T.consumeClose();
- // Need to check for extra tokens.
- if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
- << getOpenMPDirectiveName(OMPD_declare_variant);
- }
- }
+ parseOMPContextSelectors(Loc, TI);
- // Skip last tokens.
- while (Tok.isNot(tok::annot_pragma_openmp_end))
- ConsumeAnyToken();
- if (DeclVarData.hasValue())
- Actions.ActOnOpenMPDeclareVariantDirective(
- DeclVarData.getValue().first, DeclVarData.getValue().second,
- SourceRange(Loc, Tok.getLocation()), Data);
- // Skip the last annot_pragma_openmp_end.
- (void)ConsumeAnnotationToken();
+ // Parse ')'
+ (void)T.consumeClose();
+ return false;
}
/// Parsing of simple OpenMP clauses like 'default' or 'proc_bind'.
///
/// default-clause:
-/// 'default' '(' 'none' | 'shared' ')
+/// 'default' '(' 'none' | 'shared' | 'firstprivate' ')
///
/// proc_bind-clause:
/// 'proc_bind' '(' 'master' | 'close' | 'spread' ')
@@ -1185,7 +1469,7 @@ parseOpenMPSimpleClause(Parser &P, OpenMPClauseKind Kind) {
// Parse '('.
BalancedDelimiterTracker T(P, tok::l_paren, tok::annot_pragma_openmp_end);
if (T.expectAndConsume(diag::err_expected_lparen_after,
- getOpenMPClauseName(Kind)))
+ getOpenMPClauseName(Kind).data()))
return llvm::None;
unsigned Type = getOpenMPSimpleClauseType(
@@ -1289,21 +1573,48 @@ Parser::DeclGroupPtrTy Parser::ParseOMPDeclareTargetClauses() {
return Actions.BuildDeclaratorGroup(Decls);
}
-void Parser::ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
- SourceLocation DTLoc) {
- if (DKind != OMPD_end_declare_target) {
- Diag(Tok, diag::err_expected_end_declare_target);
- Diag(DTLoc, diag::note_matching) << "'#pragma omp declare target'";
+void Parser::skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind) {
+ // The last seen token is annot_pragma_openmp_end - need to check for
+ // extra tokens.
+ if (Tok.is(tok::annot_pragma_openmp_end))
+ return;
+
+ Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
+ << getOpenMPDirectiveName(DKind);
+ while (Tok.isNot(tok::annot_pragma_openmp_end))
+ ConsumeAnyToken();
+}
+
+void Parser::parseOMPEndDirective(OpenMPDirectiveKind BeginKind,
+ OpenMPDirectiveKind ExpectedKind,
+ OpenMPDirectiveKind FoundKind,
+ SourceLocation BeginLoc,
+ SourceLocation FoundLoc,
+ bool SkipUntilOpenMPEnd) {
+ int DiagSelection = ExpectedKind == OMPD_end_declare_target ? 0 : 1;
+
+ if (FoundKind == ExpectedKind) {
+ ConsumeAnyToken();
+ skipUntilPragmaOpenMPEnd(ExpectedKind);
return;
}
- ConsumeAnyToken();
- if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
- << getOpenMPDirectiveName(OMPD_end_declare_target);
+
+ Diag(FoundLoc, diag::err_expected_end_declare_target_or_variant)
+ << DiagSelection;
+ Diag(BeginLoc, diag::note_matching)
+ << ("'#pragma omp " + getOpenMPDirectiveName(BeginKind) + "'").str();
+ if (SkipUntilOpenMPEnd)
SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
- }
+}
+
+void Parser::ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
+ SourceLocation DKLoc) {
+ parseOMPEndDirective(OMPD_declare_target, OMPD_end_declare_target, DKind,
+ DKLoc, Tok.getLocation(),
+ /* SkipUntilOpenMPEnd */ false);
// Skip the last annot_pragma_openmp_end.
- ConsumeAnyToken();
+ if (Tok.is(tok::annot_pragma_openmp_end))
+ ConsumeAnnotationToken();
}
/// Parsing of declarative OpenMP directives.
@@ -1381,13 +1692,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
DeclDirectiveListParserHelper Helper(this, DKind);
if (!ParseOpenMPSimpleVarList(DKind, Helper,
/*AllowScopeSpecifier=*/true)) {
- // The last seen token is annot_pragma_openmp_end - need to check for
- // extra tokens.
- if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
- << getOpenMPDirectiveName(DKind);
- SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
- }
+ skipUntilPragmaOpenMPEnd(DKind);
// Skip the last annot_pragma_openmp_end.
ConsumeAnnotationToken();
return Actions.ActOnOpenMPThreadprivateDirective(Loc,
@@ -1403,18 +1708,18 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
SmallVector<OMPClause *, 1> Clauses;
if (Tok.isNot(tok::annot_pragma_openmp_end)) {
SmallVector<llvm::PointerIntPair<OMPClause *, 1, bool>,
- OMPC_unknown + 1>
- FirstClauses(OMPC_unknown + 1);
+ llvm::omp::Clause_enumSize + 1>
+ FirstClauses(llvm::omp::Clause_enumSize + 1);
while (Tok.isNot(tok::annot_pragma_openmp_end)) {
OpenMPClauseKind CKind =
Tok.isAnnotation() ? OMPC_unknown
: getOpenMPClauseKind(PP.getSpelling(Tok));
Actions.StartOpenMPClause(CKind);
- OMPClause *Clause = ParseOpenMPClause(OMPD_allocate, CKind,
- !FirstClauses[CKind].getInt());
+ OMPClause *Clause = ParseOpenMPClause(
+ OMPD_allocate, CKind, !FirstClauses[unsigned(CKind)].getInt());
SkipUntil(tok::comma, tok::identifier, tok::annot_pragma_openmp_end,
StopBeforeMatch);
- FirstClauses[CKind].setInt(true);
+ FirstClauses[unsigned(CKind)].setInt(true);
if (Clause != nullptr)
Clauses.push_back(Clause);
if (Tok.is(tok::annot_pragma_openmp_end)) {
@@ -1426,13 +1731,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
ConsumeToken();
Actions.EndOpenMPClause();
}
- // The last seen token is annot_pragma_openmp_end - need to check for
- // extra tokens.
- if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
- << getOpenMPDirectiveName(DKind);
- SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
- }
+ skipUntilPragmaOpenMPEnd(DKind);
}
// Skip the last annot_pragma_openmp_end.
ConsumeAnnotationToken();
@@ -1444,8 +1743,9 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
case OMPD_requires: {
SourceLocation StartLoc = ConsumeToken();
SmallVector<OMPClause *, 5> Clauses;
- SmallVector<llvm::PointerIntPair<OMPClause *, 1, bool>, OMPC_unknown + 1>
- FirstClauses(OMPC_unknown + 1);
+ SmallVector<llvm::PointerIntPair<OMPClause *, 1, bool>,
+ llvm::omp::Clause_enumSize + 1>
+ FirstClauses(llvm::omp::Clause_enumSize + 1);
if (Tok.is(tok::annot_pragma_openmp_end)) {
Diag(Tok, diag::err_omp_expected_clause)
<< getOpenMPDirectiveName(OMPD_requires);
@@ -1456,11 +1756,11 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
? OMPC_unknown
: getOpenMPClauseKind(PP.getSpelling(Tok));
Actions.StartOpenMPClause(CKind);
- OMPClause *Clause = ParseOpenMPClause(OMPD_requires, CKind,
- !FirstClauses[CKind].getInt());
+ OMPClause *Clause = ParseOpenMPClause(
+ OMPD_requires, CKind, !FirstClauses[unsigned(CKind)].getInt());
SkipUntil(tok::comma, tok::identifier, tok::annot_pragma_openmp_end,
StopBeforeMatch);
- FirstClauses[CKind].setInt(true);
+ FirstClauses[unsigned(CKind)].setInt(true);
if (Clause != nullptr)
Clauses.push_back(Clause);
if (Tok.is(tok::annot_pragma_openmp_end)) {
@@ -1473,7 +1773,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
Actions.EndOpenMPClause();
}
// Consume final annot_pragma_openmp_end
- if (Clauses.size() == 0) {
+ if (Clauses.empty()) {
Diag(Tok, diag::err_omp_expected_clause)
<< getOpenMPDirectiveName(OMPD_requires);
ConsumeAnnotationToken();
@@ -1485,14 +1785,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
case OMPD_declare_reduction:
ConsumeToken();
if (DeclGroupPtrTy Res = ParseOpenMPDeclareReductionDirective(AS)) {
- // The last seen token is annot_pragma_openmp_end - need to check for
- // extra tokens.
- if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
- << getOpenMPDirectiveName(OMPD_declare_reduction);
- while (Tok.isNot(tok::annot_pragma_openmp_end))
- ConsumeAnyToken();
- }
+ skipUntilPragmaOpenMPEnd(OMPD_declare_reduction);
// Skip the last annot_pragma_openmp_end.
ConsumeAnnotationToken();
return Res;
@@ -1507,6 +1800,63 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
}
break;
}
+ case OMPD_begin_declare_variant: {
+ // The syntax is:
+ // { #pragma omp begin declare variant clause }
+ // <function-declaration-or-definition-sequence>
+ // { #pragma omp end declare variant }
+ //
+ ConsumeToken();
+ OMPTraitInfo &TI = Actions.getASTContext().getNewOMPTraitInfo();
+ if (parseOMPDeclareVariantMatchClause(Loc, TI))
+ break;
+
+ // Skip last tokens.
+ skipUntilPragmaOpenMPEnd(OMPD_begin_declare_variant);
+
+ ParsingOpenMPDirectiveRAII NormalScope(*this, /*Value=*/false);
+
+ VariantMatchInfo VMI;
+ ASTContext &ASTCtx = Actions.getASTContext();
+ TI.getAsVariantMatchInfo(ASTCtx, VMI);
+ OMPContext OMPCtx(ASTCtx.getLangOpts().OpenMPIsDevice,
+ ASTCtx.getTargetInfo().getTriple());
+
+ if (isVariantApplicableInContext(VMI, OMPCtx, /* DeviceSetOnly */ true)) {
+ Actions.ActOnOpenMPBeginDeclareVariant(Loc, TI);
+ break;
+ }
+
+ // Elide all the code till the matching end declare variant was found.
+ unsigned Nesting = 1;
+ SourceLocation DKLoc;
+ OpenMPDirectiveKind DK = OMPD_unknown;
+ do {
+ DKLoc = Tok.getLocation();
+ DK = parseOpenMPDirectiveKind(*this);
+ if (DK == OMPD_end_declare_variant)
+ --Nesting;
+ else if (DK == OMPD_begin_declare_variant)
+ ++Nesting;
+ if (!Nesting || isEofOrEom())
+ break;
+ ConsumeAnyToken();
+ } while (true);
+
+ parseOMPEndDirective(OMPD_begin_declare_variant, OMPD_end_declare_variant,
+ DK, Loc, DKLoc, /* SkipUntilOpenMPEnd */ true);
+ if (isEofOrEom())
+ return nullptr;
+ break;
+ }
+ case OMPD_end_declare_variant: {
+ if (Actions.isInOpenMPDeclareVariantScope())
+ Actions.ActOnOpenMPEndDeclareVariant();
+ else
+ Diag(Loc, diag::err_expected_begin_declare_variant);
+ ConsumeToken();
+ break;
+ }
case OMPD_declare_variant:
case OMPD_declare_simd: {
// The syntax is:
@@ -1563,6 +1913,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
if (!Actions.ActOnStartOpenMPDeclareTargetDirective(DTLoc))
return DeclGroupPtrTy();
+ ParsingOpenMPDirectiveRAII NormalScope(*this, /*Value=*/false);
llvm::SmallVector<Decl *, 4> Decls;
DKind = parseOpenMPDirectiveKind(*this);
while (DKind != OMPD_end_declare_target && Tok.isNot(tok::eof) &&
@@ -1608,6 +1959,8 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
case OMPD_taskwait:
case OMPD_taskgroup:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_for:
case OMPD_for_simd:
case OMPD_sections:
@@ -1656,6 +2009,8 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
Diag(Tok, diag::err_omp_unexpected_directive)
<< 1 << getOpenMPDirectiveName(DKind);
break;
+ default:
+ break;
}
while (Tok.isNot(tok::annot_pragma_openmp_end))
ConsumeAnyToken();
@@ -1709,8 +2064,9 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
ParsingOpenMPDirectiveRAII DirScope(*this);
ParenBraceBracketBalancer BalancerRAIIObj(*this);
SmallVector<OMPClause *, 5> Clauses;
- SmallVector<llvm::PointerIntPair<OMPClause *, 1, bool>, OMPC_unknown + 1>
- FirstClauses(OMPC_unknown + 1);
+ SmallVector<llvm::PointerIntPair<OMPClause *, 1, bool>,
+ llvm::omp::Clause_enumSize + 1>
+ FirstClauses(llvm::omp::Clause_enumSize + 1);
unsigned ScopeFlags = Scope::FnScope | Scope::DeclScope |
Scope::CompoundStmtScope | Scope::OpenMPDirectiveScope;
SourceLocation Loc = ConsumeAnnotationToken(), EndLoc;
@@ -1720,7 +2076,6 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
DeclarationNameInfo DirName;
StmtResult Directive = StmtError();
bool HasAssociatedStatement = true;
- bool FlushHasClause = false;
switch (DKind) {
case OMPD_threadprivate: {
@@ -1734,13 +2089,7 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
DeclDirectiveListParserHelper Helper(this, DKind);
if (!ParseOpenMPSimpleVarList(DKind, Helper,
/*AllowScopeSpecifier=*/false)) {
- // The last seen token is annot_pragma_openmp_end - need to check for
- // extra tokens.
- if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
- << getOpenMPDirectiveName(DKind);
- SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
- }
+ skipUntilPragmaOpenMPEnd(DKind);
DeclGroupPtrTy Res = Actions.ActOnOpenMPThreadprivateDirective(
Loc, Helper.getIdentifiers());
Directive = Actions.ActOnDeclStmt(Res, Loc, Tok.getLocation());
@@ -1762,18 +2111,18 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
SmallVector<OMPClause *, 1> Clauses;
if (Tok.isNot(tok::annot_pragma_openmp_end)) {
SmallVector<llvm::PointerIntPair<OMPClause *, 1, bool>,
- OMPC_unknown + 1>
- FirstClauses(OMPC_unknown + 1);
+ llvm::omp::Clause_enumSize + 1>
+ FirstClauses(llvm::omp::Clause_enumSize + 1);
while (Tok.isNot(tok::annot_pragma_openmp_end)) {
OpenMPClauseKind CKind =
Tok.isAnnotation() ? OMPC_unknown
: getOpenMPClauseKind(PP.getSpelling(Tok));
Actions.StartOpenMPClause(CKind);
- OMPClause *Clause = ParseOpenMPClause(OMPD_allocate, CKind,
- !FirstClauses[CKind].getInt());
+ OMPClause *Clause = ParseOpenMPClause(
+ OMPD_allocate, CKind, !FirstClauses[unsigned(CKind)].getInt());
SkipUntil(tok::comma, tok::identifier, tok::annot_pragma_openmp_end,
StopBeforeMatch);
- FirstClauses[CKind].setInt(true);
+ FirstClauses[unsigned(CKind)].setInt(true);
if (Clause != nullptr)
Clauses.push_back(Clause);
if (Tok.is(tok::annot_pragma_openmp_end)) {
@@ -1785,13 +2134,7 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
ConsumeToken();
Actions.EndOpenMPClause();
}
- // The last seen token is annot_pragma_openmp_end - need to check for
- // extra tokens.
- if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
- << getOpenMPDirectiveName(DKind);
- SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
- }
+ skipUntilPragmaOpenMPEnd(DKind);
}
DeclGroupPtrTy Res = Actions.ActOnOpenMPAllocateDirective(
Loc, Helper.getIdentifiers(), Clauses);
@@ -1804,14 +2147,7 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
ConsumeToken();
if (DeclGroupPtrTy Res =
ParseOpenMPDeclareReductionDirective(/*AS=*/AS_none)) {
- // The last seen token is annot_pragma_openmp_end - need to check for
- // extra tokens.
- if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
- << getOpenMPDirectiveName(OMPD_declare_reduction);
- while (Tok.isNot(tok::annot_pragma_openmp_end))
- ConsumeAnyToken();
- }
+ skipUntilPragmaOpenMPEnd(OMPD_declare_reduction);
ConsumeAnyToken();
Directive = Actions.ActOnDeclStmt(Res, Loc, Tok.getLocation());
} else {
@@ -1831,13 +2167,8 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
break;
}
case OMPD_flush:
- if (PP.LookAhead(0).is(tok::l_paren)) {
- FlushHasClause = true;
- // Push copy of the current token back to stream to properly parse
- // pseudo-clause OMPFlushClause.
- PP.EnterToken(Tok, /*IsReinject*/ true);
- }
- LLVM_FALLTHROUGH;
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_taskyield:
case OMPD_barrier:
case OMPD_taskwait:
@@ -1897,6 +2228,13 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd:
case OMPD_target_teams_distribute_simd: {
+ // Special processing for flush and depobj clauses.
+ Token ImplicitTok;
+ bool ImplicitClauseAllowed = false;
+ if (DKind == OMPD_flush || DKind == OMPD_depobj) {
+ ImplicitTok = Tok;
+ ImplicitClauseAllowed = true;
+ }
ConsumeToken();
// Parse directive name of the 'critical' directive if any.
if (DKind == OMPD_critical) {
@@ -1926,18 +2264,37 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
Actions.StartOpenMPDSABlock(DKind, DirName, Actions.getCurScope(), Loc);
while (Tok.isNot(tok::annot_pragma_openmp_end)) {
- OpenMPClauseKind CKind =
- Tok.isAnnotation()
- ? OMPC_unknown
- : FlushHasClause ? OMPC_flush
- : getOpenMPClauseKind(PP.getSpelling(Tok));
+ bool HasImplicitClause = false;
+ if (ImplicitClauseAllowed && Tok.is(tok::l_paren)) {
+ HasImplicitClause = true;
+ // Push copy of the current token back to stream to properly parse
+ // pseudo-clause OMPFlushClause or OMPDepobjClause.
+ PP.EnterToken(Tok, /*IsReinject*/ true);
+ PP.EnterToken(ImplicitTok, /*IsReinject*/ true);
+ ConsumeAnyToken();
+ }
+ OpenMPClauseKind CKind = Tok.isAnnotation()
+ ? OMPC_unknown
+ : getOpenMPClauseKind(PP.getSpelling(Tok));
+ if (HasImplicitClause) {
+ assert(CKind == OMPC_unknown && "Must be unknown implicit clause.");
+ if (DKind == OMPD_flush) {
+ CKind = OMPC_flush;
+ } else {
+ assert(DKind == OMPD_depobj &&
+ "Expected flush or depobj directives.");
+ CKind = OMPC_depobj;
+ }
+ }
+ // No more implicit clauses allowed.
+ ImplicitClauseAllowed = false;
Actions.StartOpenMPClause(CKind);
- FlushHasClause = false;
- OMPClause *Clause =
- ParseOpenMPClause(DKind, CKind, !FirstClauses[CKind].getInt());
- FirstClauses[CKind].setInt(true);
+ HasImplicitClause = false;
+ OMPClause *Clause = ParseOpenMPClause(
+ DKind, CKind, !FirstClauses[unsigned(CKind)].getInt());
+ FirstClauses[unsigned(CKind)].setInt(true);
if (Clause) {
- FirstClauses[CKind].setPointer(Clause);
+ FirstClauses[unsigned(CKind)].setPointer(Clause);
Clauses.push_back(Clause);
}
@@ -1954,7 +2311,7 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
// OpenMP [2.13.8, ordered Construct, Syntax]
// If the depend clause is specified, the ordered construct is a stand-alone
// directive.
- if (DKind == OMPD_ordered && FirstClauses[OMPC_depend].getInt()) {
+ if (DKind == OMPD_ordered && FirstClauses[unsigned(OMPC_depend)].getInt()) {
if ((StmtCtx & ParsedStmtContext::AllowStandaloneOpenMPDirectives) ==
ParsedStmtContext()) {
Diag(Loc, diag::err_omp_immediate_directive)
@@ -1971,6 +2328,7 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
// FIXME: We create a bogus CompoundStmt scope to hold the contents of
// the captured region. Code elsewhere assumes that any FunctionScopeInfo
// should have at least one compound statement scope within it.
+ ParsingOpenMPDirectiveRAII NormalScope(*this, /*Value=*/false);
AssociatedStmt = (Sema::CompoundScopeRAII(Actions), ParseStatement());
AssociatedStmt = Actions.ActOnOpenMPRegionEnd(AssociatedStmt, Clauses);
} else if (DKind == OMPD_target_update || DKind == OMPD_target_enter_data ||
@@ -1994,12 +2352,15 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_requires:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_variant:
Diag(Tok, diag::err_omp_unexpected_directive)
<< 1 << getOpenMPDirectiveName(DKind);
SkipUntil(tok::annot_pragma_openmp_end);
break;
case OMPD_unknown:
+ default:
Diag(Tok, diag::err_omp_unknown_directive);
SkipUntil(tok::annot_pragma_openmp_end);
break;
@@ -2033,12 +2394,14 @@ bool Parser::ParseOpenMPSimpleVarList(
NoIdentIsFound = false;
if (AllowScopeSpecifier && getLangOpts().CPlusPlus &&
- ParseOptionalCXXScopeSpecifier(SS, nullptr, false)) {
+ ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false, false)) {
IsCorrect = false;
SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
StopBeforeMatch);
- } else if (ParseUnqualifiedId(SS, false, false, false, false, nullptr,
- nullptr, Name)) {
+ } else if (ParseUnqualifiedId(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false, false, false,
+ false, false, nullptr, Name)) {
IsCorrect = false;
SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
StopBeforeMatch);
@@ -2070,6 +2433,50 @@ bool Parser::ParseOpenMPSimpleVarList(
return !IsCorrect;
}
+OMPClause *Parser::ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind) {
+ SourceLocation Loc = Tok.getLocation();
+ ConsumeAnyToken();
+
+ // Parse '('.
+ BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
+ if (T.expectAndConsume(diag::err_expected_lparen_after, "uses_allocator"))
+ return nullptr;
+ SmallVector<Sema::UsesAllocatorsData, 4> Data;
+ do {
+ ExprResult Allocator = ParseCXXIdExpression();
+ if (Allocator.isInvalid()) {
+ SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ break;
+ }
+ Sema::UsesAllocatorsData &D = Data.emplace_back();
+ D.Allocator = Allocator.get();
+ if (Tok.is(tok::l_paren)) {
+ BalancedDelimiterTracker T(*this, tok::l_paren,
+ tok::annot_pragma_openmp_end);
+ T.consumeOpen();
+ ExprResult AllocatorTraits = ParseCXXIdExpression();
+ T.consumeClose();
+ if (AllocatorTraits.isInvalid()) {
+ SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ break;
+ }
+ D.AllocatorTraits = AllocatorTraits.get();
+ D.LParenLoc = T.getOpenLocation();
+ D.RParenLoc = T.getCloseLocation();
+ }
+ if (Tok.isNot(tok::comma) && Tok.isNot(tok::r_paren))
+ Diag(Tok, diag::err_omp_expected_punc) << "uses_allocators" << 0;
+ // Parse ','
+ if (Tok.is(tok::comma))
+ ConsumeAnyToken();
+ } while (Tok.isNot(tok::r_paren) && Tok.isNot(tok::annot_pragma_openmp_end));
+ T.consumeClose();
+ return Actions.ActOnOpenMPUsesAllocatorClause(Loc, T.getOpenLocation(),
+ T.getCloseLocation(), Data);
+}
+
/// Parsing of OpenMP clauses.
///
/// clause:
@@ -2084,10 +2491,14 @@ bool Parser::ParseOpenMPSimpleVarList(
/// thread_limit-clause | priority-clause | grainsize-clause |
/// nogroup-clause | num_tasks-clause | hint-clause | to-clause |
/// from-clause | is_device_ptr-clause | task_reduction-clause |
-/// in_reduction-clause | allocator-clause | allocate-clause
+/// in_reduction-clause | allocator-clause | allocate-clause |
+/// acq_rel-clause | acquire-clause | release-clause | relaxed-clause |
+/// depobj-clause | destroy-clause | detach-clause | inclusive-clause |
+/// exclusive-clause | uses_allocators-clause | use_device_addr-clause
///
OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause) {
+ OMPClauseKind = CKind;
OMPClause *Clause = nullptr;
bool ErrorFound = false;
bool WrongDirective = false;
@@ -2107,7 +2518,6 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_simdlen:
case OMPC_collapse:
case OMPC_ordered:
- case OMPC_device:
case OMPC_num_teams:
case OMPC_thread_limit:
case OMPC_priority:
@@ -2115,14 +2525,14 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_num_tasks:
case OMPC_hint:
case OMPC_allocator:
+ case OMPC_depobj:
+ case OMPC_detach:
// OpenMP [2.5, Restrictions]
// At most one num_threads clause can appear on the directive.
// OpenMP [2.8.1, simd construct, Restrictions]
// Only one safelen clause can appear on a simd directive.
// Only one simdlen clause can appear on a simd directive.
// Only one collapse clause can appear on a simd directive.
- // OpenMP [2.9.1, target data construct, Restrictions]
- // At most one device clause can appear on the directive.
// OpenMP [2.11.1, task Construct, Restrictions]
// At most one if clause can appear on the directive.
// At most one final clause can appear on the directive.
@@ -2137,6 +2547,8 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
// At most one num_tasks clause can appear on the directive.
// OpenMP [2.11.3, allocate Directive, Restrictions]
// At most one allocator clause can appear on the directive.
+ // OpenMP 5.0, 2.10.1 task Construct, Restrictions.
+ // At most one detach clause can appear on the directive.
if (!FirstClause) {
Diag(Tok, diag::err_omp_more_one_clause)
<< getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
@@ -2151,6 +2563,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_default:
case OMPC_proc_bind:
case OMPC_atomic_default_mem_order:
+ case OMPC_order:
// OpenMP [2.14.3.1, Restrictions]
// Only a single default clause may be specified on a parallel, task or
// teams directive.
@@ -2159,7 +2572,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
// OpenMP [5.0, Requires directive, Restrictions]
// At most one atomic_default_mem_order clause can appear
// on the directive
- if (!FirstClause) {
+ if (!FirstClause && CKind != OMPC_order) {
Diag(Tok, diag::err_omp_more_one_clause)
<< getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
ErrorFound = true;
@@ -2167,6 +2580,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
Clause = ParseOpenMPSimpleClause(CKind, WrongDirective);
break;
+ case OMPC_device:
case OMPC_schedule:
case OMPC_dist_schedule:
case OMPC_defaultmap:
@@ -2174,6 +2588,8 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
// Only one schedule clause can appear on a loop directive.
// OpenMP 4.5 [2.10.4, Restrictions, p. 106]
// At most one defaultmap clause can appear on the directive.
+ // OpenMP 5.0 [2.12.5, target construct, Restrictions]
+ // At most one device clause can appear on the directive.
if ((getLangOpts().OpenMP < 50 || CKind != OMPC_defaultmap) &&
!FirstClause) {
Diag(Tok, diag::err_omp_more_one_clause)
@@ -2182,16 +2598,19 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
}
LLVM_FALLTHROUGH;
case OMPC_if:
- Clause = ParseOpenMPSingleExprWithArgClause(CKind, WrongDirective);
+ Clause = ParseOpenMPSingleExprWithArgClause(DKind, CKind, WrongDirective);
break;
case OMPC_nowait:
case OMPC_untied:
case OMPC_mergeable:
case OMPC_read:
case OMPC_write:
- case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed:
case OMPC_threads:
case OMPC_simd:
case OMPC_nogroup:
@@ -2199,6 +2618,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_unified_shared_memory:
case OMPC_reverse_offload:
case OMPC_dynamic_allocators:
+ case OMPC_destroy:
// OpenMP [2.7.1, Restrictions, p. 9]
// Only one ordered clause can appear on a loop directive.
// OpenMP [2.7.1, Restrictions, C/C++, p. 4]
@@ -2213,6 +2633,17 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
Clause = ParseOpenMPClause(CKind, WrongDirective);
break;
+ case OMPC_update:
+ if (!FirstClause) {
+ Diag(Tok, diag::err_omp_more_one_clause)
+ << getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
+ ErrorFound = true;
+ }
+
+ Clause = (DKind == OMPD_depobj)
+ ? ParseOpenMPSimpleClause(CKind, WrongDirective)
+ : ParseOpenMPClause(CKind, WrongDirective);
+ break;
case OMPC_private:
case OMPC_firstprivate:
case OMPC_lastprivate:
@@ -2230,16 +2661,21 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_to:
case OMPC_from:
case OMPC_use_device_ptr:
+ case OMPC_use_device_addr:
case OMPC_is_device_ptr:
case OMPC_allocate:
case OMPC_nontemporal:
+ case OMPC_inclusive:
+ case OMPC_exclusive:
+ case OMPC_affinity:
Clause = ParseOpenMPVarListClause(DKind, CKind, WrongDirective);
break;
+ case OMPC_uses_allocators:
+ Clause = ParseOpenMPUsesAllocatorClause(DKind);
+ break;
case OMPC_device_type:
case OMPC_unknown:
- Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
- << getOpenMPDirectiveName(DKind);
- SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
+ skipUntilPragmaOpenMPEnd(DKind);
break;
case OMPC_threadprivate:
case OMPC_uniform:
@@ -2249,6 +2685,8 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
<< getOpenMPClauseName(CKind) << getOpenMPDirectiveName(DKind);
SkipUntil(tok::comma, tok::annot_pragma_openmp_end, StopBeforeMatch);
break;
+ default:
+ break;
}
return ErrorFound ? nullptr : Clause;
}
@@ -2279,7 +2717,8 @@ ExprResult Parser::ParseOpenMPParensExpr(StringRef ClauseName,
/// Parsing of OpenMP clauses with single expressions like 'final',
/// 'collapse', 'safelen', 'num_threads', 'simdlen', 'num_teams',
-/// 'thread_limit', 'simdlen', 'priority', 'grainsize', 'num_tasks' or 'hint'.
+/// 'thread_limit', 'simdlen', 'priority', 'grainsize', 'num_tasks', 'hint' or
+/// 'detach'.
///
/// final-clause:
/// 'final' '(' expression ')'
@@ -2311,6 +2750,9 @@ ExprResult Parser::ParseOpenMPParensExpr(StringRef ClauseName,
/// allocator-clause:
/// 'allocator' '(' expression ')'
///
+/// detach-clause:
+/// 'detach' '(' event-handler-expression ')'
+///
OMPClause *Parser::ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly) {
SourceLocation Loc = ConsumeToken();
@@ -2330,16 +2772,27 @@ OMPClause *Parser::ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
/// Parsing of simple OpenMP clauses like 'default' or 'proc_bind'.
///
/// default-clause:
-/// 'default' '(' 'none' | 'shared' ')
+/// 'default' '(' 'none' | 'shared' | 'firstprivate' ')'
///
/// proc_bind-clause:
-/// 'proc_bind' '(' 'master' | 'close' | 'spread' ')
+/// 'proc_bind' '(' 'master' | 'close' | 'spread' ')'
+///
+/// update-clause:
+/// 'update' '(' 'in' | 'out' | 'inout' | 'mutexinoutset' ')'
///
OMPClause *Parser::ParseOpenMPSimpleClause(OpenMPClauseKind Kind,
bool ParseOnly) {
llvm::Optional<SimpleClauseData> Val = parseOpenMPSimpleClause(*this, Kind);
if (!Val || ParseOnly)
return nullptr;
+ if (getLangOpts().OpenMP < 51 && Kind == OMPC_default &&
+ static_cast<DefaultKind>(Val.getValue().Type) ==
+ OMP_DEFAULT_firstprivate) {
+ Diag(Val.getValue().LOpen, diag::err_omp_invalid_dsa)
+ << getOpenMPClauseName(OMPC_firstprivate)
+ << getOpenMPClauseName(OMPC_default) << "5.1";
+ return nullptr;
+ }
return Actions.ActOnOpenMPSimpleClause(
Kind, Val.getValue().Type, Val.getValue().TypeLoc, Val.getValue().LOpen,
Val.getValue().Loc, Val.getValue().RLoc);
@@ -2380,7 +2833,6 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly) {
return Actions.ActOnOpenMPClause(Kind, Loc, Tok.getLocation());
}
-
/// Parsing of OpenMP clauses with single expressions and some additional
/// argument like 'schedule' or 'dist_schedule'.
///
@@ -2392,16 +2844,20 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly) {
/// 'if' '(' [ directive-name-modifier ':' ] expression ')'
///
/// defaultmap:
-/// 'defaultmap' '(' modifier ':' kind ')'
+/// 'defaultmap' '(' modifier [ ':' kind ] ')'
+///
+/// device-clause:
+/// 'device' '(' [ device-modifier ':' ] expression ')'
///
-OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
+OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
+ OpenMPClauseKind Kind,
bool ParseOnly) {
SourceLocation Loc = ConsumeToken();
SourceLocation DelimLoc;
// Parse '('.
BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
if (T.expectAndConsume(diag::err_expected_lparen_after,
- getOpenMPClauseName(Kind)))
+ getOpenMPClauseName(Kind).data()))
return nullptr;
ExprResult Val;
@@ -2477,17 +2933,37 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
Tok.isNot(tok::annot_pragma_openmp_end))
ConsumeAnyToken();
// Parse ':'
- if (Tok.is(tok::colon))
+ if (Tok.is(tok::colon) || getLangOpts().OpenMP < 50) {
+ if (Tok.is(tok::colon))
+ ConsumeAnyToken();
+ else if (Arg.back() != OMPC_DEFAULTMAP_MODIFIER_unknown)
+ Diag(Tok, diag::warn_pragma_expected_colon) << "defaultmap modifier";
+ // Get a defaultmap kind
+ Arg.push_back(getOpenMPSimpleClauseType(
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok)));
+ KLoc.push_back(Tok.getLocation());
+ if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::comma) &&
+ Tok.isNot(tok::annot_pragma_openmp_end))
+ ConsumeAnyToken();
+ } else {
+ Arg.push_back(OMPC_DEFAULTMAP_unknown);
+ KLoc.push_back(SourceLocation());
+ }
+ } else if (Kind == OMPC_device) {
+ // Only target executable directives support extended device construct.
+ if (isOpenMPTargetExecutionDirective(DKind) && getLangOpts().OpenMP >= 50 &&
+ NextToken().is(tok::colon)) {
+ // Parse optional <device modifier> ':'
+ Arg.push_back(getOpenMPSimpleClauseType(
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok)));
+ KLoc.push_back(Tok.getLocation());
ConsumeAnyToken();
- else if (Arg.back() != OMPC_DEFAULTMAP_MODIFIER_unknown)
- Diag(Tok, diag::warn_pragma_expected_colon) << "defaultmap modifier";
- // Get a defaultmap kind
- Arg.push_back(getOpenMPSimpleClauseType(
- Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok)));
- KLoc.push_back(Tok.getLocation());
- if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::comma) &&
- Tok.isNot(tok::annot_pragma_openmp_end))
+ // Parse ':'
ConsumeAnyToken();
+ } else {
+ Arg.push_back(OMPC_DEVICE_unknown);
+ KLoc.emplace_back();
+ }
} else {
assert(Kind == OMPC_if);
KLoc.push_back(Tok.getLocation());
@@ -2510,7 +2986,7 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
bool NeedAnExpression = (Kind == OMPC_schedule && DelimLoc.isValid()) ||
(Kind == OMPC_dist_schedule && DelimLoc.isValid()) ||
- Kind == OMPC_if;
+ Kind == OMPC_if || Kind == OMPC_device;
if (NeedAnExpression) {
SourceLocation ELoc = Tok.getLocation();
ExprResult LHS(ParseCastExpression(AnyCastExpr, false, NotTypeCast));
@@ -2572,11 +3048,12 @@ static bool ParseReductionId(Parser &P, CXXScopeSpec &ReductionIdScopeSpec,
return false;
}
}
- return P.ParseUnqualifiedId(ReductionIdScopeSpec, /*EnteringContext*/ false,
- /*AllowDestructorName*/ false,
- /*AllowConstructorName*/ false,
- /*AllowDeductionGuide*/ false,
- nullptr, nullptr, ReductionId);
+ return P.ParseUnqualifiedId(
+ ReductionIdScopeSpec, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false, /*EnteringContext*/ false,
+ /*AllowDestructorName*/ false,
+ /*AllowConstructorName*/ false,
+ /*AllowDeductionGuide*/ false, nullptr, ReductionId);
}
/// Checks if the token is a valid map-type-modifier.
@@ -2604,6 +3081,7 @@ bool Parser::parseMapperModifier(OpenMPVarListDataTy &Data) {
if (getLangOpts().CPlusPlus)
ParseOptionalCXXScopeSpecifier(Data.ReductionOrMapperIdScopeSpec,
/*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
/*EnteringContext=*/false);
if (Tok.isNot(tok::identifier) && Tok.isNot(tok::kw_default)) {
Diag(Tok.getLocation(), diag::err_omp_mapper_illegal_identifier);
@@ -2684,6 +3162,114 @@ static void parseMapType(Parser &P, Parser::OpenMPVarListDataTy &Data) {
P.ConsumeToken();
}
+/// Parses an OpenMP 'iterator' modifier of the form
+/// iterator(iterators-definition), as used in 'depend' and 'affinity'
+/// clauses, and returns the resulting iterator expression.
+ExprResult Parser::ParseOpenMPIteratorsExpr() {
+ assert(Tok.is(tok::identifier) && PP.getSpelling(Tok) == "iterator" &&
+ "Expected 'iterator' token.");
+ SourceLocation IteratorKwLoc = ConsumeToken();
+
+ BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
+ if (T.expectAndConsume(diag::err_expected_lparen_after, "iterator"))
+ return ExprError();
+
+ SourceLocation LLoc = T.getOpenLocation();
+ SmallVector<Sema::OMPIteratorData, 4> Data;
+ while (Tok.isNot(tok::r_paren) && Tok.isNot(tok::annot_pragma_openmp_end)) {
+ // Check if the type parsing is required.
+ ParsedType IteratorType;
+ if (Tok.isNot(tok::identifier) || NextToken().isNot(tok::equal)) {
+ // identifier '=' is not found - parse type.
+ TypeResult TR = ParseTypeName();
+ if (TR.isInvalid()) {
+ T.skipToEnd();
+ return ExprError();
+ }
+ IteratorType = TR.get();
+ }
+
+ // Parse identifier.
+ IdentifierInfo *II = nullptr;
+ SourceLocation IdLoc;
+ if (Tok.is(tok::identifier)) {
+ II = Tok.getIdentifierInfo();
+ IdLoc = ConsumeToken();
+ } else {
+ Diag(Tok, diag::err_expected_unqualified_id) << 0;
+ }
+
+ // Parse '='.
+ SourceLocation AssignLoc;
+ if (Tok.is(tok::equal))
+ AssignLoc = ConsumeToken();
+ else
+ Diag(Tok, diag::err_omp_expected_equal_in_iterator);
+
+ // Parse range-specification - <begin> ':' <end> [ ':' <step> ]
+ ColonProtectionRAIIObject ColonRAII(*this);
+ // Parse <begin>
+ SourceLocation Loc = Tok.getLocation();
+ ExprResult LHS = ParseCastExpression(AnyCastExpr);
+ ExprResult Begin = Actions.CorrectDelayedTyposInExpr(
+ ParseRHSOfBinaryExpression(LHS, prec::Conditional));
+ Begin = Actions.ActOnFinishFullExpr(Begin.get(), Loc,
+ /*DiscardedValue=*/false);
+ // Parse ':'.
+ SourceLocation ColonLoc;
+ if (Tok.is(tok::colon))
+ ColonLoc = ConsumeToken();
+
+ // Parse <end>
+ Loc = Tok.getLocation();
+ LHS = ParseCastExpression(AnyCastExpr);
+ ExprResult End = Actions.CorrectDelayedTyposInExpr(
+ ParseRHSOfBinaryExpression(LHS, prec::Conditional));
+ End = Actions.ActOnFinishFullExpr(End.get(), Loc,
+ /*DiscardedValue=*/false);
+
+ SourceLocation SecColonLoc;
+ ExprResult Step;
+ // Parse optional step.
+ if (Tok.is(tok::colon)) {
+ // Parse ':'
+ SecColonLoc = ConsumeToken();
+ // Parse <step>
+ Loc = Tok.getLocation();
+ LHS = ParseCastExpression(AnyCastExpr);
+ Step = Actions.CorrectDelayedTyposInExpr(
+ ParseRHSOfBinaryExpression(LHS, prec::Conditional));
+ Step = Actions.ActOnFinishFullExpr(Step.get(), Loc,
+ /*DiscardedValue=*/false);
+ }
+
+ // Parse ',' or ')'
+ if (Tok.isNot(tok::comma) && Tok.isNot(tok::r_paren))
+ Diag(Tok, diag::err_omp_expected_punc_after_iterator);
+ if (Tok.is(tok::comma))
+ ConsumeToken();
+
+ Sema::OMPIteratorData &D = Data.emplace_back();
+ D.DeclIdent = II;
+ D.DeclIdentLoc = IdLoc;
+ D.Type = IteratorType;
+ D.AssignLoc = AssignLoc;
+ D.ColonLoc = ColonLoc;
+ D.SecColonLoc = SecColonLoc;
+ D.Range.Begin = Begin.get();
+ D.Range.End = End.get();
+ D.Range.Step = Step.get();
+ }
+
+ // Parse ')'.
+ SourceLocation RLoc = Tok.getLocation();
+ if (!T.consumeClose())
+ RLoc = T.getCloseLocation();
+
+ return Actions.ActOnOMPIteratorExpr(getCurScope(), IteratorKwLoc, LLoc, RLoc,
+ Data);
+}
+
/// Parses clauses with list.
bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind,
@@ -2696,19 +3282,32 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
// Parse '('.
BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
if (T.expectAndConsume(diag::err_expected_lparen_after,
- getOpenMPClauseName(Kind)))
+ getOpenMPClauseName(Kind).data()))
return true;
+ bool HasIterator = false;
bool NeedRParenForLinear = false;
BalancedDelimiterTracker LinearT(*this, tok::l_paren,
tok::annot_pragma_openmp_end);
// Handle reduction-identifier for reduction clause.
if (Kind == OMPC_reduction || Kind == OMPC_task_reduction ||
Kind == OMPC_in_reduction) {
+ Data.ExtraModifier = OMPC_REDUCTION_unknown;
+ if (Kind == OMPC_reduction && getLangOpts().OpenMP >= 50 &&
+ (Tok.is(tok::identifier) || Tok.is(tok::kw_default)) &&
+ NextToken().is(tok::comma)) {
+ // Parse optional reduction modifier.
+ Data.ExtraModifier = getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok));
+ Data.ExtraModifierLoc = Tok.getLocation();
+ ConsumeToken();
+ assert(Tok.is(tok::comma) && "Expected comma.");
+ (void)ConsumeToken();
+ }
ColonProtectionRAIIObject ColonRAII(*this);
if (getLangOpts().CPlusPlus)
ParseOptionalCXXScopeSpecifier(Data.ReductionOrMapperIdScopeSpec,
/*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
/*EnteringContext=*/false);
InvalidReductionId = ParseReductionId(
*this, Data.ReductionOrMapperIdScopeSpec, UnqualifiedReductionId);
@@ -2724,11 +3323,27 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
Data.ReductionOrMapperId =
Actions.GetNameFromUnqualifiedId(UnqualifiedReductionId);
} else if (Kind == OMPC_depend) {
+ if (getLangOpts().OpenMP >= 50) {
+ if (Tok.is(tok::identifier) && PP.getSpelling(Tok) == "iterator") {
+ // Handle optional dependence modifier.
+ // iterator(iterators-definition)
+ // where iterators-definition is iterator-specifier [,
+ // iterators-definition ]
+ // where iterator-specifier is [ iterator-type ] identifier =
+ // range-specification
+ HasIterator = true;
+ EnterScope(Scope::OpenMPDirectiveScope | Scope::DeclScope);
+ ExprResult IteratorRes = ParseOpenMPIteratorsExpr();
+ Data.DepModOrTailExpr = IteratorRes.get();
+ // Parse ','
+ ExpectAndConsume(tok::comma);
+ }
+ }
// Handle dependency type for depend clause.
ColonProtectionRAIIObject ColonRAII(*this);
Data.ExtraModifier = getOpenMPSimpleClauseType(
Kind, Tok.is(tok::identifier) ? PP.getSpelling(Tok) : "");
- Data.DepLinMapLastLoc = Tok.getLocation();
+ Data.ExtraModifierLoc = Tok.getLocation();
if (Data.ExtraModifier == OMPC_DEPEND_unknown) {
SkipUntil(tok::colon, tok::r_paren, tok::annot_pragma_openmp_end,
StopBeforeMatch);
@@ -2753,7 +3368,7 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
Data.ExtraModifier = OMPC_LINEAR_val;
if (Tok.is(tok::identifier) && PP.LookAhead(0).is(tok::l_paren)) {
Data.ExtraModifier = getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok));
- Data.DepLinMapLastLoc = ConsumeToken();
+ Data.ExtraModifierLoc = ConsumeToken();
LinearT.consumeOpen();
NeedRParenForLinear = true;
}
@@ -2766,13 +3381,8 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
!isOpenMPTaskLoopDirective(DKind)) &&
Tok.is(tok::identifier) && PP.LookAhead(0).is(tok::colon)) {
Data.ExtraModifier = getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok));
- Data.DepLinMapLastLoc = Tok.getLocation();
- if (Data.ExtraModifier == OMPC_LASTPRIVATE_unknown) {
- SkipUntil(tok::colon, tok::r_paren, tok::annot_pragma_openmp_end,
- StopBeforeMatch);
- } else {
- ConsumeToken();
- }
+ Data.ExtraModifierLoc = Tok.getLocation();
+ ConsumeToken();
assert(Tok.is(tok::colon) && "Expected colon.");
Data.ColonLoc = ConsumeToken();
}
@@ -2784,7 +3394,7 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
// map-type-modifier. The map-type can also be delete which has the same
// spelling of the C++ delete keyword.
Data.ExtraModifier = OMPC_MAP_unknown;
- Data.DepLinMapLastLoc = Tok.getLocation();
+ Data.ExtraModifierLoc = Tok.getLocation();
// Check for presence of a colon in the map clause.
TentativeParsingAction TPA(*this);
@@ -2840,22 +3450,33 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
ConsumeToken();
}
}
- } else if (Kind == OMPC_allocate) {
+ } else if (Kind == OMPC_allocate ||
+ (Kind == OMPC_affinity && Tok.is(tok::identifier) &&
+ PP.getSpelling(Tok) == "iterator")) {
// Handle optional allocator expression followed by colon delimiter.
ColonProtectionRAIIObject ColonRAII(*this);
TentativeParsingAction TPA(*this);
- ExprResult Tail =
- Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression());
+ // OpenMP 5.0, 2.10.1, task Construct.
+ // where aff-modifier is one of the following:
+ // iterator(iterators-definition)
+ ExprResult Tail;
+ if (Kind == OMPC_allocate) {
+ Tail = ParseAssignmentExpression();
+ } else {
+ HasIterator = true;
+ EnterScope(Scope::OpenMPDirectiveScope | Scope::DeclScope);
+ Tail = ParseOpenMPIteratorsExpr();
+ }
+ Tail = Actions.CorrectDelayedTyposInExpr(Tail);
Tail = Actions.ActOnFinishFullExpr(Tail.get(), T.getOpenLocation(),
/*DiscardedValue=*/false);
if (Tail.isUsable()) {
if (Tok.is(tok::colon)) {
- Data.TailExpr = Tail.get();
+ Data.DepModOrTailExpr = Tail.get();
Data.ColonLoc = ConsumeToken();
TPA.Commit();
} else {
- // colon not found, no allocator specified, parse only list of
- // variables.
+ // Colon not found, parse only list of variables.
TPA.Revert();
}
} else {
@@ -2876,6 +3497,7 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
const bool MayHaveTail = (Kind == OMPC_linear || Kind == OMPC_aligned);
while (IsComma || (Tok.isNot(tok::r_paren) && Tok.isNot(tok::colon) &&
Tok.isNot(tok::annot_pragma_openmp_end))) {
+ ParseScope OMPListScope(this, Scope::OpenMPDirectiveScope);
ColonProtectionRAIIObject ColonRAII(*this, MayHaveTail);
// Parse variable
ExprResult VarExpr =
@@ -2912,7 +3534,7 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
Tail =
Actions.ActOnFinishFullExpr(Tail.get(), ELoc, /*DiscardedValue*/ false);
if (Tail.isUsable())
- Data.TailExpr = Tail.get();
+ Data.DepModOrTailExpr = Tail.get();
else
SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
StopBeforeMatch);
@@ -2922,16 +3544,17 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
Data.RLoc = Tok.getLocation();
if (!T.consumeClose())
Data.RLoc = T.getCloseLocation();
- return (Kind == OMPC_depend && Data.ExtraModifier != OMPC_DEPEND_unknown &&
- Vars.empty()) ||
- (Kind != OMPC_depend && Kind != OMPC_map && Vars.empty()) ||
- (MustHaveTail && !Data.TailExpr) || InvalidReductionId ||
+ // Exit from scope when the iterator is used in depend clause.
+ if (HasIterator)
+ ExitScope();
+ return (Kind != OMPC_depend && Kind != OMPC_map && Vars.empty()) ||
+ (MustHaveTail && !Data.DepModOrTailExpr) || InvalidReductionId ||
IsInvalidMapperModifier;
}
/// Parsing of OpenMP clause 'private', 'firstprivate', 'lastprivate',
-/// 'shared', 'copyin', 'copyprivate', 'flush', 'reduction', 'task_reduction' or
-/// 'in_reduction'.
+/// 'shared', 'copyin', 'copyprivate', 'flush', 'reduction', 'task_reduction',
+/// 'in_reduction', 'nontemporal', 'exclusive' or 'inclusive'.
///
/// private-clause:
/// 'private' '(' list ')'
@@ -2946,7 +3569,7 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
/// aligned-clause:
/// 'aligned' '(' list [ ':' alignment ] ')'
/// reduction-clause:
-/// 'reduction' '(' reduction-identifier ':' list ')'
+/// 'reduction' '(' [ modifier ',' ] reduction-identifier ':' list ')'
/// task_reduction-clause:
/// 'task_reduction' '(' reduction-identifier ':' list ')'
/// in_reduction-clause:
@@ -2967,10 +3590,18 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
/// 'from' '(' [ mapper '(' mapper-identifier ')' ':' ] list ')'
/// use_device_ptr-clause:
/// 'use_device_ptr' '(' list ')'
+/// use_device_addr-clause:
+/// 'use_device_addr' '(' list ')'
/// is_device_ptr-clause:
/// 'is_device_ptr' '(' list ')'
/// allocate-clause:
/// 'allocate' '(' [ allocator ':' ] list ')'
+/// nontemporal-clause:
+/// 'nontemporal' '(' list ')'
+/// inclusive-clause:
+/// 'inclusive' '(' list ')'
+/// exclusive-clause:
+/// 'exclusive' '(' list ')'
///
/// For 'linear' clause linear-list may have the following forms:
/// list
@@ -2991,9 +3622,9 @@ OMPClause *Parser::ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
return nullptr;
OMPVarListLocTy Locs(Loc, LOpen, Data.RLoc);
return Actions.ActOnOpenMPVarListClause(
- Kind, Vars, Data.TailExpr, Locs, Data.ColonLoc,
+ Kind, Vars, Data.DepModOrTailExpr, Locs, Data.ColonLoc,
Data.ReductionOrMapperIdScopeSpec, Data.ReductionOrMapperId,
Data.ExtraModifier, Data.MapTypeModifiers, Data.MapTypeModifiersLoc,
- Data.IsMapTypeImplicit, Data.DepLinMapLastLoc);
+ Data.IsMapTypeImplicit, Data.ExtraModifierLoc);
}
diff --git a/clang/lib/Parse/ParsePragma.cpp b/clang/lib/Parse/ParsePragma.cpp
index df411e1928d6..6402b31d00b2 100644
--- a/clang/lib/Parse/ParsePragma.cpp
+++ b/clang/lib/Parse/ParsePragma.cpp
@@ -108,6 +108,7 @@ struct PragmaSTDC_FENV_ACCESSHandler : public PragmaHandler {
return;
if (OOS == tok::OOS_ON) {
PP.Diag(Tok, diag::warn_stdc_fenv_access_not_supported);
+ return;
}
MutableArrayRef<Token> Toks(PP.getPreprocessorAllocator().Allocate<Token>(1),
@@ -184,6 +185,13 @@ private:
Sema &Actions;
};
+struct PragmaFloatControlHandler : public PragmaHandler {
+ PragmaFloatControlHandler(Sema &Actions)
+ : PragmaHandler("float_control") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
+ Token &FirstToken) override;
+};
+
struct PragmaMSPointersToMembers : public PragmaHandler {
explicit PragmaMSPointersToMembers() : PragmaHandler("pointers_to_members") {}
void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
@@ -262,6 +270,18 @@ struct PragmaAttributeHandler : public PragmaHandler {
ParsedAttributes AttributesForPragmaAttribute;
};
+struct PragmaMaxTokensHereHandler : public PragmaHandler {
+ PragmaMaxTokensHereHandler() : PragmaHandler("max_tokens_here") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
+ Token &FirstToken) override;
+};
+
+struct PragmaMaxTokensTotalHandler : public PragmaHandler {
+ PragmaMaxTokensTotalHandler() : PragmaHandler("max_tokens_total") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
+ Token &FirstToken) override;
+};
+
} // end namespace
void Parser::initializePragmaHandlers() {
@@ -322,6 +342,8 @@ void Parser::initializePragmaHandlers() {
PP.AddPragmaHandler(MSCommentHandler.get());
}
+ FloatControlHandler = std::make_unique<PragmaFloatControlHandler>(Actions);
+ PP.AddPragmaHandler(FloatControlHandler.get());
if (getLangOpts().MicrosoftExt) {
MSDetectMismatchHandler =
std::make_unique<PragmaDetectMismatchHandler>(Actions);
@@ -382,6 +404,12 @@ void Parser::initializePragmaHandlers() {
AttributePragmaHandler =
std::make_unique<PragmaAttributeHandler>(AttrFactory);
PP.AddPragmaHandler("clang", AttributePragmaHandler.get());
+
+ MaxTokensHerePragmaHandler = std::make_unique<PragmaMaxTokensHereHandler>();
+ PP.AddPragmaHandler("clang", MaxTokensHerePragmaHandler.get());
+
+ MaxTokensTotalPragmaHandler = std::make_unique<PragmaMaxTokensTotalHandler>();
+ PP.AddPragmaHandler("clang", MaxTokensTotalPragmaHandler.get());
}
void Parser::resetPragmaHandlers() {
@@ -420,6 +448,8 @@ void Parser::resetPragmaHandlers() {
PP.RemovePragmaHandler("clang", PCSectionHandler.get());
PCSectionHandler.reset();
+ PP.RemovePragmaHandler(FloatControlHandler.get());
+ FloatControlHandler.reset();
if (getLangOpts().MicrosoftExt) {
PP.RemovePragmaHandler(MSDetectMismatchHandler.get());
MSDetectMismatchHandler.reset();
@@ -487,6 +517,12 @@ void Parser::resetPragmaHandlers() {
PP.RemovePragmaHandler("clang", AttributePragmaHandler.get());
AttributePragmaHandler.reset();
+
+ PP.RemovePragmaHandler("clang", MaxTokensHerePragmaHandler.get());
+ MaxTokensHerePragmaHandler.reset();
+
+ PP.RemovePragmaHandler("clang", MaxTokensTotalPragmaHandler.get());
+ MaxTokensTotalPragmaHandler.reset();
}
/// Handle the annotation token produced for #pragma unused(...)
@@ -605,21 +641,37 @@ void Parser::HandlePragmaFPContract() {
static_cast<tok::OnOffSwitch>(
reinterpret_cast<uintptr_t>(Tok.getAnnotationValue()));
- LangOptions::FPContractModeKind FPC;
+ LangOptions::FPModeKind FPC;
switch (OOS) {
case tok::OOS_ON:
- FPC = LangOptions::FPC_On;
+ FPC = LangOptions::FPM_On;
break;
case tok::OOS_OFF:
- FPC = LangOptions::FPC_Off;
+ FPC = LangOptions::FPM_Off;
break;
case tok::OOS_DEFAULT:
FPC = getLangOpts().getDefaultFPContractMode();
break;
}
- Actions.ActOnPragmaFPContract(FPC);
- ConsumeAnnotationToken();
+ SourceLocation PragmaLoc = ConsumeAnnotationToken();
+ Actions.ActOnPragmaFPContract(PragmaLoc, FPC);
+}
+
+void Parser::HandlePragmaFloatControl() {
+ assert(Tok.is(tok::annot_pragma_float_control));
+
+ // The value that is held on the PragmaFloatControlStack encodes
+ // the PragmaFloatControl kind and the MSStackAction kind
+ // into a single 32-bit word. The MsStackAction is the high 16 bits
+ // and the FloatControl is the lower 16 bits. Use shift and bit-and
+ // to decode the parts.
+ uintptr_t Value = reinterpret_cast<uintptr_t>(Tok.getAnnotationValue());
+ Sema::PragmaMsStackAction Action =
+ static_cast<Sema::PragmaMsStackAction>((Value >> 16) & 0xFFFF);
+ PragmaFloatControlKind Kind = PragmaFloatControlKind(Value & 0xFFFF);
+ SourceLocation PragmaLoc = ConsumeAnnotationToken();
+ Actions.ActOnPragmaFloatControl(PragmaLoc, Action, Kind);
}
void Parser::HandlePragmaFEnvAccess() {
@@ -628,21 +680,21 @@ void Parser::HandlePragmaFEnvAccess() {
static_cast<tok::OnOffSwitch>(
reinterpret_cast<uintptr_t>(Tok.getAnnotationValue()));
- LangOptions::FEnvAccessModeKind FPC;
+ bool IsEnabled;
switch (OOS) {
case tok::OOS_ON:
- FPC = LangOptions::FEA_On;
+ IsEnabled = true;
break;
case tok::OOS_OFF:
- FPC = LangOptions::FEA_Off;
+ IsEnabled = false;
break;
case tok::OOS_DEFAULT: // FIXME: Add this cli option when it makes sense.
- FPC = LangOptions::FEA_Off;
+ IsEnabled = false;
break;
}
- Actions.ActOnPragmaFEnvAccess(FPC);
- ConsumeAnnotationToken();
+ SourceLocation PragmaLoc = ConsumeAnnotationToken();
+ Actions.ActOnPragmaFEnvAccess(PragmaLoc, IsEnabled);
}
@@ -1008,11 +1060,11 @@ struct PragmaLoopHintInfo {
static std::string PragmaLoopHintString(Token PragmaName, Token Option) {
StringRef Str = PragmaName.getIdentifierInfo()->getName();
std::string ClangLoopStr = (llvm::Twine("clang loop ") + Str).str();
- return llvm::StringSwitch<StringRef>(Str)
- .Case("loop", ClangLoopStr)
- .Case("unroll_and_jam", Str)
- .Case("unroll", Str)
- .Default("");
+ return std::string(llvm::StringSwitch<StringRef>(Str)
+ .Case("loop", ClangLoopStr)
+ .Case("unroll_and_jam", Str)
+ .Case("unroll", Str)
+ .Default(""));
}
bool Parser::HandlePragmaLoopHint(LoopHint &Hint) {
@@ -1821,6 +1873,7 @@ void PragmaClangSectionHandler::HandlePragma(Preprocessor &PP,
return;
}
+ SourceLocation PragmaLocation = Tok.getLocation();
PP.Lex(Tok); // eat ['bss'|'data'|'rodata'|'text']
if (Tok.isNot(tok::equal)) {
PP.Diag(Tok.getLocation(), diag::err_pragma_clang_section_expected_equal) << SecKind;
@@ -1831,10 +1884,11 @@ void PragmaClangSectionHandler::HandlePragma(Preprocessor &PP,
if (!PP.LexStringLiteral(Tok, SecName, "pragma clang section", false))
return;
- Actions.ActOnPragmaClangSection(Tok.getLocation(),
- (SecName.size()? Sema::PragmaClangSectionAction::PCSA_Set :
- Sema::PragmaClangSectionAction::PCSA_Clear),
- SecKind, SecName);
+ Actions.ActOnPragmaClangSection(
+ PragmaLocation,
+ (SecName.size() ? Sema::PragmaClangSectionAction::PCSA_Set
+ : Sema::PragmaClangSectionAction::PCSA_Clear),
+ SecKind, SecName);
}
}
@@ -2465,6 +2519,129 @@ void PragmaMSPragma::HandlePragma(Preprocessor &PP,
PP.EnterToken(AnnotTok, /*IsReinject*/ false);
}
+/// Handle the \#pragma float_control extension.
+///
+/// The syntax is:
+/// \code
+/// #pragma float_control(keyword[, setting] [,push])
+/// \endcode
+/// Where 'keyword' and 'setting' are identifiers.
+// 'keyword' can be: precise, except, push, pop
+// 'setting' can be: on, off
+/// The optional arguments 'setting' and 'push' are supported only
+/// when the keyword is 'precise' or 'except'.
+void PragmaFloatControlHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducer Introducer,
+ Token &Tok) {
+ Sema::PragmaMsStackAction Action = Sema::PSK_Set;
+ SourceLocation FloatControlLoc = Tok.getLocation();
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(FloatControlLoc, diag::err_expected) << tok::l_paren;
+ return;
+ }
+
+ // Read the identifier.
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_float_control_malformed);
+ return;
+ }
+
+ // Verify that this is one of the float control options.
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ PragmaFloatControlKind Kind =
+ llvm::StringSwitch<PragmaFloatControlKind>(II->getName())
+ .Case("precise", PFC_Precise)
+ .Case("except", PFC_Except)
+ .Case("push", PFC_Push)
+ .Case("pop", PFC_Pop)
+ .Default(PFC_Unknown);
+ PP.Lex(Tok); // the identifier
+ if (Kind == PFC_Unknown) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_float_control_malformed);
+ return;
+ } else if (Kind == PFC_Push || Kind == PFC_Pop) {
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_float_control_malformed);
+ return;
+ }
+ PP.Lex(Tok); // Eat the r_paren
+ Action = (Kind == PFC_Pop) ? Sema::PSK_Pop : Sema::PSK_Push;
+ } else {
+ if (Tok.is(tok::r_paren))
+ // Selecting Precise or Except
+ PP.Lex(Tok); // the r_paren
+ else if (Tok.isNot(tok::comma)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_float_control_malformed);
+ return;
+ } else {
+ PP.Lex(Tok); // ,
+ if (!Tok.isAnyIdentifier()) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_float_control_malformed);
+ return;
+ }
+ StringRef PushOnOff = Tok.getIdentifierInfo()->getName();
+ if (PushOnOff == "on")
+ // Kind is set correctly
+ ;
+ else if (PushOnOff == "off") {
+ if (Kind == PFC_Precise)
+ Kind = PFC_NoPrecise;
+ if (Kind == PFC_Except)
+ Kind = PFC_NoExcept;
+ } else if (PushOnOff == "push") {
+ Action = Sema::PSK_Push_Set;
+ } else {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_float_control_malformed);
+ return;
+ }
+ PP.Lex(Tok); // the identifier
+ if (Tok.is(tok::comma)) {
+ PP.Lex(Tok); // ,
+ if (!Tok.isAnyIdentifier()) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_float_control_malformed);
+ return;
+ }
+ StringRef ExpectedPush = Tok.getIdentifierInfo()->getName();
+ if (ExpectedPush == "push") {
+ Action = Sema::PSK_Push_Set;
+ } else {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_float_control_malformed);
+ return;
+ }
+ PP.Lex(Tok); // the push identifier
+ }
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_float_control_malformed);
+ return;
+ }
+ PP.Lex(Tok); // the r_paren
+ }
+ }
+ SourceLocation EndLoc = Tok.getLocation();
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << "float_control";
+ return;
+ }
+
+  // Note: there is no accommodation for PP callback for this pragma.
+
+ // Enter the annotation.
+ auto TokenArray = std::make_unique<Token[]>(1);
+ TokenArray[0].startToken();
+ TokenArray[0].setKind(tok::annot_pragma_float_control);
+ TokenArray[0].setLocation(FloatControlLoc);
+ TokenArray[0].setAnnotationEndLoc(EndLoc);
+ // Create an encoding of Action and Value by shifting the Action into
+ // the high 16 bits then union with the Kind.
+ TokenArray[0].setAnnotationValue(reinterpret_cast<void *>(
+ static_cast<uintptr_t>((Action << 16) | (Kind & 0xFFFF))));
+ PP.EnterTokenStream(std::move(TokenArray), 1,
+ /*DisableMacroExpansion=*/false, /*IsReinject=*/false);
+}
+
/// Handle the Microsoft \#pragma detect_mismatch extension.
///
/// The syntax is:
@@ -2548,7 +2725,7 @@ void PragmaCommentHandler::HandlePragma(Preprocessor &PP,
return;
}
- // Verify that this is one of the 5 whitelisted options.
+ // Verify that this is one of the 5 explicitly listed options.
IdentifierInfo *II = Tok.getIdentifierInfo();
PragmaMSCommentKind Kind =
llvm::StringSwitch<PragmaMSCommentKind>(II->getName())
@@ -2589,7 +2766,7 @@ void PragmaCommentHandler::HandlePragma(Preprocessor &PP,
// FIXME: If the kind is "compiler" warn if the string is present (it is
// ignored).
// The MSDN docs say that "lib" and "linker" require a string and have a short
- // whitelist of linker options they support, but in practice MSVC doesn't
+ // list of linker options they support, but in practice MSVC doesn't
// issue a diagnostic. Therefore neither does clang.
if (Tok.isNot(tok::r_paren)) {
@@ -2651,7 +2828,7 @@ void PragmaOptimizeHandler::HandlePragma(Preprocessor &PP,
namespace {
/// Used as the annotation value for tok::annot_pragma_fp.
struct TokFPAnnotValue {
- enum FlagKinds { Contract };
+ enum FlagKinds { Contract, Reassociate };
enum FlagValues { On, Off, Fast };
FlagKinds FlagKind;
@@ -2679,6 +2856,7 @@ void PragmaFPHandler::HandlePragma(Preprocessor &PP,
llvm::StringSwitch<llvm::Optional<TokFPAnnotValue::FlagKinds>>(
OptionInfo->getName())
.Case("contract", TokFPAnnotValue::Contract)
+ .Case("reassociate", TokFPAnnotValue::Reassociate)
.Default(None);
if (!FlagKind) {
PP.Diag(Tok.getLocation(), diag::err_pragma_fp_invalid_option)
@@ -2696,7 +2874,8 @@ void PragmaFPHandler::HandlePragma(Preprocessor &PP,
if (Tok.isNot(tok::identifier)) {
PP.Diag(Tok.getLocation(), diag::err_pragma_fp_invalid_argument)
- << PP.getSpelling(Tok) << OptionInfo->getName();
+ << PP.getSpelling(Tok) << OptionInfo->getName()
+ << (FlagKind == TokFPAnnotValue::Reassociate);
return;
}
const IdentifierInfo *II = Tok.getIdentifierInfo();
@@ -2709,9 +2888,11 @@ void PragmaFPHandler::HandlePragma(Preprocessor &PP,
.Case("fast", TokFPAnnotValue::Fast)
.Default(llvm::None);
- if (!FlagValue) {
+ if (!FlagValue || (FlagKind == TokFPAnnotValue::Reassociate &&
+ FlagValue == TokFPAnnotValue::Fast)) {
PP.Diag(Tok.getLocation(), diag::err_pragma_fp_invalid_argument)
- << PP.getSpelling(Tok) << OptionInfo->getName();
+ << PP.getSpelling(Tok) << OptionInfo->getName()
+ << (FlagKind == TokFPAnnotValue::Reassociate);
return;
}
PP.Lex(Tok);
@@ -2725,7 +2906,7 @@ void PragmaFPHandler::HandlePragma(Preprocessor &PP,
auto *AnnotValue = new (PP.getPreprocessorAllocator())
TokFPAnnotValue{*FlagKind, *FlagValue};
- // Generate the loop hint token.
+ // Generate the fp annotation token.
Token FPTok;
FPTok.startToken();
FPTok.setKind(tok::annot_pragma_fp);
@@ -2753,20 +2934,24 @@ void Parser::HandlePragmaFP() {
auto *AnnotValue =
reinterpret_cast<TokFPAnnotValue *>(Tok.getAnnotationValue());
- LangOptions::FPContractModeKind FPC;
- switch (AnnotValue->FlagValue) {
- case TokFPAnnotValue::On:
- FPC = LangOptions::FPC_On;
- break;
- case TokFPAnnotValue::Fast:
- FPC = LangOptions::FPC_Fast;
- break;
- case TokFPAnnotValue::Off:
- FPC = LangOptions::FPC_Off;
- break;
+ if (AnnotValue->FlagKind == TokFPAnnotValue::Reassociate)
+ Actions.ActOnPragmaFPReassociate(
+ Tok.getLocation(), AnnotValue->FlagValue == TokFPAnnotValue::On);
+ else {
+ LangOptions::FPModeKind FPC;
+ switch (AnnotValue->FlagValue) {
+ case TokFPAnnotValue::Off:
+ FPC = LangOptions::FPM_Off;
+ break;
+ case TokFPAnnotValue::On:
+ FPC = LangOptions::FPM_On;
+ break;
+ case TokFPAnnotValue::Fast:
+ FPC = LangOptions::FPM_Fast;
+ break;
+ }
+ Actions.ActOnPragmaFPContract(Tok.getLocation(), FPC);
}
-
- Actions.ActOnPragmaFPContract(FPC);
ConsumeAnnotationToken();
}
@@ -2914,7 +3099,7 @@ void PragmaLoopHintHandler::HandlePragma(Preprocessor &PP,
Token LoopHintTok;
LoopHintTok.startToken();
LoopHintTok.setKind(tok::annot_pragma_loop_hint);
- LoopHintTok.setLocation(PragmaName.getLocation());
+ LoopHintTok.setLocation(Introducer.Loc);
LoopHintTok.setAnnotationEndLoc(PragmaName.getLocation());
LoopHintTok.setAnnotationValue(static_cast<void *>(Info));
TokenList.push_back(LoopHintTok);
@@ -3001,7 +3186,7 @@ void PragmaUnrollHintHandler::HandlePragma(Preprocessor &PP,
auto TokenArray = std::make_unique<Token[]>(1);
TokenArray[0].startToken();
TokenArray[0].setKind(tok::annot_pragma_loop_hint);
- TokenArray[0].setLocation(PragmaName.getLocation());
+ TokenArray[0].setLocation(Introducer.Loc);
TokenArray[0].setAnnotationEndLoc(PragmaName.getLocation());
TokenArray[0].setAnnotationValue(static_cast<void *>(Info));
PP.EnterTokenStream(std::move(TokenArray), 1,
@@ -3279,3 +3464,64 @@ void PragmaAttributeHandler::HandlePragma(Preprocessor &PP,
PP.EnterTokenStream(std::move(TokenArray), 1,
/*DisableMacroExpansion=*/false, /*IsReinject=*/false);
}
+
+// Handle '#pragma clang max_tokens 12345'.
+void PragmaMaxTokensHereHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducer Introducer,
+ Token &Tok) {
+ PP.Lex(Tok);
+ if (Tok.is(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_missing_argument)
+ << "clang max_tokens_here" << /*Expected=*/true << "integer";
+ return;
+ }
+
+ SourceLocation Loc = Tok.getLocation();
+ uint64_t MaxTokens;
+ if (Tok.isNot(tok::numeric_constant) ||
+ !PP.parseSimpleIntegerLiteral(Tok, MaxTokens)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_expected_integer)
+ << "clang max_tokens_here";
+ return;
+ }
+
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << "clang max_tokens_here";
+ return;
+ }
+
+ if (PP.getTokenCount() > MaxTokens) {
+ PP.Diag(Loc, diag::warn_max_tokens)
+ << PP.getTokenCount() << (unsigned)MaxTokens;
+ }
+}
+
+// Handle '#pragma clang max_tokens_total 12345'.
+void PragmaMaxTokensTotalHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducer Introducer,
+ Token &Tok) {
+ PP.Lex(Tok);
+ if (Tok.is(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_missing_argument)
+ << "clang max_tokens_total" << /*Expected=*/true << "integer";
+ return;
+ }
+
+ SourceLocation Loc = Tok.getLocation();
+ uint64_t MaxTokens;
+ if (Tok.isNot(tok::numeric_constant) ||
+ !PP.parseSimpleIntegerLiteral(Tok, MaxTokens)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_expected_integer)
+ << "clang max_tokens_total";
+ return;
+ }
+
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << "clang max_tokens_total";
+ return;
+ }
+
+ PP.overrideMaxTokens(MaxTokens, Loc);
+}
diff --git a/clang/lib/Parse/ParseStmt.cpp b/clang/lib/Parse/ParseStmt.cpp
index 0339328ca513..89a6a2b829ae 100644
--- a/clang/lib/Parse/ParseStmt.cpp
+++ b/clang/lib/Parse/ParseStmt.cpp
@@ -105,6 +105,7 @@ Parser::ParseStatementOrDeclaration(StmtVector &Stmts,
StmtResult Res = ParseStatementOrDeclarationAfterAttributes(
Stmts, StmtCtx, TrailingElseLoc, Attrs);
+ MaybeDestroyTemplateIds();
assert((Attrs.empty() || Res.isInvalid() || Res.isUsable()) &&
"attributes on empty statement");
@@ -353,13 +354,13 @@ Retry:
case tok::annot_pragma_fp_contract:
ProhibitAttributes(Attrs);
- Diag(Tok, diag::err_pragma_fp_contract_scope);
+ Diag(Tok, diag::err_pragma_file_or_compound_scope) << "fp_contract";
ConsumeAnnotationToken();
return StmtError();
case tok::annot_pragma_fp:
ProhibitAttributes(Attrs);
- Diag(Tok, diag::err_pragma_fp_scope);
+ Diag(Tok, diag::err_pragma_file_or_compound_scope) << "clang fp";
ConsumeAnnotationToken();
return StmtError();
@@ -368,6 +369,12 @@ Retry:
HandlePragmaFEnvAccess();
return StmtEmpty();
+ case tok::annot_pragma_float_control:
+ ProhibitAttributes(Attrs);
+ Diag(Tok, diag::err_pragma_file_or_compound_scope) << "float_control";
+ ConsumeAnnotationToken();
+ return StmtError();
+
case tok::annot_pragma_opencl_extension:
ProhibitAttributes(Attrs);
HandlePragmaOpenCLExtension();
@@ -936,6 +943,9 @@ void Parser::ParseCompoundStatementLeadingPragmas() {
case tok::annot_pragma_fenv_access:
HandlePragmaFEnvAccess();
break;
+ case tok::annot_pragma_float_control:
+ HandlePragmaFloatControl();
+ break;
case tok::annot_pragma_ms_pointers_to_members:
HandlePragmaMSPointersToMembers();
break;
@@ -1014,9 +1024,9 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
Tok.getLocation(),
"in compound statement ('{}')");
- // Record the state of the FP_CONTRACT pragma, restore on leaving the
+ // Record the state of the FPFeatures, restore on leaving the
// compound statement.
- Sema::FPContractStateRAII SaveFPContractState(Actions);
+ Sema::FPFeaturesStateRAII SaveFPContractState(Actions);
InMessageExpressionRAIIObject InMessage(*this, false);
BalancedDelimiterTracker T(*this, tok::l_brace);
@@ -1146,10 +1156,14 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
/// should try to recover harder. It returns false if the condition is
/// successfully parsed. Note that a successful parse can still have semantic
/// errors in the condition.
+/// Additionally, if LParenLoc and RParenLoc are non-null, it will assign
+/// the location of the outer-most '(' and ')', respectively, to them.
bool Parser::ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &Cond,
SourceLocation Loc,
- Sema::ConditionKind CK) {
+ Sema::ConditionKind CK,
+ SourceLocation *LParenLoc,
+ SourceLocation *RParenLoc) {
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
@@ -1179,6 +1193,13 @@ bool Parser::ParseParenExprOrCondition(StmtResult *InitStmt,
// Otherwise the condition is valid or the rparen is present.
T.consumeClose();
+ if (LParenLoc != nullptr) {
+ *LParenLoc = T.getOpenLocation();
+ }
+ if (RParenLoc != nullptr) {
+ *RParenLoc = T.getCloseLocation();
+ }
+
// Check for extraneous ')'s to catch things like "if (foo())) {". We know
// that all callers are looking for a statement after the condition, so ")"
// isn't valid.
@@ -1338,6 +1359,8 @@ StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
if (IsConstexpr)
ConstexprCondition = Cond.getKnownValue();
+ bool IsBracedThen = Tok.is(tok::l_brace);
+
// C99 6.8.4p3 - In C99, the body of the if statement is a scope, even if
// there is no compound stmt. C90 does not have this clause. We only do this
// if the body isn't a compound statement to avoid push/pop in common cases.
@@ -1356,7 +1379,7 @@ StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
// would have to notify ParseStatement not to create a new scope. It's
// simpler to let it create a new scope.
//
- ParseScope InnerScope(this, Scope::DeclScope, C99orCXX, Tok.is(tok::l_brace));
+ ParseScope InnerScope(this, Scope::DeclScope, C99orCXX, IsBracedThen);
MisleadingIndentationChecker MIChecker(*this, MSK_if, IfLoc);
@@ -1417,7 +1440,7 @@ StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
// Pop the 'else' scope if needed.
InnerScope.Exit();
} else if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteAfterIf(getCurScope());
+ Actions.CodeCompleteAfterIf(getCurScope(), IsBracedThen);
cutOffParsing();
return StmtError();
} else if (InnerStatementTrailingElseLoc.isValid()) {
@@ -1570,8 +1593,10 @@ StmtResult Parser::ParseWhileStatement(SourceLocation *TrailingElseLoc) {
// Parse the condition.
Sema::ConditionResult Cond;
+ SourceLocation LParen;
+ SourceLocation RParen;
if (ParseParenExprOrCondition(nullptr, Cond, WhileLoc,
- Sema::ConditionKind::Boolean))
+ Sema::ConditionKind::Boolean, &LParen, &RParen))
return StmtError();
// C99 6.8.5p5 - In C99, the body of the while statement is a scope, even if
@@ -1601,7 +1626,7 @@ StmtResult Parser::ParseWhileStatement(SourceLocation *TrailingElseLoc) {
if (Cond.isInvalid() || Body.isInvalid())
return StmtError();
- return Actions.ActOnWhileStmt(WhileLoc, Cond, Body.get());
+ return Actions.ActOnWhileStmt(WhileLoc, LParen, Cond, RParen, Body.get());
}
/// ParseDoStatement
@@ -1921,7 +1946,7 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
if (ForRangeInfo.ParsedForRangeDecl()) {
Diag(FirstPart.get() ? FirstPart.get()->getBeginLoc()
: ForRangeInfo.ColonLoc,
- getLangOpts().CPlusPlus2a
+ getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_for_range_init_stmt
: diag::ext_for_range_init_stmt)
<< (FirstPart.get() ? FirstPart.get()->getSourceRange()
@@ -2162,6 +2187,8 @@ StmtResult Parser::ParsePragmaLoopHint(StmtVector &Stmts,
// Create temporary attribute list.
ParsedAttributesWithRange TempAttrs(AttrFactory);
+ SourceLocation StartLoc = Tok.getLocation();
+
// Get loop hints and consume annotated token.
while (Tok.is(tok::annot_pragma_loop_hint)) {
LoopHint Hint;
@@ -2182,6 +2209,12 @@ StmtResult Parser::ParsePragmaLoopHint(StmtVector &Stmts,
Stmts, StmtCtx, TrailingElseLoc, Attrs);
Attrs.takeAllFrom(TempAttrs);
+
+ // Start of attribute range may already be set for some invalid input.
+ // See PR46336.
+ if (Attrs.Range.getBegin().isInvalid())
+ Attrs.Range.setBegin(StartLoc);
+
return S;
}
diff --git a/clang/lib/Parse/ParseStmtAsm.cpp b/clang/lib/Parse/ParseStmtAsm.cpp
index ea2c871d6a82..7d0818840a4f 100644
--- a/clang/lib/Parse/ParseStmtAsm.cpp
+++ b/clang/lib/Parse/ParseStmtAsm.cpp
@@ -220,9 +220,10 @@ ExprResult Parser::ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
// Parse an optional scope-specifier if we're in C++.
CXXScopeSpec SS;
- if (getLangOpts().CPlusPlus) {
- ParseOptionalCXXScopeSpecifier(SS, nullptr, /*EnteringContext=*/false);
- }
+ if (getLangOpts().CPlusPlus)
+ ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ /*EnteringContext=*/false);
// Require an identifier here.
SourceLocation TemplateKWLoc;
@@ -233,12 +234,13 @@ ExprResult Parser::ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
Result = ParseCXXThis();
Invalid = false;
} else {
- Invalid = ParseUnqualifiedId(SS,
- /*EnteringContext=*/false,
- /*AllowDestructorName=*/false,
- /*AllowConstructorName=*/false,
- /*AllowDeductionGuide=*/false,
- /*ObjectType=*/nullptr, &TemplateKWLoc, Id);
+ Invalid =
+ ParseUnqualifiedId(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ /*EnteringContext=*/false,
+ /*AllowDestructorName=*/false,
+ /*AllowConstructorName=*/false,
+ /*AllowDeductionGuide=*/false, &TemplateKWLoc, Id);
// Perform the lookup.
Result = Actions.LookupInlineAsmIdentifier(SS, TemplateKWLoc, Id,
IsUnevaluatedContext);
@@ -349,31 +351,13 @@ static bool buildMSAsmString(Preprocessor &PP, SourceLocation AsmLoc,
return false;
}
-/// isTypeQualifier - Return true if the current token could be the
-/// start of a type-qualifier-list.
-static bool isTypeQualifier(const Token &Tok) {
- switch (Tok.getKind()) {
- default: return false;
- // type-qualifier
- case tok::kw_const:
- case tok::kw_volatile:
- case tok::kw_restrict:
- case tok::kw___private:
- case tok::kw___local:
- case tok::kw___global:
- case tok::kw___constant:
- case tok::kw___generic:
- case tok::kw___read_only:
- case tok::kw___read_write:
- case tok::kw___write_only:
- return true;
- }
+// Determine if this is a GCC-style asm statement.
+bool Parser::isGCCAsmStatement(const Token &TokAfterAsm) const {
+ return TokAfterAsm.is(tok::l_paren) || isGNUAsmQualifier(TokAfterAsm);
}
-// Determine if this is a GCC-style asm statement.
-static bool isGCCAsmStatement(const Token &TokAfterAsm) {
- return TokAfterAsm.is(tok::l_paren) || TokAfterAsm.is(tok::kw_goto) ||
- isTypeQualifier(TokAfterAsm);
+bool Parser::isGNUAsmQualifier(const Token &TokAfterAsm) const {
+ return getGNUAsmQualifier(TokAfterAsm) != GNUAsmQualifiers::AQ_unspecified;
}
/// ParseMicrosoftAsmStatement. When -fms-extensions/-fasm-blocks is enabled,
@@ -631,8 +615,8 @@ StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
// Change to the Intel dialect.
Parser->setAssemblerDialect(1);
Parser->setTargetParser(*TargetParser.get());
- Parser->setParsingInlineAsm(true);
- TargetParser->setParsingInlineAsm(true);
+ Parser->setParsingMSInlineAsm(true);
+ TargetParser->setParsingMSInlineAsm(true);
ClangAsmParserCallback Callback(*this, AsmLoc, AsmString, AsmToks,
TokOffsets);
@@ -684,13 +668,41 @@ StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
ClobberRefs, Exprs, EndLoc);
}
+/// parseGNUAsmQualifierListOpt - Parse a GNU extended asm qualifier list.
+/// asm-qualifier:
+/// volatile
+/// inline
+/// goto
+///
+/// asm-qualifier-list:
+/// asm-qualifier
+/// asm-qualifier-list asm-qualifier
+bool Parser::parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ) {
+ while (1) {
+ const GNUAsmQualifiers::AQ A = getGNUAsmQualifier(Tok);
+ if (A == GNUAsmQualifiers::AQ_unspecified) {
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok.getLocation(), diag::err_asm_qualifier_ignored);
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return true;
+ }
+ return false;
+ }
+ if (AQ.setAsmQualifier(A))
+ Diag(Tok.getLocation(), diag::err_asm_duplicate_qual)
+ << GNUAsmQualifiers::getQualifierName(A);
+ ConsumeToken();
+ }
+ return false;
+}
+
/// ParseAsmStatement - Parse a GNU extended asm statement.
/// asm-statement:
/// gnu-asm-statement
/// ms-asm-statement
///
/// [GNU] gnu-asm-statement:
-/// 'asm' type-qualifier[opt] '(' asm-argument ')' ';'
+/// 'asm' asm-qualifier-list[opt] '(' asm-argument ')' ';'
///
/// [GNU] asm-argument:
/// asm-string-literal
@@ -712,34 +724,14 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) {
return ParseMicrosoftAsmStatement(AsmLoc);
}
- DeclSpec DS(AttrFactory);
SourceLocation Loc = Tok.getLocation();
- ParseTypeQualifierListOpt(DS, AR_VendorAttributesParsed);
-
- // GNU asms accept, but warn, about type-qualifiers other than volatile.
- if (DS.getTypeQualifiers() & DeclSpec::TQ_const)
- Diag(Loc, diag::warn_asm_qualifier_ignored) << "const";
- if (DS.getTypeQualifiers() & DeclSpec::TQ_restrict)
- Diag(Loc, diag::warn_asm_qualifier_ignored) << "restrict";
- // FIXME: Once GCC supports _Atomic, check whether it permits it here.
- if (DS.getTypeQualifiers() & DeclSpec::TQ_atomic)
- Diag(Loc, diag::warn_asm_qualifier_ignored) << "_Atomic";
-
- // Remember if this was a volatile asm.
- bool isVolatile = DS.getTypeQualifiers() & DeclSpec::TQ_volatile;
- // Remember if this was a goto asm.
- bool isGotoAsm = false;
-
- if (Tok.is(tok::kw_goto)) {
- isGotoAsm = true;
- ConsumeToken();
- }
-
- if (Tok.isNot(tok::l_paren)) {
- Diag(Tok, diag::err_expected_lparen_after) << "asm";
- SkipUntil(tok::r_paren, StopAtSemi);
+ GNUAsmQualifiers GAQ;
+ if (parseGNUAsmQualifierListOpt(GAQ))
return StmtError();
- }
+
+ if (GAQ.isGoto() && getLangOpts().SpeculativeLoadHardening)
+ Diag(Loc, diag::warn_slh_does_not_support_asm_goto);
+
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
@@ -767,11 +759,10 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) {
if (Tok.is(tok::r_paren)) {
// We have a simple asm expression like 'asm("foo")'.
T.consumeClose();
- return Actions.ActOnGCCAsmStmt(AsmLoc, /*isSimple*/ true, isVolatile,
- /*NumOutputs*/ 0, /*NumInputs*/ 0, nullptr,
- Constraints, Exprs, AsmString.get(),
- Clobbers, /*NumLabels*/ 0,
- T.getCloseLocation());
+ return Actions.ActOnGCCAsmStmt(
+ AsmLoc, /*isSimple*/ true, GAQ.isVolatile(),
+ /*NumOutputs*/ 0, /*NumInputs*/ 0, nullptr, Constraints, Exprs,
+ AsmString.get(), Clobbers, /*NumLabels*/ 0, T.getCloseLocation());
}
// Parse Outputs, if present.
@@ -781,12 +772,6 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) {
AteExtraColon = Tok.is(tok::coloncolon);
ConsumeToken();
- if (!AteExtraColon && isGotoAsm && Tok.isNot(tok::colon)) {
- Diag(Tok, diag::err_asm_goto_cannot_have_output);
- SkipUntil(tok::r_paren, StopAtSemi);
- return StmtError();
- }
-
if (!AteExtraColon && ParseAsmOperandsOpt(Names, Constraints, Exprs))
return StmtError();
}
@@ -835,7 +820,7 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) {
}
}
}
- if (!isGotoAsm && (Tok.isNot(tok::r_paren) || AteExtraColon)) {
+ if (!GAQ.isGoto() && (Tok.isNot(tok::r_paren) || AteExtraColon)) {
Diag(Tok, diag::err_expected) << tok::r_paren;
SkipUntil(tok::r_paren, StopAtSemi);
return StmtError();
@@ -868,16 +853,16 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) {
if (!TryConsumeToken(tok::comma))
break;
}
- } else if (isGotoAsm) {
+ } else if (GAQ.isGoto()) {
Diag(Tok, diag::err_expected) << tok::colon;
SkipUntil(tok::r_paren, StopAtSemi);
return StmtError();
}
T.consumeClose();
- return Actions.ActOnGCCAsmStmt(
- AsmLoc, false, isVolatile, NumOutputs, NumInputs, Names.data(),
- Constraints, Exprs, AsmString.get(), Clobbers, NumLabels,
- T.getCloseLocation());
+ return Actions.ActOnGCCAsmStmt(AsmLoc, false, GAQ.isVolatile(), NumOutputs,
+ NumInputs, Names.data(), Constraints, Exprs,
+ AsmString.get(), Clobbers, NumLabels,
+ T.getCloseLocation());
}
/// ParseAsmOperands - Parse the asm-operands production as used by
@@ -948,3 +933,28 @@ bool Parser::ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
return false;
}
}
+
+const char *Parser::GNUAsmQualifiers::getQualifierName(AQ Qualifier) {
+ switch (Qualifier) {
+ case AQ_volatile: return "volatile";
+ case AQ_inline: return "inline";
+ case AQ_goto: return "goto";
+ case AQ_unspecified: return "unspecified";
+ }
+ llvm_unreachable("Unknown GNUAsmQualifier");
+}
+
+Parser::GNUAsmQualifiers::AQ
+Parser::getGNUAsmQualifier(const Token &Tok) const {
+ switch (Tok.getKind()) {
+ case tok::kw_volatile: return GNUAsmQualifiers::AQ_volatile;
+ case tok::kw_inline: return GNUAsmQualifiers::AQ_inline;
+ case tok::kw_goto: return GNUAsmQualifiers::AQ_goto;
+ default: return GNUAsmQualifiers::AQ_unspecified;
+ }
+}
+bool Parser::GNUAsmQualifiers::setAsmQualifier(AQ Qualifier) {
+ bool IsDuplicate = Qualifiers & Qualifier;
+ Qualifiers |= Qualifier;
+ return IsDuplicate;
+}
diff --git a/clang/lib/Parse/ParseTemplate.cpp b/clang/lib/Parse/ParseTemplate.cpp
index 1b9301b6591d..3ef73f579123 100644
--- a/clang/lib/Parse/ParseTemplate.cpp
+++ b/clang/lib/Parse/ParseTemplate.cpp
@@ -22,6 +22,16 @@
#include "llvm/Support/TimeProfiler.h"
using namespace clang;
+/// Re-enter a possible template scope, creating as many template parameter
+/// scopes as necessary.
+/// \return The number of template parameter scopes entered.
+unsigned Parser::ReenterTemplateScopes(MultiParseScope &S, Decl *D) {
+ return Actions.ActOnReenterTemplateScope(D, [&] {
+ S.Enter(Scope::TemplateParamScope);
+ return Actions.getCurScope();
+ });
+}
+
/// Parse a template declaration, explicit instantiation, or
/// explicit specialization.
Decl *Parser::ParseDeclarationStartingWithTemplate(
@@ -67,8 +77,7 @@ Decl *Parser::ParseTemplateDeclarationOrSpecialization(
assert(Tok.isOneOf(tok::kw_export, tok::kw_template) &&
"Token does not start a template declaration.");
- // Enter template-parameter scope.
- ParseScope TemplateParmScope(this, Scope::TemplateParamScope);
+ MultiParseScope TemplateParamScopes(*this);
// Tell the action that names should be checked in the context of
// the declaration to come.
@@ -116,7 +125,8 @@ Decl *Parser::ParseTemplateDeclarationOrSpecialization(
// Parse the '<' template-parameter-list '>'
SourceLocation LAngleLoc, RAngleLoc;
SmallVector<NamedDecl*, 4> TemplateParams;
- if (ParseTemplateParameters(CurTemplateDepthTracker.getDepth(),
+ if (ParseTemplateParameters(TemplateParamScopes,
+ CurTemplateDepthTracker.getDepth(),
TemplateParams, LAngleLoc, RAngleLoc)) {
// Skip until the semi-colon or a '}'.
SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
@@ -150,9 +160,6 @@ Decl *Parser::ParseTemplateDeclarationOrSpecialization(
TemplateParams, RAngleLoc, OptionalRequiresClauseConstraintER.get()));
} while (Tok.isOneOf(tok::kw_export, tok::kw_template));
- unsigned NewFlags = getCurScope()->getFlags() & ~Scope::TemplateParamScope;
- ParseScopeFlags TemplateScopeFlags(this, NewFlags, isSpecialization);
-
// Parse the actual template declaration.
if (Tok.is(tok::kw_concept))
return ParseConceptDefinition(
@@ -240,6 +247,8 @@ Decl *Parser::ParseSingleDeclarationAfterTemplate(
// Parse the declarator.
ParsingDeclarator DeclaratorInfo(*this, DS, (DeclaratorContext)Context);
+ if (TemplateInfo.TemplateParams)
+ DeclaratorInfo.setTemplateParameterLists(*TemplateInfo.TemplateParams);
ParseDeclarator(DeclaratorInfo);
// Error parsing the declarator?
if (!DeclaratorInfo.hasName()) {
@@ -251,9 +260,9 @@ Decl *Parser::ParseSingleDeclarationAfterTemplate(
}
llvm::TimeTraceScope TimeScope("ParseTemplate", [&]() {
- return DeclaratorInfo.getIdentifier() != nullptr
- ? DeclaratorInfo.getIdentifier()->getName()
- : "<unknown>";
+ return std::string(DeclaratorInfo.getIdentifier() != nullptr
+ ? DeclaratorInfo.getIdentifier()->getName()
+ : "<unknown>");
});
LateParsedAttrList LateParsedAttrs(true);
@@ -361,9 +370,11 @@ Parser::ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
DiagnoseAndSkipCXX11Attributes();
CXXScopeSpec SS;
- if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(),
- /*EnteringContext=*/false, /*MayBePseudoDestructor=*/nullptr,
- /*IsTypename=*/false, /*LastII=*/nullptr, /*OnlyNamespace=*/true) ||
+ if (ParseOptionalCXXScopeSpecifier(
+ SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false, /*EnteringContext=*/false,
+ /*MayBePseudoDestructor=*/nullptr,
+ /*IsTypename=*/false, /*LastII=*/nullptr, /*OnlyNamespace=*/true) ||
SS.isInvalid()) {
SkipUntil(tok::semi);
return nullptr;
@@ -374,12 +385,12 @@ Parser::ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
diag::err_concept_definition_not_identifier);
UnqualifiedId Result;
- if (ParseUnqualifiedId(SS, /*EnteringContext=*/false,
+ if (ParseUnqualifiedId(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false, /*EnteringContext=*/false,
/*AllowDestructorName=*/false,
/*AllowConstructorName=*/false,
/*AllowDeductionGuide=*/false,
- /*ObjectType=*/ParsedType(), /*TemplateKWLoc=*/nullptr,
- Result)) {
+ /*TemplateKWLoc=*/nullptr, Result)) {
SkipUntil(tok::semi);
return nullptr;
}
@@ -426,8 +437,9 @@ Parser::ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
///
/// \returns true if an error occurred, false otherwise.
bool Parser::ParseTemplateParameters(
- unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams,
- SourceLocation &LAngleLoc, SourceLocation &RAngleLoc) {
+ MultiParseScope &TemplateScopes, unsigned Depth,
+ SmallVectorImpl<NamedDecl *> &TemplateParams, SourceLocation &LAngleLoc,
+ SourceLocation &RAngleLoc) {
// Get the template parameter list.
if (!TryConsumeToken(tok::less, LAngleLoc)) {
Diag(Tok.getLocation(), diag::err_expected_less_after) << "template";
@@ -436,8 +448,11 @@ bool Parser::ParseTemplateParameters(
// Try to parse the template parameter list.
bool Failed = false;
- if (!Tok.is(tok::greater) && !Tok.is(tok::greatergreater))
+ // FIXME: Missing greatergreatergreater support.
+ if (!Tok.is(tok::greater) && !Tok.is(tok::greatergreater)) {
+ TemplateScopes.Enter(Scope::TemplateParamScope);
Failed = ParseTemplateParameterList(Depth, TemplateParams);
+ }
if (Tok.is(tok::greatergreater)) {
// No diagnostic required here: a template-parameter-list can only be
@@ -499,10 +514,7 @@ Parser::ParseTemplateParameterList(const unsigned Depth,
/// Determine whether the parser is at the start of a template
/// type parameter.
-/// \param ScopeError will receive true if there was an error parsing a
-/// scope specifier at the current location.
-bool Parser::isStartOfTemplateTypeParameter(bool &ScopeError) {
- ScopeError = false;
+Parser::TPResult Parser::isStartOfTemplateTypeParameter() {
if (Tok.is(tok::kw_class)) {
// "class" may be the start of an elaborated-type-specifier or a
// type-parameter. Per C++ [temp.param]p3, we prefer the type-parameter.
@@ -512,7 +524,7 @@ bool Parser::isStartOfTemplateTypeParameter(bool &ScopeError) {
case tok::greater:
case tok::greatergreater:
case tok::ellipsis:
- return true;
+ return TPResult::True;
case tok::identifier:
// This may be either a type-parameter or an elaborated-type-specifier.
@@ -520,7 +532,7 @@ bool Parser::isStartOfTemplateTypeParameter(bool &ScopeError) {
break;
default:
- return false;
+ return TPResult::False;
}
switch (GetLookAheadToken(2).getKind()) {
@@ -528,51 +540,28 @@ bool Parser::isStartOfTemplateTypeParameter(bool &ScopeError) {
case tok::comma:
case tok::greater:
case tok::greatergreater:
- return true;
+ return TPResult::True;
default:
- return false;
+ return TPResult::False;
}
}
- bool WasScopeAnnotation = Tok.is(tok::annot_cxxscope);
- CXXScopeSpec SS;
- ScopeError =
- ParseOptionalCXXScopeSpecifier(SS, ParsedType(),
- /*EnteringContext=*/false,
- /*MayBePseudoDestructor=*/nullptr,
- // If this is not a type-constraint, then
- // this scope-spec is part of the typename
- // of a non-type template parameter
- /*IsTypename=*/true, /*LastII=*/nullptr,
- // We won't find concepts in
- // non-namespaces anyway, so might as well
- // parse this correctly for possible type
- // names.
- /*OnlyNamespace=*/false);
- if (ScopeError)
- return false;
- if (TryAnnotateTypeConstraint(SS))
- return false;
- bool IsTypeConstraint = isTypeConstraintAnnotation();
- if (!IsTypeConstraint && SS.isNotEmpty()) {
- // This isn't a type-constraint but we've already parsed this scope
- // specifier - annotate it.
- AnnotateScopeToken(SS, /*isNewAnnotation=*/!WasScopeAnnotation);
- return false;
- }
+ if (TryAnnotateTypeConstraint())
+ return TPResult::Error;
- if (IsTypeConstraint &&
+ if (isTypeConstraintAnnotation() &&
// Next token might be 'auto' or 'decltype', indicating that this
// type-constraint is in fact part of a placeholder-type-specifier of a
// non-type template parameter.
- !NextToken().isOneOf(tok::kw_auto, tok::kw_decltype))
- return true;
+ !GetLookAheadToken(Tok.is(tok::annot_cxxscope) ? 2 : 1)
+ .isOneOf(tok::kw_auto, tok::kw_decltype))
+ return TPResult::True;
// 'typedef' is a reasonably-common typo/thinko for 'typename', and is
// ill-formed otherwise.
if (Tok.isNot(tok::kw_typename) && Tok.isNot(tok::kw_typedef))
- return false;
+ return TPResult::False;
// C++ [temp.param]p2:
// There is no semantic difference between class and typename in a
@@ -592,17 +581,17 @@ bool Parser::isStartOfTemplateTypeParameter(bool &ScopeError) {
case tok::greater:
case tok::greatergreater:
case tok::ellipsis:
- return true;
+ return TPResult::True;
case tok::kw_typename:
case tok::kw_typedef:
case tok::kw_class:
// These indicate that a comma was missed after a type parameter, not that
// we have found a non-type parameter.
- return true;
+ return TPResult::True;
default:
- return false;
+ return TPResult::False;
}
}
@@ -627,13 +616,9 @@ bool Parser::isStartOfTemplateTypeParameter(bool &ScopeError) {
/// typename
///
NamedDecl *Parser::ParseTemplateParameter(unsigned Depth, unsigned Position) {
- // We could be facing a type-constraint, which (could) start a type parameter.
- // Annotate it now (we might end up not using it if we determine this
- // type-constraint is in fact part of a placeholder-type-specifier of a
- // non-type template parameter.
- bool ScopeError;
- if (isStartOfTemplateTypeParameter(ScopeError)) {
+ switch (isStartOfTemplateTypeParameter()) {
+ case TPResult::True:
// Is there just a typo in the input code? ('typedef' instead of
// 'typename')
if (Tok.is(tok::kw_typedef)) {
@@ -649,8 +634,10 @@ NamedDecl *Parser::ParseTemplateParameter(unsigned Depth, unsigned Position) {
}
return ParseTypeParameter(Depth, Position);
- }
- if (ScopeError) {
+ case TPResult::False:
+ break;
+
+ case TPResult::Error: {
// We return an invalid parameter as opposed to null to avoid having bogus
// diagnostics about an empty template parameter list.
// FIXME: Fix ParseTemplateParameterList to better handle nullptr results
@@ -670,6 +657,11 @@ NamedDecl *Parser::ParseTemplateParameter(unsigned Depth, unsigned Position) {
StopAtSemi | StopBeforeMatch);
return ErrorParam;
}
+
+ case TPResult::Ambiguous:
+ llvm_unreachable("template param classification can't be ambiguous");
+ }
+
if (Tok.is(tok::kw_template))
return ParseTemplateTemplateParameter(Depth, Position);
@@ -682,15 +674,15 @@ NamedDecl *Parser::ParseTemplateParameter(unsigned Depth, unsigned Position) {
/// Check whether the current token is a template-id annotation denoting a
/// type-constraint.
bool Parser::isTypeConstraintAnnotation() {
- if (Tok.isNot(tok::annot_template_id))
+ const Token &T = Tok.is(tok::annot_cxxscope) ? NextToken() : Tok;
+ if (T.isNot(tok::annot_template_id))
return false;
const auto *ExistingAnnot =
- static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue());
+ static_cast<TemplateIdAnnotation *>(T.getAnnotationValue());
return ExistingAnnot->Kind == TNK_Concept_template;
}
-/// Try parsing a type-constraint construct at the current location, after the
-/// optional scope specifier.
+/// Try parsing a type-constraint at the current location.
///
/// type-constraint:
/// nested-name-specifier[opt] concept-name
@@ -698,35 +690,62 @@ bool Parser::isTypeConstraintAnnotation() {
/// '<' template-argument-list[opt] '>'[opt]
///
/// \returns true if an error occurred, and false otherwise.
-bool Parser::TryAnnotateTypeConstraint(CXXScopeSpec &SS) {
- if (!getLangOpts().ConceptsTS || Tok.isNot(tok::identifier))
+bool Parser::TryAnnotateTypeConstraint() {
+ if (!getLangOpts().CPlusPlus20)
return false;
+ CXXScopeSpec SS;
+ bool WasScopeAnnotation = Tok.is(tok::annot_cxxscope);
+ if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ /*EnteringContext=*/false,
+ /*MayBePseudoDestructor=*/nullptr,
+ // If this is not a type-constraint, then
+ // this scope-spec is part of the typename
+ // of a non-type template parameter
+ /*IsTypename=*/true, /*LastII=*/nullptr,
+ // We won't find concepts in
+ // non-namespaces anyway, so might as well
+ // parse this correctly for possible type
+ // names.
+ /*OnlyNamespace=*/false))
+ return true;
- UnqualifiedId PossibleConceptName;
- PossibleConceptName.setIdentifier(Tok.getIdentifierInfo(),
- Tok.getLocation());
-
- TemplateTy PossibleConcept;
- bool MemberOfUnknownSpecialization = false;
- auto TNK = Actions.isTemplateName(getCurScope(), SS,
- /*hasTemplateKeyword=*/false,
- PossibleConceptName,
- /*ObjectType=*/ParsedType(),
- /*EnteringContext=*/false,
- PossibleConcept,
- MemberOfUnknownSpecialization);
- assert(!MemberOfUnknownSpecialization
- && "Member when we only allowed namespace scope qualifiers??");
- if (!PossibleConcept || TNK != TNK_Concept_template)
- return false;
+ if (Tok.is(tok::identifier)) {
+ UnqualifiedId PossibleConceptName;
+ PossibleConceptName.setIdentifier(Tok.getIdentifierInfo(),
+ Tok.getLocation());
+
+ TemplateTy PossibleConcept;
+ bool MemberOfUnknownSpecialization = false;
+ auto TNK = Actions.isTemplateName(getCurScope(), SS,
+ /*hasTemplateKeyword=*/false,
+ PossibleConceptName,
+ /*ObjectType=*/ParsedType(),
+ /*EnteringContext=*/false,
+ PossibleConcept,
+ MemberOfUnknownSpecialization,
+ /*Disambiguation=*/true);
+ if (MemberOfUnknownSpecialization || !PossibleConcept ||
+ TNK != TNK_Concept_template) {
+ if (SS.isNotEmpty())
+ AnnotateScopeToken(SS, !WasScopeAnnotation);
+ return false;
+ }
- // At this point we're sure we're dealing with a constrained parameter. It
- // may or may not have a template parameter list following the concept name.
- return AnnotateTemplateIdToken(PossibleConcept, TNK, SS,
- /*TemplateKWLoc=*/SourceLocation(),
- PossibleConceptName,
- /*AllowTypeAnnotation=*/false,
- /*TypeConstraint=*/true);
+ // At this point we're sure we're dealing with a constrained parameter. It
+ // may or may not have a template parameter list following the concept
+ // name.
+ if (AnnotateTemplateIdToken(PossibleConcept, TNK, SS,
+ /*TemplateKWLoc=*/SourceLocation(),
+ PossibleConceptName,
+ /*AllowTypeAnnotation=*/false,
+ /*TypeConstraint=*/true))
+ return true;
+ }
+
+ if (SS.isNotEmpty())
+ AnnotateScopeToken(SS, !WasScopeAnnotation);
+ return false;
}
/// ParseTypeParameter - Parse a template type parameter (C++ [temp.param]).
@@ -739,13 +758,18 @@ bool Parser::TryAnnotateTypeConstraint(CXXScopeSpec &SS) {
/// 'typename' ...[opt][C++0x] identifier[opt]
/// 'typename' identifier[opt] '=' type-id
NamedDecl *Parser::ParseTypeParameter(unsigned Depth, unsigned Position) {
- assert(Tok.isOneOf(tok::kw_class, tok::kw_typename, tok::annot_template_id) &&
+ assert((Tok.isOneOf(tok::kw_class, tok::kw_typename) ||
+ isTypeConstraintAnnotation()) &&
"A type-parameter starts with 'class', 'typename' or a "
"type-constraint");
+ CXXScopeSpec TypeConstraintSS;
TemplateIdAnnotation *TypeConstraint = nullptr;
bool TypenameKeyword = false;
SourceLocation KeyLoc;
+ ParseOptionalCXXScopeSpecifier(TypeConstraintSS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ /*EnteringContext*/ false);
if (Tok.is(tok::annot_template_id)) {
// Consume the 'type-constraint'.
TypeConstraint =
@@ -754,6 +778,9 @@ NamedDecl *Parser::ParseTypeParameter(unsigned Depth, unsigned Position) {
"stray non-concept template-id annotation");
KeyLoc = ConsumeAnnotationToken();
} else {
+ assert(TypeConstraintSS.isEmpty() &&
+ "expected type constraint after scope specifier");
+
// Consume the 'class' or 'typename' keyword.
TypenameKeyword = Tok.is(tok::kw_typename);
KeyLoc = ConsumeToken();
@@ -795,7 +822,8 @@ NamedDecl *Parser::ParseTypeParameter(unsigned Depth, unsigned Position) {
ParsedType DefaultArg;
if (TryConsumeToken(tok::equal, EqualLoc))
DefaultArg = ParseTypeName(/*Range=*/nullptr,
- DeclaratorContext::TemplateTypeArgContext).get();
+ DeclaratorContext::TemplateTypeArgContext)
+ .get();
NamedDecl *NewDecl = Actions.ActOnTypeParameter(getCurScope(),
TypenameKeyword, EllipsisLoc,
@@ -804,10 +832,11 @@ NamedDecl *Parser::ParseTypeParameter(unsigned Depth, unsigned Position) {
DefaultArg,
TypeConstraint != nullptr);
- if (TypeConstraint)
- Actions.ActOnTypeConstraint(TypeConstraint,
+ if (TypeConstraint) {
+ Actions.ActOnTypeConstraint(TypeConstraintSS, TypeConstraint,
cast<TemplateTypeParmDecl>(NewDecl),
EllipsisLoc);
+ }
return NewDecl;
}
@@ -832,9 +861,9 @@ Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) {
SmallVector<NamedDecl*,8> TemplateParams;
SourceLocation LAngleLoc, RAngleLoc;
{
- ParseScope TemplateParmScope(this, Scope::TemplateParamScope);
- if (ParseTemplateParameters(Depth + 1, TemplateParams, LAngleLoc,
- RAngleLoc)) {
+ MultiParseScope TemplateParmScope(*this);
+ if (ParseTemplateParameters(TemplateParmScope, Depth + 1, TemplateParams,
+ LAngleLoc, RAngleLoc)) {
return nullptr;
}
}
@@ -972,7 +1001,7 @@ Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) {
// Create the parameter.
return Actions.ActOnNonTypeTemplateParameter(getCurScope(), ParamDecl,
- Depth, Position, EqualLoc,
+ Depth, Position, EqualLoc,
DefaultArg.get());
}
@@ -1013,7 +1042,8 @@ void Parser::DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
/// or argument list.
///
/// \returns true, if current token does not start with '>', false otherwise.
-bool Parser::ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
+bool Parser::ParseGreaterThanInTemplateList(SourceLocation LAngleLoc,
+ SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList) {
// What will be left once we've consumed the '>'.
@@ -1023,7 +1053,8 @@ bool Parser::ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
switch (Tok.getKind()) {
default:
- Diag(Tok.getLocation(), diag::err_expected) << tok::greater;
+ Diag(getEndOfPreviousToken(), diag::err_expected) << tok::greater;
+ Diag(LAngleLoc, diag::note_matching) << tok::less;
return true;
case tok::greater:
@@ -1202,16 +1233,17 @@ Parser::ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
if (Invalid) {
// Try to find the closing '>'.
- if (ConsumeLastToken)
- SkipUntil(tok::greater, StopAtSemi);
+ if (getLangOpts().CPlusPlus11)
+ SkipUntil(tok::greater, tok::greatergreater,
+ tok::greatergreatergreater, StopAtSemi | StopBeforeMatch);
else
SkipUntil(tok::greater, StopAtSemi | StopBeforeMatch);
- return true;
}
}
- return ParseGreaterThanInTemplateList(RAngleLoc, ConsumeLastToken,
- /*ObjCGenericList=*/false);
+ return ParseGreaterThanInTemplateList(LAngleLoc, RAngleLoc, ConsumeLastToken,
+ /*ObjCGenericList=*/false) ||
+ Invalid;
}
/// Replace the tokens that form a simple-template-id with an
@@ -1262,12 +1294,13 @@ bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
bool AllowTypeAnnotation,
bool TypeConstraint) {
assert(getLangOpts().CPlusPlus && "Can only annotate template-ids in C++");
- assert(Template && (Tok.is(tok::less) || TypeConstraint) &&
+ assert((Tok.is(tok::less) || TypeConstraint) &&
"Parser isn't at the beginning of a template-id");
assert(!(TypeConstraint && AllowTypeAnnotation) && "type-constraint can't be "
"a type annotation");
assert((!TypeConstraint || TNK == TNK_Concept_template) && "type-constraint "
"must accompany a concept name");
+ assert((Template || TNK == TNK_Non_template) && "missing template name");
// Consume the template-name.
SourceLocation TemplateNameLoc = TemplateName.getSourceRange().getBegin();
@@ -1275,40 +1308,31 @@ bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
// Parse the enclosed template argument list.
SourceLocation LAngleLoc, RAngleLoc;
TemplateArgList TemplateArgs;
+ bool ArgsInvalid = false;
if (!TypeConstraint || Tok.is(tok::less)) {
- bool Invalid = ParseTemplateIdAfterTemplateName(false, LAngleLoc,
- TemplateArgs,
- RAngleLoc);
-
- if (Invalid) {
- // If we failed to parse the template ID but skipped ahead to a >, we're not
- // going to be able to form a token annotation. Eat the '>' if present.
- TryConsumeToken(tok::greater);
- // FIXME: Annotate the token stream so we don't produce the same errors
- // again if we're doing this annotation as part of a tentative parse.
+ ArgsInvalid = ParseTemplateIdAfterTemplateName(false, LAngleLoc,
+ TemplateArgs, RAngleLoc);
+ // If we couldn't recover from invalid arguments, don't form an annotation
+ // token -- we don't know how much to annotate.
+ // FIXME: This can lead to duplicate diagnostics if we retry parsing this
+ // template-id in another context. Try to annotate anyway?
+ if (RAngleLoc.isInvalid())
return true;
- }
}
ASTTemplateArgsPtr TemplateArgsPtr(TemplateArgs);
// Build the annotation token.
if (TNK == TNK_Type_template && AllowTypeAnnotation) {
- TypeResult Type = Actions.ActOnTemplateIdType(
- getCurScope(), SS, TemplateKWLoc, Template, TemplateName.Identifier,
- TemplateNameLoc, LAngleLoc, TemplateArgsPtr, RAngleLoc);
- if (Type.isInvalid()) {
- // If we failed to parse the template ID but skipped ahead to a >, we're
- // not going to be able to form a token annotation. Eat the '>' if
- // present.
- TryConsumeToken(tok::greater);
- // FIXME: Annotate the token stream so we don't produce the same errors
- // again if we're doing this annotation as part of a tentative parse.
- return true;
- }
+ TypeResult Type = ArgsInvalid
+ ? TypeError()
+ : Actions.ActOnTemplateIdType(
+ getCurScope(), SS, TemplateKWLoc, Template,
+ TemplateName.Identifier, TemplateNameLoc,
+ LAngleLoc, TemplateArgsPtr, RAngleLoc);
Tok.setKind(tok::annot_typename);
- setTypeAnnotation(Tok, Type.get());
+ setTypeAnnotation(Tok, Type);
if (SS.isNotEmpty())
Tok.setLocation(SS.getBeginLoc());
else if (TemplateKWLoc.isValid())
@@ -1331,8 +1355,8 @@ bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
: TemplateName.OperatorFunctionId.Operator;
TemplateIdAnnotation *TemplateId = TemplateIdAnnotation::Create(
- SS, TemplateKWLoc, TemplateNameLoc, TemplateII, OpKind, Template, TNK,
- LAngleLoc, RAngleLoc, TemplateArgs, TemplateIds);
+ TemplateKWLoc, TemplateNameLoc, TemplateII, OpKind, Template, TNK,
+ LAngleLoc, RAngleLoc, TemplateArgs, ArgsInvalid, TemplateIds);
Tok.setAnnotationValue(TemplateId);
if (TemplateKWLoc.isValid())
@@ -1357,39 +1381,37 @@ bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
/// a type annotation token will still be created, but will have a
/// NULL type pointer to signify an error.
///
+/// \param SS The scope specifier appearing before the template-id, if any.
+///
/// \param IsClassName Is this template-id appearing in a context where we
/// know it names a class, such as in an elaborated-type-specifier or
/// base-specifier? ('typename' and 'template' are unneeded and disallowed
/// in those contexts.)
-void Parser::AnnotateTemplateIdTokenAsType(bool IsClassName) {
+void Parser::AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS,
+ bool IsClassName) {
assert(Tok.is(tok::annot_template_id) && "Requires template-id tokens");
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
- assert((TemplateId->Kind == TNK_Type_template ||
- TemplateId->Kind == TNK_Dependent_template_name ||
- TemplateId->Kind == TNK_Undeclared_template) &&
+ assert(TemplateId->mightBeType() &&
"Only works for type and dependent templates");
ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
- TypeResult Type
- = Actions.ActOnTemplateIdType(getCurScope(),
- TemplateId->SS,
- TemplateId->TemplateKWLoc,
- TemplateId->Template,
- TemplateId->Name,
- TemplateId->TemplateNameLoc,
- TemplateId->LAngleLoc,
- TemplateArgsPtr,
- TemplateId->RAngleLoc,
- /*IsCtorOrDtorName*/false,
- IsClassName);
+ TypeResult Type =
+ TemplateId->isInvalid()
+ ? TypeError()
+ : Actions.ActOnTemplateIdType(
+ getCurScope(), SS, TemplateId->TemplateKWLoc,
+ TemplateId->Template, TemplateId->Name,
+ TemplateId->TemplateNameLoc, TemplateId->LAngleLoc,
+ TemplateArgsPtr, TemplateId->RAngleLoc,
+ /*IsCtorOrDtorName*/ false, IsClassName);
// Create the new "type" annotation token.
Tok.setKind(tok::annot_typename);
- setTypeAnnotation(Tok, Type.isInvalid() ? nullptr : Type.get());
- if (TemplateId->SS.isNotEmpty()) // it was a C++ qualified type name.
- Tok.setLocation(TemplateId->SS.getBeginLoc());
+ setTypeAnnotation(Tok, Type);
+ if (SS.isNotEmpty()) // it was a C++ qualified type name.
+ Tok.setLocation(SS.getBeginLoc());
// End location stays the same
// Replace the template-id annotation token, and possible the scope-specifier
@@ -1399,7 +1421,9 @@ void Parser::AnnotateTemplateIdTokenAsType(bool IsClassName) {
/// Determine whether the given token can end a template argument.
static bool isEndOfTemplateArgument(Token Tok) {
- return Tok.isOneOf(tok::comma, tok::greater, tok::greatergreater);
+ // FIXME: Handle '>>>'.
+ return Tok.isOneOf(tok::comma, tok::greater, tok::greatergreater,
+ tok::greatergreatergreater);
}
/// Parse a C++ template template argument.
@@ -1420,7 +1444,8 @@ ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() {
// followed by a token that terminates a template argument, such as ',',
// '>', or (in some cases) '>>'.
CXXScopeSpec SS; // nested-name-specifier, if present
- ParseOptionalCXXScopeSpecifier(SS, nullptr,
+ ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
/*EnteringContext=*/false);
ParsedTemplateArgument Result;
@@ -1438,15 +1463,14 @@ ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() {
TryConsumeToken(tok::ellipsis, EllipsisLoc);
- // If the next token signals the end of a template argument,
- // then we have a dependent template name that could be a template
- // template argument.
+ // If the next token signals the end of a template argument, then we have
+ // a (possibly-dependent) template name that could be a template template
+ // argument.
TemplateTy Template;
if (isEndOfTemplateArgument(Tok) &&
- Actions.ActOnDependentTemplateName(
- getCurScope(), SS, TemplateKWLoc, Name,
- /*ObjectType=*/nullptr,
- /*EnteringContext=*/false, Template))
+ Actions.ActOnTemplateName(getCurScope(), SS, TemplateKWLoc, Name,
+ /*ObjectType=*/nullptr,
+ /*EnteringContext=*/false, Template))
Result = ParsedTemplateArgument(SS, Template, Name.StartLocation);
}
} else if (Tok.is(tok::identifier)) {
@@ -1550,10 +1574,8 @@ Parser::ParseTemplateArgumentList(TemplateArgList &TemplateArgs) {
if (TryConsumeToken(tok::ellipsis, EllipsisLoc))
Arg = Actions.ActOnPackExpansion(Arg, EllipsisLoc);
- if (Arg.isInvalid()) {
- SkipUntil(tok::comma, tok::greater, StopAtSemi | StopBeforeMatch);
+ if (Arg.isInvalid())
return true;
- }
// Save this template argument.
TemplateArgs.push_back(Arg);
@@ -1607,6 +1629,9 @@ void Parser::ParseLateTemplatedFuncDef(LateParsedTemplate &LPT) {
if (!LPT.D)
return;
+ // Destroy TemplateIdAnnotations when we're done, if possible.
+ DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(*this);
+
// Get the FunctionDecl.
FunctionDecl *FunD = LPT.D->getAsFunction();
// Track template parameter depth.
@@ -1616,40 +1641,22 @@ void Parser::ParseLateTemplatedFuncDef(LateParsedTemplate &LPT) {
Sema::ContextRAII GlobalSavedContext(
Actions, Actions.Context.getTranslationUnitDecl());
- SmallVector<ParseScope*, 4> TemplateParamScopeStack;
-
- // Get the list of DeclContexts to reenter. For inline methods, we only want
- // to push the DeclContext of the outermost class. This matches the way the
- // parser normally parses bodies of inline methods when the outermost class is
- // complete.
- struct ContainingDC {
- ContainingDC(DeclContext *DC, bool ShouldPush) : Pair(DC, ShouldPush) {}
- llvm::PointerIntPair<DeclContext *, 1, bool> Pair;
- DeclContext *getDC() { return Pair.getPointer(); }
- bool shouldPushDC() { return Pair.getInt(); }
- };
- SmallVector<ContainingDC, 4> DeclContextsToReenter;
- DeclContext *DD = FunD;
- DeclContext *NextContaining = Actions.getContainingDC(DD);
- while (DD && !DD->isTranslationUnit()) {
- bool ShouldPush = DD == NextContaining;
- DeclContextsToReenter.push_back({DD, ShouldPush});
- if (ShouldPush)
- NextContaining = Actions.getContainingDC(DD);
- DD = DD->getLexicalParent();
- }
-
- // Reenter template scopes from outermost to innermost.
- for (ContainingDC CDC : reverse(DeclContextsToReenter)) {
- TemplateParamScopeStack.push_back(
- new ParseScope(this, Scope::TemplateParamScope));
- unsigned NumParamLists = Actions.ActOnReenterTemplateScope(
- getCurScope(), cast<Decl>(CDC.getDC()));
- CurTemplateDepthTracker.addDepth(NumParamLists);
- if (CDC.shouldPushDC()) {
- TemplateParamScopeStack.push_back(new ParseScope(this, Scope::DeclScope));
- Actions.PushDeclContext(Actions.getCurScope(), CDC.getDC());
- }
+ MultiParseScope Scopes(*this);
+
+ // Get the list of DeclContexts to reenter.
+ SmallVector<DeclContext*, 4> DeclContextsToReenter;
+ for (DeclContext *DC = FunD; DC && !DC->isTranslationUnit();
+ DC = DC->getLexicalParent())
+ DeclContextsToReenter.push_back(DC);
+
+ // Reenter scopes from outermost to innermost.
+ for (DeclContext *DC : reverse(DeclContextsToReenter)) {
+ CurTemplateDepthTracker.addDepth(
+ ReenterTemplateScopes(Scopes, cast<Decl>(DC)));
+ Scopes.Enter(Scope::DeclScope);
+ // We'll reenter the function context itself below.
+ if (DC != FunD)
+ Actions.PushDeclContext(Actions.getCurScope(), DC);
}
assert(!LPT.Toks.empty() && "Empty body!");
@@ -1670,8 +1677,7 @@ void Parser::ParseLateTemplatedFuncDef(LateParsedTemplate &LPT) {
Scope::CompoundStmtScope);
// Recreate the containing function DeclContext.
- Sema::ContextRAII FunctionSavedContext(Actions,
- Actions.getContainingDC(FunD));
+ Sema::ContextRAII FunctionSavedContext(Actions, FunD->getLexicalParent());
Actions.ActOnStartOfFunctionDef(getCurScope(), FunD);
@@ -1695,13 +1701,6 @@ void Parser::ParseLateTemplatedFuncDef(LateParsedTemplate &LPT) {
} else
Actions.ActOnFinishFunctionBody(LPT.D, nullptr);
}
-
- // Exit scopes.
- FnScope.Exit();
- SmallVectorImpl<ParseScope *>::reverse_iterator I =
- TemplateParamScopeStack.rbegin();
- for (; I != TemplateParamScopeStack.rend(); ++I)
- delete *I;
}
/// Lex a delayed template function for late parsing.
@@ -1733,7 +1732,7 @@ bool Parser::diagnoseUnknownTemplateId(ExprResult LHS, SourceLocation Less) {
TPA.Commit();
SourceLocation Greater;
- ParseGreaterThanInTemplateList(Greater, true, false);
+ ParseGreaterThanInTemplateList(Less, Greater, true, false);
Actions.diagnoseExprIntendedAsTemplateName(getCurScope(), LHS,
Less, Greater);
return true;
@@ -1762,7 +1761,7 @@ void Parser::checkPotentialAngleBracket(ExprResult &PotentialTemplateName) {
NextToken().isOneOf(tok::greatergreater, tok::greatergreatergreater))) {
SourceLocation Less = ConsumeToken();
SourceLocation Greater;
- ParseGreaterThanInTemplateList(Greater, true, false);
+ ParseGreaterThanInTemplateList(Less, Greater, true, false);
Actions.diagnoseExprIntendedAsTemplateName(
getCurScope(), PotentialTemplateName, Less, Greater);
// FIXME: Perform error recovery.
diff --git a/clang/lib/Parse/ParseTentative.cpp b/clang/lib/Parse/ParseTentative.cpp
index 4d69fb4693fb..f026f3a1bfb2 100644
--- a/clang/lib/Parse/ParseTentative.cpp
+++ b/clang/lib/Parse/ParseTentative.cpp
@@ -186,25 +186,10 @@ Parser::TPResult Parser::TryConsumeDeclarationSpecifier() {
ConsumeToken();
// Skip attributes.
- while (Tok.isOneOf(tok::l_square, tok::kw___attribute, tok::kw___declspec,
- tok::kw_alignas)) {
- if (Tok.is(tok::l_square)) {
- ConsumeBracket();
- if (!SkipUntil(tok::r_square))
- return TPResult::Error;
- } else {
- ConsumeToken();
- if (Tok.isNot(tok::l_paren))
- return TPResult::Error;
- ConsumeParen();
- if (!SkipUntil(tok::r_paren))
- return TPResult::Error;
- }
- }
+ if (!TrySkipAttributes())
+ return TPResult::Error;
- if (Tok.isOneOf(tok::identifier, tok::coloncolon, tok::kw_decltype,
- tok::annot_template_id) &&
- TryAnnotateCXXScopeToken())
+ if (TryAnnotateOptionalCXXScopeToken())
return TPResult::Error;
if (Tok.is(tok::annot_cxxscope))
ConsumeAnnotationToken();
@@ -443,6 +428,38 @@ struct Parser::ConditionDeclarationOrInitStatementState {
}
};
+bool Parser::isEnumBase(bool AllowSemi) {
+ assert(Tok.is(tok::colon) && "should be looking at the ':'");
+
+ RevertingTentativeParsingAction PA(*this);
+ // ':'
+ ConsumeToken();
+
+ // type-specifier-seq
+ bool InvalidAsDeclSpec = false;
+ // FIXME: We could disallow non-type decl-specifiers here, but it makes no
+ // difference: those specifiers are ill-formed regardless of the
+ // interpretation.
+ TPResult R = isCXXDeclarationSpecifier(/*BracedCastResult*/ TPResult::True,
+ &InvalidAsDeclSpec);
+ if (R == TPResult::Ambiguous) {
+ // We either have a decl-specifier followed by '(' or an undeclared
+ // identifier.
+ if (TryConsumeDeclarationSpecifier() == TPResult::Error)
+ return true;
+
+ // If we get to the end of the enum-base, we hit either a '{' or a ';'.
+ // Don't bother checking the enumerator-list.
+ if (Tok.is(tok::l_brace) || (AllowSemi && Tok.is(tok::semi)))
+ return true;
+
+ // A second decl-specifier unambiguously indicates an enum-base.
+ R = isCXXDeclarationSpecifier(TPResult::True, &InvalidAsDeclSpec);
+ }
+
+ return R != TPResult::False;
+}
+
/// Disambiguates between a declaration in a condition, a
/// simple-declaration in an init-statement, and an expression for
/// a condition of a if/switch statement.
@@ -783,19 +800,49 @@ Parser::isCXX11AttributeSpecifier(bool Disambiguate,
return CAK_NotAttributeSpecifier;
}
+bool Parser::TrySkipAttributes() {
+ while (Tok.isOneOf(tok::l_square, tok::kw___attribute, tok::kw___declspec,
+ tok::kw_alignas)) {
+ if (Tok.is(tok::l_square)) {
+ ConsumeBracket();
+ if (Tok.isNot(tok::l_square))
+ return false;
+ ConsumeBracket();
+ if (!SkipUntil(tok::r_square) || Tok.isNot(tok::r_square))
+ return false;
+ // Note that explicitly checking for `[[` and `]]` allows us to fail
+ // as expected in the case of the Objective-C message send syntax.
+ ConsumeBracket();
+ } else {
+ ConsumeToken();
+ if (Tok.isNot(tok::l_paren))
+ return false;
+ ConsumeParen();
+ if (!SkipUntil(tok::r_paren))
+ return false;
+ }
+ }
+
+ return true;
+}
+
Parser::TPResult Parser::TryParsePtrOperatorSeq() {
while (true) {
- if (Tok.isOneOf(tok::coloncolon, tok::identifier))
- if (TryAnnotateCXXScopeToken(true))
- return TPResult::Error;
+ if (TryAnnotateOptionalCXXScopeToken(true))
+ return TPResult::Error;
if (Tok.isOneOf(tok::star, tok::amp, tok::caret, tok::ampamp) ||
(Tok.is(tok::annot_cxxscope) && NextToken().is(tok::star))) {
// ptr-operator
ConsumeAnyToken();
+
+ // Skip attributes.
+ if (!TrySkipAttributes())
+ return TPResult::Error;
+
while (Tok.isOneOf(tok::kw_const, tok::kw_volatile, tok::kw_restrict,
tok::kw__Nonnull, tok::kw__Nullable,
- tok::kw__Null_unspecified))
+ tok::kw__Null_unspecified, tok::kw__Atomic))
ConsumeToken();
} else {
return TPResult::True;
@@ -969,10 +1016,16 @@ Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
NextToken().is(tok::kw_operator)))) &&
mayHaveIdentifier) {
// declarator-id
- if (Tok.is(tok::annot_cxxscope))
+ if (Tok.is(tok::annot_cxxscope)) {
+ CXXScopeSpec SS;
+ Actions.RestoreNestedNameSpecifierAnnotation(
+ Tok.getAnnotationValue(), Tok.getAnnotationRange(), SS);
+ if (SS.isInvalid())
+ return TPResult::Error;
ConsumeAnnotationToken();
- else if (Tok.is(tok::identifier))
+ } else if (Tok.is(tok::identifier)) {
TentativelyDeclaredIdentifiers.push_back(Tok.getIdentifierInfo());
+ }
if (Tok.is(tok::kw_operator)) {
if (TryParseOperatorId() == TPResult::Error)
return TPResult::Error;
@@ -1046,130 +1099,6 @@ Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
return TPResult::Ambiguous;
}
-Parser::TPResult
-Parser::isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind) {
- switch (Kind) {
- // Obviously starts an expression.
- case tok::numeric_constant:
- case tok::char_constant:
- case tok::wide_char_constant:
- case tok::utf8_char_constant:
- case tok::utf16_char_constant:
- case tok::utf32_char_constant:
- case tok::string_literal:
- case tok::wide_string_literal:
- case tok::utf8_string_literal:
- case tok::utf16_string_literal:
- case tok::utf32_string_literal:
- case tok::l_square:
- case tok::l_paren:
- case tok::amp:
- case tok::ampamp:
- case tok::star:
- case tok::plus:
- case tok::plusplus:
- case tok::minus:
- case tok::minusminus:
- case tok::tilde:
- case tok::exclaim:
- case tok::kw_sizeof:
- case tok::kw___func__:
- case tok::kw_const_cast:
- case tok::kw_delete:
- case tok::kw_dynamic_cast:
- case tok::kw_false:
- case tok::kw_new:
- case tok::kw_operator:
- case tok::kw_reinterpret_cast:
- case tok::kw_static_cast:
- case tok::kw_this:
- case tok::kw_throw:
- case tok::kw_true:
- case tok::kw_typeid:
- case tok::kw_alignof:
- case tok::kw_noexcept:
- case tok::kw_nullptr:
- case tok::kw__Alignof:
- case tok::kw___null:
- case tok::kw___alignof:
- case tok::kw___builtin_choose_expr:
- case tok::kw___builtin_offsetof:
- case tok::kw___builtin_va_arg:
- case tok::kw___imag:
- case tok::kw___real:
- case tok::kw___FUNCTION__:
- case tok::kw___FUNCDNAME__:
- case tok::kw___FUNCSIG__:
- case tok::kw_L__FUNCTION__:
- case tok::kw_L__FUNCSIG__:
- case tok::kw___PRETTY_FUNCTION__:
- case tok::kw___uuidof:
-#define TYPE_TRAIT(N,Spelling,K) \
- case tok::kw_##Spelling:
-#include "clang/Basic/TokenKinds.def"
- return TPResult::True;
-
- // Obviously starts a type-specifier-seq:
- case tok::kw_char:
- case tok::kw_const:
- case tok::kw_double:
- case tok::kw__Float16:
- case tok::kw___float128:
- case tok::kw_enum:
- case tok::kw_half:
- case tok::kw_float:
- case tok::kw_int:
- case tok::kw_long:
- case tok::kw___int64:
- case tok::kw___int128:
- case tok::kw_restrict:
- case tok::kw_short:
- case tok::kw_signed:
- case tok::kw_struct:
- case tok::kw_union:
- case tok::kw_unsigned:
- case tok::kw_void:
- case tok::kw_volatile:
- case tok::kw__Bool:
- case tok::kw__Complex:
- case tok::kw_class:
- case tok::kw_typename:
- case tok::kw_wchar_t:
- case tok::kw_char8_t:
- case tok::kw_char16_t:
- case tok::kw_char32_t:
- case tok::kw__Decimal32:
- case tok::kw__Decimal64:
- case tok::kw__Decimal128:
- case tok::kw___interface:
- case tok::kw___thread:
- case tok::kw_thread_local:
- case tok::kw__Thread_local:
- case tok::kw_typeof:
- case tok::kw___underlying_type:
- case tok::kw___cdecl:
- case tok::kw___stdcall:
- case tok::kw___fastcall:
- case tok::kw___thiscall:
- case tok::kw___regcall:
- case tok::kw___vectorcall:
- case tok::kw___unaligned:
- case tok::kw___vector:
- case tok::kw___pixel:
- case tok::kw___bool:
- case tok::kw__Atomic:
-#define GENERIC_IMAGE_TYPE(ImgType, Id) case tok::kw_##ImgType##_t:
-#include "clang/Basic/OpenCLImageTypes.def"
- case tok::kw___unknown_anytype:
- return TPResult::False;
-
- default:
- break;
- }
-
- return TPResult::Ambiguous;
-}
-
bool Parser::isTentativelyDeclared(IdentifierInfo *II) {
return std::find(TentativelyDeclaredIdentifiers.begin(),
TentativelyDeclaredIdentifiers.end(), II)
@@ -1181,8 +1110,9 @@ class TentativeParseCCC final : public CorrectionCandidateCallback {
public:
TentativeParseCCC(const Token &Next) {
WantRemainingKeywords = false;
- WantTypeSpecifiers = Next.isOneOf(tok::l_paren, tok::r_paren, tok::greater,
- tok::l_brace, tok::identifier);
+ WantTypeSpecifiers =
+ Next.isOneOf(tok::l_paren, tok::r_paren, tok::greater, tok::l_brace,
+ tok::identifier, tok::comma);
}
bool ValidateCandidate(const TypoCorrection &Candidate) override {
@@ -1316,6 +1246,18 @@ public:
Parser::TPResult
Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
bool *InvalidAsDeclSpec) {
+ auto IsPlaceholderSpecifier = [&] (TemplateIdAnnotation *TemplateId,
+ int Lookahead) {
+ // We have a placeholder-constraint (we check for 'auto' or 'decltype' to
+ // distinguish 'C<int>;' from 'C<int> auto c = 1;')
+ return TemplateId->Kind == TNK_Concept_template &&
+ GetLookAheadToken(Lookahead + 1).isOneOf(tok::kw_auto, tok::kw_decltype,
+ // If we have an identifier here, the user probably forgot the
+ // 'auto' in the placeholder constraint, e.g. 'C<int> x = 2;'
+ // This will be diagnosed nicely later, so disambiguate as a
+ // declaration.
+ tok::identifier);
+ };
switch (Tok.getKind()) {
case tok::identifier: {
// Check for need to substitute AltiVec __vector keyword
@@ -1334,6 +1276,15 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
// this is ambiguous. Typo-correct to type and expression keywords and
// to types and identifiers, in order to try to recover from errors.
TentativeParseCCC CCC(Next);
+ // Tentative parsing may not be done in the right evaluation context
+ // for the ultimate expression. Enter an unevaluated context to prevent
+ // Sema from immediately e.g. treating this lookup as a potential ODR-use.
+ // If we generate an expression annotation token and the parser actually
+ // claims it as an expression, we'll transform the expression to a
+ // potentially-evaluated one then.
+ EnterExpressionEvaluationContext Unevaluated(
+ Actions, Sema::ExpressionEvaluationContext::Unevaluated,
+ Sema::ReuseLambdaContextDecl);
switch (TryAnnotateName(&CCC)) {
case ANK_Error:
return TPResult::Error;
@@ -1511,7 +1462,9 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
// If lookup for the template-name found nothing, don't assume we have a
// definitive disambiguation result yet.
- if (TemplateId->Kind == TNK_Undeclared_template && InvalidAsDeclSpec) {
+ if ((TemplateId->hasInvalidName() ||
+ TemplateId->Kind == TNK_Undeclared_template) &&
+ InvalidAsDeclSpec) {
// 'template-id(' can be a valid expression but not a valid decl spec if
// the template-name is not declared, but we don't consider this to be a
// definitive disambiguation. In any other context, it's an error either
@@ -1519,10 +1472,14 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
*InvalidAsDeclSpec = NextToken().is(tok::l_paren);
return TPResult::Ambiguous;
}
+ if (TemplateId->hasInvalidName())
+ return TPResult::Error;
+ if (IsPlaceholderSpecifier(TemplateId, /*Lookahead=*/0))
+ return TPResult::True;
if (TemplateId->Kind != TNK_Type_template)
return TPResult::False;
CXXScopeSpec SS;
- AnnotateTemplateIdTokenAsType();
+ AnnotateTemplateIdTokenAsType(SS);
assert(Tok.is(tok::annot_typename));
goto case_typename;
}
@@ -1532,6 +1489,20 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
if (TryAnnotateTypeOrScopeToken())
return TPResult::Error;
if (!Tok.is(tok::annot_typename)) {
+ if (Tok.is(tok::annot_cxxscope) &&
+ NextToken().is(tok::annot_template_id)) {
+ TemplateIdAnnotation *TemplateId =
+ takeTemplateIdAnnotation(NextToken());
+ if (TemplateId->hasInvalidName()) {
+ if (InvalidAsDeclSpec) {
+ *InvalidAsDeclSpec = NextToken().is(tok::l_paren);
+ return TPResult::Ambiguous;
+ }
+ return TPResult::Error;
+ }
+ if (IsPlaceholderSpecifier(TemplateId, /*Lookahead=*/1))
+ return TPResult::True;
+ }
// If the next token is an identifier or a type qualifier, then this
// can't possibly be a valid expression either.
if (Tok.is(tok::annot_cxxscope) && NextToken().is(tok::identifier)) {
@@ -1670,6 +1641,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
case tok::kw_half:
case tok::kw_float:
case tok::kw_double:
+ case tok::kw___bf16:
case tok::kw__Float16:
case tok::kw___float128:
case tok::kw_void:
@@ -1724,6 +1696,24 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
case tok::kw__Atomic:
return TPResult::True;
+ case tok::kw__ExtInt: {
+ if (NextToken().isNot(tok::l_paren))
+ return TPResult::Error;
+ RevertingTentativeParsingAction PA(*this);
+ ConsumeToken();
+ ConsumeParen();
+
+ if (!SkipUntil(tok::r_paren, StopAtSemi))
+ return TPResult::Error;
+
+ if (Tok.is(tok::l_paren))
+ return TPResult::Ambiguous;
+
+ if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace))
+ return BracedCastResult;
+
+ return TPResult::True;
+ }
default:
return TPResult::False;
}
@@ -1756,6 +1746,7 @@ bool Parser::isCXXDeclarationSpecifierAType() {
case tok::kw_bool:
case tok::kw_short:
case tok::kw_int:
+ case tok::kw__ExtInt:
case tok::kw_long:
case tok::kw___int64:
case tok::kw___int128:
@@ -1764,6 +1755,7 @@ bool Parser::isCXXDeclarationSpecifierAType() {
case tok::kw_half:
case tok::kw_float:
case tok::kw_double:
+ case tok::kw___bf16:
case tok::kw__Float16:
case tok::kw___float128:
case tok::kw_void:
@@ -1975,17 +1967,14 @@ Parser::TryParseParameterDeclarationClause(bool *InvalidAsDeclaration,
// (a) the previous parameter did, and
// (b) this must be the first declaration of the function, so we can't
// inherit any default arguments from elsewhere.
- // If we see an ')', then we've reached the end of a
- // parameter-declaration-clause, and the last param is missing its default
- // argument.
+ // FIXME: If we reach a ')' without consuming any '>'s, then this must
+ // also be a function parameter (that's missing its default argument).
if (VersusTemplateArgument)
- return Tok.isOneOf(tok::equal, tok::r_paren) ? TPResult::True
- : TPResult::False;
+ return Tok.is(tok::equal) ? TPResult::True : TPResult::False;
if (Tok.is(tok::equal)) {
// '=' assignment-expression
// Parse through assignment-expression.
- // FIXME: assignment-expression may contain an unparenthesized comma.
if (!SkipUntil(tok::comma, tok::r_paren, StopAtSemi | StopBeforeMatch))
return TPResult::Error;
}
@@ -2137,3 +2126,58 @@ Parser::TPResult Parser::isTemplateArgumentList(unsigned TokensToSkip) {
return TPResult::Ambiguous;
return TPResult::False;
}
+
+/// Determine whether we might be looking at the '(' of a C++20 explicit(bool)
+/// in an earlier language mode.
+Parser::TPResult Parser::isExplicitBool() {
+ assert(Tok.is(tok::l_paren) && "expected to be looking at a '(' token");
+
+ RevertingTentativeParsingAction PA(*this);
+ ConsumeParen();
+
+ // We can only have 'explicit' on a constructor, conversion function, or
+ // deduction guide. The declarator of a deduction guide cannot be
+ // parenthesized, so we know this isn't a deduction guide. So the only
+ // thing we need to check for is some number of parens followed by either
+ // the current class name or 'operator'.
+ while (Tok.is(tok::l_paren))
+ ConsumeParen();
+
+ if (TryAnnotateOptionalCXXScopeToken())
+ return TPResult::Error;
+
+ // Class-scope constructor and conversion function names can't really be
+ // qualified, but we get better diagnostics if we assume they can be.
+ CXXScopeSpec SS;
+ if (Tok.is(tok::annot_cxxscope)) {
+ Actions.RestoreNestedNameSpecifierAnnotation(Tok.getAnnotationValue(),
+ Tok.getAnnotationRange(),
+ SS);
+ ConsumeAnnotationToken();
+ }
+
+ // 'explicit(operator' might be explicit(bool) or the declaration of a
+ // conversion function, but it's probably a conversion function.
+ if (Tok.is(tok::kw_operator))
+ return TPResult::Ambiguous;
+
+ // If this can't be a constructor name, it can only be explicit(bool).
+ if (Tok.isNot(tok::identifier) && Tok.isNot(tok::annot_template_id))
+ return TPResult::True;
+ if (!Actions.isCurrentClassName(Tok.is(tok::identifier)
+ ? *Tok.getIdentifierInfo()
+ : *takeTemplateIdAnnotation(Tok)->Name,
+ getCurScope(), &SS))
+ return TPResult::True;
+ // Formally, we must have a right-paren after the constructor name to match
+ // the grammar for a constructor. But clang permits a parenthesized
+ // constructor declarator, so also allow a constructor declarator to follow
+ // with no ')' token after the constructor name.
+ if (!NextToken().is(tok::r_paren) &&
+ !isConstructorDeclarator(/*Unqualified=*/SS.isEmpty(),
+ /*DeductionGuide=*/false))
+ return TPResult::True;
+
+ // Might be explicit(bool) or a parenthesized constructor name.
+ return TPResult::Ambiguous;
+}
diff --git a/clang/lib/Parse/Parser.cpp b/clang/lib/Parse/Parser.cpp
index 4249de361b89..764d4e8e9d52 100644
--- a/clang/lib/Parse/Parser.cpp
+++ b/clang/lib/Parse/Parser.cpp
@@ -14,6 +14,7 @@
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
@@ -432,16 +433,7 @@ Parser::~Parser() {
PP.clearCodeCompletionHandler();
- if (getLangOpts().DelayedTemplateParsing &&
- !PP.isIncrementalProcessingEnabled() && !TemplateIds.empty()) {
- // If an ASTConsumer parsed delay-parsed templates in their
- // HandleTranslationUnit() method, TemplateIds created there were not
- // guarded by a DestroyTemplateIdAnnotationsRAIIObj object in
- // ParseTopLevelDecl(). Destroy them here.
- DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(TemplateIds);
- }
-
- assert(TemplateIds.empty() && "Still alive TemplateIdAnnotations around?");
+ DestroyTemplateIds();
}
/// Initialize - Warm up the parser.
@@ -537,11 +529,10 @@ void Parser::Initialize() {
ConsumeToken();
}
-void Parser::LateTemplateParserCleanupCallback(void *P) {
- // While this RAII helper doesn't bracket any actual work, the destructor will
- // clean up annotations that were created during ActOnEndOfTranslationUnit
- // when incremental processing is enabled.
- DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(((Parser *)P)->TemplateIds);
+void Parser::DestroyTemplateIds() {
+ for (TemplateIdAnnotation *Id : TemplateIds)
+ Id->Destroy();
+ TemplateIds.clear();
}
/// Parse the first top-level declaration in a translation unit.
@@ -576,7 +567,7 @@ bool Parser::ParseFirstTopLevelDecl(DeclGroupPtrTy &Result) {
/// declaration
/// [C++20] module-import-declaration
bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl) {
- DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(TemplateIds);
+ DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(*this);
// Skip over the EOF token, flagging end of previous input for incremental
// processing
@@ -650,12 +641,18 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl) {
return false;
case tok::eof:
+ // Check whether -fmax-tokens= was reached.
+ if (PP.getMaxTokens() != 0 && PP.getTokenCount() > PP.getMaxTokens()) {
+ PP.Diag(Tok.getLocation(), diag::warn_max_tokens_total)
+ << PP.getTokenCount() << PP.getMaxTokens();
+ SourceLocation OverrideLoc = PP.getMaxTokensOverrideLoc();
+ if (OverrideLoc.isValid()) {
+ PP.Diag(OverrideLoc, diag::note_max_tokens_total_override);
+ }
+ }
+
// Late template parsing can begin.
- if (getLangOpts().DelayedTemplateParsing)
- Actions.SetLateTemplateParser(LateTemplateParserCallback,
- PP.isIncrementalProcessingEnabled() ?
- LateTemplateParserCleanupCallback : nullptr,
- this);
+ Actions.SetLateTemplateParser(LateTemplateParserCallback, nullptr, this);
if (!PP.isIncrementalProcessingEnabled())
Actions.ActOnEndOfTranslationUnit();
//else don't tell Sema that we ended parsing: more input might come.
@@ -716,7 +713,7 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl) {
Parser::DeclGroupPtrTy
Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS) {
- DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(TemplateIds);
+ DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(*this);
ParenBraceBracketBalancer BalancerRAIIObj(*this);
if (PP.isCodeCompletionReached()) {
@@ -753,6 +750,9 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
case tok::annot_pragma_fenv_access:
HandlePragmaFEnvAccess();
return nullptr;
+ case tok::annot_pragma_float_control:
+ HandlePragmaFloatControl();
+ return nullptr;
case tok::annot_pragma_fp:
HandlePragmaFP();
break;
@@ -1136,6 +1136,7 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
// Poison SEH identifiers so they are flagged as illegal in function bodies.
PoisonSEHIdentifiersRAIIObject PoisonSEHIdentifiers(*this, true);
const DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
+ TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
// If this is C90 and the declspecs were completely missing, fudge in an
// implicit int. We do this here because this is the only place where
@@ -1262,6 +1263,15 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
// safe because we're always the sole owner.
D.getMutableDeclSpec().abort();
+ // With abbreviated function templates - we need to explicitly add depth to
+ // account for the implicit template parameter list induced by the template.
+ if (auto *Template = dyn_cast_or_null<FunctionTemplateDecl>(Res))
+ if (Template->isAbbreviated() &&
+ Template->getTemplateParameters()->getParam(0)->isImplicit())
+ // First template parameter is implicit - meaning no explicit template
+ // parameter list was specified.
+ CurTemplateDepthTracker.addDepth(1);
+
if (TryConsumeToken(tok::equal)) {
assert(getLangOpts().CPlusPlus && "Only C++ function definitions have '='");
@@ -1508,13 +1518,13 @@ ExprResult Parser::ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc) {
assert(Tok.is(tok::kw_asm) && "Not an asm!");
SourceLocation Loc = ConsumeToken();
- if (Tok.is(tok::kw_volatile)) {
- // Remove from the end of 'asm' to the end of 'volatile'.
+ if (isGNUAsmQualifier(Tok)) {
+ // Remove from the end of 'asm' to the end of the asm qualifier.
SourceRange RemovalRange(PP.getLocForEndOfToken(Loc),
PP.getLocForEndOfToken(Tok.getLocation()));
-
- Diag(Tok, diag::warn_file_asm_volatile)
- << FixItHint::CreateRemoval(RemovalRange);
+ Diag(Tok, diag::err_global_asm_qualifier_ignored)
+ << GNUAsmQualifiers::getQualifierName(getGNUAsmQualifier(Tok))
+ << FixItHint::CreateRemoval(RemovalRange);
ConsumeToken();
}
@@ -1584,7 +1594,9 @@ Parser::TryAnnotateName(CorrectionCandidateCallback *CCC) {
CXXScopeSpec SS;
if (getLangOpts().CPlusPlus &&
- ParseOptionalCXXScopeSpecifier(SS, nullptr, EnteringContext))
+ ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ EnteringContext))
return ANK_Error;
if (Tok.isNot(tok::identifier) || SS.isInvalid()) {
@@ -1680,7 +1692,8 @@ Parser::TryAnnotateName(CorrectionCandidateCallback *CCC) {
}
case Sema::NC_ContextIndependentExpr:
- Tok.setKind(tok::annot_primary_expr);
+ Tok.setKind(Actions.isUnevaluatedContext() ? tok::annot_uneval_primary_expr
+ : tok::annot_primary_expr);
setExprAnnotation(Tok, Classification.getExpression());
Tok.setAnnotationEndLoc(NameLoc);
if (SS.isNotEmpty())
@@ -1732,6 +1745,20 @@ Parser::TryAnnotateName(CorrectionCandidateCallback *CCC) {
return ANK_Error;
return ANK_Success;
}
+ case Sema::NC_Concept: {
+ UnqualifiedId Id;
+ Id.setIdentifier(Name, NameLoc);
+ if (Next.is(tok::less))
+ // We have a concept name followed by '<'. Consume the identifier token so
+ // we reach the '<' and annotate it.
+ ConsumeToken();
+ if (AnnotateTemplateIdToken(
+ TemplateTy::make(Classification.getTemplateName()),
+ Classification.getTemplateNameKind(), SS, SourceLocation(), Id,
+ /*AllowTypeAnnotation=*/false, /*TypeConstraint=*/true))
+ return ANK_Error;
+ return ANK_Success;
+ }
}
// Unable to classify the name, but maybe we can annotate a scope specifier.
@@ -1807,10 +1834,11 @@ bool Parser::TryAnnotateTypeOrScopeToken() {
SourceLocation TypenameLoc = ConsumeToken();
CXXScopeSpec SS;
if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
/*EnteringContext=*/false, nullptr,
/*IsTypename*/ true))
return true;
- if (!SS.isSet()) {
+ if (SS.isEmpty()) {
if (Tok.is(tok::identifier) || Tok.is(tok::annot_template_id) ||
Tok.is(tok::annot_decltype)) {
// Attempt to recover by skipping the invalid 'typename'
@@ -1840,9 +1868,7 @@ bool Parser::TryAnnotateTypeOrScopeToken() {
Tok.getLocation());
} else if (Tok.is(tok::annot_template_id)) {
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
- if (TemplateId->Kind != TNK_Type_template &&
- TemplateId->Kind != TNK_Dependent_template_name &&
- TemplateId->Kind != TNK_Undeclared_template) {
+ if (!TemplateId->mightBeType()) {
Diag(Tok, diag::err_typename_refers_to_non_type_template)
<< Tok.getAnnotationRange();
return true;
@@ -1851,14 +1877,13 @@ bool Parser::TryAnnotateTypeOrScopeToken() {
ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
- Ty = Actions.ActOnTypenameType(getCurScope(), TypenameLoc, SS,
- TemplateId->TemplateKWLoc,
- TemplateId->Template,
- TemplateId->Name,
- TemplateId->TemplateNameLoc,
- TemplateId->LAngleLoc,
- TemplateArgsPtr,
- TemplateId->RAngleLoc);
+ Ty = TemplateId->isInvalid()
+ ? TypeError()
+ : Actions.ActOnTypenameType(
+ getCurScope(), TypenameLoc, SS, TemplateId->TemplateKWLoc,
+ TemplateId->Template, TemplateId->Name,
+ TemplateId->TemplateNameLoc, TemplateId->LAngleLoc,
+ TemplateArgsPtr, TemplateId->RAngleLoc);
} else {
Diag(Tok, diag::err_expected_type_name_after_typename)
<< SS.getRange();
@@ -1867,7 +1892,7 @@ bool Parser::TryAnnotateTypeOrScopeToken() {
SourceLocation EndLoc = Tok.getLastLoc();
Tok.setKind(tok::annot_typename);
- setTypeAnnotation(Tok, Ty.isInvalid() ? nullptr : Ty.get());
+ setTypeAnnotation(Tok, Ty);
Tok.setAnnotationEndLoc(EndLoc);
Tok.setLocation(TypenameLoc);
PP.AnnotateCachedTokens(Tok);
@@ -1879,7 +1904,9 @@ bool Parser::TryAnnotateTypeOrScopeToken() {
CXXScopeSpec SS;
if (getLangOpts().CPlusPlus)
- if (ParseOptionalCXXScopeSpecifier(SS, nullptr, /*EnteringContext*/false))
+ if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ /*EnteringContext*/ false))
return true;
return TryAnnotateTypeOrScopeTokenAfterScopeSpec(SS, !WasScopeAnnotation);
@@ -1983,7 +2010,7 @@ bool Parser::TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
// template-id annotation in a context where we weren't allowed
// to produce a type annotation token. Update the template-id
// annotation token to a type annotation token now.
- AnnotateTemplateIdTokenAsType();
+ AnnotateTemplateIdTokenAsType(SS);
return false;
}
}
@@ -2005,13 +2032,12 @@ bool Parser::TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool Parser::TryAnnotateCXXScopeToken(bool EnteringContext) {
assert(getLangOpts().CPlusPlus &&
"Call sites of this function should be guarded by checking for C++");
- assert((Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
- (Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon)) ||
- Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super)) &&
- "Cannot be a type or scope token!");
+ assert(MightBeCXXScopeToken() && "Cannot be a type or scope token!");
CXXScopeSpec SS;
- if (ParseOptionalCXXScopeSpecifier(SS, nullptr, EnteringContext))
+ if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
+ EnteringContext))
return true;
if (SS.isEmpty())
return false;
@@ -2120,7 +2146,8 @@ bool Parser::ParseMicrosoftIfExistsCondition(IfExistsCondition& Result) {
// Parse nested-name-specifier.
if (getLangOpts().CPlusPlus)
- ParseOptionalCXXScopeSpecifier(Result.SS, nullptr,
+ ParseOptionalCXXScopeSpecifier(Result.SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false,
/*EnteringContext=*/false);
// Check nested-name specifier.
@@ -2131,10 +2158,12 @@ bool Parser::ParseMicrosoftIfExistsCondition(IfExistsCondition& Result) {
// Parse the unqualified-id.
SourceLocation TemplateKWLoc; // FIXME: parsed, but unused.
- if (ParseUnqualifiedId(
- Result.SS, /*EnteringContext*/false, /*AllowDestructorName*/true,
- /*AllowConstructorName*/true, /*AllowDeductionGuide*/false, nullptr,
- &TemplateKWLoc, Result.Name)) {
+ if (ParseUnqualifiedId(Result.SS, /*ObjectType=*/nullptr,
+ /*ObjectHadErrors=*/false, /*EnteringContext*/ false,
+ /*AllowDestructorName*/ true,
+ /*AllowConstructorName*/ true,
+ /*AllowDeductionGuide*/ false, &TemplateKWLoc,
+ Result.Name)) {
T.skipToEnd();
return true;
}
diff --git a/clang/lib/Sema/AnalysisBasedWarnings.cpp b/clang/lib/Sema/AnalysisBasedWarnings.cpp
index 04611dadde66..3b7356893833 100644
--- a/clang/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/clang/lib/Sema/AnalysisBasedWarnings.cpp
@@ -974,6 +974,14 @@ static void DiagUninitUse(Sema &S, const VarDecl *VD, const UninitUse &Use,
<< Use.getUser()->getSourceRange();
}
+/// Diagnose uninitialized const reference usages.
+static bool DiagnoseUninitializedConstRefUse(Sema &S, const VarDecl *VD,
+ const UninitUse &Use) {
+ S.Diag(Use.getUser()->getBeginLoc(), diag::warn_uninit_const_reference)
+ << VD->getDeclName() << Use.getUser()->getSourceRange();
+ return true;
+}
+
/// DiagnoseUninitializedUse -- Helper function for diagnosing uses of an
/// uninitialized variable. This manages the different forms of diagnostic
/// emitted for particular types of uses. Returns true if the use was diagnosed
@@ -1506,13 +1514,14 @@ class UninitValsDiagReporter : public UninitVariablesHandler {
// order of diagnostics when calling flushDiagnostics().
typedef llvm::MapVector<const VarDecl *, MappedType> UsesMap;
UsesMap uses;
+ UsesMap constRefUses;
public:
UninitValsDiagReporter(Sema &S) : S(S) {}
~UninitValsDiagReporter() override { flushDiagnostics(); }
- MappedType &getUses(const VarDecl *vd) {
- MappedType &V = uses[vd];
+ MappedType &getUses(UsesMap &um, const VarDecl *vd) {
+ MappedType &V = um[vd];
if (!V.getPointer())
V.setPointer(new UsesVec());
return V;
@@ -1520,11 +1529,17 @@ public:
void handleUseOfUninitVariable(const VarDecl *vd,
const UninitUse &use) override {
- getUses(vd).getPointer()->push_back(use);
+ getUses(uses, vd).getPointer()->push_back(use);
+ }
+
+ void handleConstRefUseOfUninitVariable(const VarDecl *vd,
+ const UninitUse &use) override {
+ getUses(constRefUses, vd).getPointer()->push_back(use);
}
void handleSelfInit(const VarDecl *vd) override {
- getUses(vd).setInt(true);
+ getUses(uses, vd).setInt(true);
+ getUses(constRefUses, vd).setInt(true);
}
void flushDiagnostics() {
@@ -1571,6 +1586,32 @@ public:
}
uses.clear();
+
+ // Flush all const reference uses diags.
+ for (const auto &P : constRefUses) {
+ const VarDecl *vd = P.first;
+ const MappedType &V = P.second;
+
+ UsesVec *vec = V.getPointer();
+ bool hasSelfInit = V.getInt();
+
+ if (!vec->empty() && hasSelfInit && hasAlwaysUninitializedUse(vec))
+ DiagnoseUninitializedUse(S, vd,
+ UninitUse(vd->getInit()->IgnoreParenCasts(),
+ /* isAlwaysUninit */ true),
+ /* alwaysReportSelfInit */ true);
+ else {
+ for (const auto &U : *vec) {
+ if (DiagnoseUninitializedConstRefUse(S, vd, U))
+ break;
+ }
+ }
+
+ // Release the uses vector.
+ delete vec;
+ }
+
+ constRefUses.clear();
}
private:
@@ -1659,6 +1700,14 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
: getNotes();
}
+ OptionalNotes makeUnlockedHereNote(SourceLocation LocUnlocked,
+ StringRef Kind) {
+ return LocUnlocked.isValid()
+ ? getNotes(PartialDiagnosticAt(
+ LocUnlocked, S.PDiag(diag::note_unlocked_here) << Kind))
+ : getNotes();
+ }
+
public:
ThreadSafetyReporter(Sema &S, SourceLocation FL, SourceLocation FEL)
: S(S), FunLocation(FL), FunEndLocation(FEL),
@@ -1685,13 +1734,14 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
Warnings.emplace_back(std::move(Warning), getNotes());
}
- void handleUnmatchedUnlock(StringRef Kind, Name LockName,
- SourceLocation Loc) override {
+ void handleUnmatchedUnlock(StringRef Kind, Name LockName, SourceLocation Loc,
+ SourceLocation LocPreviousUnlock) override {
if (Loc.isInvalid())
Loc = FunLocation;
PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_unlock_but_no_lock)
<< Kind << LockName);
- Warnings.emplace_back(std::move(Warning), getNotes());
+ Warnings.emplace_back(std::move(Warning),
+ makeUnlockedHereNote(LocPreviousUnlock, Kind));
}
void handleIncorrectUnlockKind(StringRef Kind, Name LockName,
@@ -2184,7 +2234,8 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
if (!Diags.isIgnored(diag::warn_uninit_var, D->getBeginLoc()) ||
!Diags.isIgnored(diag::warn_sometimes_uninit_var, D->getBeginLoc()) ||
- !Diags.isIgnored(diag::warn_maybe_uninit_var, D->getBeginLoc())) {
+ !Diags.isIgnored(diag::warn_maybe_uninit_var, D->getBeginLoc()) ||
+ !Diags.isIgnored(diag::warn_uninit_const_reference, D->getBeginLoc())) {
if (CFG *cfg = AC.getCFG()) {
UninitValsDiagReporter reporter(S);
UninitVariablesAnalysisStats stats;
diff --git a/clang/lib/Sema/CodeCompleteConsumer.cpp b/clang/lib/Sema/CodeCompleteConsumer.cpp
index b88ff9dd64cd..f1ad8aeaacbb 100644
--- a/clang/lib/Sema/CodeCompleteConsumer.cpp
+++ b/clang/lib/Sema/CodeCompleteConsumer.cpp
@@ -23,6 +23,7 @@
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Casting.h"
@@ -570,29 +571,10 @@ void PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(
if (const char *BriefComment = CCS->getBriefComment())
OS << " : " << BriefComment;
}
- for (const FixItHint &FixIt : Results[I].FixIts) {
- const SourceLocation BLoc = FixIt.RemoveRange.getBegin();
- const SourceLocation ELoc = FixIt.RemoveRange.getEnd();
-
- SourceManager &SM = SemaRef.SourceMgr;
- std::pair<FileID, unsigned> BInfo = SM.getDecomposedLoc(BLoc);
- std::pair<FileID, unsigned> EInfo = SM.getDecomposedLoc(ELoc);
- // Adjust for token ranges.
- if (FixIt.RemoveRange.isTokenRange())
- EInfo.second += Lexer::MeasureTokenLength(ELoc, SM, SemaRef.LangOpts);
-
- OS << " (requires fix-it:"
- << " {" << SM.getLineNumber(BInfo.first, BInfo.second) << ':'
- << SM.getColumnNumber(BInfo.first, BInfo.second) << '-'
- << SM.getLineNumber(EInfo.first, EInfo.second) << ':'
- << SM.getColumnNumber(EInfo.first, EInfo.second) << "}"
- << " to \"" << FixIt.CodeToInsert << "\")";
- }
- OS << '\n';
break;
case CodeCompletionResult::RK_Keyword:
- OS << Results[I].Keyword << '\n';
+ OS << Results[I].Keyword;
break;
case CodeCompletionResult::RK_Macro:
@@ -602,13 +584,31 @@ void PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(
includeBriefComments())) {
OS << " : " << CCS->getAsString();
}
- OS << '\n';
break;
case CodeCompletionResult::RK_Pattern:
- OS << "Pattern : " << Results[I].Pattern->getAsString() << '\n';
+ OS << "Pattern : " << Results[I].Pattern->getAsString();
break;
}
+ for (const FixItHint &FixIt : Results[I].FixIts) {
+ const SourceLocation BLoc = FixIt.RemoveRange.getBegin();
+ const SourceLocation ELoc = FixIt.RemoveRange.getEnd();
+
+ SourceManager &SM = SemaRef.SourceMgr;
+ std::pair<FileID, unsigned> BInfo = SM.getDecomposedLoc(BLoc);
+ std::pair<FileID, unsigned> EInfo = SM.getDecomposedLoc(ELoc);
+ // Adjust for token ranges.
+ if (FixIt.RemoveRange.isTokenRange())
+ EInfo.second += Lexer::MeasureTokenLength(ELoc, SM, SemaRef.LangOpts);
+
+ OS << " (requires fix-it:"
+ << " {" << SM.getLineNumber(BInfo.first, BInfo.second) << ':'
+ << SM.getColumnNumber(BInfo.first, BInfo.second) << '-'
+ << SM.getLineNumber(EInfo.first, EInfo.second) << ':'
+ << SM.getColumnNumber(EInfo.first, EInfo.second) << "}"
+ << " to \"" << FixIt.CodeToInsert << "\")";
+ }
+ OS << '\n';
}
}
diff --git a/clang/lib/Sema/DeclSpec.cpp b/clang/lib/Sema/DeclSpec.cpp
index 639231c87232..f4c30c90ad27 100644
--- a/clang/lib/Sema/DeclSpec.cpp
+++ b/clang/lib/Sema/DeclSpec.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/LocInfoType.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Sema.h"
@@ -29,6 +30,9 @@ using namespace clang;
void UnqualifiedId::setTemplateId(TemplateIdAnnotation *TemplateId) {
assert(TemplateId && "NULL template-id annotation?");
+ assert(!TemplateId->isInvalid() &&
+ "should not convert invalid template-ids to unqualified-ids");
+
Kind = UnqualifiedIdKind::IK_TemplateId;
this->TemplateId = TemplateId;
StartLocation = TemplateId->TemplateNameLoc;
@@ -37,6 +41,9 @@ void UnqualifiedId::setTemplateId(TemplateIdAnnotation *TemplateId) {
void UnqualifiedId::setConstructorTemplateId(TemplateIdAnnotation *TemplateId) {
assert(TemplateId && "NULL template-id annotation?");
+ assert(!TemplateId->isInvalid() &&
+ "should not convert invalid template-ids to unqualified-ids");
+
Kind = UnqualifiedIdKind::IK_ConstructorTemplateId;
this->TemplateId = TemplateId;
StartLocation = TemplateId->TemplateNameLoc;
@@ -130,6 +137,8 @@ void CXXScopeSpec::Adopt(NestedNameSpecifierLoc Other) {
Range = Other.getSourceRange();
Builder.Adopt(Other);
+ assert(Range == Builder.getSourceRange() &&
+ "NestedNameSpecifierLoc range computation incorrect");
}
SourceLocation CXXScopeSpec::getLastQualifierNameLoc() const {
@@ -351,6 +360,7 @@ bool Declarator::isDeclarationOfFunction() const {
case TST_half:
case TST_int:
case TST_int128:
+ case TST_extint:
case TST_struct:
case TST_interface:
case TST_union:
@@ -358,6 +368,7 @@ bool Declarator::isDeclarationOfFunction() const {
case TST_unspecified:
case TST_void:
case TST_wchar:
+ case TST_BFloat16:
#define GENERIC_IMAGE_TYPE(ImgType, Id) case TST_##ImgType##_t:
#include "clang/Basic/OpenCLImageTypes.def"
return false;
@@ -529,6 +540,7 @@ const char *DeclSpec::getSpecifierName(DeclSpec::TST T,
case DeclSpec::TST_char32: return "char32_t";
case DeclSpec::TST_int: return "int";
case DeclSpec::TST_int128: return "__int128";
+ case DeclSpec::TST_extint: return "_ExtInt";
case DeclSpec::TST_half: return "half";
case DeclSpec::TST_float: return "float";
case DeclSpec::TST_double: return "double";
@@ -555,6 +567,7 @@ const char *DeclSpec::getSpecifierName(DeclSpec::TST T,
case DeclSpec::TST_underlyingType: return "__underlying_type";
case DeclSpec::TST_unknown_anytype: return "__unknown_anytype";
case DeclSpec::TST_atomic: return "_Atomic";
+ case DeclSpec::TST_BFloat16: return "__bf16";
#define GENERIC_IMAGE_TYPE(ImgType, Id) \
case DeclSpec::TST_##ImgType##_t: \
return #ImgType "_t";
@@ -784,6 +797,15 @@ bool DeclSpec::SetTypeSpecType(TST T, SourceLocation TagKwLoc,
return false;
}
+bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID, TemplateIdAnnotation *Rep,
+ const PrintingPolicy &Policy) {
+ assert(T == TST_auto || T == TST_decltype_auto);
+ ConstrainedAuto = true;
+ TemplateIdRep = Rep;
+ return SetTypeSpecType(T, Loc, PrevSpec, DiagID, Policy);
+}
+
bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc,
const char *&PrevSpec,
unsigned &DiagID,
@@ -895,6 +917,27 @@ bool DeclSpec::SetTypeSpecError() {
return false;
}
+bool DeclSpec::SetExtIntType(SourceLocation KWLoc, Expr *BitsExpr,
+ const char *&PrevSpec, unsigned &DiagID,
+ const PrintingPolicy &Policy) {
+ assert(BitsExpr && "no expression provided!");
+ if (TypeSpecType == TST_error)
+ return false;
+
+ if (TypeSpecType != TST_unspecified) {
+ PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy);
+ DiagID = diag::err_invalid_decl_spec_combination;
+ return true;
+ }
+
+ TypeSpecType = TST_extint;
+ ExprRep = BitsExpr;
+ TSTLoc = KWLoc;
+ TSTNameLoc = KWLoc;
+ TypeSpecOwned = false;
+ return false;
+}
+
bool DeclSpec::SetTypeQual(TQ T, SourceLocation Loc, const char *&PrevSpec,
unsigned &DiagID, const LangOptions &Lang) {
// Duplicates are permitted in C99 onwards, but are not permitted in C89 or
@@ -1107,14 +1150,20 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
S.Diag(TSSLoc, diag::err_invalid_vector_bool_decl_spec)
<< getSpecifierName((TSS)TypeSpecSign);
}
-
- // Only char/int are valid with vector bool. (PIM 2.1)
+ // Only char/int are valid with vector bool prior to Power10.
+ // Power10 adds instructions that produce vector bool data
+ // for quadwords as well so allow vector bool __int128.
if (((TypeSpecType != TST_unspecified) && (TypeSpecType != TST_char) &&
- (TypeSpecType != TST_int)) || TypeAltiVecPixel) {
+ (TypeSpecType != TST_int) && (TypeSpecType != TST_int128)) ||
+ TypeAltiVecPixel) {
S.Diag(TSTLoc, diag::err_invalid_vector_bool_decl_spec)
<< (TypeAltiVecPixel ? "__pixel" :
getSpecifierName((TST)TypeSpecType, Policy));
}
+ // vector bool __int128 requires Power10.
+ if ((TypeSpecType == TST_int128) &&
+ (!S.Context.getTargetInfo().hasFeature("power10-vector")))
+ S.Diag(TSTLoc, diag::err_invalid_vector_bool_int128_decl_spec);
// Only 'short' and 'long long' are valid with vector bool. (PIM 2.1)
if ((TypeSpecWidth != TSW_unspecified) && (TypeSpecWidth != TSW_short) &&
@@ -1131,7 +1180,7 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
// Elements of vector bool are interpreted as unsigned. (PIM 2.1)
if ((TypeSpecType == TST_char) || (TypeSpecType == TST_int) ||
- (TypeSpecWidth != TSW_unspecified))
+ (TypeSpecType == TST_int128) || (TypeSpecWidth != TSW_unspecified))
TypeSpecSign = TSS_unsigned;
} else if (TypeSpecType == TST_double) {
// vector long double and vector long long double are never allowed.
@@ -1176,7 +1225,7 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
TypeSpecType = TST_int; // unsigned -> unsigned int, signed -> signed int.
else if (TypeSpecType != TST_int && TypeSpecType != TST_int128 &&
TypeSpecType != TST_char && TypeSpecType != TST_wchar &&
- !IsFixedPointType) {
+ !IsFixedPointType && TypeSpecType != TST_extint) {
S.Diag(TSSLoc, diag::err_invalid_sign_spec)
<< getSpecifierName((TST)TypeSpecType, Policy);
// signed double -> double.
@@ -1223,11 +1272,13 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
S.getLocForEndOfToken(getTypeSpecComplexLoc()),
" double");
TypeSpecType = TST_double; // _Complex -> _Complex double.
- } else if (TypeSpecType == TST_int || TypeSpecType == TST_char) {
+ } else if (TypeSpecType == TST_int || TypeSpecType == TST_char ||
+ TypeSpecType == TST_extint) {
// Note that this intentionally doesn't include _Complex _Bool.
if (!S.getLangOpts().CPlusPlus)
S.Diag(TSTLoc, diag::ext_integer_complex);
- } else if (TypeSpecType != TST_float && TypeSpecType != TST_double) {
+ } else if (TypeSpecType != TST_float && TypeSpecType != TST_double &&
+ TypeSpecType != TST_float128) {
S.Diag(TSCLoc, diag::err_invalid_complex_spec)
<< getSpecifierName((TST)TypeSpecType, Policy);
TypeSpecComplex = TSC_unspecified;
diff --git a/clang/lib/Sema/JumpDiagnostics.cpp b/clang/lib/Sema/JumpDiagnostics.cpp
index 960e62d4a2db..b34243edea35 100644
--- a/clang/lib/Sema/JumpDiagnostics.cpp
+++ b/clang/lib/Sema/JumpDiagnostics.cpp
@@ -75,6 +75,7 @@ private:
void BuildScopeInformation(Decl *D, unsigned &ParentScope);
void BuildScopeInformation(VarDecl *D, const BlockDecl *BDecl,
unsigned &ParentScope);
+ void BuildScopeInformation(CompoundLiteralExpr *CLE, unsigned &ParentScope);
void BuildScopeInformation(Stmt *S, unsigned &origParentScope);
void VerifyJumps();
@@ -276,6 +277,16 @@ void JumpScopeChecker::BuildScopeInformation(VarDecl *D,
}
}
+/// Build scope information for compound literals of C struct types that are
+/// non-trivial to destruct.
+void JumpScopeChecker::BuildScopeInformation(CompoundLiteralExpr *CLE,
+ unsigned &ParentScope) {
+ unsigned InDiag = diag::note_enters_compound_literal_scope;
+ unsigned OutDiag = diag::note_exits_compound_literal_scope;
+ Scopes.push_back(GotoScope(ParentScope, InDiag, OutDiag, CLE->getExprLoc()));
+ ParentScope = Scopes.size() - 1;
+}
+
/// BuildScopeInformation - The statements from CI to CE are known to form a
/// coherent VLA scope with a specified parent node. Walk through the
/// statements, adding any labels or gotos to LabelAndGotoScopes and recursively
@@ -529,11 +540,15 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S,
// implementable but a lot of work which we haven't felt up to doing.
ExprWithCleanups *EWC = cast<ExprWithCleanups>(S);
for (unsigned i = 0, e = EWC->getNumObjects(); i != e; ++i) {
- const BlockDecl *BDecl = EWC->getObject(i);
- for (const auto &CI : BDecl->captures()) {
- VarDecl *variable = CI.getVariable();
- BuildScopeInformation(variable, BDecl, origParentScope);
- }
+ if (auto *BDecl = EWC->getObject(i).dyn_cast<BlockDecl *>())
+ for (const auto &CI : BDecl->captures()) {
+ VarDecl *variable = CI.getVariable();
+ BuildScopeInformation(variable, BDecl, origParentScope);
+ }
+ else if (auto *CLE = EWC->getObject(i).dyn_cast<CompoundLiteralExpr *>())
+ BuildScopeInformation(CLE, origParentScope);
+ else
+ llvm_unreachable("unexpected cleanup object type");
}
break;
}
diff --git a/clang/lib/Sema/MultiplexExternalSemaSource.cpp b/clang/lib/Sema/MultiplexExternalSemaSource.cpp
index 2b0cd6b8c4fc..80333e63127e 100644
--- a/clang/lib/Sema/MultiplexExternalSemaSource.cpp
+++ b/clang/lib/Sema/MultiplexExternalSemaSource.cpp
@@ -275,6 +275,12 @@ void MultiplexExternalSemaSource::ReadExtVectorDecls(
Sources[i]->ReadExtVectorDecls(Decls);
}
+void MultiplexExternalSemaSource::ReadDeclsToCheckForDeferredDiags(
+ llvm::SmallVector<Decl *, 4> &Decls) {
+ for(size_t i = 0; i < Sources.size(); ++i)
+ Sources[i]->ReadDeclsToCheckForDeferredDiags(Decls);
+}
+
void MultiplexExternalSemaSource::ReadUnusedLocalTypedefNameCandidates(
llvm::SmallSetVector<const TypedefNameDecl *, 4> &Decls) {
for(size_t i = 0; i < Sources.size(); ++i)
diff --git a/clang/lib/Sema/OpenCLBuiltins.td b/clang/lib/Sema/OpenCLBuiltins.td
index 9d6bb411eff8..745363a6b43f 100644
--- a/clang/lib/Sema/OpenCLBuiltins.td
+++ b/clang/lib/Sema/OpenCLBuiltins.td
@@ -60,10 +60,17 @@ def FuncExtKhrLocalInt32ExtendedAtomics : FunctionExtension<"cl_khr_local_int32
def FuncExtKhrInt64BaseAtomics : FunctionExtension<"cl_khr_int64_base_atomics">;
def FuncExtKhrInt64ExtendedAtomics : FunctionExtension<"cl_khr_int64_extended_atomics">;
def FuncExtKhrMipmapImage : FunctionExtension<"cl_khr_mipmap_image">;
+def FuncExtKhrMipmapImageWrites : FunctionExtension<"cl_khr_mipmap_image_writes">;
def FuncExtKhrGlMsaaSharing : FunctionExtension<"cl_khr_gl_msaa_sharing">;
// Multiple extensions
-def FuncExtKhrMipmapAndWrite3d : FunctionExtension<"cl_khr_mipmap_image cl_khr_3d_image_writes">;
+def FuncExtKhrMipmapWritesAndWrite3d : FunctionExtension<"cl_khr_mipmap_image_writes cl_khr_3d_image_writes">;
+
+// Arm extensions.
+def ArmIntegerDotProductInt8 : FunctionExtension<"cl_arm_integer_dot_product_int8">;
+def ArmIntegerDotProductAccumulateInt8 : FunctionExtension<"cl_arm_integer_dot_product_accumulate_int8">;
+def ArmIntegerDotProductAccumulateInt16 : FunctionExtension<"cl_arm_integer_dot_product_accumulate_int16">;
+def ArmIntegerDotProductAccumulateSaturateInt8 : FunctionExtension<"cl_arm_integer_dot_product_accumulate_saturate_int8">;
// Qualified Type. These map to ASTContext::QualType.
class QualType<string _Name, bit _IsAbstract=0> {
@@ -120,7 +127,7 @@ class VectorType<Type _Ty, int _VecWidth> : Type<_Ty.Name, _Ty.QTName> {
// OpenCL pointer types (e.g. int*, float*, ...).
class PointerType<Type _Ty, AddressSpace _AS = DefaultAS> :
- Type<_Ty.Name, _Ty.QTName> {
+ Type<_Ty.Name, _Ty.QTName> {
let AddrSpace = _AS.Name;
// Inherited fields
let VecWidth = _Ty.VecWidth;
@@ -154,7 +161,7 @@ class VolatileType<Type _Ty> : Type<_Ty.Name, _Ty.QTName> {
// OpenCL image types (e.g. image2d).
class ImageType<Type _Ty, string _AccessQualifier> :
- Type<_Ty.Name, QualType<_Ty.QTName.Name#_AccessQualifier#"Ty", 0>> {
+ Type<_Ty.Name, QualType<_Ty.QTName.Name#_AccessQualifier#"Ty", 0>> {
let VecWidth = 0;
let AccessQualifier = _AccessQualifier;
// Inherited fields
@@ -165,8 +172,7 @@ class ImageType<Type _Ty, string _AccessQualifier> :
}
// List of Types.
-class TypeList<string _Name, list<Type> _Type> {
- string Name = _Name;
+class TypeList<list<Type> _Type> {
list<Type> List = _Type;
}
@@ -195,7 +201,7 @@ class TypeList<string _Name, list<Type> _Type> {
// A declaration f(GenT, SGenT) results in the combinations
// f(half, half), f(half2, half), f(int, int), f(int2, int) .
class GenericType<string _Ty, TypeList _TypeList, IntList _VectorList> :
- Type<_Ty, QualType<"null", 1>> {
+ Type<_Ty, QualType<"null", 1>> {
// Possible element types of the generic type.
TypeList TypeList = _TypeList;
// Possible vector sizes of the types in the TypeList.
@@ -259,8 +265,8 @@ def Half : Type<"half", QualType<"HalfTy">>;
def Size : Type<"size_t", QualType<"getSizeType()">>;
def PtrDiff : Type<"ptrdiff_t", QualType<"getPointerDiffType()">>;
def IntPtr : Type<"intptr_t", QualType<"getIntPtrType()">>;
-def UIntPtr : Type<"uintPtr_t", QualType<"getUIntPtrType()">>;
-def Void : Type<"void_t", QualType<"VoidTy">>;
+def UIntPtr : Type<"uintptr_t", QualType<"getUIntPtrType()">>;
+def Void : Type<"void", QualType<"VoidTy">>;
// OpenCL v1.0/1.2/2.0 s6.1.2: Built-in Vector Data Types.
// Built-in vector data types are created by TableGen's OpenCLBuiltinEmitter.
@@ -268,21 +274,36 @@ def Void : Type<"void_t", QualType<"VoidTy">>;
// OpenCL v1.0/1.2/2.0 s6.1.3: Other Built-in Data Types.
// The image definitions are "abstract". They should not be used without
// specifying an access qualifier (RO/WO/RW).
-def Image1d : Type<"Image1d", QualType<"OCLImage1d", 1>>;
-def Image2d : Type<"Image2d", QualType<"OCLImage2d", 1>>;
-def Image3d : Type<"Image3d", QualType<"OCLImage3d", 1>>;
-def Image1dArray : Type<"Image1dArray", QualType<"OCLImage1dArray", 1>>;
-def Image1dBuffer : Type<"Image1dBuffer", QualType<"OCLImage1dBuffer", 1>>;
-def Image2dArray : Type<"Image2dArray", QualType<"OCLImage2dArray", 1>>;
-def Image2dDepth : Type<"Image2dDepth", QualType<"OCLImage2dDepth", 1>>;
-def Image2dArrayDepth : Type<"Image2dArrayDepth", QualType<"OCLImage2dArrayDepth", 1>>;
-def Image2dMsaa : Type<"Image2dMsaa", QualType<"OCLImage2dMSAA", 1>>;
-def Image2dArrayMsaa : Type<"Image2dArrayMsaa", QualType<"OCLImage2dArrayMSAA", 1>>;
-def Image2dMsaaDepth : Type<"Image2dMsaaDepth", QualType<"OCLImage2dMSAADepth", 1>>;
-def Image2dArrayMsaaDepth : Type<"Image2dArrayMsaaDepth", QualType<"OCLImage2dArrayMSAADepth", 1>>;
-
-def Sampler : Type<"Sampler", QualType<"OCLSamplerTy">>;
-def Event : Type<"Event", QualType<"OCLEventTy">>;
+def Image1d : Type<"image1d_t", QualType<"OCLImage1d", 1>>;
+def Image2d : Type<"image2d_t", QualType<"OCLImage2d", 1>>;
+def Image3d : Type<"image3d_t", QualType<"OCLImage3d", 1>>;
+def Image1dArray : Type<"image1d_array_t", QualType<"OCLImage1dArray", 1>>;
+def Image1dBuffer : Type<"image1d_buffer_t", QualType<"OCLImage1dBuffer", 1>>;
+def Image2dArray : Type<"image2d_array_t", QualType<"OCLImage2dArray", 1>>;
+def Image2dDepth : Type<"image2d_depth_t", QualType<"OCLImage2dDepth", 1>>;
+def Image2dArrayDepth : Type<"image2d_array_depth_t", QualType<"OCLImage2dArrayDepth", 1>>;
+def Image2dMsaa : Type<"image2d_msaa_t", QualType<"OCLImage2dMSAA", 1>>;
+def Image2dArrayMsaa : Type<"image2d_array_msaa_t", QualType<"OCLImage2dArrayMSAA", 1>>;
+def Image2dMsaaDepth : Type<"image2d_msaa_depth_t", QualType<"OCLImage2dMSAADepth", 1>>;
+def Image2dArrayMsaaDepth : Type<"image2d_array_msaa_depth_t", QualType<"OCLImage2dArrayMSAADepth", 1>>;
+
+def Sampler : Type<"sampler_t", QualType<"OCLSamplerTy">>;
+def ClkEvent : Type<"clk_event_t", QualType<"OCLClkEventTy">>;
+def Event : Type<"event_t", QualType<"OCLEventTy">>;
+def Queue : Type<"queue_t", QualType<"OCLQueueTy">>;
+def ReserveId : Type<"reserve_id_t", QualType<"OCLReserveIDTy">>;
+
+// OpenCL v2.0 s6.13.11: Atomic integer and floating-point types.
+def AtomicInt : Type<"atomic_int", QualType<"getAtomicType(Context.IntTy)">>;
+def AtomicUInt : Type<"atomic_uint", QualType<"getAtomicType(Context.UnsignedIntTy)">>;
+def AtomicLong : Type<"atomic_long", QualType<"getAtomicType(Context.LongTy)">>;
+def AtomicULong : Type<"atomic_ulong", QualType<"getAtomicType(Context.UnsignedLongTy)">>;
+def AtomicFloat : Type<"atomic_float", QualType<"getAtomicType(Context.FloatTy)">>;
+def AtomicDouble : Type<"atomic_double", QualType<"getAtomicType(Context.DoubleTy)">>;
+def AtomicIntPtr : Type<"atomic_intptr_t", QualType<"getAtomicType(Context.getIntPtrType())">>;
+def AtomicUIntPtr : Type<"atomic_uintptr_t", QualType<"getAtomicType(Context.getUIntPtrType())">>;
+def AtomicSize : Type<"atomic_size_t", QualType<"getAtomicType(Context.getSizeType())">>;
+def AtomicPtrDiff : Type<"atomic_ptrdiff_t", QualType<"getAtomicType(Context.getPointerDiffType())">>;
//===----------------------------------------------------------------------===//
// Definitions of OpenCL gentype variants
@@ -305,20 +326,20 @@ def Vec16 : IntList<"Vec16", [16]>;
def Vec1234 : IntList<"Vec1234", [1, 2, 3, 4]>;
// Type lists.
-def TLAll : TypeList<"TLAll", [Char, UChar, Short, UShort, Int, UInt, Long, ULong, Float, Double, Half]>;
-def TLAllUnsigned : TypeList<"TLAllUnsigned", [UChar, UChar, UShort, UShort, UInt, UInt, ULong, ULong, UInt, ULong, UShort]>;
-def TLFloat : TypeList<"TLFloat", [Float, Double, Half]>;
-def TLSignedInts : TypeList<"TLSignedInts", [Char, Short, Int, Long]>;
-def TLUnsignedInts : TypeList<"TLUnsignedInts", [UChar, UShort, UInt, ULong]>;
+def TLAll : TypeList<[Char, UChar, Short, UShort, Int, UInt, Long, ULong, Float, Double, Half]>;
+def TLAllUnsigned : TypeList<[UChar, UChar, UShort, UShort, UInt, UInt, ULong, ULong, UInt, ULong, UShort]>;
+def TLFloat : TypeList<[Float, Double, Half]>;
+def TLSignedInts : TypeList<[Char, Short, Int, Long]>;
+def TLUnsignedInts : TypeList<[UChar, UShort, UInt, ULong]>;
-def TLIntLongFloats : TypeList<"TLIntLongFloats", [Int, UInt, Long, ULong, Float, Double, Half]>;
+def TLIntLongFloats : TypeList<[Int, UInt, Long, ULong, Float, Double, Half]>;
// All unsigned integer types twice, to facilitate unsigned return types for e.g.
// uchar abs(char) and
// uchar abs(uchar).
-def TLAllUIntsTwice : TypeList<"TLAllUIntsTwice", [UChar, UChar, UShort, UShort, UInt, UInt, ULong, ULong]>;
+def TLAllUIntsTwice : TypeList<[UChar, UChar, UShort, UShort, UInt, UInt, ULong, ULong]>;
-def TLAllInts : TypeList<"TLAllInts", [Char, UChar, Short, UShort, Int, UInt, Long, ULong]>;
+def TLAllInts : TypeList<[Char, UChar, Short, UShort, Int, UInt, Long, ULong]>;
// GenType definitions for multiple base types (e.g. all floating point types,
// or all integer types).
@@ -348,8 +369,7 @@ foreach Type = [Char, UChar, Short, UShort,
foreach VecSizes = [VecAndScalar, VecNoScalar] in {
def "GenType" # Type # VecSizes :
GenericType<"GenType" # Type # VecSizes,
- TypeList<"GL" # Type.Name, [Type]>,
- VecSizes>;
+ TypeList<[Type]>, VecSizes>;
}
}
@@ -357,8 +377,7 @@ foreach Type = [Char, UChar, Short, UShort,
foreach Type = [Float, Double, Half] in {
def "GenType" # Type # Vec1234 :
GenericType<"GenType" # Type # Vec1234,
- TypeList<"GL" # Type.Name, [Type]>,
- Vec1234>;
+ TypeList<[Type]>, Vec1234>;
}
@@ -374,7 +393,11 @@ foreach RType = [Float, Double, Half, Char, UChar, Short,
UShort, Int, UInt, Long, ULong] in {
foreach IType = [Float, Double, Half, Char, UChar, Short,
UShort, Int, UInt, Long, ULong] in {
- foreach sat = ["", "_sat"] in {
+ // Conversions to integer type have a sat and non-sat variant.
+ foreach sat = !cond(!eq(RType.Name, "float") : [""],
+ !eq(RType.Name, "double") : [""],
+ !eq(RType.Name, "half") : [""],
+ 1 : ["", "_sat"]) in {
foreach rnd = ["", "_rte", "_rtn", "_rtp", "_rtz"] in {
def : Builtin<"convert_" # RType.Name # sat # rnd, [RType, IType],
Attr.Const>;
@@ -667,7 +690,7 @@ foreach name = ["isfinite", "isinf", "isnan", "isnormal", "signbit"] in {
def : Builtin<name, [GenTypeShortVecNoScalar, GenTypeHalfVecNoScalar], Attr.Const>;
}
foreach name = ["any", "all"] in {
- def : Builtin<name, [Int, AIGenTypeN], Attr.Const>;
+ def : Builtin<name, [Int, SGenTypeN], Attr.Const>;
}
// --- 2 arguments ---
@@ -722,17 +745,17 @@ let MaxVersion = CL20 in {
def : Builtin<name, [VectorType<Half, VSize>, Size, PointerType<ConstType<Half>, AS>]>;
}
foreach name = ["vstore" # VSize] in {
- def : Builtin<name, [Void, VectorType<Char, VSize>, Size, PointerType<ConstType<Char>, AS>]>;
- def : Builtin<name, [Void, VectorType<UChar, VSize>, Size, PointerType<ConstType<UChar>, AS>]>;
- def : Builtin<name, [Void, VectorType<Short, VSize>, Size, PointerType<ConstType<Short>, AS>]>;
- def : Builtin<name, [Void, VectorType<UShort, VSize>, Size, PointerType<ConstType<UShort>, AS>]>;
- def : Builtin<name, [Void, VectorType<Int, VSize>, Size, PointerType<ConstType<Int>, AS>]>;
- def : Builtin<name, [Void, VectorType<UInt, VSize>, Size, PointerType<ConstType<UInt>, AS>]>;
- def : Builtin<name, [Void, VectorType<Long, VSize>, Size, PointerType<ConstType<Long>, AS>]>;
- def : Builtin<name, [Void, VectorType<ULong, VSize>, Size, PointerType<ConstType<ULong>, AS>]>;
- def : Builtin<name, [Void, VectorType<Float, VSize>, Size, PointerType<ConstType<Float>, AS>]>;
- def : Builtin<name, [Void, VectorType<Double, VSize>, Size, PointerType<ConstType<Double>, AS>]>;
- def : Builtin<name, [Void, VectorType<Half, VSize>, Size, PointerType<ConstType<Half>, AS>]>;
+ def : Builtin<name, [Void, VectorType<Char, VSize>, Size, PointerType<Char, AS>]>;
+ def : Builtin<name, [Void, VectorType<UChar, VSize>, Size, PointerType<UChar, AS>]>;
+ def : Builtin<name, [Void, VectorType<Short, VSize>, Size, PointerType<Short, AS>]>;
+ def : Builtin<name, [Void, VectorType<UShort, VSize>, Size, PointerType<UShort, AS>]>;
+ def : Builtin<name, [Void, VectorType<Int, VSize>, Size, PointerType<Int, AS>]>;
+ def : Builtin<name, [Void, VectorType<UInt, VSize>, Size, PointerType<UInt, AS>]>;
+ def : Builtin<name, [Void, VectorType<Long, VSize>, Size, PointerType<Long, AS>]>;
+ def : Builtin<name, [Void, VectorType<ULong, VSize>, Size, PointerType<ULong, AS>]>;
+ def : Builtin<name, [Void, VectorType<Float, VSize>, Size, PointerType<Float, AS>]>;
+ def : Builtin<name, [Void, VectorType<Double, VSize>, Size, PointerType<Double, AS>]>;
+ def : Builtin<name, [Void, VectorType<Half, VSize>, Size, PointerType<Half, AS>]>;
}
foreach name = ["vloada_half" # VSize] in {
def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Half>, AS>]>;
@@ -764,17 +787,17 @@ let MinVersion = CL20 in {
def : Builtin<name, [VectorType<Half, VSize>, Size, PointerType<ConstType<Half>, GenericAS>]>;
}
foreach name = ["vstore" # VSize] in {
- def : Builtin<name, [Void, VectorType<Char, VSize>, Size, PointerType<ConstType<Char>, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<UChar, VSize>, Size, PointerType<ConstType<UChar>, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<Short, VSize>, Size, PointerType<ConstType<Short>, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<UShort, VSize>, Size, PointerType<ConstType<UShort>, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<Int, VSize>, Size, PointerType<ConstType<Int>, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<UInt, VSize>, Size, PointerType<ConstType<UInt>, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<Long, VSize>, Size, PointerType<ConstType<Long>, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<ULong, VSize>, Size, PointerType<ConstType<ULong>, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<Float, VSize>, Size, PointerType<ConstType<Float>, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<Double, VSize>, Size, PointerType<ConstType<Double>, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<Half, VSize>, Size, PointerType<ConstType<Half>, GenericAS>]>;
+ def : Builtin<name, [Void, VectorType<Char, VSize>, Size, PointerType<Char, GenericAS>]>;
+ def : Builtin<name, [Void, VectorType<UChar, VSize>, Size, PointerType<UChar, GenericAS>]>;
+ def : Builtin<name, [Void, VectorType<Short, VSize>, Size, PointerType<Short, GenericAS>]>;
+ def : Builtin<name, [Void, VectorType<UShort, VSize>, Size, PointerType<UShort, GenericAS>]>;
+ def : Builtin<name, [Void, VectorType<Int, VSize>, Size, PointerType<Int, GenericAS>]>;
+ def : Builtin<name, [Void, VectorType<UInt, VSize>, Size, PointerType<UInt, GenericAS>]>;
+ def : Builtin<name, [Void, VectorType<Long, VSize>, Size, PointerType<Long, GenericAS>]>;
+ def : Builtin<name, [Void, VectorType<ULong, VSize>, Size, PointerType<ULong, GenericAS>]>;
+ def : Builtin<name, [Void, VectorType<Float, VSize>, Size, PointerType<Float, GenericAS>]>;
+ def : Builtin<name, [Void, VectorType<Double, VSize>, Size, PointerType<Double, GenericAS>]>;
+ def : Builtin<name, [Void, VectorType<Half, VSize>, Size, PointerType<Half, GenericAS>]>;
}
foreach name = ["vloada_half" # VSize] in {
def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Half>, GenericAS>]>;
@@ -805,24 +828,21 @@ foreach VSize = [2, 3, 4, 8, 16] in {
foreach name = ["vloada_half" # VSize] in {
def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Half>, ConstantAS>]>;
}
- foreach rnd = ["", "_rte", "_rtz", "_rtp", "_rtn"] in {
- foreach name = ["vstorea_half" # VSize # rnd] in {
- def : Builtin<name, [Void, VectorType<Float, VSize>, Size, PointerType<Half, ConstantAS>]>;
- def : Builtin<name, [Void, VectorType<Double, VSize>, Size, PointerType<Half, ConstantAS>]>;
- }
- }
}
let MaxVersion = CL20 in {
foreach AS = [GlobalAS, LocalAS, PrivateAS] in {
def : Builtin<"vload_half", [Float, Size, PointerType<ConstType<Half>, AS>]>;
+ def : Builtin<"vloada_half", [Float, Size, PointerType<ConstType<Half>, AS>]>;
foreach VSize = [2, 3, 4, 8, 16] in {
foreach name = ["vload_half" # VSize] in {
def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Half>, AS>]>;
}
}
foreach rnd = ["", "_rte", "_rtz", "_rtp", "_rtn"] in {
- def : Builtin<"vstore_half" # rnd, [Void, Float, Size, PointerType<Half, AS>]>;
- def : Builtin<"vstore_half" # rnd, [Void, Double, Size, PointerType<Half, AS>]>;
+ foreach name = ["vstore_half" # rnd, "vstorea_half" # rnd] in {
+ def : Builtin<name, [Void, Float, Size, PointerType<Half, AS>]>;
+ def : Builtin<name, [Void, Double, Size, PointerType<Half, AS>]>;
+ }
foreach VSize = [2, 3, 4, 8, 16] in {
foreach name = ["vstore_half" # VSize # rnd] in {
def : Builtin<name, [Void, VectorType<Float, VSize>, Size, PointerType<Half, AS>]>;
@@ -835,14 +855,17 @@ let MaxVersion = CL20 in {
let MinVersion = CL20 in {
foreach AS = [GenericAS] in {
def : Builtin<"vload_half", [Float, Size, PointerType<ConstType<Half>, AS>]>;
+ def : Builtin<"vloada_half", [Float, Size, PointerType<ConstType<Half>, AS>]>;
foreach VSize = [2, 3, 4, 8, 16] in {
foreach name = ["vload_half" # VSize] in {
def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Half>, AS>]>;
}
}
foreach rnd = ["", "_rte", "_rtz", "_rtp", "_rtn"] in {
- def : Builtin<"vstore_half" # rnd, [Void, Float, Size, PointerType<Half, AS>]>;
- def : Builtin<"vstore_half" # rnd, [Void, Double, Size, PointerType<Half, AS>]>;
+ foreach name = ["vstore_half" # rnd, "vstorea_half" # rnd] in {
+ def : Builtin<name, [Void, Float, Size, PointerType<Half, AS>]>;
+ def : Builtin<name, [Void, Double, Size, PointerType<Half, AS>]>;
+ }
foreach VSize = [2, 3, 4, 8, 16] in {
foreach name = ["vstore_half" # VSize # rnd] in {
def : Builtin<name, [Void, VectorType<Float, VSize>, Size, PointerType<Half, AS>]>;
@@ -855,6 +878,7 @@ let MinVersion = CL20 in {
foreach AS = [ConstantAS] in {
def : Builtin<"vload_half", [Float, Size, PointerType<ConstType<Half>, AS>]>;
+ def : Builtin<"vloada_half", [Float, Size, PointerType<ConstType<Half>, AS>]>;
foreach VSize = [2, 3, 4, 8, 16] in {
foreach name = ["vload_half" # VSize] in {
def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Half>, AS>]>;
@@ -976,6 +1000,45 @@ foreach AS = [GlobalAS, LocalAS] in {
}
}
}
+// OpenCL v2.0 s6.13.11 - Atomic Functions.
+let MinVersion = CL20 in {
+ foreach TypePair = [[AtomicInt, Int], [AtomicUInt, UInt],
+ [AtomicLong, Long], [AtomicULong, ULong],
+ [AtomicFloat, Float], [AtomicDouble, Double]] in {
+ def : Builtin<"atomic_init",
+ [Void, PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[1]]>;
+ def : Builtin<"atomic_store",
+ [Void, PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[1]]>;
+ def : Builtin<"atomic_load",
+ [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>]>;
+ def : Builtin<"atomic_exchange",
+ [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[1]]>;
+ foreach Variant = ["weak", "strong"] in {
+ def : Builtin<"atomic_compare_exchange_" # Variant,
+ [Bool, PointerType<VolatileType<TypePair[0]>, GenericAS>,
+ PointerType<TypePair[1], GenericAS>, TypePair[1]]>;
+ }
+ }
+
+ foreach TypePair = [[AtomicInt, Int, Int], [AtomicUInt, UInt, UInt],
+ [AtomicLong, Long, Long], [AtomicULong, ULong, ULong],
+ [AtomicIntPtr, IntPtr, PtrDiff],
+ [AtomicUIntPtr, UIntPtr, PtrDiff]] in {
+ foreach ModOp = ["add", "sub"] in {
+ def : Builtin<"atomic_fetch_" # ModOp,
+ [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[2]]>;
+ }
+ }
+ foreach TypePair = [[AtomicInt, Int, Int], [AtomicUInt, UInt, UInt],
+ [AtomicLong, Long, Long], [AtomicULong, ULong, ULong],
+ [AtomicIntPtr, IntPtr, IntPtr],
+ [AtomicUIntPtr, UIntPtr, UIntPtr]] in {
+ foreach ModOp = ["or", "xor", "and", "min", "max"] in {
+ def : Builtin<"atomic_fetch_" # ModOp,
+ [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[2]]>;
+ }
+ }
+}
//--------------------------------------------------------------------
// OpenCL v1.1 s6.11.12, v1.2 s6.12.12, v2.0 s6.13.12 - Miscellaneous Vector Functions
@@ -1172,14 +1235,43 @@ let MinVersion = CL20 in {
}
-// OpenCL v2.0 s9.17.3: Additions to section 6.13.1: Work-Item Functions
-let MinVersion = CL20 in {
- let Extension = FuncExtKhrSubgroups in {
- def get_sub_group_size : Builtin<"get_sub_group_size", [UInt]>;
- def get_max_sub_group_size : Builtin<"get_max_sub_group_size", [UInt]>;
- def get_num_sub_groups : Builtin<"get_num_sub_groups", [UInt]>;
- }
-}
+//--------------------------------------------------------------------
+// OpenCL2.0 : 6.13.16 : Pipe Functions
+// --- Table 27 ---
+// Defined in Builtins.def
+
+// --- Table 28 ---
+// Builtins taking pipe arguments are defined in Builtins.def
+def : Builtin<"is_valid_reserve_id", [Bool, ReserveId]>;
+
+// --- Table 29 ---
+// Defined in Builtins.def
+
+
+//--------------------------------------------------------------------
+// OpenCL2.0 : 6.13.17 : Enqueuing Kernels
+// --- Table 30 ---
+// Defined in Builtins.def
+
+// --- Table 32 ---
+// Defined in Builtins.def
+
+// --- Table 33 ---
+def : Builtin<"enqueue_marker",
+ [Int, Queue, UInt, PointerType<ConstType<ClkEvent>, GenericAS>, PointerType<ClkEvent, GenericAS>]>;
+
+// --- Table 34 ---
+def : Builtin<"retain_event", [Void, ClkEvent]>;
+def : Builtin<"release_event", [Void, ClkEvent]>;
+def : Builtin<"create_user_event", [ClkEvent]>;
+def : Builtin<"is_valid_event", [Bool, ClkEvent]>;
+def : Builtin<"set_user_event_status", [Void, ClkEvent, Int]>;
+// TODO: capture_event_profiling_info
+
+// --- Table 35 ---
+def : Builtin<"get_default_queue", [Queue]>;
+// TODO: ndrange functions
+
//--------------------------------------------------------------------
// End of the builtin functions defined in the OpenCL C specification.
@@ -1274,6 +1366,16 @@ let Extension = FuncExtKhrMipmapImage in {
}
}
}
+ // Added to section 6.13.14.5
+ foreach aQual = ["RO", "WO", "RW"] in {
+ foreach imgTy = [Image1d, Image2d, Image3d, Image1dArray, Image2dArray, Image2dDepth, Image2dArrayDepth] in {
+ def : Builtin<"get_image_num_mip_levels", [Int, ImageType<imgTy, aQual>]>;
+ }
+ }
+}
+
+// Write functions are enabled using a separate extension.
+let Extension = FuncExtKhrMipmapImageWrites in {
// Added to section 6.13.14.4.
foreach aQual = ["WO"] in {
foreach imgTy = [Image2d] in {
@@ -1298,7 +1400,7 @@ let Extension = FuncExtKhrMipmapImage in {
def : Builtin<"write_imageui", [Void, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int, VectorType<UInt, 4>]>;
}
def : Builtin<"write_imagef", [Void, ImageType<Image2dArrayDepth, aQual>, VectorType<Int, 4>, Int, Float]>;
- let Extension = FuncExtKhrMipmapAndWrite3d in {
+ let Extension = FuncExtKhrMipmapWritesAndWrite3d in {
foreach imgTy = [Image3d] in {
def : Builtin<"write_imagef", [Void, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int, VectorType<Float, 4>]>;
def : Builtin<"write_imagei", [Void, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int, VectorType<Int, 4>]>;
@@ -1306,15 +1408,8 @@ let Extension = FuncExtKhrMipmapImage in {
}
}
}
- // Added to section 6.13.14.5
- foreach aQual = ["RO", "WO", "RW"] in {
- foreach imgTy = [Image1d, Image2d, Image3d, Image1dArray, Image2dArray, Image2dDepth, Image2dArrayDepth] in {
- def : Builtin<"get_image_num_mip_levels", [Int, ImageType<imgTy, aQual>]>;
- }
- }
}
-
//--------------------------------------------------------------------
// OpenCL Extension v2.0 s18.3 - Creating OpenCL Memory Objects from OpenGL MSAA Textures
let Extension = FuncExtKhrGlMsaaSharing in {
@@ -1346,6 +1441,70 @@ let Extension = FuncExtKhrGlMsaaSharing in {
}
def : Builtin<"get_image_dim", [VectorType<Int, 2>, ImageType<imgTy, aQual>], Attr.Const>;
}
- def : Builtin<"get_image_array_size", [Size, ImageType<Image2dArrayMsaaDepth, aQual>], Attr.Const>;
+ foreach imgTy = [Image2dArrayMsaa, Image2dArrayMsaaDepth] in {
+ def : Builtin<"get_image_array_size", [Size, ImageType<imgTy, aQual>], Attr.Const>;
+ }
+ }
+}
+
+//--------------------------------------------------------------------
+// OpenCL Extension v2.0 s28 - Subgroups
+// --- Table 28.2.1 ---
+let Extension = FuncExtKhrSubgroups in {
+ foreach name = ["get_sub_group_size", "get_max_sub_group_size",
+ "get_num_sub_groups", "get_sub_group_id",
+ "get_sub_group_local_id"] in {
+ def : Builtin<name, [UInt]>;
+ }
+ let MinVersion = CL20 in {
+ foreach name = ["get_enqueued_num_sub_groups"] in {
+ def : Builtin<name, [UInt]>;
+ }
+ }
+}
+
+// --- Table 28.2.2 ---
+// TODO: sub_group_barrier
+
+// --- Table 28.2.4 ---
+let Extension = FuncExtKhrSubgroups in {
+ foreach name = ["sub_group_all", "sub_group_any"] in {
+ def : Builtin<name, [Int, Int], Attr.Convergent>;
+ }
+ foreach name = ["sub_group_broadcast"] in {
+ def : Builtin<name, [IntLongFloatGenType1, IntLongFloatGenType1, UInt], Attr.Convergent>;
+ }
+ foreach name = ["sub_group_reduce_", "sub_group_scan_exclusive_",
+ "sub_group_scan_inclusive_"] in {
+ foreach op = ["add", "min", "max"] in {
+ def : Builtin<name # op, [IntLongFloatGenType1, IntLongFloatGenType1], Attr.Convergent>;
+ }
+ }
+}
+
+//--------------------------------------------------------------------
+// Arm extensions.
+let Extension = ArmIntegerDotProductInt8 in {
+ foreach name = ["arm_dot"] in {
+ def : Builtin<name, [UInt, VectorType<UChar, 4>, VectorType<UChar, 4>]>;
+ def : Builtin<name, [Int, VectorType<Char, 4>, VectorType<Char, 4>]>;
+ }
+}
+let Extension = ArmIntegerDotProductAccumulateInt8 in {
+ foreach name = ["arm_dot_acc"] in {
+ def : Builtin<name, [UInt, VectorType<UChar, 4>, VectorType<UChar, 4>, UInt]>;
+ def : Builtin<name, [Int, VectorType<Char, 4>, VectorType<Char, 4>, Int]>;
+ }
+}
+let Extension = ArmIntegerDotProductAccumulateInt16 in {
+ foreach name = ["arm_dot_acc"] in {
+ def : Builtin<name, [UInt, VectorType<UShort, 2>, VectorType<UShort, 2>, UInt]>;
+ def : Builtin<name, [Int, VectorType<Short, 2>, VectorType<Short, 2>, Int]>;
+ }
+}
+let Extension = ArmIntegerDotProductAccumulateSaturateInt8 in {
+ foreach name = ["arm_dot_acc_sat"] in {
+ def : Builtin<name, [UInt, VectorType<UChar, 4>, VectorType<UChar, 4>, UInt]>;
+ def : Builtin<name, [Int, VectorType<Char, 4>, VectorType<Char, 4>, Int]>;
}
}
diff --git a/clang/lib/Sema/ParsedAttr.cpp b/clang/lib/Sema/ParsedAttr.cpp
index 5d0a734f237a..3ef8498baffd 100644
--- a/clang/lib/Sema/ParsedAttr.cpp
+++ b/clang/lib/Sema/ParsedAttr.cpp
@@ -19,12 +19,15 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/ManagedStatic.h"
#include <cassert>
#include <cstddef>
#include <utility>
using namespace clang;
+LLVM_INSTANTIATE_REGISTRY(ParsedAttrInfoRegistry)
+
IdentifierLoc *IdentifierLoc::create(ASTContext &Ctx, SourceLocation Loc,
IdentifierInfo *Ident) {
IdentifierLoc *Result = new (Ctx) IdentifierLoc;
@@ -100,47 +103,60 @@ void AttributePool::takePool(AttributePool &pool) {
pool.Attrs.clear();
}
-struct ParsedAttrInfo {
- unsigned NumArgs : 4;
- unsigned OptArgs : 4;
- unsigned HasCustomParsing : 1;
- unsigned IsTargetSpecific : 1;
- unsigned IsType : 1;
- unsigned IsStmt : 1;
- unsigned IsKnownToGCC : 1;
- unsigned IsSupportedByPragmaAttribute : 1;
-
- bool (*DiagAppertainsToDecl)(Sema &S, const ParsedAttr &Attr, const Decl *);
- bool (*DiagLangOpts)(Sema &S, const ParsedAttr &Attr);
- bool (*ExistsInTarget)(const TargetInfo &Target);
- unsigned (*SpellingIndexToSemanticSpelling)(const ParsedAttr &Attr);
- void (*GetPragmaAttributeMatchRules)(
- llvm::SmallVectorImpl<std::pair<attr::SubjectMatchRule, bool>> &Rules,
- const LangOptions &LangOpts);
-};
-
namespace {
#include "clang/Sema/AttrParsedAttrImpl.inc"
} // namespace
-static const ParsedAttrInfo &getInfo(const ParsedAttr &A) {
- return AttrInfoMap[A.getKind()];
+const ParsedAttrInfo &ParsedAttrInfo::get(const AttributeCommonInfo &A) {
+ // If we have a ParsedAttrInfo for this ParsedAttr then return that.
+ if ((size_t)A.getParsedKind() < llvm::array_lengthof(AttrInfoMap))
+ return *AttrInfoMap[A.getParsedKind()];
+
+ // If this is an ignored attribute then return an appropriate ParsedAttrInfo.
+ static const ParsedAttrInfo IgnoredParsedAttrInfo(
+ AttributeCommonInfo::IgnoredAttribute);
+ if (A.getParsedKind() == AttributeCommonInfo::IgnoredAttribute)
+ return IgnoredParsedAttrInfo;
+
+ // Otherwise this may be an attribute defined by a plugin. First instantiate
+ // all plugin attributes if we haven't already done so.
+ static llvm::ManagedStatic<std::list<std::unique_ptr<ParsedAttrInfo>>>
+ PluginAttrInstances;
+ if (PluginAttrInstances->empty())
+ for (auto It : ParsedAttrInfoRegistry::entries())
+ PluginAttrInstances->emplace_back(It.instantiate());
+
+ // Search for a ParsedAttrInfo whose name and syntax match.
+ std::string FullName = A.getNormalizedFullName();
+ AttributeCommonInfo::Syntax SyntaxUsed = A.getSyntax();
+ if (SyntaxUsed == AttributeCommonInfo::AS_ContextSensitiveKeyword)
+ SyntaxUsed = AttributeCommonInfo::AS_Keyword;
+
+ for (auto &Ptr : *PluginAttrInstances)
+ for (auto &S : Ptr->Spellings)
+ if (S.Syntax == SyntaxUsed && S.NormalizedFullName == FullName)
+ return *Ptr;
+
+ // If we failed to find a match then return a default ParsedAttrInfo.
+ static const ParsedAttrInfo DefaultParsedAttrInfo(
+ AttributeCommonInfo::UnknownAttribute);
+ return DefaultParsedAttrInfo;
}
-unsigned ParsedAttr::getMinArgs() const { return getInfo(*this).NumArgs; }
+unsigned ParsedAttr::getMinArgs() const { return getInfo().NumArgs; }
unsigned ParsedAttr::getMaxArgs() const {
- return getMinArgs() + getInfo(*this).OptArgs;
+ return getMinArgs() + getInfo().OptArgs;
}
bool ParsedAttr::hasCustomParsing() const {
- return getInfo(*this).HasCustomParsing;
+ return getInfo().HasCustomParsing;
}
bool ParsedAttr::diagnoseAppertainsTo(Sema &S, const Decl *D) const {
- return getInfo(*this).DiagAppertainsToDecl(S, *this, D);
+ return getInfo().diagAppertainsToDecl(S, *this, D);
}
bool ParsedAttr::appliesToDecl(const Decl *D,
@@ -152,33 +168,33 @@ void ParsedAttr::getMatchRules(
const LangOptions &LangOpts,
SmallVectorImpl<std::pair<attr::SubjectMatchRule, bool>> &MatchRules)
const {
- return getInfo(*this).GetPragmaAttributeMatchRules(MatchRules, LangOpts);
+ return getInfo().getPragmaAttributeMatchRules(MatchRules, LangOpts);
}
bool ParsedAttr::diagnoseLangOpts(Sema &S) const {
- return getInfo(*this).DiagLangOpts(S, *this);
+ return getInfo().diagLangOpts(S, *this);
}
bool ParsedAttr::isTargetSpecificAttr() const {
- return getInfo(*this).IsTargetSpecific;
+ return getInfo().IsTargetSpecific;
}
-bool ParsedAttr::isTypeAttr() const { return getInfo(*this).IsType; }
+bool ParsedAttr::isTypeAttr() const { return getInfo().IsType; }
-bool ParsedAttr::isStmtAttr() const { return getInfo(*this).IsStmt; }
+bool ParsedAttr::isStmtAttr() const { return getInfo().IsStmt; }
bool ParsedAttr::existsInTarget(const TargetInfo &Target) const {
- return getInfo(*this).ExistsInTarget(Target);
+ return getInfo().existsInTarget(Target);
}
-bool ParsedAttr::isKnownToGCC() const { return getInfo(*this).IsKnownToGCC; }
+bool ParsedAttr::isKnownToGCC() const { return getInfo().IsKnownToGCC; }
bool ParsedAttr::isSupportedByPragmaAttribute() const {
- return getInfo(*this).IsSupportedByPragmaAttribute;
+ return getInfo().IsSupportedByPragmaAttribute;
}
unsigned ParsedAttr::getSemanticSpelling() const {
- return getInfo(*this).SpellingIndexToSemanticSpelling(*this);
+ return getInfo().spellingIndexToSemanticSpelling(*this);
}
bool ParsedAttr::hasVariadicArg() const {
@@ -186,5 +202,5 @@ bool ParsedAttr::hasVariadicArg() const {
// claim that as being variadic. If we someday get an attribute that
// legitimately bumps up against that maximum, we can use another bit to track
// whether it's truly variadic or not.
- return getInfo(*this).OptArgs == 15;
+ return getInfo().OptArgs == 15;
}
diff --git a/clang/lib/Sema/Sema.cpp b/clang/lib/Sema/Sema.cpp
index 2cd158a8b43c..2f2b52106f3d 100644
--- a/clang/lib/Sema/Sema.cpp
+++ b/clang/lib/Sema/Sema.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "UsedDeclVisitor.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/DeclCXX.h"
@@ -22,6 +23,7 @@
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Stack.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/HeaderSearch.h"
@@ -52,6 +54,21 @@ SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) {
ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); }
+IdentifierInfo *
+Sema::InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
+ unsigned int Index) {
+ std::string InventedName;
+ llvm::raw_string_ostream OS(InventedName);
+
+ if (!ParamName)
+ OS << "auto:" << Index + 1;
+ else
+ OS << ParamName->getName() << ":auto";
+
+ OS.flush();
+ return &Context.Idents.get(OS.str());
+}
+
PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context,
const Preprocessor &PP) {
PrintingPolicy Policy = Context.getPrintingPolicy();
@@ -127,10 +144,13 @@ public:
} // end namespace sema
} // end namespace clang
+const unsigned Sema::MaxAlignmentExponent;
+const unsigned Sema::MaximumAlignment;
+
Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter)
: ExternalSource(nullptr), isMultiplexExternalSource(false),
- FPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp),
+ CurFPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp),
Context(ctxt), Consumer(consumer), Diags(PP.getDiagnostics()),
SourceMgr(PP.getSourceManager()), CollectStats(false),
CodeCompleter(CodeCompleter), CurContext(nullptr),
@@ -139,8 +159,8 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
LangOpts.getMSPointerToMemberRepresentationMethod()),
VtorDispStack(LangOpts.getVtorDispMode()), PackStack(0),
DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr),
- CodeSegStack(nullptr), CurInitSeg(nullptr), VisContext(nullptr),
- PragmaAttributeCurrentTargetDecl(nullptr),
+ CodeSegStack(nullptr), FpPragmaStack(0xffffffff), CurInitSeg(nullptr),
+ VisContext(nullptr), PragmaAttributeCurrentTargetDecl(nullptr),
IsBuildingRecoveryCallExpr(false), Cleanup{}, LateTemplateParser(nullptr),
LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr), IdResolver(pp),
StdExperimentalNamespaceCache(nullptr), StdInitializerList(nullptr),
@@ -153,10 +173,10 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TUKind(TUKind), NumSFINAEErrors(0),
FullyCheckedComparisonCategories(
static_cast<unsigned>(ComparisonCategoryType::Last) + 1),
- AccessCheckingSFINAE(false), InNonInstantiationSFINAEContext(false),
- NonInstantiationEntries(0), ArgumentPackSubstitutionIndex(-1),
- CurrentInstantiationScope(nullptr), DisableTypoCorrection(false),
- TyposCorrected(0), AnalysisWarnings(*this),
+ SatisfactionCache(Context), AccessCheckingSFINAE(false),
+ InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0),
+ ArgumentPackSubstitutionIndex(-1), CurrentInstantiationScope(nullptr),
+ DisableTypoCorrection(false), TyposCorrected(0), AnalysisWarnings(*this),
ThreadSafetyDeclCache(nullptr), VarDataSharingAttributesStack(nullptr),
CurScope(nullptr), Ident_super(nullptr), Ident___float128(nullptr) {
TUScope = nullptr;
@@ -379,6 +399,14 @@ Sema::~Sema() {
if (isMultiplexExternalSource)
delete ExternalSource;
+ // Delete cached satisfactions.
+ std::vector<ConstraintSatisfaction *> Satisfactions;
+ Satisfactions.reserve(Satisfactions.size());
+ for (auto &Node : SatisfactionCache)
+ Satisfactions.push_back(&Node);
+ for (auto *Node : Satisfactions)
+ delete Node;
+
threadSafety::threadSafetyCleanup(ThreadSafetyDeclCache);
// Destroys data sharing attributes stack for OpenMP
@@ -928,9 +956,7 @@ void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) {
PerformPendingInstantiations();
}
- // Finalize analysis of OpenMP-specific constructs.
- if (LangOpts.OpenMP)
- finalizeOpenMPDelayedAnalysis();
+ emitDeferredDiags();
assert(LateParsedInstantiations.empty() &&
"end of TU template instantiation should not create more "
@@ -983,6 +1009,11 @@ void Sema::ActOnEndOfTranslationUnit() {
LateParsedInstantiations.begin(),
LateParsedInstantiations.end());
LateParsedInstantiations.clear();
+
+ if (LangOpts.PCHInstantiateTemplates) {
+ llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
+ PerformPendingInstantiations();
+ }
}
DiagnoseUnterminatedPragmaPack();
@@ -1261,7 +1292,8 @@ DeclContext *Sema::getFunctionLevelDeclContext() {
DeclContext *DC = CurContext;
while (true) {
- if (isa<BlockDecl>(DC) || isa<EnumDecl>(DC) || isa<CapturedDecl>(DC)) {
+ if (isa<BlockDecl>(DC) || isa<EnumDecl>(DC) || isa<CapturedDecl>(DC) ||
+ isa<RequiresExprBodyDecl>(DC)) {
DC = DC->getParent();
} else if (isa<CXXMethodDecl>(DC) &&
cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call &&
@@ -1413,38 +1445,184 @@ Sema::Diag(SourceLocation Loc, const PartialDiagnostic& PD) {
static void emitCallStackNotes(Sema &S, FunctionDecl *FD) {
auto FnIt = S.DeviceKnownEmittedFns.find(FD);
while (FnIt != S.DeviceKnownEmittedFns.end()) {
+ // Respect error limit.
+ if (S.Diags.hasFatalErrorOccurred())
+ return;
DiagnosticBuilder Builder(
S.Diags.Report(FnIt->second.Loc, diag::note_called_by));
Builder << FnIt->second.FD;
- Builder.setForceEmit();
-
FnIt = S.DeviceKnownEmittedFns.find(FnIt->second.FD);
}
}
-// Emit any deferred diagnostics for FD and erase them from the map in which
-// they're stored.
-static void emitDeferredDiags(Sema &S, FunctionDecl *FD, bool ShowCallStack) {
- auto It = S.DeviceDeferredDiags.find(FD);
- if (It == S.DeviceDeferredDiags.end())
- return;
- bool HasWarningOrError = false;
- for (PartialDiagnosticAt &PDAt : It->second) {
- const SourceLocation &Loc = PDAt.first;
- const PartialDiagnostic &PD = PDAt.second;
- HasWarningOrError |= S.getDiagnostics().getDiagnosticLevel(
- PD.getDiagID(), Loc) >= DiagnosticsEngine::Warning;
- DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID()));
- Builder.setForceEmit();
- PD.Emit(Builder);
+namespace {
+
+/// Helper class that emits deferred diagnostic messages if an entity directly
+/// or indirectly using the function that causes the deferred diagnostic
+/// messages is known to be emitted.
+///
+/// During parsing of AST, certain diagnostic messages are recorded as deferred
+/// diagnostics since it is unknown whether the functions containing such
+/// diagnostics will be emitted. A list of potentially emitted functions and
+/// variables that may potentially trigger emission of functions are also
+/// recorded. DeferredDiagnosticsEmitter recursively visits used functions
+/// by each function to emit deferred diagnostics.
+///
+/// During the visit, certain OpenMP directives or initializer of variables
+/// with certain OpenMP attributes will cause subsequent visiting of any
+/// functions enter a state which is called OpenMP device context in this
+/// implementation. The state is exited when the directive or initializer is
+/// exited. This state can change the emission states of subsequent uses
+/// of functions.
+///
+/// Conceptually the functions or variables to be visited form a use graph
+/// where the parent node uses the child node. At any point of the visit,
+/// the tree nodes traversed from the tree root to the current node form a use
+/// stack. The emission state of the current node depends on two factors:
+/// 1. the emission state of the root node
+/// 2. whether the current node is in OpenMP device context
+/// If the function is decided to be emitted, its contained deferred diagnostics
+/// are emitted, together with the information about the use stack.
+///
+class DeferredDiagnosticsEmitter
+ : public UsedDeclVisitor<DeferredDiagnosticsEmitter> {
+public:
+ typedef UsedDeclVisitor<DeferredDiagnosticsEmitter> Inherited;
+
+ // Whether the function is already in the current use-path.
+ llvm::SmallSet<CanonicalDeclPtr<Decl>, 4> InUsePath;
+
+ // The current use-path.
+ llvm::SmallVector<CanonicalDeclPtr<FunctionDecl>, 4> UsePath;
+
+ // Whether the visiting of the function has been done. Done[0] is for the
+ // case not in OpenMP device context. Done[1] is for the case in OpenMP
+ // device context. We need two sets because diagnostics emission may be
+ // different depending on whether it is in OpenMP device context.
+ llvm::SmallSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2];
+
+ // Emission state of the root node of the current use graph.
+ bool ShouldEmitRootNode;
+
+ // Current OpenMP device context level. It is initialized to 0 and each
+ // entering of device context increases it by 1 and each exit decreases
+ // it by 1. Non-zero value indicates it is currently in device context.
+ unsigned InOMPDeviceContext;
+
+ DeferredDiagnosticsEmitter(Sema &S)
+ : Inherited(S), ShouldEmitRootNode(false), InOMPDeviceContext(0) {}
+
+ void VisitOMPTargetDirective(OMPTargetDirective *Node) {
+ ++InOMPDeviceContext;
+ Inherited::VisitOMPTargetDirective(Node);
+ --InOMPDeviceContext;
+ }
+
+ void visitUsedDecl(SourceLocation Loc, Decl *D) {
+ if (isa<VarDecl>(D))
+ return;
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ checkFunc(Loc, FD);
+ else
+ Inherited::visitUsedDecl(Loc, D);
+ }
+
+ void checkVar(VarDecl *VD) {
+ assert(VD->isFileVarDecl() &&
+ "Should only check file-scope variables");
+ if (auto *Init = VD->getInit()) {
+ auto DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD);
+ bool IsDev = DevTy && (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost ||
+ *DevTy == OMPDeclareTargetDeclAttr::DT_Any);
+ if (IsDev)
+ ++InOMPDeviceContext;
+ this->Visit(Init);
+ if (IsDev)
+ --InOMPDeviceContext;
+ }
+ }
+
+ void checkFunc(SourceLocation Loc, FunctionDecl *FD) {
+ auto &Done = DoneMap[InOMPDeviceContext > 0 ? 1 : 0];
+ FunctionDecl *Caller = UsePath.empty() ? nullptr : UsePath.back();
+ if ((!ShouldEmitRootNode && !S.getLangOpts().OpenMP && !Caller) ||
+ S.shouldIgnoreInHostDeviceCheck(FD) || InUsePath.count(FD))
+ return;
+ // Finalize analysis of OpenMP-specific constructs.
+ if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1)
+ S.finalizeOpenMPDelayedAnalysis(Caller, FD, Loc);
+ if (Caller)
+ S.DeviceKnownEmittedFns[FD] = {Caller, Loc};
+ // Always emit deferred diagnostics for the direct users. This does not
+ // lead to explosion of diagnostics since each user is visited at most
+ // twice.
+ if (ShouldEmitRootNode || InOMPDeviceContext)
+ emitDeferredDiags(FD, Caller);
+ // Do not revisit a function if the function body has been completely
+ // visited before.
+ if (!Done.insert(FD).second)
+ return;
+ InUsePath.insert(FD);
+ UsePath.push_back(FD);
+ if (auto *S = FD->getBody()) {
+ this->Visit(S);
+ }
+ UsePath.pop_back();
+ InUsePath.erase(FD);
+ }
+
+ void checkRecordedDecl(Decl *D) {
+ if (auto *FD = dyn_cast<FunctionDecl>(D)) {
+ ShouldEmitRootNode = S.getEmissionStatus(FD, /*Final=*/true) ==
+ Sema::FunctionEmissionStatus::Emitted;
+ checkFunc(SourceLocation(), FD);
+ } else
+ checkVar(cast<VarDecl>(D));
+ }
+
+ // Emit any deferred diagnostics for FD
+ void emitDeferredDiags(FunctionDecl *FD, bool ShowCallStack) {
+ auto It = S.DeviceDeferredDiags.find(FD);
+ if (It == S.DeviceDeferredDiags.end())
+ return;
+ bool HasWarningOrError = false;
+ bool FirstDiag = true;
+ for (PartialDiagnosticAt &PDAt : It->second) {
+ // Respect error limit.
+ if (S.Diags.hasFatalErrorOccurred())
+ return;
+ const SourceLocation &Loc = PDAt.first;
+ const PartialDiagnostic &PD = PDAt.second;
+ HasWarningOrError |=
+ S.getDiagnostics().getDiagnosticLevel(PD.getDiagID(), Loc) >=
+ DiagnosticsEngine::Warning;
+ {
+ DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID()));
+ PD.Emit(Builder);
+ }
+ // Emit the note on the first diagnostic in case too many diagnostics
+ // cause the note not emitted.
+ if (FirstDiag && HasWarningOrError && ShowCallStack) {
+ emitCallStackNotes(S, FD);
+ FirstDiag = false;
+ }
+ }
}
- S.DeviceDeferredDiags.erase(It);
+};
+} // namespace
+
+void Sema::emitDeferredDiags() {
+ if (ExternalSource)
+ ExternalSource->ReadDeclsToCheckForDeferredDiags(
+ DeclsToCheckForDeferredDiags);
+
+ if ((DeviceDeferredDiags.empty() && !LangOpts.OpenMP) ||
+ DeclsToCheckForDeferredDiags.empty())
+ return;
- // FIXME: Should this be called after every warning/error emitted in the loop
- // above, instead of just once per function? That would be consistent with
- // how we handle immediate errors, but it also seems like a bit much.
- if (HasWarningOrError && ShowCallStack)
- emitCallStackNotes(S, FD);
+ DeferredDiagnosticsEmitter DDE(*this);
+ for (auto D : DeclsToCheckForDeferredDiags)
+ DDE.checkRecordedDecl(D);
}
// In CUDA, there are some constructs which may appear in semantically-valid
@@ -1517,71 +1695,6 @@ Sema::DeviceDiagBuilder::~DeviceDiagBuilder() {
}
}
-// Indicate that this function (and thus everything it transtively calls) will
-// be codegen'ed, and emit any deferred diagnostics on this function and its
-// (transitive) callees.
-void Sema::markKnownEmitted(
- Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
- SourceLocation OrigLoc,
- const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted) {
- // Nothing to do if we already know that FD is emitted.
- if (IsKnownEmitted(S, OrigCallee)) {
- assert(!S.DeviceCallGraph.count(OrigCallee));
- return;
- }
-
- // We've just discovered that OrigCallee is known-emitted. Walk our call
- // graph to see what else we can now discover also must be emitted.
-
- struct CallInfo {
- FunctionDecl *Caller;
- FunctionDecl *Callee;
- SourceLocation Loc;
- };
- llvm::SmallVector<CallInfo, 4> Worklist = {{OrigCaller, OrigCallee, OrigLoc}};
- llvm::SmallSet<CanonicalDeclPtr<FunctionDecl>, 4> Seen;
- Seen.insert(OrigCallee);
- while (!Worklist.empty()) {
- CallInfo C = Worklist.pop_back_val();
- assert(!IsKnownEmitted(S, C.Callee) &&
- "Worklist should not contain known-emitted functions.");
- S.DeviceKnownEmittedFns[C.Callee] = {C.Caller, C.Loc};
- emitDeferredDiags(S, C.Callee, C.Caller);
-
- // If this is a template instantiation, explore its callgraph as well:
- // Non-dependent calls are part of the template's callgraph, while dependent
- // calls are part of to the instantiation's call graph.
- if (auto *Templ = C.Callee->getPrimaryTemplate()) {
- FunctionDecl *TemplFD = Templ->getAsFunction();
- if (!Seen.count(TemplFD) && !S.DeviceKnownEmittedFns.count(TemplFD)) {
- Seen.insert(TemplFD);
- Worklist.push_back(
- {/* Caller = */ C.Caller, /* Callee = */ TemplFD, C.Loc});
- }
- }
-
- // Add all functions called by Callee to our worklist.
- auto CGIt = S.DeviceCallGraph.find(C.Callee);
- if (CGIt == S.DeviceCallGraph.end())
- continue;
-
- for (std::pair<CanonicalDeclPtr<FunctionDecl>, SourceLocation> FDLoc :
- CGIt->second) {
- FunctionDecl *NewCallee = FDLoc.first;
- SourceLocation CallLoc = FDLoc.second;
- if (Seen.count(NewCallee) || IsKnownEmitted(S, NewCallee))
- continue;
- Seen.insert(NewCallee);
- Worklist.push_back(
- {/* Caller = */ C.Callee, /* Callee = */ NewCallee, CallLoc});
- }
-
- // C.Callee is now known-emitted, so we no longer need to maintain its list
- // of callees in DeviceCallGraph.
- S.DeviceCallGraph.erase(CGIt);
- }
-}
-
Sema::DeviceDiagBuilder Sema::targetDiag(SourceLocation Loc, unsigned DiagID) {
if (LangOpts.OpenMP)
return LangOpts.OpenMPIsDevice ? diagIfOpenMPDeviceCode(Loc, DiagID)
@@ -1589,10 +1702,59 @@ Sema::DeviceDiagBuilder Sema::targetDiag(SourceLocation Loc, unsigned DiagID) {
if (getLangOpts().CUDA)
return getLangOpts().CUDAIsDevice ? CUDADiagIfDeviceCode(Loc, DiagID)
: CUDADiagIfHostCode(Loc, DiagID);
+
+ if (getLangOpts().SYCLIsDevice)
+ return SYCLDiagIfDeviceCode(Loc, DiagID);
+
return DeviceDiagBuilder(DeviceDiagBuilder::K_Immediate, Loc, DiagID,
getCurFunctionDecl(), *this);
}
+void Sema::checkDeviceDecl(const ValueDecl *D, SourceLocation Loc) {
+ if (isUnevaluatedContext())
+ return;
+
+ Decl *C = cast<Decl>(getCurLexicalContext());
+
+ // Memcpy operations for structs containing a member with unsupported type
+ // are ok, though.
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(C)) {
+ if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
+ MD->isTrivial())
+ return;
+
+ if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(MD))
+ if (Ctor->isCopyOrMoveConstructor() && Ctor->isTrivial())
+ return;
+ }
+
+ auto CheckType = [&](QualType Ty) {
+ if (Ty->isDependentType())
+ return;
+
+ if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
+ ((Ty->isFloat128Type() ||
+ (Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128)) &&
+ !Context.getTargetInfo().hasFloat128Type()) ||
+ (Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 &&
+ !Context.getTargetInfo().hasInt128Type())) {
+ targetDiag(Loc, diag::err_device_unsupported_type)
+ << D << static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty
+ << Context.getTargetInfo().getTriple().str();
+ targetDiag(D->getLocation(), diag::note_defined_here) << D;
+ }
+ };
+
+ QualType Ty = D->getType();
+ CheckType(Ty);
+
+ if (const auto *FPTy = dyn_cast<FunctionProtoType>(Ty)) {
+ for (const auto &ParamTy : FPTy->param_types())
+ CheckType(ParamTy);
+ CheckType(FPTy->getReturnType());
+ }
+}
+
/// Looks through the macro-expansion chain for the given
/// location, looking for a macro expansion with the given name.
/// If one is found, returns true and sets the location to that
@@ -1794,7 +1956,7 @@ void Sema::PopCompoundScope() {
/// Determine whether any errors occurred within this function/method/
/// block.
bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const {
- return getCurFunction()->ErrorTrap.hasUnrecoverableErrorOccurred();
+ return getCurFunction()->hasUnrecoverableErrorOccurred();
}
void Sema::setFunctionHasBranchIntoScope() {
@@ -2246,16 +2408,8 @@ std::string Sema::getOpenCLExtensionsFromTypeExtMap(FunctionType *FT) {
template <typename T, typename MapT>
std::string Sema::getOpenCLExtensionsFromExtMap(T *FDT, MapT &Map) {
- std::string ExtensionNames = "";
auto Loc = Map.find(FDT);
-
- for (auto const& I : Loc->second) {
- ExtensionNames += I;
- ExtensionNames += " ";
- }
- ExtensionNames.pop_back();
-
- return ExtensionNames;
+ return llvm::join(Loc->second, " ");
}
bool Sema::isOpenCLDisabledDecl(Decl *FD) {
diff --git a/clang/lib/Sema/SemaAttr.cpp b/clang/lib/Sema/SemaAttr.cpp
index cd2a65276b09..b354e810974c 100644
--- a/clang/lib/Sema/SemaAttr.cpp
+++ b/clang/lib/Sema/SemaAttr.cpp
@@ -256,12 +256,15 @@ void Sema::ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
void Sema::ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName) {
PragmaClangSection *CSec;
+ int SectionFlags = ASTContext::PSF_Read;
switch (SecKind) {
case PragmaClangSectionKind::PCSK_BSS:
CSec = &PragmaClangBSSSection;
+ SectionFlags |= ASTContext::PSF_Write | ASTContext::PSF_ZeroInit;
break;
case PragmaClangSectionKind::PCSK_Data:
CSec = &PragmaClangDataSection;
+ SectionFlags |= ASTContext::PSF_Write;
break;
case PragmaClangSectionKind::PCSK_Rodata:
CSec = &PragmaClangRodataSection;
@@ -271,6 +274,7 @@ void Sema::ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionA
break;
case PragmaClangSectionKind::PCSK_Text:
CSec = &PragmaClangTextSection;
+ SectionFlags |= ASTContext::PSF_Execute;
break;
default:
llvm_unreachable("invalid clang section kind");
@@ -281,8 +285,11 @@ void Sema::ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionA
return;
}
+ if (UnifySection(SecName, SectionFlags, PragmaLoc))
+ return;
+
CSec->Valid = true;
- CSec->SectionName = SecName;
+ CSec->SectionName = std::string(SecName);
CSec->PragmaLocation = PragmaLoc;
}
@@ -407,6 +414,70 @@ void Sema::ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
Consumer.HandleTopLevelDecl(DeclGroupRef(PDMD));
}
+void Sema::ActOnPragmaFloatControl(SourceLocation Loc,
+ PragmaMsStackAction Action,
+ PragmaFloatControlKind Value) {
+ unsigned NewValue = FpPragmaStack.hasValue()
+ ? FpPragmaStack.CurrentValue
+ : CurFPFeatureOverrides().getAsOpaqueInt();
+ FPOptionsOverride NewFPFeatures(NewValue);
+ if ((Action == PSK_Push_Set || Action == PSK_Push || Action == PSK_Pop) &&
+ !(CurContext->isTranslationUnit()) && !CurContext->isNamespace()) {
+ // Push and pop can only occur at file or namespace scope.
+ Diag(Loc, diag::err_pragma_fc_pp_scope);
+ return;
+ }
+ switch (Value) {
+ default:
+ llvm_unreachable("invalid pragma float_control kind");
+ case PFC_Precise:
+ NewFPFeatures.setFPPreciseEnabled(true);
+ NewValue = NewFPFeatures.getAsOpaqueInt();
+ FpPragmaStack.Act(Loc, Action, StringRef(), NewValue);
+ break;
+ case PFC_NoPrecise:
+ if (CurFPFeatures.getFPExceptionMode() == LangOptions::FPE_Strict)
+ Diag(Loc, diag::err_pragma_fc_noprecise_requires_noexcept);
+ else if (CurFPFeatures.getAllowFEnvAccess())
+ Diag(Loc, diag::err_pragma_fc_noprecise_requires_nofenv);
+ else
+ NewFPFeatures.setFPPreciseEnabled(false);
+ NewValue = NewFPFeatures.getAsOpaqueInt();
+ FpPragmaStack.Act(Loc, Action, StringRef(), NewValue);
+ break;
+ case PFC_Except:
+ if (!isPreciseFPEnabled())
+ Diag(Loc, diag::err_pragma_fc_except_requires_precise);
+ else
+ NewFPFeatures.setFPExceptionModeOverride(LangOptions::FPE_Strict);
+ NewValue = NewFPFeatures.getAsOpaqueInt();
+ FpPragmaStack.Act(Loc, Action, StringRef(), NewValue);
+ break;
+ case PFC_NoExcept:
+ NewFPFeatures.setFPExceptionModeOverride(LangOptions::FPE_Ignore);
+ NewValue = NewFPFeatures.getAsOpaqueInt();
+ FpPragmaStack.Act(Loc, Action, StringRef(), NewValue);
+ break;
+ case PFC_Push:
+ FpPragmaStack.Act(Loc, Sema::PSK_Push_Set, StringRef(),
+ NewFPFeatures.getAsOpaqueInt());
+ break;
+ case PFC_Pop:
+ if (FpPragmaStack.Stack.empty()) {
+ Diag(Loc, diag::warn_pragma_pop_failed) << "float_control"
+ << "stack empty";
+ return;
+ }
+ FpPragmaStack.Act(Loc, Action, StringRef(), NewFPFeatures.getAsOpaqueInt());
+ NewValue = FpPragmaStack.CurrentValue;
+ break;
+ }
+ FPOptionsOverride NewOverrides;
+ if (NewValue != FpPragmaStack.DefaultValue)
+ NewOverrides.getFromOpaqueInt(NewValue);
+ CurFPFeatures = NewOverrides.applyOverrides(getLangOpts());
+}
+
void Sema::ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind RepresentationMethod,
SourceLocation PragmaLoc) {
@@ -423,83 +494,52 @@ void Sema::ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
VtorDispStack.Act(PragmaLoc, Action, StringRef(), Mode);
}
-template<typename ValueType>
-void Sema::PragmaStack<ValueType>::Act(SourceLocation PragmaLocation,
- PragmaMsStackAction Action,
- llvm::StringRef StackSlotLabel,
- ValueType Value) {
- if (Action == PSK_Reset) {
- CurrentValue = DefaultValue;
- CurrentPragmaLocation = PragmaLocation;
- return;
- }
- if (Action & PSK_Push)
- Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation,
- PragmaLocation);
- else if (Action & PSK_Pop) {
- if (!StackSlotLabel.empty()) {
- // If we've got a label, try to find it and jump there.
- auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
- return x.StackSlotLabel == StackSlotLabel;
- });
- // If we found the label so pop from there.
- if (I != Stack.rend()) {
- CurrentValue = I->Value;
- CurrentPragmaLocation = I->PragmaLocation;
- Stack.erase(std::prev(I.base()), Stack.end());
- }
- } else if (!Stack.empty()) {
- // We do not have a label, just pop the last entry.
- CurrentValue = Stack.back().Value;
- CurrentPragmaLocation = Stack.back().PragmaLocation;
- Stack.pop_back();
- }
- }
- if (Action & PSK_Set) {
- CurrentValue = Value;
- CurrentPragmaLocation = PragmaLocation;
- }
-}
-
bool Sema::UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *Decl) {
- auto Section = Context.SectionInfos.find(SectionName);
- if (Section == Context.SectionInfos.end()) {
+ SourceLocation PragmaLocation;
+ if (auto A = Decl->getAttr<SectionAttr>())
+ if (A->isImplicit())
+ PragmaLocation = A->getLocation();
+ auto SectionIt = Context.SectionInfos.find(SectionName);
+ if (SectionIt == Context.SectionInfos.end()) {
Context.SectionInfos[SectionName] =
- ASTContext::SectionInfo(Decl, SourceLocation(), SectionFlags);
+ ASTContext::SectionInfo(Decl, PragmaLocation, SectionFlags);
return false;
}
// A pre-declared section takes precedence w/o diagnostic.
- if (Section->second.SectionFlags == SectionFlags ||
- !(Section->second.SectionFlags & ASTContext::PSF_Implicit))
+ const auto &Section = SectionIt->second;
+ if (Section.SectionFlags == SectionFlags ||
+ ((SectionFlags & ASTContext::PSF_Implicit) &&
+ !(Section.SectionFlags & ASTContext::PSF_Implicit)))
return false;
- auto OtherDecl = Section->second.Decl;
- Diag(Decl->getLocation(), diag::err_section_conflict)
- << Decl << OtherDecl;
- Diag(OtherDecl->getLocation(), diag::note_declared_at)
- << OtherDecl->getName();
- if (auto A = Decl->getAttr<SectionAttr>())
- if (A->isImplicit())
- Diag(A->getLocation(), diag::note_pragma_entered_here);
- if (auto A = OtherDecl->getAttr<SectionAttr>())
- if (A->isImplicit())
- Diag(A->getLocation(), diag::note_pragma_entered_here);
+ Diag(Decl->getLocation(), diag::err_section_conflict) << Decl << Section;
+ if (Section.Decl)
+ Diag(Section.Decl->getLocation(), diag::note_declared_at)
+ << Section.Decl->getName();
+ if (PragmaLocation.isValid())
+ Diag(PragmaLocation, diag::note_pragma_entered_here);
+ if (Section.PragmaSectionLocation.isValid())
+ Diag(Section.PragmaSectionLocation, diag::note_pragma_entered_here);
return true;
}
bool Sema::UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation) {
- auto Section = Context.SectionInfos.find(SectionName);
- if (Section != Context.SectionInfos.end()) {
- if (Section->second.SectionFlags == SectionFlags)
+ auto SectionIt = Context.SectionInfos.find(SectionName);
+ if (SectionIt != Context.SectionInfos.end()) {
+ const auto &Section = SectionIt->second;
+ if (Section.SectionFlags == SectionFlags)
return false;
- if (!(Section->second.SectionFlags & ASTContext::PSF_Implicit)) {
+ if (!(Section.SectionFlags & ASTContext::PSF_Implicit)) {
Diag(PragmaSectionLocation, diag::err_section_conflict)
- << "this" << "a prior #pragma section";
- Diag(Section->second.PragmaSectionLocation,
- diag::note_pragma_entered_here);
+ << "this" << Section;
+ if (Section.Decl)
+ Diag(Section.Decl->getLocation(), diag::note_declared_at)
+ << Section.Decl->getName();
+ if (Section.PragmaSectionLocation.isValid())
+ Diag(Section.PragmaSectionLocation, diag::note_pragma_entered_here);
return true;
}
}
@@ -926,31 +966,85 @@ void Sema::ActOnPragmaVisibility(const IdentifierInfo* VisType,
}
}
-void Sema::ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC) {
+void Sema::ActOnPragmaFPContract(SourceLocation Loc,
+ LangOptions::FPModeKind FPC) {
+ unsigned NewValue = FpPragmaStack.hasValue()
+ ? FpPragmaStack.CurrentValue
+ : CurFPFeatureOverrides().getAsOpaqueInt();
+ FPOptionsOverride NewFPFeatures(NewValue);
switch (FPC) {
- case LangOptions::FPC_On:
- FPFeatures.setAllowFPContractWithinStatement();
+ case LangOptions::FPM_On:
+ NewFPFeatures.setAllowFPContractWithinStatement();
break;
- case LangOptions::FPC_Fast:
- FPFeatures.setAllowFPContractAcrossStatement();
+ case LangOptions::FPM_Fast:
+ NewFPFeatures.setAllowFPContractAcrossStatement();
break;
- case LangOptions::FPC_Off:
- FPFeatures.setDisallowFPContract();
+ case LangOptions::FPM_Off:
+ NewFPFeatures.setDisallowFPContract();
break;
}
+ CurFPFeatures = NewFPFeatures.applyOverrides(getLangOpts());
+ FpPragmaStack.Act(Loc, Sema::PSK_Set, StringRef(),
+ NewFPFeatures.getAsOpaqueInt());
}
-void Sema::ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC) {
- switch (FPC) {
- case LangOptions::FEA_On:
- FPFeatures.setAllowFEnvAccess();
- break;
- case LangOptions::FEA_Off:
- FPFeatures.setDisallowFEnvAccess();
- break;
- }
+void Sema::ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled) {
+ unsigned NewValue = FpPragmaStack.hasValue()
+ ? FpPragmaStack.CurrentValue
+ : CurFPFeatureOverrides().getAsOpaqueInt();
+ FPOptionsOverride NewFPFeatures(NewValue);
+ NewFPFeatures.setAllowFPReassociateOverride(IsEnabled);
+ NewValue = NewFPFeatures.getAsOpaqueInt();
+ FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewValue);
+ FPOptionsOverride NewOverrides(NewValue);
+ CurFPFeatures = NewOverrides.applyOverrides(getLangOpts());
}
+void Sema::setRoundingMode(SourceLocation Loc, llvm::RoundingMode FPR) {
+ unsigned NewValue = FpPragmaStack.hasValue()
+ ? FpPragmaStack.CurrentValue
+ : CurFPFeatureOverrides().getAsOpaqueInt();
+ FPOptionsOverride NewFPFeatures(NewValue);
+ NewFPFeatures.setRoundingModeOverride(FPR);
+ NewValue = NewFPFeatures.getAsOpaqueInt();
+ FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewValue);
+ FPOptionsOverride NewOverrides(NewValue);
+ CurFPFeatures = NewOverrides.applyOverrides(getLangOpts());
+}
+
+void Sema::setExceptionMode(SourceLocation Loc,
+ LangOptions::FPExceptionModeKind FPE) {
+ unsigned NewValue = FpPragmaStack.hasValue()
+ ? FpPragmaStack.CurrentValue
+ : CurFPFeatureOverrides().getAsOpaqueInt();
+ FPOptionsOverride NewFPFeatures(NewValue);
+ NewFPFeatures.setFPExceptionModeOverride(FPE);
+ NewValue = NewFPFeatures.getAsOpaqueInt();
+ FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewValue);
+ FPOptionsOverride NewOverrides(NewValue);
+ CurFPFeatures = NewOverrides.applyOverrides(getLangOpts());
+}
+
+void Sema::ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled) {
+ unsigned NewValue = FpPragmaStack.hasValue()
+ ? FpPragmaStack.CurrentValue
+ : CurFPFeatureOverrides().getAsOpaqueInt();
+ FPOptionsOverride NewFPFeatures(NewValue);
+ if (IsEnabled) {
+ // Verify Microsoft restriction:
+ // You can't enable fenv_access unless precise semantics are enabled.
+ // Precise semantics can be enabled either by the float_control
+ // pragma, or by using the /fp:precise or /fp:strict compiler options
+ if (!isPreciseFPEnabled())
+ Diag(Loc, diag::err_pragma_fenv_requires_precise);
+ NewFPFeatures.setAllowFEnvAccessOverride(true);
+ } else
+ NewFPFeatures.setAllowFEnvAccessOverride(false);
+ NewValue = NewFPFeatures.getAsOpaqueInt();
+ FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewValue);
+ FPOptionsOverride NewOverrides(NewValue);
+ CurFPFeatures = NewOverrides.applyOverrides(getLangOpts());
+}
void Sema::PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc) {
diff --git a/clang/lib/Sema/SemaAvailability.cpp b/clang/lib/Sema/SemaAvailability.cpp
new file mode 100644
index 000000000000..74c4b9e16f74
--- /dev/null
+++ b/clang/lib/Sema/SemaAvailability.cpp
@@ -0,0 +1,964 @@
+//===--- SemaAvailability.cpp - Availability attribute handling -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file processes the availability attribute.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/DelayedDiagnostic.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/Sema.h"
+
+using namespace clang;
+using namespace sema;
+
+static const AvailabilityAttr *getAttrForPlatform(ASTContext &Context,
+ const Decl *D) {
+ // Check each AvailabilityAttr to find the one for this platform.
+ for (const auto *A : D->attrs()) {
+ if (const auto *Avail = dyn_cast<AvailabilityAttr>(A)) {
+ // FIXME: this is copied from CheckAvailability. We should try to
+ // de-duplicate.
+
+ // Check if this is an App Extension "platform", and if so chop off
+ // the suffix for matching with the actual platform.
+ StringRef ActualPlatform = Avail->getPlatform()->getName();
+ StringRef RealizedPlatform = ActualPlatform;
+ if (Context.getLangOpts().AppExt) {
+ size_t suffix = RealizedPlatform.rfind("_app_extension");
+ if (suffix != StringRef::npos)
+ RealizedPlatform = RealizedPlatform.slice(0, suffix);
+ }
+
+ StringRef TargetPlatform = Context.getTargetInfo().getPlatformName();
+
+ // Match the platform name.
+ if (RealizedPlatform == TargetPlatform)
+ return Avail;
+ }
+ }
+ return nullptr;
+}
+
+/// The diagnostic we should emit for \c D, and the declaration that
+/// originated it, or \c AR_Available.
+///
+/// \param D The declaration to check.
+/// \param Message If non-null, this will be populated with the message from
+/// the availability attribute that is selected.
+/// \param ClassReceiver If we're checking the method of a class message
+/// send, the class. Otherwise nullptr.
+static std::pair<AvailabilityResult, const NamedDecl *>
+ShouldDiagnoseAvailabilityOfDecl(Sema &S, const NamedDecl *D,
+ std::string *Message,
+ ObjCInterfaceDecl *ClassReceiver) {
+ AvailabilityResult Result = D->getAvailability(Message);
+
+ // For typedefs, if the typedef declaration appears available look
+ // to the underlying type to see if it is more restrictive.
+ while (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
+ if (Result == AR_Available) {
+ if (const auto *TT = TD->getUnderlyingType()->getAs<TagType>()) {
+ D = TT->getDecl();
+ Result = D->getAvailability(Message);
+ continue;
+ }
+ }
+ break;
+ }
+
+ // Forward class declarations get their attributes from their definition.
+ if (const auto *IDecl = dyn_cast<ObjCInterfaceDecl>(D)) {
+ if (IDecl->getDefinition()) {
+ D = IDecl->getDefinition();
+ Result = D->getAvailability(Message);
+ }
+ }
+
+ if (const auto *ECD = dyn_cast<EnumConstantDecl>(D))
+ if (Result == AR_Available) {
+ const DeclContext *DC = ECD->getDeclContext();
+ if (const auto *TheEnumDecl = dyn_cast<EnumDecl>(DC)) {
+ Result = TheEnumDecl->getAvailability(Message);
+ D = TheEnumDecl;
+ }
+ }
+
+ // For +new, infer availability from -init.
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ if (S.NSAPIObj && ClassReceiver) {
+ ObjCMethodDecl *Init = ClassReceiver->lookupInstanceMethod(
+ S.NSAPIObj->getInitSelector());
+ if (Init && Result == AR_Available && MD->isClassMethod() &&
+ MD->getSelector() == S.NSAPIObj->getNewSelector() &&
+ MD->definedInNSObject(S.getASTContext())) {
+ Result = Init->getAvailability(Message);
+ D = Init;
+ }
+ }
+ }
+
+ return {Result, D};
+}
+
+
+/// Determines whether we should emit a diagnostic for \c K and \c DeclVersion in
+/// the context of \c Ctx. For example, we should emit an unavailable diagnostic
+/// in a deprecated context, but not the other way around.
+static bool
+ShouldDiagnoseAvailabilityInContext(Sema &S, AvailabilityResult K,
+ VersionTuple DeclVersion, Decl *Ctx,
+ const NamedDecl *OffendingDecl) {
+ assert(K != AR_Available && "Expected an unavailable declaration here!");
+
+ // Checks if we should emit the availability diagnostic in the context of C.
+ auto CheckContext = [&](const Decl *C) {
+ if (K == AR_NotYetIntroduced) {
+ if (const AvailabilityAttr *AA = getAttrForPlatform(S.Context, C))
+ if (AA->getIntroduced() >= DeclVersion)
+ return true;
+ } else if (K == AR_Deprecated) {
+ if (C->isDeprecated())
+ return true;
+ } else if (K == AR_Unavailable) {
+ // It is perfectly fine to refer to an 'unavailable' Objective-C method
+ // when it is referenced from within the @implementation itself. In this
+ // context, we interpret unavailable as a form of access control.
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(OffendingDecl)) {
+ if (const auto *Impl = dyn_cast<ObjCImplDecl>(C)) {
+ if (MD->getClassInterface() == Impl->getClassInterface())
+ return true;
+ }
+ }
+ }
+
+ if (C->isUnavailable())
+ return true;
+ return false;
+ };
+
+ do {
+ if (CheckContext(Ctx))
+ return false;
+
+ // An implementation implicitly has the availability of the interface.
+ // Unless it is "+load" method.
+ if (const auto *MethodD = dyn_cast<ObjCMethodDecl>(Ctx))
+ if (MethodD->isClassMethod() &&
+ MethodD->getSelector().getAsString() == "load")
+ return true;
+
+ if (const auto *CatOrImpl = dyn_cast<ObjCImplDecl>(Ctx)) {
+ if (const ObjCInterfaceDecl *Interface = CatOrImpl->getClassInterface())
+ if (CheckContext(Interface))
+ return false;
+ }
+ // A category implicitly has the availability of the interface.
+ else if (const auto *CatD = dyn_cast<ObjCCategoryDecl>(Ctx))
+ if (const ObjCInterfaceDecl *Interface = CatD->getClassInterface())
+ if (CheckContext(Interface))
+ return false;
+ } while ((Ctx = cast_or_null<Decl>(Ctx->getDeclContext())));
+
+ return true;
+}
+
+static bool
+shouldDiagnoseAvailabilityByDefault(const ASTContext &Context,
+ const VersionTuple &DeploymentVersion,
+ const VersionTuple &DeclVersion) {
+ const auto &Triple = Context.getTargetInfo().getTriple();
+ VersionTuple ForceAvailabilityFromVersion;
+ switch (Triple.getOS()) {
+ case llvm::Triple::IOS:
+ case llvm::Triple::TvOS:
+ ForceAvailabilityFromVersion = VersionTuple(/*Major=*/11);
+ break;
+ case llvm::Triple::WatchOS:
+ ForceAvailabilityFromVersion = VersionTuple(/*Major=*/4);
+ break;
+ case llvm::Triple::Darwin:
+ case llvm::Triple::MacOSX:
+ ForceAvailabilityFromVersion = VersionTuple(/*Major=*/10, /*Minor=*/13);
+ break;
+ default:
+ // New targets should always warn about availability.
+ return Triple.getVendor() == llvm::Triple::Apple;
+ }
+ return DeploymentVersion >= ForceAvailabilityFromVersion ||
+ DeclVersion >= ForceAvailabilityFromVersion;
+}
+
+static NamedDecl *findEnclosingDeclToAnnotate(Decl *OrigCtx) {
+ for (Decl *Ctx = OrigCtx; Ctx;
+ Ctx = cast_or_null<Decl>(Ctx->getDeclContext())) {
+ if (isa<TagDecl>(Ctx) || isa<FunctionDecl>(Ctx) || isa<ObjCMethodDecl>(Ctx))
+ return cast<NamedDecl>(Ctx);
+ if (auto *CD = dyn_cast<ObjCContainerDecl>(Ctx)) {
+ if (auto *Imp = dyn_cast<ObjCImplDecl>(Ctx))
+ return Imp->getClassInterface();
+ return CD;
+ }
+ }
+
+ return dyn_cast<NamedDecl>(OrigCtx);
+}
+
+namespace {
+
+struct AttributeInsertion {
+ StringRef Prefix;
+ SourceLocation Loc;
+ StringRef Suffix;
+
+ static AttributeInsertion createInsertionAfter(const NamedDecl *D) {
+ return {" ", D->getEndLoc(), ""};
+ }
+ static AttributeInsertion createInsertionAfter(SourceLocation Loc) {
+ return {" ", Loc, ""};
+ }
+ static AttributeInsertion createInsertionBefore(const NamedDecl *D) {
+ return {"", D->getBeginLoc(), "\n"};
+ }
+};
+
+} // end anonymous namespace
+
+/// Tries to parse a string as ObjC method name.
+///
+/// \param Name The string to parse. Expected to originate from availability
+/// attribute argument.
+/// \param SlotNames The vector that will be populated with slot names. In case
+/// of unsuccessful parsing can contain invalid data.
+/// \returns A number of method parameters if parsing was successful, None
+/// otherwise.
+static Optional<unsigned>
+tryParseObjCMethodName(StringRef Name, SmallVectorImpl<StringRef> &SlotNames,
+ const LangOptions &LangOpts) {
+ // Accept replacements starting with - or + as valid ObjC method names.
+ if (!Name.empty() && (Name.front() == '-' || Name.front() == '+'))
+ Name = Name.drop_front(1);
+ if (Name.empty())
+ return None;
+ Name.split(SlotNames, ':');
+ unsigned NumParams;
+ if (Name.back() == ':') {
+ // Remove an empty string at the end that doesn't represent any slot.
+ SlotNames.pop_back();
+ NumParams = SlotNames.size();
+ } else {
+ if (SlotNames.size() != 1)
+ // Not a valid method name, just a colon-separated string.
+ return None;
+ NumParams = 0;
+ }
+ // Verify all slot names are valid.
+ bool AllowDollar = LangOpts.DollarIdents;
+ for (StringRef S : SlotNames) {
+ if (S.empty())
+ continue;
+ if (!isValidIdentifier(S, AllowDollar))
+ return None;
+ }
+ return NumParams;
+}
+
+/// Returns a source location in which it's appropriate to insert a new
+/// attribute for the given declaration \p D.
+static Optional<AttributeInsertion>
+createAttributeInsertion(const NamedDecl *D, const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ if (isa<ObjCPropertyDecl>(D))
+ return AttributeInsertion::createInsertionAfter(D);
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ if (MD->hasBody())
+ return None;
+ return AttributeInsertion::createInsertionAfter(D);
+ }
+ if (const auto *TD = dyn_cast<TagDecl>(D)) {
+ SourceLocation Loc =
+ Lexer::getLocForEndOfToken(TD->getInnerLocStart(), 0, SM, LangOpts);
+ if (Loc.isInvalid())
+ return None;
+ // Insert after the 'struct'/whatever keyword.
+ return AttributeInsertion::createInsertionAfter(Loc);
+ }
+ return AttributeInsertion::createInsertionBefore(D);
+}
+
+/// Actually emit an availability diagnostic for a reference to an unavailable
+/// decl.
+///
+/// \param Ctx The context that the reference occurred in
+/// \param ReferringDecl The exact declaration that was referenced.
+/// \param OffendingDecl A related decl to \c ReferringDecl that has an
+/// availability attribute corresponding to \c K attached to it. Note that this
+/// may not be the same as ReferringDecl, i.e. if an EnumDecl is annotated and
+/// we refer to a member EnumConstantDecl, ReferringDecl is the EnumConstantDecl
+/// and OffendingDecl is the EnumDecl.
+static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
+ Decl *Ctx, const NamedDecl *ReferringDecl,
+ const NamedDecl *OffendingDecl,
+ StringRef Message,
+ ArrayRef<SourceLocation> Locs,
+ const ObjCInterfaceDecl *UnknownObjCClass,
+ const ObjCPropertyDecl *ObjCProperty,
+ bool ObjCPropertyAccess) {
+ // Diagnostics for deprecated or unavailable.
+ unsigned diag, diag_message, diag_fwdclass_message;
+ unsigned diag_available_here = diag::note_availability_specified_here;
+ SourceLocation NoteLocation = OffendingDecl->getLocation();
+
+ // Matches 'diag::note_property_attribute' options.
+ unsigned property_note_select;
+
+ // Matches diag::note_availability_specified_here.
+ unsigned available_here_select_kind;
+
+ VersionTuple DeclVersion;
+ if (const AvailabilityAttr *AA = getAttrForPlatform(S.Context, OffendingDecl))
+ DeclVersion = AA->getIntroduced();
+
+ if (!ShouldDiagnoseAvailabilityInContext(S, K, DeclVersion, Ctx,
+ OffendingDecl))
+ return;
+
+ SourceLocation Loc = Locs.front();
+
+ // The declaration can have multiple availability attributes, we are looking
+ // at one of them.
+ const AvailabilityAttr *A = getAttrForPlatform(S.Context, OffendingDecl);
+ if (A && A->isInherited()) {
+ for (const Decl *Redecl = OffendingDecl->getMostRecentDecl(); Redecl;
+ Redecl = Redecl->getPreviousDecl()) {
+ const AvailabilityAttr *AForRedecl =
+ getAttrForPlatform(S.Context, Redecl);
+ if (AForRedecl && !AForRedecl->isInherited()) {
+ // If D is a declaration with inherited attributes, the note should
+ // point to the declaration with actual attributes.
+ NoteLocation = Redecl->getLocation();
+ break;
+ }
+ }
+ }
+
+ switch (K) {
+ case AR_NotYetIntroduced: {
+ // We would like to emit the diagnostic even if -Wunguarded-availability is
+ // not specified for deployment targets >= to iOS 11 or equivalent or
+ // for declarations that were introduced in iOS 11 (macOS 10.13, ...) or
+ // later.
+ const AvailabilityAttr *AA =
+ getAttrForPlatform(S.getASTContext(), OffendingDecl);
+ VersionTuple Introduced = AA->getIntroduced();
+
+ bool UseNewWarning = shouldDiagnoseAvailabilityByDefault(
+ S.Context, S.Context.getTargetInfo().getPlatformMinVersion(),
+ Introduced);
+ unsigned Warning = UseNewWarning ? diag::warn_unguarded_availability_new
+ : diag::warn_unguarded_availability;
+
+ std::string PlatformName(AvailabilityAttr::getPrettyPlatformName(
+ S.getASTContext().getTargetInfo().getPlatformName()));
+
+ S.Diag(Loc, Warning) << OffendingDecl << PlatformName
+ << Introduced.getAsString();
+
+ S.Diag(OffendingDecl->getLocation(),
+ diag::note_partial_availability_specified_here)
+ << OffendingDecl << PlatformName << Introduced.getAsString()
+ << S.Context.getTargetInfo().getPlatformMinVersion().getAsString();
+
+ if (const auto *Enclosing = findEnclosingDeclToAnnotate(Ctx)) {
+ if (const auto *TD = dyn_cast<TagDecl>(Enclosing))
+ if (TD->getDeclName().isEmpty()) {
+ S.Diag(TD->getLocation(),
+ diag::note_decl_unguarded_availability_silence)
+ << /*Anonymous*/ 1 << TD->getKindName();
+ return;
+ }
+ auto FixitNoteDiag =
+ S.Diag(Enclosing->getLocation(),
+ diag::note_decl_unguarded_availability_silence)
+ << /*Named*/ 0 << Enclosing;
+ // Don't offer a fixit for declarations with availability attributes.
+ if (Enclosing->hasAttr<AvailabilityAttr>())
+ return;
+ if (!S.getPreprocessor().isMacroDefined("API_AVAILABLE"))
+ return;
+ Optional<AttributeInsertion> Insertion = createAttributeInsertion(
+ Enclosing, S.getSourceManager(), S.getLangOpts());
+ if (!Insertion)
+ return;
+ std::string PlatformName =
+ AvailabilityAttr::getPlatformNameSourceSpelling(
+ S.getASTContext().getTargetInfo().getPlatformName())
+ .lower();
+ std::string Introduced =
+ OffendingDecl->getVersionIntroduced().getAsString();
+ FixitNoteDiag << FixItHint::CreateInsertion(
+ Insertion->Loc,
+ (llvm::Twine(Insertion->Prefix) + "API_AVAILABLE(" + PlatformName +
+ "(" + Introduced + "))" + Insertion->Suffix)
+ .str());
+ }
+ return;
+ }
+ case AR_Deprecated:
+ diag = !ObjCPropertyAccess ? diag::warn_deprecated
+ : diag::warn_property_method_deprecated;
+ diag_message = diag::warn_deprecated_message;
+ diag_fwdclass_message = diag::warn_deprecated_fwdclass_message;
+ property_note_select = /* deprecated */ 0;
+ available_here_select_kind = /* deprecated */ 2;
+ if (const auto *AL = OffendingDecl->getAttr<DeprecatedAttr>())
+ NoteLocation = AL->getLocation();
+ break;
+
+ case AR_Unavailable:
+ diag = !ObjCPropertyAccess ? diag::err_unavailable
+ : diag::err_property_method_unavailable;
+ diag_message = diag::err_unavailable_message;
+ diag_fwdclass_message = diag::warn_unavailable_fwdclass_message;
+ property_note_select = /* unavailable */ 1;
+ available_here_select_kind = /* unavailable */ 0;
+
+ if (auto AL = OffendingDecl->getAttr<UnavailableAttr>()) {
+ if (AL->isImplicit() && AL->getImplicitReason()) {
+ // Most of these failures are due to extra restrictions in ARC;
+ // reflect that in the primary diagnostic when applicable.
+ auto flagARCError = [&] {
+ if (S.getLangOpts().ObjCAutoRefCount &&
+ S.getSourceManager().isInSystemHeader(
+ OffendingDecl->getLocation()))
+ diag = diag::err_unavailable_in_arc;
+ };
+
+ switch (AL->getImplicitReason()) {
+ case UnavailableAttr::IR_None: break;
+
+ case UnavailableAttr::IR_ARCForbiddenType:
+ flagARCError();
+ diag_available_here = diag::note_arc_forbidden_type;
+ break;
+
+ case UnavailableAttr::IR_ForbiddenWeak:
+ if (S.getLangOpts().ObjCWeakRuntime)
+ diag_available_here = diag::note_arc_weak_disabled;
+ else
+ diag_available_here = diag::note_arc_weak_no_runtime;
+ break;
+
+ case UnavailableAttr::IR_ARCForbiddenConversion:
+ flagARCError();
+ diag_available_here = diag::note_performs_forbidden_arc_conversion;
+ break;
+
+ case UnavailableAttr::IR_ARCInitReturnsUnrelated:
+ flagARCError();
+ diag_available_here = diag::note_arc_init_returns_unrelated;
+ break;
+
+ case UnavailableAttr::IR_ARCFieldWithOwnership:
+ flagARCError();
+ diag_available_here = diag::note_arc_field_with_ownership;
+ break;
+ }
+ }
+ }
+ break;
+
+ case AR_Available:
+ llvm_unreachable("Warning for availability of available declaration?");
+ }
+
+ SmallVector<FixItHint, 12> FixIts;
+ if (K == AR_Deprecated) {
+ StringRef Replacement;
+ if (auto AL = OffendingDecl->getAttr<DeprecatedAttr>())
+ Replacement = AL->getReplacement();
+ if (auto AL = getAttrForPlatform(S.Context, OffendingDecl))
+ Replacement = AL->getReplacement();
+
+ CharSourceRange UseRange;
+ if (!Replacement.empty())
+ UseRange =
+ CharSourceRange::getCharRange(Loc, S.getLocForEndOfToken(Loc));
+ if (UseRange.isValid()) {
+ if (const auto *MethodDecl = dyn_cast<ObjCMethodDecl>(ReferringDecl)) {
+ Selector Sel = MethodDecl->getSelector();
+ SmallVector<StringRef, 12> SelectorSlotNames;
+ Optional<unsigned> NumParams = tryParseObjCMethodName(
+ Replacement, SelectorSlotNames, S.getLangOpts());
+ if (NumParams && NumParams.getValue() == Sel.getNumArgs()) {
+ assert(SelectorSlotNames.size() == Locs.size());
+ for (unsigned I = 0; I < Locs.size(); ++I) {
+ if (!Sel.getNameForSlot(I).empty()) {
+ CharSourceRange NameRange = CharSourceRange::getCharRange(
+ Locs[I], S.getLocForEndOfToken(Locs[I]));
+ FixIts.push_back(FixItHint::CreateReplacement(
+ NameRange, SelectorSlotNames[I]));
+ } else
+ FixIts.push_back(
+ FixItHint::CreateInsertion(Locs[I], SelectorSlotNames[I]));
+ }
+ } else
+ FixIts.push_back(FixItHint::CreateReplacement(UseRange, Replacement));
+ } else
+ FixIts.push_back(FixItHint::CreateReplacement(UseRange, Replacement));
+ }
+ }
+
+ if (!Message.empty()) {
+ S.Diag(Loc, diag_message) << ReferringDecl << Message << FixIts;
+ if (ObjCProperty)
+ S.Diag(ObjCProperty->getLocation(), diag::note_property_attribute)
+ << ObjCProperty->getDeclName() << property_note_select;
+ } else if (!UnknownObjCClass) {
+ S.Diag(Loc, diag) << ReferringDecl << FixIts;
+ if (ObjCProperty)
+ S.Diag(ObjCProperty->getLocation(), diag::note_property_attribute)
+ << ObjCProperty->getDeclName() << property_note_select;
+ } else {
+ S.Diag(Loc, diag_fwdclass_message) << ReferringDecl << FixIts;
+ S.Diag(UnknownObjCClass->getLocation(), diag::note_forward_class);
+ }
+
+ S.Diag(NoteLocation, diag_available_here)
+ << OffendingDecl << available_here_select_kind;
+}
+
+void Sema::handleDelayedAvailabilityCheck(DelayedDiagnostic &DD, Decl *Ctx) {
+ assert(DD.Kind == DelayedDiagnostic::Availability &&
+ "Expected an availability diagnostic here");
+
+ DD.Triggered = true;
+ DoEmitAvailabilityWarning(
+ *this, DD.getAvailabilityResult(), Ctx, DD.getAvailabilityReferringDecl(),
+ DD.getAvailabilityOffendingDecl(), DD.getAvailabilityMessage(),
+ DD.getAvailabilitySelectorLocs(), DD.getUnknownObjCClass(),
+ DD.getObjCProperty(), false);
+}
+
+static void EmitAvailabilityWarning(Sema &S, AvailabilityResult AR,
+ const NamedDecl *ReferringDecl,
+ const NamedDecl *OffendingDecl,
+ StringRef Message,
+ ArrayRef<SourceLocation> Locs,
+ const ObjCInterfaceDecl *UnknownObjCClass,
+ const ObjCPropertyDecl *ObjCProperty,
+ bool ObjCPropertyAccess) {
+ // Delay if we're currently parsing a declaration.
+ if (S.DelayedDiagnostics.shouldDelayDiagnostics()) {
+ S.DelayedDiagnostics.add(
+ DelayedDiagnostic::makeAvailability(
+ AR, Locs, ReferringDecl, OffendingDecl, UnknownObjCClass,
+ ObjCProperty, Message, ObjCPropertyAccess));
+ return;
+ }
+
+ Decl *Ctx = cast<Decl>(S.getCurLexicalContext());
+ DoEmitAvailabilityWarning(S, AR, Ctx, ReferringDecl, OffendingDecl,
+ Message, Locs, UnknownObjCClass, ObjCProperty,
+ ObjCPropertyAccess);
+}
+
+namespace {
+
+/// Returns true if the given statement can be a body-like child of \p Parent.
+bool isBodyLikeChildStmt(const Stmt *S, const Stmt *Parent) {
+ switch (Parent->getStmtClass()) {
+ case Stmt::IfStmtClass:
+ return cast<IfStmt>(Parent)->getThen() == S ||
+ cast<IfStmt>(Parent)->getElse() == S;
+ case Stmt::WhileStmtClass:
+ return cast<WhileStmt>(Parent)->getBody() == S;
+ case Stmt::DoStmtClass:
+ return cast<DoStmt>(Parent)->getBody() == S;
+ case Stmt::ForStmtClass:
+ return cast<ForStmt>(Parent)->getBody() == S;
+ case Stmt::CXXForRangeStmtClass:
+ return cast<CXXForRangeStmt>(Parent)->getBody() == S;
+ case Stmt::ObjCForCollectionStmtClass:
+ return cast<ObjCForCollectionStmt>(Parent)->getBody() == S;
+ case Stmt::CaseStmtClass:
+ case Stmt::DefaultStmtClass:
+ return cast<SwitchCase>(Parent)->getSubStmt() == S;
+ default:
+ return false;
+ }
+}
+
+class StmtUSEFinder : public RecursiveASTVisitor<StmtUSEFinder> {
+ const Stmt *Target;
+
+public:
+ bool VisitStmt(Stmt *S) { return S != Target; }
+
+ /// Returns true if the given statement is present in the given declaration.
+ static bool isContained(const Stmt *Target, const Decl *D) {
+ StmtUSEFinder Visitor;
+ Visitor.Target = Target;
+ return !Visitor.TraverseDecl(const_cast<Decl *>(D));
+ }
+};
+
+/// Traverses the AST and finds the last statement that used a given
+/// declaration.
+class LastDeclUSEFinder : public RecursiveASTVisitor<LastDeclUSEFinder> {
+ const Decl *D;
+
+public:
+ bool VisitDeclRefExpr(DeclRefExpr *DRE) {
+ if (DRE->getDecl() == D)
+ return false;
+ return true;
+ }
+
+ static const Stmt *findLastStmtThatUsesDecl(const Decl *D,
+ const CompoundStmt *Scope) {
+ LastDeclUSEFinder Visitor;
+ Visitor.D = D;
+ for (auto I = Scope->body_rbegin(), E = Scope->body_rend(); I != E; ++I) {
+ const Stmt *S = *I;
+ if (!Visitor.TraverseStmt(const_cast<Stmt *>(S)))
+ return S;
+ }
+ return nullptr;
+ }
+};
+
+/// This class implements -Wunguarded-availability.
+///
+/// This is done with a traversal of the AST of a function that makes reference
+/// to a partially available declaration. Whenever we encounter an \c if of the
+/// form: \c if(@available(...)), we use the version from the condition to visit
+/// the then statement.
+class DiagnoseUnguardedAvailability
+ : public RecursiveASTVisitor<DiagnoseUnguardedAvailability> {
+ typedef RecursiveASTVisitor<DiagnoseUnguardedAvailability> Base;
+
+ Sema &SemaRef;
+ Decl *Ctx;
+
+ /// Stack of potentially nested 'if (@available(...))'s.
+ SmallVector<VersionTuple, 8> AvailabilityStack;
+ SmallVector<const Stmt *, 16> StmtStack;
+
+ void DiagnoseDeclAvailability(NamedDecl *D, SourceRange Range,
+ ObjCInterfaceDecl *ClassReceiver = nullptr);
+
+public:
+ DiagnoseUnguardedAvailability(Sema &SemaRef, Decl *Ctx)
+ : SemaRef(SemaRef), Ctx(Ctx) {
+ AvailabilityStack.push_back(
+ SemaRef.Context.getTargetInfo().getPlatformMinVersion());
+ }
+
+ bool TraverseDecl(Decl *D) {
+ // Avoid visiting nested functions to prevent duplicate warnings.
+ if (!D || isa<FunctionDecl>(D))
+ return true;
+ return Base::TraverseDecl(D);
+ }
+
+ bool TraverseStmt(Stmt *S) {
+ if (!S)
+ return true;
+ StmtStack.push_back(S);
+ bool Result = Base::TraverseStmt(S);
+ StmtStack.pop_back();
+ return Result;
+ }
+
+ void IssueDiagnostics(Stmt *S) { TraverseStmt(S); }
+
+ bool TraverseIfStmt(IfStmt *If);
+
+ bool TraverseLambdaExpr(LambdaExpr *E) { return true; }
+
+  // For 'case X:' statements, don't bother looking at the 'X'; it can't lead
+ // to any useful diagnostics.
+ bool TraverseCaseStmt(CaseStmt *CS) { return TraverseStmt(CS->getSubStmt()); }
+
+ bool VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *PRE) {
+ if (PRE->isClassReceiver())
+ DiagnoseDeclAvailability(PRE->getClassReceiver(), PRE->getReceiverLocation());
+ return true;
+ }
+
+ bool VisitObjCMessageExpr(ObjCMessageExpr *Msg) {
+ if (ObjCMethodDecl *D = Msg->getMethodDecl()) {
+ ObjCInterfaceDecl *ID = nullptr;
+ QualType ReceiverTy = Msg->getClassReceiver();
+ if (!ReceiverTy.isNull() && ReceiverTy->getAsObjCInterfaceType())
+ ID = ReceiverTy->getAsObjCInterfaceType()->getInterface();
+
+ DiagnoseDeclAvailability(
+ D, SourceRange(Msg->getSelectorStartLoc(), Msg->getEndLoc()), ID);
+ }
+ return true;
+ }
+
+ bool VisitDeclRefExpr(DeclRefExpr *DRE) {
+ DiagnoseDeclAvailability(DRE->getDecl(),
+ SourceRange(DRE->getBeginLoc(), DRE->getEndLoc()));
+ return true;
+ }
+
+ bool VisitMemberExpr(MemberExpr *ME) {
+ DiagnoseDeclAvailability(ME->getMemberDecl(),
+ SourceRange(ME->getBeginLoc(), ME->getEndLoc()));
+ return true;
+ }
+
+ bool VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
+ SemaRef.Diag(E->getBeginLoc(), diag::warn_at_available_unchecked_use)
+ << (!SemaRef.getLangOpts().ObjC);
+ return true;
+ }
+
+ bool VisitTypeLoc(TypeLoc Ty);
+};
+
+void DiagnoseUnguardedAvailability::DiagnoseDeclAvailability(
+ NamedDecl *D, SourceRange Range, ObjCInterfaceDecl *ReceiverClass) {
+ AvailabilityResult Result;
+ const NamedDecl *OffendingDecl;
+ std::tie(Result, OffendingDecl) =
+ ShouldDiagnoseAvailabilityOfDecl(SemaRef, D, nullptr, ReceiverClass);
+ if (Result != AR_Available) {
+ // All other diagnostic kinds have already been handled in
+ // DiagnoseAvailabilityOfDecl.
+ if (Result != AR_NotYetIntroduced)
+ return;
+
+ const AvailabilityAttr *AA =
+ getAttrForPlatform(SemaRef.getASTContext(), OffendingDecl);
+ VersionTuple Introduced = AA->getIntroduced();
+
+ if (AvailabilityStack.back() >= Introduced)
+ return;
+
+ // If the context of this function is less available than D, we should not
+ // emit a diagnostic.
+ if (!ShouldDiagnoseAvailabilityInContext(SemaRef, Result, Introduced, Ctx,
+ OffendingDecl))
+ return;
+
+ // We would like to emit the diagnostic even if -Wunguarded-availability is
+ // not specified for deployment targets >= to iOS 11 or equivalent or
+ // for declarations that were introduced in iOS 11 (macOS 10.13, ...) or
+ // later.
+ unsigned DiagKind =
+ shouldDiagnoseAvailabilityByDefault(
+ SemaRef.Context,
+ SemaRef.Context.getTargetInfo().getPlatformMinVersion(), Introduced)
+ ? diag::warn_unguarded_availability_new
+ : diag::warn_unguarded_availability;
+
+ std::string PlatformName(AvailabilityAttr::getPrettyPlatformName(
+ SemaRef.getASTContext().getTargetInfo().getPlatformName()));
+
+ SemaRef.Diag(Range.getBegin(), DiagKind)
+ << Range << D << PlatformName << Introduced.getAsString();
+
+ SemaRef.Diag(OffendingDecl->getLocation(),
+ diag::note_partial_availability_specified_here)
+ << OffendingDecl << PlatformName << Introduced.getAsString()
+ << SemaRef.Context.getTargetInfo()
+ .getPlatformMinVersion()
+ .getAsString();
+
+ auto FixitDiag =
+ SemaRef.Diag(Range.getBegin(), diag::note_unguarded_available_silence)
+ << Range << D
+ << (SemaRef.getLangOpts().ObjC ? /*@available*/ 0
+ : /*__builtin_available*/ 1);
+
+ // Find the statement which should be enclosed in the if @available check.
+ if (StmtStack.empty())
+ return;
+ const Stmt *StmtOfUse = StmtStack.back();
+ const CompoundStmt *Scope = nullptr;
+ for (const Stmt *S : llvm::reverse(StmtStack)) {
+ if (const auto *CS = dyn_cast<CompoundStmt>(S)) {
+ Scope = CS;
+ break;
+ }
+ if (isBodyLikeChildStmt(StmtOfUse, S)) {
+ // The declaration won't be seen outside of the statement, so we don't
+ // have to wrap the uses of any declared variables in if (@available).
+ // Therefore we can avoid setting Scope here.
+ break;
+ }
+ StmtOfUse = S;
+ }
+ const Stmt *LastStmtOfUse = nullptr;
+ if (isa<DeclStmt>(StmtOfUse) && Scope) {
+ for (const Decl *D : cast<DeclStmt>(StmtOfUse)->decls()) {
+ if (StmtUSEFinder::isContained(StmtStack.back(), D)) {
+ LastStmtOfUse = LastDeclUSEFinder::findLastStmtThatUsesDecl(D, Scope);
+ break;
+ }
+ }
+ }
+
+ const SourceManager &SM = SemaRef.getSourceManager();
+ SourceLocation IfInsertionLoc =
+ SM.getExpansionLoc(StmtOfUse->getBeginLoc());
+ SourceLocation StmtEndLoc =
+ SM.getExpansionRange(
+ (LastStmtOfUse ? LastStmtOfUse : StmtOfUse)->getEndLoc())
+ .getEnd();
+ if (SM.getFileID(IfInsertionLoc) != SM.getFileID(StmtEndLoc))
+ return;
+
+ StringRef Indentation = Lexer::getIndentationForLine(IfInsertionLoc, SM);
+ const char *ExtraIndentation = " ";
+ std::string FixItString;
+ llvm::raw_string_ostream FixItOS(FixItString);
+ FixItOS << "if (" << (SemaRef.getLangOpts().ObjC ? "@available"
+ : "__builtin_available")
+ << "("
+ << AvailabilityAttr::getPlatformNameSourceSpelling(
+ SemaRef.getASTContext().getTargetInfo().getPlatformName())
+ << " " << Introduced.getAsString() << ", *)) {\n"
+ << Indentation << ExtraIndentation;
+ FixitDiag << FixItHint::CreateInsertion(IfInsertionLoc, FixItOS.str());
+ SourceLocation ElseInsertionLoc = Lexer::findLocationAfterToken(
+ StmtEndLoc, tok::semi, SM, SemaRef.getLangOpts(),
+ /*SkipTrailingWhitespaceAndNewLine=*/false);
+ if (ElseInsertionLoc.isInvalid())
+ ElseInsertionLoc =
+ Lexer::getLocForEndOfToken(StmtEndLoc, 0, SM, SemaRef.getLangOpts());
+ FixItOS.str().clear();
+ FixItOS << "\n"
+ << Indentation << "} else {\n"
+ << Indentation << ExtraIndentation
+ << "// Fallback on earlier versions\n"
+ << Indentation << "}";
+ FixitDiag << FixItHint::CreateInsertion(ElseInsertionLoc, FixItOS.str());
+ }
+}
+
+bool DiagnoseUnguardedAvailability::VisitTypeLoc(TypeLoc Ty) {
+ const Type *TyPtr = Ty.getTypePtr();
+ SourceRange Range{Ty.getBeginLoc(), Ty.getEndLoc()};
+
+ if (Range.isInvalid())
+ return true;
+
+ if (const auto *TT = dyn_cast<TagType>(TyPtr)) {
+ TagDecl *TD = TT->getDecl();
+ DiagnoseDeclAvailability(TD, Range);
+
+ } else if (const auto *TD = dyn_cast<TypedefType>(TyPtr)) {
+ TypedefNameDecl *D = TD->getDecl();
+ DiagnoseDeclAvailability(D, Range);
+
+ } else if (const auto *ObjCO = dyn_cast<ObjCObjectType>(TyPtr)) {
+ if (NamedDecl *D = ObjCO->getInterface())
+ DiagnoseDeclAvailability(D, Range);
+ }
+
+ return true;
+}
+
+bool DiagnoseUnguardedAvailability::TraverseIfStmt(IfStmt *If) {
+ VersionTuple CondVersion;
+ if (auto *E = dyn_cast<ObjCAvailabilityCheckExpr>(If->getCond())) {
+ CondVersion = E->getVersion();
+
+ // If we're using the '*' case here or if this check is redundant, then we
+ // use the enclosing version to check both branches.
+ if (CondVersion.empty() || CondVersion <= AvailabilityStack.back())
+ return TraverseStmt(If->getThen()) && TraverseStmt(If->getElse());
+ } else {
+ // This isn't an availability checking 'if', we can just continue.
+ return Base::TraverseIfStmt(If);
+ }
+
+ AvailabilityStack.push_back(CondVersion);
+ bool ShouldContinue = TraverseStmt(If->getThen());
+ AvailabilityStack.pop_back();
+
+ return ShouldContinue && TraverseStmt(If->getElse());
+}
+
+} // end anonymous namespace
+
+void Sema::DiagnoseUnguardedAvailabilityViolations(Decl *D) {
+ Stmt *Body = nullptr;
+
+ if (auto *FD = D->getAsFunction()) {
+ // FIXME: We only examine the pattern decl for availability violations now,
+ // but we should also examine instantiated templates.
+ if (FD->isTemplateInstantiation())
+ return;
+
+ Body = FD->getBody();
+ } else if (auto *MD = dyn_cast<ObjCMethodDecl>(D))
+ Body = MD->getBody();
+ else if (auto *BD = dyn_cast<BlockDecl>(D))
+ Body = BD->getBody();
+
+ assert(Body && "Need a body here!");
+
+ DiagnoseUnguardedAvailability(*this, D).IssueDiagnostics(Body);
+}
+
+void Sema::DiagnoseAvailabilityOfDecl(NamedDecl *D,
+ ArrayRef<SourceLocation> Locs,
+ const ObjCInterfaceDecl *UnknownObjCClass,
+ bool ObjCPropertyAccess,
+ bool AvoidPartialAvailabilityChecks,
+ ObjCInterfaceDecl *ClassReceiver) {
+ std::string Message;
+ AvailabilityResult Result;
+ const NamedDecl* OffendingDecl;
+ // See if this declaration is unavailable, deprecated, or partial.
+ std::tie(Result, OffendingDecl) =
+ ShouldDiagnoseAvailabilityOfDecl(*this, D, &Message, ClassReceiver);
+ if (Result == AR_Available)
+ return;
+
+ if (Result == AR_NotYetIntroduced) {
+ if (AvoidPartialAvailabilityChecks)
+ return;
+
+ // We need to know the @available context in the current function to
+ // diagnose this use, let DiagnoseUnguardedAvailabilityViolations do that
+ // when we're done parsing the current function.
+ if (getCurFunctionOrMethodDecl()) {
+ getEnclosingFunction()->HasPotentialAvailabilityViolations = true;
+ return;
+ } else if (getCurBlock() || getCurLambda()) {
+ getCurFunction()->HasPotentialAvailabilityViolations = true;
+ return;
+ }
+ }
+
+ const ObjCPropertyDecl *ObjCPDecl = nullptr;
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ if (const ObjCPropertyDecl *PD = MD->findPropertyDecl()) {
+ AvailabilityResult PDeclResult = PD->getAvailability(nullptr);
+ if (PDeclResult == Result)
+ ObjCPDecl = PD;
+ }
+ }
+
+ EmitAvailabilityWarning(*this, Result, D, OffendingDecl, Message, Locs,
+ UnknownObjCClass, ObjCPDecl, ObjCPropertyAccess);
+}
diff --git a/clang/lib/Sema/SemaCUDA.cpp b/clang/lib/Sema/SemaCUDA.cpp
index 0c61057e1072..283a04683a32 100644
--- a/clang/lib/Sema/SemaCUDA.cpp
+++ b/clang/lib/Sema/SemaCUDA.cpp
@@ -14,8 +14,10 @@
#include "clang/AST/Decl.h"
#include "clang/AST/ExprCXX.h"
#include "clang/Basic/Cuda.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "clang/Sema/SemaInternal.h"
@@ -210,6 +212,20 @@ Sema::IdentifyCUDAPreference(const FunctionDecl *Caller,
llvm_unreachable("All cases should've been handled by now.");
}
+template <typename AttrT> static bool hasImplicitAttr(const FunctionDecl *D) {
+ if (!D)
+ return false;
+ if (auto *A = D->getAttr<AttrT>())
+ return A->isImplicit();
+ return D->isImplicit();
+}
+
+bool Sema::isCUDAImplicitHostDeviceFunction(const FunctionDecl *D) {
+ bool IsImplicitDevAttr = hasImplicitAttr<CUDADeviceAttr>(D);
+ bool IsImplicitHostAttr = hasImplicitAttr<CUDAHostAttr>(D);
+ return IsImplicitDevAttr && IsImplicitHostAttr;
+}
+
void Sema::EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches) {
@@ -425,6 +441,10 @@ bool Sema::isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD) {
if (CD->getParent()->isDynamicClass())
return false;
+ // Union ctor does not call ctors of its data members.
+ if (CD->getParent()->isUnion())
+ return true;
+
// The only form of initializer allowed is an empty constructor.
// This will recursively check all base classes and member initializers
if (!llvm::all_of(CD->inits(), [&](const CXXCtorInitializer *CI) {
@@ -464,6 +484,11 @@ bool Sema::isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *DD) {
if (ClassDecl->isDynamicClass())
return false;
+ // Union does not have base class and union dtor does not call dtors of its
+ // data members.
+ if (DD->getParent()->isUnion())
+ return true;
+
// Only empty destructors are allowed. This will recursively check
// destructors for all base classes...
if (!llvm::all_of(ClassDecl->bases(), [&](const CXXBaseSpecifier &BS) {
@@ -503,9 +528,14 @@ void Sema::checkAllowedCUDAInitializer(VarDecl *VD) {
// constructor according to CUDA rules. This deviates from NVCC,
// but allows us to handle things like constexpr constructors.
if (!AllowedInit &&
- (VD->hasAttr<CUDADeviceAttr>() || VD->hasAttr<CUDAConstantAttr>()))
- AllowedInit = VD->getInit()->isConstantInitializer(
- Context, VD->getType()->isReferenceType());
+ (VD->hasAttr<CUDADeviceAttr>() || VD->hasAttr<CUDAConstantAttr>())) {
+ auto *Init = VD->getInit();
+ AllowedInit =
+ ((VD->getType()->isDependentType() || Init->isValueDependent()) &&
+ VD->isConstexpr()) ||
+ Init->isConstantInitializer(Context,
+ VD->getType()->isReferenceType());
+ }
// Also make sure that destructor, if there is one, is empty.
if (AllowedInit)
@@ -602,6 +632,13 @@ void Sema::maybeAddCUDAHostDeviceAttrs(FunctionDecl *NewD,
NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context));
}
+void Sema::MaybeAddCUDAConstantAttr(VarDecl *VD) {
+ if (getLangOpts().CUDAIsDevice && VD->isConstexpr() &&
+ (VD->isFileVarDecl() || VD->isStaticDataMember())) {
+ VD->addAttr(CUDAConstantAttr::CreateImplicit(getASTContext()));
+ }
+}
+
Sema::DeviceDiagBuilder Sema::CUDADiagIfDeviceCode(SourceLocation Loc,
unsigned DiagID) {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
@@ -674,25 +711,6 @@ bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
// Otherwise, mark the call in our call graph so we can traverse it later.
bool CallerKnownEmitted =
getEmissionStatus(Caller) == FunctionEmissionStatus::Emitted;
- if (CallerKnownEmitted) {
- // Host-side references to a __global__ function refer to the stub, so the
- // function itself is never emitted and therefore should not be marked.
- if (!shouldIgnoreInHostDeviceCheck(Callee))
- markKnownEmitted(
- *this, Caller, Callee, Loc, [](Sema &S, FunctionDecl *FD) {
- return S.getEmissionStatus(FD) == FunctionEmissionStatus::Emitted;
- });
- } else {
- // If we have
- // host fn calls kernel fn calls host+device,
- // the HD function does not get instantiated on the host. We model this by
- // omitting at the call to the kernel from the callgraph. This ensures
- // that, when compiling for host, only HD functions actually called from the
- // host get marked as known-emitted.
- if (!shouldIgnoreInHostDeviceCheck(Callee))
- DeviceCallGraph[Caller].insert({Callee, Loc});
- }
-
DeviceDiagBuilder::Kind DiagKind = [this, Caller, Callee,
CallerKnownEmitted] {
switch (IdentifyCUDAPreference(Caller, Callee)) {
@@ -729,20 +747,58 @@ bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
DiagKind != DeviceDiagBuilder::K_ImmediateWithCallStack;
}
+// Check the wrong-sided reference capture of lambda for CUDA/HIP.
+// A lambda function may capture a stack variable by reference when it is
+// defined and uses the capture by reference when the lambda is called. When
+// the capture and use happen on different sides, the capture is invalid and
+// should be diagnosed.
+void Sema::CUDACheckLambdaCapture(CXXMethodDecl *Callee,
+ const sema::Capture &Capture) {
+ // In host compilation we only need to check lambda functions emitted on host
+ // side. In such lambda functions, a reference capture is invalid only
+ // if the lambda structure is populated by a device function or kernel then
+ // is passed to and called by a host function. However that is impossible,
+ // since a device function or kernel can only call a device function, also a
+ // kernel cannot pass a lambda back to a host function since we cannot
+ // define a kernel argument type which can hold the lambda before the lambda
+ // itself is defined.
+ if (!LangOpts.CUDAIsDevice)
+ return;
+
+ // File-scope lambda can only do init captures for global variables, which
+ // results in passing by value for these global variables.
+ FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext);
+ if (!Caller)
+ return;
+
+ // In device compilation, we only need to check lambda functions which are
+ // emitted on device side. For such lambdas, a reference capture is invalid
+ // only if the lambda structure is populated by a host function then passed
+ // to and called in a device function or kernel.
+ bool CalleeIsDevice = Callee->hasAttr<CUDADeviceAttr>();
+ bool CallerIsHost =
+ !Caller->hasAttr<CUDAGlobalAttr>() && !Caller->hasAttr<CUDADeviceAttr>();
+ bool ShouldCheck = CalleeIsDevice && CallerIsHost;
+ if (!ShouldCheck || !Capture.isReferenceCapture())
+ return;
+ auto DiagKind = DeviceDiagBuilder::K_Deferred;
+ if (Capture.isVariableCapture()) {
+ DeviceDiagBuilder(DiagKind, Capture.getLocation(),
+ diag::err_capture_bad_target, Callee, *this)
+ << Capture.getVariable();
+ } else if (Capture.isThisCapture()) {
+ DeviceDiagBuilder(DiagKind, Capture.getLocation(),
+ diag::err_capture_bad_target_this_ptr, Callee, *this);
+ }
+ return;
+}
+
void Sema::CUDASetLambdaAttrs(CXXMethodDecl *Method) {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
if (Method->hasAttr<CUDAHostAttr>() || Method->hasAttr<CUDADeviceAttr>())
return;
- FunctionDecl *CurFn = dyn_cast<FunctionDecl>(CurContext);
- if (!CurFn)
- return;
- CUDAFunctionTarget Target = IdentifyCUDATarget(CurFn);
- if (Target == CFT_Global || Target == CFT_Device) {
- Method->addAttr(CUDADeviceAttr::CreateImplicit(Context));
- } else if (Target == CFT_HostDevice) {
- Method->addAttr(CUDADeviceAttr::CreateImplicit(Context));
- Method->addAttr(CUDAHostAttr::CreateImplicit(Context));
- }
+ Method->addAttr(CUDADeviceAttr::CreateImplicit(Context));
+ Method->addAttr(CUDAHostAttr::CreateImplicit(Context));
}
void Sema::checkCUDATargetOverload(FunctionDecl *NewFD,
diff --git a/clang/lib/Sema/SemaCast.cpp b/clang/lib/Sema/SemaCast.cpp
index a905ebc67305..2efe26052c78 100644
--- a/clang/lib/Sema/SemaCast.cpp
+++ b/clang/lib/Sema/SemaCast.cpp
@@ -48,7 +48,8 @@ enum CastType {
CT_Reinterpret, ///< reinterpret_cast
CT_Dynamic, ///< dynamic_cast
CT_CStyle, ///< (Type)expr
- CT_Functional ///< Type(expr)
+ CT_Functional, ///< Type(expr)
+ CT_Addrspace ///< addrspace_cast
};
namespace {
@@ -88,6 +89,7 @@ namespace {
void CheckCXXCStyleCast(bool FunctionalCast, bool ListInitialization);
void CheckCStyleCast();
void CheckBuiltinBitCast();
+ void CheckAddrspaceCast();
void updatePartOfExplicitCastFlags(CastExpr *CE) {
// Walk down from the CE to the OrigSrcExpr, and mark all immediate
@@ -159,6 +161,30 @@ namespace {
PlaceholderKind = (BuiltinType::Kind) 0;
}
};
+
+ void CheckNoDeref(Sema &S, const QualType FromType, const QualType ToType,
+ SourceLocation OpLoc) {
+ if (const auto *PtrType = dyn_cast<PointerType>(FromType)) {
+ if (PtrType->getPointeeType()->hasAttr(attr::NoDeref)) {
+ if (const auto *DestType = dyn_cast<PointerType>(ToType)) {
+ if (!DestType->getPointeeType()->hasAttr(attr::NoDeref)) {
+ S.Diag(OpLoc, diag::warn_noderef_to_dereferenceable_pointer);
+ }
+ }
+ }
+ }
+ }
+
+ struct CheckNoDerefRAII {
+ CheckNoDerefRAII(CastOperation &Op) : Op(Op) {}
+ ~CheckNoDerefRAII() {
+ if (!Op.SrcExpr.isInvalid())
+ CheckNoDeref(Op.Self, Op.SrcExpr.get()->getType(), Op.ResultType,
+ Op.OpRange.getBegin());
+ }
+
+ CastOperation &Op;
+ };
}
static void DiagnoseCastQual(Sema &Self, const ExprResult &SrcExpr,
@@ -225,12 +251,14 @@ static TryCastResult TryConstCast(Sema &Self, ExprResult &SrcExpr,
unsigned &msg);
static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
QualType DestType, bool CStyle,
- SourceRange OpRange,
- unsigned &msg,
+ SourceRange OpRange, unsigned &msg,
CastKind &Kind);
+static TryCastResult TryAddressSpaceCast(Sema &Self, ExprResult &SrcExpr,
+ QualType DestType, bool CStyle,
+ unsigned &msg, CastKind &Kind);
-
-/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
+/// ActOnCXXNamedCast - Parse
+/// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult
Sema::ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
SourceLocation LAngleBracketLoc, Declarator &D,
@@ -272,6 +300,16 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
switch (Kind) {
default: llvm_unreachable("Unknown C++ cast!");
+ case tok::kw_addrspace_cast:
+ if (!TypeDependent) {
+ Op.CheckAddrspaceCast();
+ if (Op.SrcExpr.isInvalid())
+ return ExprError();
+ }
+ return Op.complete(CXXAddrspaceCastExpr::Create(
+ Context, Op.ResultType, Op.ValueKind, Op.Kind, Op.SrcExpr.get(),
+ DestTInfo, OpLoc, Parens.getEnd(), AngleBrackets));
+
case tok::kw_const_cast:
if (!TypeDependent) {
Op.CheckConstCast();
@@ -375,6 +413,7 @@ static bool tryDiagnoseOverloadedCast(Sema &S, CastType CT,
case CT_Const:
case CT_Reinterpret:
case CT_Dynamic:
+ case CT_Addrspace:
return false;
// These do.
@@ -708,6 +747,8 @@ static TryCastResult getCastAwayConstnessCastKind(CastAwayConstnessKind CACK,
/// Refer to C++ 5.2.7 for details. Dynamic casts are used mostly for runtime-
/// checked downcasts in class hierarchies.
void CastOperation::CheckDynamicCast() {
+ CheckNoDerefRAII NoderefCheck(*this);
+
if (ValueKind == VK_RValue)
SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.get());
else if (isPlaceholder())
@@ -861,6 +902,8 @@ void CastOperation::CheckDynamicCast() {
/// const char *str = "literal";
/// legacy_function(const_cast\<char*\>(str));
void CastOperation::CheckConstCast() {
+ CheckNoDerefRAII NoderefCheck(*this);
+
if (ValueKind == VK_RValue)
SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.get());
else if (isPlaceholder())
@@ -878,6 +921,18 @@ void CastOperation::CheckConstCast() {
SrcExpr = ExprError();
}
+void CastOperation::CheckAddrspaceCast() {
+ unsigned msg = diag::err_bad_cxx_cast_generic;
+ auto TCR =
+ TryAddressSpaceCast(Self, SrcExpr, DestType, /*CStyle*/ false, msg, Kind);
+ if (TCR != TC_Success && msg != 0) {
+ Self.Diag(OpRange.getBegin(), msg)
+ << CT_Addrspace << SrcExpr.get()->getType() << DestType << OpRange;
+ }
+ if (!isValidCast(TCR))
+ SrcExpr = ExprError();
+}
+
/// Check that a reinterpret_cast\<DestType\>(SrcExpr) is not used as upcast
/// or downcast between respective pointers or references.
static void DiagnoseReinterpretUpDownCast(Sema &Self, const Expr *SrcExpr,
@@ -1018,6 +1073,8 @@ void CastOperation::CheckReinterpretCast() {
/// Refer to C++ 5.2.9 for details. Static casts are mostly used for making
/// implicit conversions explicit and getting rid of data loss warnings.
void CastOperation::CheckStaticCast() {
+ CheckNoDerefRAII NoderefCheck(*this);
+
if (isPlaceholder()) {
checkNonOverloadPlaceholders();
if (SrcExpr.isInvalid())
@@ -1961,7 +2018,7 @@ static void DiagnoseCallingConvCast(Sema &Self, const ExprResult &SrcExpr,
<< FD << DstCCName << FixItHint::CreateInsertion(NameLoc, CCAttrText);
}
-static void checkIntToPointerCast(bool CStyle, SourceLocation Loc,
+static void checkIntToPointerCast(bool CStyle, const SourceRange &OpRange,
const Expr *SrcExpr, QualType DestType,
Sema &Self) {
QualType SrcType = SrcExpr->getType();
@@ -1983,7 +2040,7 @@ static void checkIntToPointerCast(bool CStyle, SourceLocation Loc,
unsigned Diag = DestType->isVoidPointerType() ?
diag::warn_int_to_void_pointer_cast
: diag::warn_int_to_pointer_cast;
- Self.Diag(Loc, Diag) << SrcType << DestType;
+ Self.Diag(OpRange.getBegin(), Diag) << SrcType << DestType << OpRange;
}
}
@@ -2062,6 +2119,9 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
return TC_NotApplicable;
// FIXME: Use a specific diagnostic for the rest of these cases.
case OK_VectorComponent: inappropriate = "vector element"; break;
+ case OK_MatrixComponent:
+ inappropriate = "matrix element";
+ break;
case OK_ObjCProperty: inappropriate = "property expression"; break;
case OK_ObjCSubscript: inappropriate = "container subscripting expression";
break;
@@ -2204,13 +2264,19 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
// C++ 5.2.10p4: A pointer can be explicitly converted to any integral
// type large enough to hold it; except in Microsoft mode, where the
// integral type size doesn't matter (except we don't allow bool).
- bool MicrosoftException = Self.getLangOpts().MicrosoftExt &&
- !DestType->isBooleanType();
if ((Self.Context.getTypeSize(SrcType) >
- Self.Context.getTypeSize(DestType)) &&
- !MicrosoftException) {
- msg = diag::err_bad_reinterpret_cast_small_int;
- return TC_Failed;
+ Self.Context.getTypeSize(DestType))) {
+ bool MicrosoftException =
+ Self.getLangOpts().MicrosoftExt && !DestType->isBooleanType();
+ if (MicrosoftException) {
+ unsigned Diag = SrcType->isVoidPointerType()
+ ? diag::warn_void_pointer_to_int_cast
+ : diag::warn_pointer_to_int_cast;
+ Self.Diag(OpRange.getBegin(), Diag) << SrcType << DestType << OpRange;
+ } else {
+ msg = diag::err_bad_reinterpret_cast_small_int;
+ return TC_Failed;
+ }
}
Kind = CK_PointerToIntegral;
return TC_Success;
@@ -2218,8 +2284,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
if (SrcType->isIntegralOrEnumerationType()) {
assert(destIsPtr && "One type must be a pointer");
- checkIntToPointerCast(CStyle, OpRange.getBegin(), SrcExpr.get(), DestType,
- Self);
+ checkIntToPointerCast(CStyle, OpRange, SrcExpr.get(), DestType, Self);
// C++ 5.2.10p5: A value of integral or enumeration type can be explicitly
// converted to a pointer.
// C++ 5.2.10p9: [Note: ...a null pointer constant of integral type is not
@@ -2311,6 +2376,24 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
return SuccessResult;
}
+ // Diagnose address space conversion in nested pointers.
+ QualType DestPtee = DestType->getPointeeType().isNull()
+ ? DestType->getPointeeType()
+ : DestType->getPointeeType()->getPointeeType();
+ QualType SrcPtee = SrcType->getPointeeType().isNull()
+ ? SrcType->getPointeeType()
+ : SrcType->getPointeeType()->getPointeeType();
+ while (!DestPtee.isNull() && !SrcPtee.isNull()) {
+ if (DestPtee.getAddressSpace() != SrcPtee.getAddressSpace()) {
+ Self.Diag(OpRange.getBegin(),
+ diag::warn_bad_cxx_cast_nested_pointer_addr_space)
+ << CStyle << SrcType << DestType << SrcExpr.get()->getSourceRange();
+ break;
+ }
+ DestPtee = DestPtee->getPointeeType();
+ SrcPtee = SrcPtee->getPointeeType();
+ }
+
// C++ 5.2.10p7: A pointer to an object can be explicitly converted to
// a pointer to an object of different type.
// Void pointers are not specified, but supported by every compiler out there.
@@ -2321,7 +2404,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
static TryCastResult TryAddressSpaceCast(Sema &Self, ExprResult &SrcExpr,
QualType DestType, bool CStyle,
- unsigned &msg) {
+ unsigned &msg, CastKind &Kind) {
if (!Self.getLangOpts().OpenCL)
// FIXME: As compiler doesn't have any information about overlapping addr
// spaces at the moment we have to be permissive here.
@@ -2330,6 +2413,9 @@ static TryCastResult TryAddressSpaceCast(Sema &Self, ExprResult &SrcExpr,
// non-OpenCL mode too, we fast-path above because no other languages
// define overlapping address spaces currently.
auto SrcType = SrcExpr.get()->getType();
+ // FIXME: Should this be generalized to references? The reference parameter
+ // however becomes a reference pointee type here and therefore rejected.
+ // Perhaps this is the right behavior though according to C++.
auto SrcPtrType = SrcType->getAs<PointerType>();
if (!SrcPtrType)
return TC_NotApplicable;
@@ -2338,9 +2424,7 @@ static TryCastResult TryAddressSpaceCast(Sema &Self, ExprResult &SrcExpr,
return TC_NotApplicable;
auto SrcPointeeType = SrcPtrType->getPointeeType();
auto DestPointeeType = DestPtrType->getPointeeType();
- if (SrcPointeeType.getAddressSpace() == DestPointeeType.getAddressSpace())
- return TC_NotApplicable;
- if (!DestPtrType->isAddressSpaceOverlapping(*SrcPtrType)) {
+ if (!DestPointeeType.isAddressSpaceOverlapping(SrcPointeeType)) {
msg = diag::err_bad_cxx_cast_addr_space_mismatch;
return TC_Failed;
}
@@ -2348,10 +2432,15 @@ static TryCastResult TryAddressSpaceCast(Sema &Self, ExprResult &SrcExpr,
Self.Context.removeAddrSpaceQualType(SrcPointeeType.getCanonicalType());
auto DestPointeeTypeWithoutAS =
Self.Context.removeAddrSpaceQualType(DestPointeeType.getCanonicalType());
- return Self.Context.hasSameType(SrcPointeeTypeWithoutAS,
- DestPointeeTypeWithoutAS)
- ? TC_Success
- : TC_NotApplicable;
+ if (Self.Context.hasSameType(SrcPointeeTypeWithoutAS,
+ DestPointeeTypeWithoutAS)) {
+ Kind = SrcPointeeType.getAddressSpace() == DestPointeeType.getAddressSpace()
+ ? CK_NoOp
+ : CK_AddressSpaceConversion;
+ return TC_Success;
+ } else {
+ return TC_NotApplicable;
+ }
}
void CastOperation::checkAddressSpaceCast(QualType SrcType, QualType DestType) {
@@ -2378,9 +2467,9 @@ void CastOperation::checkAddressSpaceCast(QualType SrcType, QualType DestType) {
const PointerType *SrcPPtr = cast<PointerType>(SrcPtr);
QualType DestPPointee = DestPPtr->getPointeeType();
QualType SrcPPointee = SrcPPtr->getPointeeType();
- if (Nested ? DestPPointee.getAddressSpace() !=
- SrcPPointee.getAddressSpace()
- : !DestPPtr->isAddressSpaceOverlapping(*SrcPPtr)) {
+ if (Nested
+ ? DestPPointee.getAddressSpace() != SrcPPointee.getAddressSpace()
+ : !DestPPointee.isAddressSpaceOverlapping(SrcPPointee)) {
Self.Diag(OpRange.getBegin(), DiagID)
<< SrcType << DestType << Sema::AA_Casting
<< SrcExpr.get()->getSourceRange();
@@ -2482,22 +2571,21 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
Sema::CheckedConversionKind CCK =
FunctionalStyle ? Sema::CCK_FunctionalCast : Sema::CCK_CStyleCast;
if (tcr == TC_NotApplicable) {
- tcr = TryAddressSpaceCast(Self, SrcExpr, DestType, /*CStyle*/ true, msg);
+ tcr = TryAddressSpaceCast(Self, SrcExpr, DestType, /*CStyle*/ true, msg,
+ Kind);
if (SrcExpr.isInvalid())
return;
- if (isValidCast(tcr))
- Kind = CK_AddressSpaceConversion;
-
if (tcr == TC_NotApplicable) {
- // ... or if that is not possible, a static_cast, ignoring const, ...
+ // ... or if that is not possible, a static_cast, ignoring const and
+ // addr space, ...
tcr = TryStaticCast(Self, SrcExpr, DestType, CCK, OpRange, msg, Kind,
BasePath, ListInitialization);
if (SrcExpr.isInvalid())
return;
if (tcr == TC_NotApplicable) {
- // ... and finally a reinterpret_cast, ignoring const.
+ // ... and finally a reinterpret_cast, ignoring const and addr space.
tcr = TryReinterpretCast(Self, SrcExpr, DestType, /*CStyle*/ true,
OpRange, msg, Kind);
if (SrcExpr.isInvalid())
@@ -2629,6 +2717,13 @@ void CastOperation::CheckCStyleCast() {
return;
}
+ // Allow casting a sizeless built-in type to itself.
+ if (DestType->isSizelessBuiltinType() &&
+ Self.Context.hasSameUnqualifiedType(DestType, SrcType)) {
+ Kind = CK_NoOp;
+ return;
+ }
+
if (!DestType->isScalarType() && !DestType->isVectorType()) {
const RecordType *DestRecordTy = DestType->getAs<RecordType>();
@@ -2724,6 +2819,20 @@ void CastOperation::CheckCStyleCast() {
return;
}
+ // Can't cast to or from bfloat
+ if (DestType->isBFloat16Type() && !SrcType->isBFloat16Type()) {
+ Self.Diag(SrcExpr.get()->getExprLoc(), diag::err_cast_to_bfloat16)
+ << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ return;
+ }
+ if (SrcType->isBFloat16Type() && !DestType->isBFloat16Type()) {
+ Self.Diag(SrcExpr.get()->getExprLoc(), diag::err_cast_from_bfloat16)
+ << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ return;
+ }
+
// If either type is a pointer, the other type has to be either an
// integer or a pointer.
if (!DestType->isArithmeticType()) {
@@ -2734,8 +2843,8 @@ void CastOperation::CheckCStyleCast() {
SrcExpr = ExprError();
return;
}
- checkIntToPointerCast(/* CStyle */ true, OpRange.getBegin(), SrcExpr.get(),
- DestType, Self);
+ checkIntToPointerCast(/* CStyle */ true, OpRange, SrcExpr.get(), DestType,
+ Self);
} else if (!SrcType->isArithmeticType()) {
if (!DestType->isIntegralType(Self.Context) &&
DestType->isArithmeticType()) {
@@ -2745,6 +2854,25 @@ void CastOperation::CheckCStyleCast() {
SrcExpr = ExprError();
return;
}
+
+ if ((Self.Context.getTypeSize(SrcType) >
+ Self.Context.getTypeSize(DestType)) &&
+ !DestType->isBooleanType()) {
+ // C 6.3.2.3p6: Any pointer type may be converted to an integer type.
+ // Except as previously specified, the result is implementation-defined.
+ // If the result cannot be represented in the integer type, the behavior
+ // is undefined. The result need not be in the range of values of any
+ // integer type.
+ unsigned Diag;
+ if (SrcType->isVoidPointerType())
+ Diag = DestType->isEnumeralType() ? diag::warn_void_pointer_to_enum_cast
+ : diag::warn_void_pointer_to_int_cast;
+ else if (DestType->isEnumeralType())
+ Diag = diag::warn_pointer_to_enum_cast;
+ else
+ Diag = diag::warn_pointer_to_int_cast;
+ Self.Diag(OpRange.getBegin(), Diag) << SrcType << DestType << OpRange;
+ }
}
if (Self.getLangOpts().OpenCL &&
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 74742023d1b3..509d88e25000 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -30,6 +30,7 @@
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
+#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
@@ -87,6 +88,7 @@
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
+#include <bitset>
#include <cassert>
#include <cstddef>
#include <cstdint>
@@ -236,8 +238,8 @@ static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
}
Expr::EvalResult AlignResult;
unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
- // We can't check validity of alignment if it is type dependent.
- if (!AlignOp->isInstantiationDependent() &&
+ // We can't check validity of alignment if it is value dependent.
+ if (!AlignOp->isValueDependent() &&
AlignOp->EvaluateAsInt(AlignResult, S.Context,
Expr::SE_AllowSideEffects)) {
llvm::APSInt AlignValue = AlignResult.Val.getInt();
@@ -282,48 +284,60 @@ static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
return false;
}
-static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) {
+static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
+ unsigned BuiltinID) {
if (checkArgCount(S, TheCall, 3))
return true;
// First two arguments should be integers.
for (unsigned I = 0; I < 2; ++I) {
- ExprResult Arg = TheCall->getArg(I);
+ ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
+ if (Arg.isInvalid()) return true;
+ TheCall->setArg(I, Arg.get());
+
QualType Ty = Arg.get()->getType();
if (!Ty->isIntegerType()) {
S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
<< Ty << Arg.get()->getSourceRange();
return true;
}
- InitializedEntity Entity = InitializedEntity::InitializeParameter(
- S.getASTContext(), Ty, /*consume*/ false);
- Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
- if (Arg.isInvalid())
- return true;
- TheCall->setArg(I, Arg.get());
}
// Third argument should be a pointer to a non-const integer.
// IRGen correctly handles volatile, restrict, and address spaces, and
// the other qualifiers aren't possible.
{
- ExprResult Arg = TheCall->getArg(2);
+ ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
+ if (Arg.isInvalid()) return true;
+ TheCall->setArg(2, Arg.get());
+
QualType Ty = Arg.get()->getType();
const auto *PtrTy = Ty->getAs<PointerType>();
- if (!(PtrTy && PtrTy->getPointeeType()->isIntegerType() &&
- !PtrTy->getPointeeType().isConstQualified())) {
+ if (!PtrTy ||
+ !PtrTy->getPointeeType()->isIntegerType() ||
+ PtrTy->getPointeeType().isConstQualified()) {
S.Diag(Arg.get()->getBeginLoc(),
diag::err_overflow_builtin_must_be_ptr_int)
- << Ty << Arg.get()->getSourceRange();
+ << Ty << Arg.get()->getSourceRange();
return true;
}
- InitializedEntity Entity = InitializedEntity::InitializeParameter(
- S.getASTContext(), Ty, /*consume*/ false);
- Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
- if (Arg.isInvalid())
- return true;
- TheCall->setArg(2, Arg.get());
}
+
+ // Disallow signed ExtIntType args larger than 128 bits to mul function until
+ // we improve backend support.
+ if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
+ for (unsigned I = 0; I < 3; ++I) {
+ const auto Arg = TheCall->getArg(I);
+ // Third argument will be a pointer.
+ auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
+ if (Ty->isExtIntType() && Ty->isSignedIntegerType() &&
+ S.getASTContext().getIntWidth(Ty) > 128)
+ return S.Diag(Arg->getBeginLoc(),
+ diag::err_overflow_builtin_ext_int_max_size)
+ << 128;
+ }
+ }
+
return false;
}
@@ -390,13 +404,194 @@ static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
return false;
}
+namespace {
+
+class EstimateSizeFormatHandler
+ : public analyze_format_string::FormatStringHandler {
+ size_t Size;
+
+public:
+ EstimateSizeFormatHandler(StringRef Format)
+ : Size(std::min(Format.find(0), Format.size()) +
+ 1 /* null byte always written by sprintf */) {}
+
+ bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
+ const char *, unsigned SpecifierLen) override {
+
+ const size_t FieldWidth = computeFieldWidth(FS);
+ const size_t Precision = computePrecision(FS);
+
+ // The actual format.
+ switch (FS.getConversionSpecifier().getKind()) {
+ // Just a char.
+ case analyze_format_string::ConversionSpecifier::cArg:
+ case analyze_format_string::ConversionSpecifier::CArg:
+ Size += std::max(FieldWidth, (size_t)1);
+ break;
+ // Just an integer.
+ case analyze_format_string::ConversionSpecifier::dArg:
+ case analyze_format_string::ConversionSpecifier::DArg:
+ case analyze_format_string::ConversionSpecifier::iArg:
+ case analyze_format_string::ConversionSpecifier::oArg:
+ case analyze_format_string::ConversionSpecifier::OArg:
+ case analyze_format_string::ConversionSpecifier::uArg:
+ case analyze_format_string::ConversionSpecifier::UArg:
+ case analyze_format_string::ConversionSpecifier::xArg:
+ case analyze_format_string::ConversionSpecifier::XArg:
+ Size += std::max(FieldWidth, Precision);
+ break;
+
+ // %g style conversion switches between %f or %e style dynamically.
+ // %f always takes less space, so default to it.
+ case analyze_format_string::ConversionSpecifier::gArg:
+ case analyze_format_string::ConversionSpecifier::GArg:
+
+ // Floating point number in the form '[+]ddd.ddd'.
+ case analyze_format_string::ConversionSpecifier::fArg:
+ case analyze_format_string::ConversionSpecifier::FArg:
+ Size += std::max(FieldWidth, 1 /* integer part */ +
+ (Precision ? 1 + Precision
+ : 0) /* period + decimal */);
+ break;
+
+ // Floating point number in the form '[-]d.ddde[+-]dd'.
+ case analyze_format_string::ConversionSpecifier::eArg:
+ case analyze_format_string::ConversionSpecifier::EArg:
+ Size +=
+ std::max(FieldWidth,
+ 1 /* integer part */ +
+ (Precision ? 1 + Precision : 0) /* period + decimal */ +
+ 1 /* e or E letter */ + 2 /* exponent */);
+ break;
+
+ // Floating point number in the form '[-]0xh.hhhhp±dd'.
+ case analyze_format_string::ConversionSpecifier::aArg:
+ case analyze_format_string::ConversionSpecifier::AArg:
+ Size +=
+ std::max(FieldWidth,
+ 2 /* 0x */ + 1 /* integer part */ +
+ (Precision ? 1 + Precision : 0) /* period + decimal */ +
+ 1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
+ break;
+
+ // Just a string.
+ case analyze_format_string::ConversionSpecifier::sArg:
+ case analyze_format_string::ConversionSpecifier::SArg:
+ Size += FieldWidth;
+ break;
+
+ // Just a pointer in the form '0xddd'.
+ case analyze_format_string::ConversionSpecifier::pArg:
+ Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
+ break;
+
+ // A plain percent.
+ case analyze_format_string::ConversionSpecifier::PercentArg:
+ Size += 1;
+ break;
+
+ default:
+ break;
+ }
+
+ Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();
+
+ if (FS.hasAlternativeForm()) {
+ switch (FS.getConversionSpecifier().getKind()) {
+ default:
+ break;
+ // Force a leading '0'.
+ case analyze_format_string::ConversionSpecifier::oArg:
+ Size += 1;
+ break;
+ // Force a leading '0x'.
+ case analyze_format_string::ConversionSpecifier::xArg:
+ case analyze_format_string::ConversionSpecifier::XArg:
+ Size += 2;
+ break;
+ // Force a period '.' before decimal, even if precision is 0.
+ case analyze_format_string::ConversionSpecifier::aArg:
+ case analyze_format_string::ConversionSpecifier::AArg:
+ case analyze_format_string::ConversionSpecifier::eArg:
+ case analyze_format_string::ConversionSpecifier::EArg:
+ case analyze_format_string::ConversionSpecifier::fArg:
+ case analyze_format_string::ConversionSpecifier::FArg:
+ case analyze_format_string::ConversionSpecifier::gArg:
+ case analyze_format_string::ConversionSpecifier::GArg:
+ Size += (Precision ? 0 : 1);
+ break;
+ }
+ }
+ assert(SpecifierLen <= Size && "no underflow");
+ Size -= SpecifierLen;
+ return true;
+ }
+
+ size_t getSizeLowerBound() const { return Size; }
+
+private:
+ static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
+ const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
+ size_t FieldWidth = 0;
+ if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
+ FieldWidth = FW.getConstantAmount();
+ return FieldWidth;
+ }
+
+ static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
+ const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
+ size_t Precision = 0;
+
+ // See man 3 printf for default precision value based on the specifier.
+ switch (FW.getHowSpecified()) {
+ case analyze_format_string::OptionalAmount::NotSpecified:
+ switch (FS.getConversionSpecifier().getKind()) {
+ default:
+ break;
+ case analyze_format_string::ConversionSpecifier::dArg: // %d
+ case analyze_format_string::ConversionSpecifier::DArg: // %D
+ case analyze_format_string::ConversionSpecifier::iArg: // %i
+ Precision = 1;
+ break;
+ case analyze_format_string::ConversionSpecifier::oArg: // %d
+ case analyze_format_string::ConversionSpecifier::OArg: // %D
+ case analyze_format_string::ConversionSpecifier::uArg: // %d
+ case analyze_format_string::ConversionSpecifier::UArg: // %D
+ case analyze_format_string::ConversionSpecifier::xArg: // %d
+ case analyze_format_string::ConversionSpecifier::XArg: // %D
+ Precision = 1;
+ break;
+ case analyze_format_string::ConversionSpecifier::fArg: // %f
+ case analyze_format_string::ConversionSpecifier::FArg: // %F
+ case analyze_format_string::ConversionSpecifier::eArg: // %e
+ case analyze_format_string::ConversionSpecifier::EArg: // %E
+ case analyze_format_string::ConversionSpecifier::gArg: // %g
+ case analyze_format_string::ConversionSpecifier::GArg: // %G
+ Precision = 6;
+ break;
+ case analyze_format_string::ConversionSpecifier::pArg: // %d
+ Precision = 1;
+ break;
+ }
+ break;
+ case analyze_format_string::OptionalAmount::Constant:
+ Precision = FW.getConstantAmount();
+ break;
+ default:
+ break;
+ }
+ return Precision;
+ }
+};
+
+} // namespace
+
/// Check a call to BuiltinID for buffer overflows. If BuiltinID is a
/// __builtin_*_chk function, then use the object size argument specified in the
/// source. Otherwise, infer the object size using __builtin_object_size.
void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
CallExpr *TheCall) {
// FIXME: There are some more useful checks we could be doing here:
- // - Analyze the format string of sprintf to see how much of buffer is used.
// - Evaluate strlen of strcpy arguments, use as object size.
if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
@@ -407,12 +602,55 @@ void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
if (!BuiltinID)
return;
+ const TargetInfo &TI = getASTContext().getTargetInfo();
+ unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());
+
unsigned DiagID = 0;
bool IsChkVariant = false;
+ Optional<llvm::APSInt> UsedSize;
unsigned SizeIndex, ObjectIndex;
switch (BuiltinID) {
default:
return;
+ case Builtin::BIsprintf:
+ case Builtin::BI__builtin___sprintf_chk: {
+ size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
+ auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();
+
+ if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {
+
+ if (!Format->isAscii() && !Format->isUTF8())
+ return;
+
+ StringRef FormatStrRef = Format->getString();
+ EstimateSizeFormatHandler H(FormatStrRef);
+ const char *FormatBytes = FormatStrRef.data();
+ const ConstantArrayType *T =
+ Context.getAsConstantArrayType(Format->getType());
+ assert(T && "String literal not of constant array type!");
+ size_t TypeSize = T->getSize().getZExtValue();
+
+ // In case there's a null byte somewhere.
+ size_t StrLen =
+ std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
+ if (!analyze_format_string::ParsePrintfString(
+ H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
+ Context.getTargetInfo(), false)) {
+ DiagID = diag::warn_fortify_source_format_overflow;
+ UsedSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
+ .extOrTrunc(SizeTypeWidth);
+ if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
+ IsChkVariant = true;
+ ObjectIndex = 2;
+ } else {
+ IsChkVariant = false;
+ ObjectIndex = 0;
+ }
+ break;
+ }
+ }
+ return;
+ }
case Builtin::BI__builtin___memcpy_chk:
case Builtin::BI__builtin___memmove_chk:
case Builtin::BI__builtin___memset_chk:
@@ -505,19 +743,19 @@ void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
return;
// Get the object size in the target's size_t width.
- const TargetInfo &TI = getASTContext().getTargetInfo();
- unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());
ObjectSize = llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
}
// Evaluate the number of bytes of the object that this call will use.
- Expr::EvalResult Result;
- Expr *UsedSizeArg = TheCall->getArg(SizeIndex);
- if (!UsedSizeArg->EvaluateAsInt(Result, getASTContext()))
- return;
- llvm::APSInt UsedSize = Result.Val.getInt();
+ if (!UsedSize) {
+ Expr::EvalResult Result;
+ Expr *UsedSizeArg = TheCall->getArg(SizeIndex);
+ if (!UsedSizeArg->EvaluateAsInt(Result, getASTContext()))
+ return;
+ UsedSize = Result.Val.getInt().extOrTrunc(SizeTypeWidth);
+ }
- if (UsedSize.ule(ObjectSize))
+ if (UsedSize.getValue().ule(ObjectSize))
return;
StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
@@ -533,7 +771,7 @@ void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
PDiag(DiagID)
<< FunctionName << ObjectSize.toString(/*Radix=*/10)
- << UsedSize.toString(/*Radix=*/10));
+ << UsedSize.getValue().toString(/*Radix=*/10));
}
static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
@@ -1152,6 +1390,49 @@ CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
return true;
}
+static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
+ SourceLocation CallSiteLoc);
+
+/// Dispatch checking of a target-specific builtin call to the checker for
+/// the architecture of \p TI (which may differ from the primary target when
+/// the builtin belongs to an aux target, e.g. during offloading).
+/// \returns true if an error was diagnosed, false if the call is acceptable.
+bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall) {
+ switch (TI.getTriple().getArch()) {
+ default:
+ // Some builtins don't require additional checking, so just consider these
+ // acceptable.
+ return false;
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_32:
+ case llvm::Triple::aarch64_be:
+ return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
+ case llvm::Triple::bpfeb:
+ case llvm::Triple::bpfel:
+ return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
+ case llvm::Triple::hexagon:
+ return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ case llvm::Triple::systemz:
+ return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ case llvm::Triple::amdgcn:
+ return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
+ }
+}
+
ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
CallExpr *TheCall) {
@@ -1421,6 +1702,19 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case Builtin::BI__builtin_nontemporal_load:
case Builtin::BI__builtin_nontemporal_store:
return SemaBuiltinNontemporalOverloaded(TheCallResult);
+ case Builtin::BI__builtin_memcpy_inline: {
+ clang::Expr *SizeOp = TheCall->getArg(2);
+ // We warn about copying to or from `nullptr` pointers when `size` is
+ // greater than 0. When `size` is value dependent we cannot evaluate its
+ // value so we bail out.
+ if (SizeOp->isValueDependent())
+ break;
+ if (!SizeOp->EvaluateKnownConstInt(Context).isNullValue()) {
+ CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc());
+ CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc());
+ }
+ break;
+ }
#define BUILTIN(ID, TYPE, ATTRS)
#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
case Builtin::BI##ID: \
@@ -1447,7 +1741,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case Builtin::BI__builtin_add_overflow:
case Builtin::BI__builtin_sub_overflow:
case Builtin::BI__builtin_mul_overflow:
- if (SemaBuiltinOverflow(*this, TheCall))
+ if (SemaBuiltinOverflow(*this, TheCall, BuiltinID))
return ExprError();
break;
case Builtin::BI__builtin_operator_new:
@@ -1515,6 +1809,36 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
TheCall->setType(Context.IntTy);
break;
}
+ case Builtin::BI__builtin_expect_with_probability: {
+ // We first want to ensure we are called with 3 arguments
+ if (checkArgCount(*this, TheCall, 3))
+ return ExprError();
+ // then check probability is constant float in range [0.0, 1.0]
+ const Expr *ProbArg = TheCall->getArg(2);
+ SmallVector<PartialDiagnosticAt, 8> Notes;
+ Expr::EvalResult Eval;
+ Eval.Diag = &Notes;
+ if ((!ProbArg->EvaluateAsConstantExpr(Eval, Expr::EvaluateForCodeGen,
+ Context)) ||
+ !Eval.Val.isFloat()) {
+ Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float)
+ << ProbArg->getSourceRange();
+ for (const PartialDiagnosticAt &PDiag : Notes)
+ Diag(PDiag.first, PDiag.second);
+ return ExprError();
+ }
+ llvm::APFloat Probability = Eval.Val.getFloat();
+ bool LoseInfo = false;
+ Probability.convert(llvm::APFloat::IEEEdouble(),
+ llvm::RoundingMode::Dynamic, &LoseInfo);
+ if (!(Probability >= llvm::APFloat(0.0) &&
+ Probability <= llvm::APFloat(1.0))) {
+ Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range)
+ << ProbArg->getSourceRange();
+ return ExprError();
+ }
+ break;
+ }
case Builtin::BI__builtin_preserve_access_index:
if (SemaBuiltinPreserveAI(*this, TheCall))
return ExprError();
@@ -1608,62 +1932,55 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return ExprError();
break;
case Builtin::BI__builtin_os_log_format:
+ Cleanup.setExprNeedsCleanups(true);
+ LLVM_FALLTHROUGH;
case Builtin::BI__builtin_os_log_format_buffer_size:
if (SemaBuiltinOSLogFormat(TheCall))
return ExprError();
break;
+ case Builtin::BI__builtin_frame_address:
+ case Builtin::BI__builtin_return_address: {
+ if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF))
+ return ExprError();
+
+ // -Wframe-address warning if non-zero passed to builtin
+ // return/frame address.
+ Expr::EvalResult Result;
+ if (TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) &&
+ Result.Val.getInt() != 0)
+ Diag(TheCall->getBeginLoc(), diag::warn_frame_address)
+ << ((BuiltinID == Builtin::BI__builtin_return_address)
+ ? "__builtin_return_address"
+ : "__builtin_frame_address")
+ << TheCall->getSourceRange();
+ break;
+ }
+
+ case Builtin::BI__builtin_matrix_transpose:
+ return SemaBuiltinMatrixTranspose(TheCall, TheCallResult);
+
+ case Builtin::BI__builtin_matrix_column_major_load:
+ return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult);
+
+ case Builtin::BI__builtin_matrix_column_major_store:
+ return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult);
}
// Since the target specific builtins for each arch overlap, only check those
// of the arch we are compiling for.
if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
- switch (Context.getTargetInfo().getTriple().getArch()) {
- case llvm::Triple::arm:
- case llvm::Triple::armeb:
- case llvm::Triple::thumb:
- case llvm::Triple::thumbeb:
- if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall))
- return ExprError();
- break;
- case llvm::Triple::aarch64:
- case llvm::Triple::aarch64_32:
- case llvm::Triple::aarch64_be:
- if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall))
- return ExprError();
- break;
- case llvm::Triple::bpfeb:
- case llvm::Triple::bpfel:
- if (CheckBPFBuiltinFunctionCall(BuiltinID, TheCall))
- return ExprError();
- break;
- case llvm::Triple::hexagon:
- if (CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall))
- return ExprError();
- break;
- case llvm::Triple::mips:
- case llvm::Triple::mipsel:
- case llvm::Triple::mips64:
- case llvm::Triple::mips64el:
- if (CheckMipsBuiltinFunctionCall(BuiltinID, TheCall))
- return ExprError();
- break;
- case llvm::Triple::systemz:
- if (CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall))
- return ExprError();
- break;
- case llvm::Triple::x86:
- case llvm::Triple::x86_64:
- if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall))
- return ExprError();
- break;
- case llvm::Triple::ppc:
- case llvm::Triple::ppc64:
- case llvm::Triple::ppc64le:
- if (CheckPPCBuiltinFunctionCall(BuiltinID, TheCall))
- return ExprError();
- break;
- default:
- break;
+ if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
+ assert(Context.getAuxTargetInfo() &&
+ "Aux Target Builtin, but not an aux target?");
+
+ if (CheckTSBuiltinFunctionCall(
+ *Context.getAuxTargetInfo(),
+ Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall))
+ return ExprError();
+ } else {
+ if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID,
+ TheCall))
+ return ExprError();
}
}
@@ -1697,6 +2014,9 @@ static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
case NeonTypeFlags::Float64:
assert(!shift && "cannot shift float types!");
return (1 << IsQuad) - 1;
+ case NeonTypeFlags::BFloat16:
+ assert(!shift && "cannot shift float types!");
+ return (4 << IsQuad) - 1;
}
llvm_unreachable("Invalid NeonTypeFlag!");
}
@@ -1736,11 +2056,135 @@ static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
return Context.FloatTy;
case NeonTypeFlags::Float64:
return Context.DoubleTy;
+ case NeonTypeFlags::BFloat16:
+ return Context.BFloat16Ty;
}
llvm_unreachable("Invalid NeonTypeFlag!");
}
-bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+/// Perform immediate-operand checking for an AArch64 SVE builtin call.
+/// The (argument index, check kind, element size in bits) tuples for each
+/// builtin are populated from the TableGen-generated
+/// arm_sve_sema_rangechecks.inc; builtins with no entry are accepted as-is.
+/// \returns true if any immediate failed its check (diagnostics emitted).
+bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+ // Range check SVE intrinsics that take immediate values.
+ SmallVector<std::tuple<int,int,int>, 3> ImmChecks;
+
+ switch (BuiltinID) {
+ default:
+ return false;
+#define GET_SVE_IMMEDIATE_CHECK
+#include "clang/Basic/arm_sve_sema_rangechecks.inc"
+#undef GET_SVE_IMMEDIATE_CHECK
+ }
+
+ // Perform all the immediate checks for this builtin call. All checks are
+ // run (not short-circuited) so every bad immediate gets a diagnostic.
+ bool HasError = false;
+ for (auto &I : ImmChecks) {
+ int ArgNum, CheckTy, ElementSizeInBits;
+ std::tie(ArgNum, CheckTy, ElementSizeInBits) = I;
+
+ typedef bool(*OptionSetCheckFnTy)(int64_t Value);
+
+ // Function that checks whether the operand (ArgNum) is an immediate
+ // that is one of the predefined values.
+ auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm,
+ int ErrDiag) -> bool {
+ // We can't check the value of a dependent argument.
+ Expr *Arg = TheCall->getArg(ArgNum);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ return false;
+
+ // Check constant-ness first.
+ llvm::APSInt Imm;
+ if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm))
+ return true;
+
+ if (!CheckImm(Imm.getSExtValue()))
+ return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange();
+ return false;
+ };
+
+ // Dispatch on the kind of check. The Extract/Shift/LaneIndex bounds are
+ // derived from ElementSizeInBits; the 128 and 2048 constants below are
+ // bit widths (128-bit vector segment, 2048-bit maximum SVE register).
+ switch ((SVETypeFlags::ImmCheckType)CheckTy) {
+ case SVETypeFlags::ImmCheck0_31:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck0_13:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck1_16:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck0_7:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckExtract:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
+ (2048 / ElementSizeInBits) - 1))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckShiftRight:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckShiftRightNarrow:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1,
+ ElementSizeInBits / 2))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckShiftLeft:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
+ ElementSizeInBits - 1))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckLaneIndex:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
+ (128 / (1 * ElementSizeInBits)) - 1))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckLaneIndexCompRotate:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
+ (128 / (2 * ElementSizeInBits)) - 1))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckLaneIndexDot:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
+ (128 / (4 * ElementSizeInBits)) - 1))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckComplexRot90_270:
+ if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; },
+ diag::err_rotation_argument_to_cadd))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckComplexRotAll90:
+ if (CheckImmediateInSet(
+ [](int64_t V) {
+ return V == 0 || V == 90 || V == 180 || V == 270;
+ },
+ diag::err_rotation_argument_to_cmla))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck0_1:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck0_2:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck0_3:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3))
+ HasError = true;
+ break;
+ }
+ }
+
+ return HasError;
+}
+
+bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID, CallExpr *TheCall) {
llvm::APSInt Result;
uint64_t mask = 0;
unsigned TV = 0;
@@ -1774,12 +2218,11 @@ bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
QualType RHSTy = RHS.get()->getType();
- llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
+ llvm::Triple::ArchType Arch = TI.getTriple().getArch();
bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
Arch == llvm::Triple::aarch64_32 ||
Arch == llvm::Triple::aarch64_be;
- bool IsInt64Long =
- Context.getTargetInfo().getInt64Type() == TargetInfo::SignedLong;
+ bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
QualType EltTy =
getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
if (HasConstPtr)
@@ -1817,6 +2260,47 @@ bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
}
}
+/// Check a call to an Arm CDE builtin: per-builtin immediate checks come
+/// from the TableGen-generated arm_cde_builtin_sema.inc (which sets Err),
+/// and the coprocessor number in argument 0 must be one configured for CDE.
+/// \returns true if an error was diagnosed.
+bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall) {
+ bool Err = false;
+ switch (BuiltinID) {
+ default:
+ return false;
+#include "clang/Basic/arm_cde_builtin_sema.inc"
+ }
+
+ if (Err)
+ return true;
+
+ return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true);
+
+/// Check that the coprocessor-number immediate \p CoprocArg names a
+/// coprocessor of the expected kind: a CDE coprocessor when \p WantCDE is
+/// true, a non-CDE one otherwise (per TI.getARMCDECoprocMask()).
+/// Dependent or constant-evaluated arguments are accepted without checking.
+/// \returns true if a diagnostic was emitted.
+bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI,
+ const Expr *CoprocArg, bool WantCDE) {
+ if (isConstantEvaluated())
+ return false;
+
+ // We can't check the value of a dependent argument.
+ if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
+ return false;
+
+ // Callers guarantee the operand is an integer constant expression; the
+ // assert (message typo "Coprocossor" fixed) documents that contract.
+ llvm::APSInt CoprocNoAP;
+ bool IsICE = CoprocArg->isIntegerConstantExpr(CoprocNoAP, Context);
+ (void)IsICE;
+ assert(IsICE && "Coprocessor immediate is not a constant expression");
+ int64_t CoprocNo = CoprocNoAP.getExtValue();
+ assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");
+
+ // Only coprocessors 0-7 can be CDE coprocessors; the mask has one bit per
+ // coprocessor number.
+ uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
+ bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));
+
+ if (IsCDECoproc != WantCDE)
+ return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc)
+ << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();
+
+ return false;
+}
+
bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth) {
assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
@@ -1932,7 +2416,8 @@ bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
return false;
}
-bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall) {
if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
BuiltinID == ARM::BI__builtin_arm_ldaex ||
BuiltinID == ARM::BI__builtin_arm_strex ||
@@ -1955,10 +2440,12 @@ bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
BuiltinID == ARM::BI__builtin_arm_wsrp)
return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
- if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
+ if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
return true;
if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
return true;
+ if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
+ return true;
// For intrinsics which take an immediate value as part of the instruction,
// range check them here.
@@ -1981,11 +2468,33 @@ bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case ARM::BI__builtin_arm_isb:
case ARM::BI__builtin_arm_dbg:
return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15);
- }
-}
-
-bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
- CallExpr *TheCall) {
+ case ARM::BI__builtin_arm_cdp:
+ case ARM::BI__builtin_arm_cdp2:
+ case ARM::BI__builtin_arm_mcr:
+ case ARM::BI__builtin_arm_mcr2:
+ case ARM::BI__builtin_arm_mrc:
+ case ARM::BI__builtin_arm_mrc2:
+ case ARM::BI__builtin_arm_mcrr:
+ case ARM::BI__builtin_arm_mcrr2:
+ case ARM::BI__builtin_arm_mrrc:
+ case ARM::BI__builtin_arm_mrrc2:
+ case ARM::BI__builtin_arm_ldc:
+ case ARM::BI__builtin_arm_ldcl:
+ case ARM::BI__builtin_arm_ldc2:
+ case ARM::BI__builtin_arm_ldc2l:
+ case ARM::BI__builtin_arm_stc:
+ case ARM::BI__builtin_arm_stcl:
+ case ARM::BI__builtin_arm_stc2:
+ case ARM::BI__builtin_arm_stc2l:
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) ||
+ CheckARMCoprocessorImmediate(TI, TheCall->getArg(0),
+ /*WantCDE*/ false);
+ }
+}
+
+bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
BuiltinID == AArch64::BI__builtin_arm_ldaex ||
BuiltinID == AArch64::BI__builtin_arm_strex ||
@@ -2030,7 +2539,10 @@ bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
if (BuiltinID == AArch64::BI__getReg)
return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
- if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
+ if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
+ return true;
+
+ if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
return true;
// For intrinsics which take an immediate value as part of the instruction,
@@ -2049,17 +2561,33 @@ bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
CallExpr *TheCall) {
- assert(BuiltinID == BPF::BI__builtin_preserve_field_info &&
+ assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
+ BuiltinID == BPF::BI__builtin_btf_type_id) &&
"unexpected ARM builtin");
if (checkArgCount(*this, TheCall, 2))
return true;
+ Expr *Arg;
+ if (BuiltinID == BPF::BI__builtin_btf_type_id) {
+ // The second argument needs to be a constant int
+ llvm::APSInt Value;
+ Arg = TheCall->getArg(1);
+ if (!Arg->isIntegerConstantExpr(Value, Context)) {
+ Diag(Arg->getBeginLoc(), diag::err_btf_type_id_not_const)
+ << 2 << Arg->getSourceRange();
+ return true;
+ }
+
+ TheCall->setType(Context.UnsignedIntTy);
+ return false;
+ }
+
// The first argument needs to be a record field access.
// If it is an array element access, we delay decision
// to BPF backend to check whether the access is a
// field access or not.
- Expr *Arg = TheCall->getArg(0);
+ Arg = TheCall->getArg(0);
if (Arg->getType()->getAsPlaceholderType() ||
(Arg->IgnoreParens()->getObjectKind() != OK_BitField &&
!dyn_cast<MemberExpr>(Arg->IgnoreParens()) &&
@@ -2070,8 +2598,9 @@ bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
}
// The second argument needs to be a constant int
+ Arg = TheCall->getArg(1);
llvm::APSInt Value;
- if (!TheCall->getArg(1)->isIntegerConstantExpr(Value, Context)) {
+ if (!Arg->isIntegerConstantExpr(Value, Context)) {
Diag(Arg->getBeginLoc(), diag::err_preserve_field_info_not_const)
<< 2 << Arg->getSourceRange();
return true;
@@ -2081,825 +2610,6 @@ bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
return false;
}
-bool Sema::CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall) {
- struct BuiltinAndString {
- unsigned BuiltinID;
- const char *Str;
- };
-
- static BuiltinAndString ValidCPU[] = {
- { Hexagon::BI__builtin_HEXAGON_A6_vcmpbeq_notany, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_A6_vminub_RdP, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_F2_dfadd, "v66" },
- { Hexagon::BI__builtin_HEXAGON_F2_dfsub, "v66" },
- { Hexagon::BI__builtin_HEXAGON_M2_mnaci, "v66" },
- { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffb, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffub, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S2_mask, "v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_vsplatrbp, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_vtrunehb_ppp, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_S6_vtrunohb_ppp, "v62,v65,v66" },
- };
-
- static BuiltinAndString ValidHVX[] = {
- { Hexagon::BI__builtin_HEXAGON_V6_hi, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_hi_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_lo, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_lo_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_extractw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_extractw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_lvsplath, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_lvsplath_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_and, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_and_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_not, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_not_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_or, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_or_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_xor, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_xor_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsb, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsb_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddcarrysat, "v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddcarrysat_128B, "v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_valignb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_valignb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_valignbi, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vand, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vand_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandqrt, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvqv, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvqv_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvrt, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslhv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslhv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslwv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslwv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasr_into, "v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasr_into_128B, "v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vassign, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vassign_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vassignp, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vassignp_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgb, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgb_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgub, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgub_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguw, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguw_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vcl0h, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vcl0h_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vcl0w, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vcl0w_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vcombine, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vcombine_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vd0, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vd0_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdd0, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdd0_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdelta, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdelta_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_and, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_and_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_or, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_or_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_and, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_and_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_or, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_or_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_and, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_and_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_or, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_or_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_and, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_and_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_or, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_or_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlalignb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlalignb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrb, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrb_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlut4, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlut4_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxb, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxb_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxub, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxub_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vminb, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vminb_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vminh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vminh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vminub, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vminub_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vminuh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vminuh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vminw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vminw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabus, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpahb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybus, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyih, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyub, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmux, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vmux_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgb, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgb_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgub, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgub_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnormamth, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnormamth_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnot, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vnot_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vor, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vor_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackeb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackeb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackeh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackeh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackob, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackob_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackoh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackoh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw_128B, "v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrdelta, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrdelta_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt, "v65" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_128B, "v65" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc, "v65" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc_128B, "v65" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt, "v65" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_128B, "v65" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc, "v65" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B, "v65" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vror, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vror_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrotr, "v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrotr_128B, "v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundhb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundhb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundhub, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundhub_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundwh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundwh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsatdw, "v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsatdw_128B, "v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsathub, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsathub_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsatwh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsatwh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufeh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufeh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffob, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffob_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufoh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufoh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubhw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubhw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsububh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsububh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsububsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv_128B, "v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubw, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubw_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vswap, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vswap_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackob, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackob_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackub, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackub_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vxor, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vxor_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vzb, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vzb_128B, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vzh, "v60,v62,v65,v66" },
- { Hexagon::BI__builtin_HEXAGON_V6_vzh_128B, "v60,v62,v65,v66" },
- };
-
- // Sort the tables on first execution so we can binary search them.
- auto SortCmp = [](const BuiltinAndString &LHS, const BuiltinAndString &RHS) {
- return LHS.BuiltinID < RHS.BuiltinID;
- };
- static const bool SortOnce =
- (llvm::sort(ValidCPU, SortCmp),
- llvm::sort(ValidHVX, SortCmp), true);
- (void)SortOnce;
- auto LowerBoundCmp = [](const BuiltinAndString &BI, unsigned BuiltinID) {
- return BI.BuiltinID < BuiltinID;
- };
-
- const TargetInfo &TI = Context.getTargetInfo();
-
- const BuiltinAndString *FC =
- llvm::lower_bound(ValidCPU, BuiltinID, LowerBoundCmp);
- if (FC != std::end(ValidCPU) && FC->BuiltinID == BuiltinID) {
- const TargetOptions &Opts = TI.getTargetOpts();
- StringRef CPU = Opts.CPU;
- if (!CPU.empty()) {
- assert(CPU.startswith("hexagon") && "Unexpected CPU name");
- CPU.consume_front("hexagon");
- SmallVector<StringRef, 3> CPUs;
- StringRef(FC->Str).split(CPUs, ',');
- if (llvm::none_of(CPUs, [CPU](StringRef S) { return S == CPU; }))
- return Diag(TheCall->getBeginLoc(),
- diag::err_hexagon_builtin_unsupported_cpu);
- }
- }
-
- const BuiltinAndString *FH =
- llvm::lower_bound(ValidHVX, BuiltinID, LowerBoundCmp);
- if (FH != std::end(ValidHVX) && FH->BuiltinID == BuiltinID) {
- if (!TI.hasFeature("hvx"))
- return Diag(TheCall->getBeginLoc(),
- diag::err_hexagon_builtin_requires_hvx);
-
- SmallVector<StringRef, 3> HVXs;
- StringRef(FH->Str).split(HVXs, ',');
- bool IsValid = llvm::any_of(HVXs,
- [&TI] (StringRef V) {
- std::string F = "hvx" + V.str();
- return TI.hasFeature(F);
- });
- if (!IsValid)
- return Diag(TheCall->getBeginLoc(),
- diag::err_hexagon_builtin_unsupported_hvx);
- }
-
- return false;
-}
-
bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
struct ArgInfo {
uint8_t OpNum;
@@ -2916,7 +2626,7 @@ bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
{ Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} },
{ Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} },
{ Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} },
- { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 0 }} },
+ { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} },
{ Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} },
{ Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} },
{ Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} },
@@ -3137,17 +2847,17 @@ bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
CallExpr *TheCall) {
- return CheckHexagonBuiltinCpu(BuiltinID, TheCall) ||
- CheckHexagonBuiltinArgument(BuiltinID, TheCall);
+ return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
}
-bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
- return CheckMipsBuiltinCpu(BuiltinID, TheCall) ||
+bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID, CallExpr *TheCall) {
+ return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
CheckMipsBuiltinArgument(BuiltinID, TheCall);
}
-bool Sema::CheckMipsBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall) {
- const TargetInfo &TI = Context.getTargetInfo();
+bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall) {
if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
BuiltinID <= Mips::BI__builtin_mips_lwx) {
@@ -3340,10 +3050,14 @@ bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
+ case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break;
+ case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break;
case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
+ case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break;
+ case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break;
}
if (!m)
@@ -3353,15 +3067,13 @@ bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
SemaBuiltinConstantArgMultiple(TheCall, i, m);
}
-bool Sema::CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall) {
unsigned i = 0, l = 0, u = 0;
bool Is64BitBltin = BuiltinID == PPC::BI__builtin_divde ||
BuiltinID == PPC::BI__builtin_divdeu ||
BuiltinID == PPC::BI__builtin_bpermd;
- bool IsTarget64Bit = Context.getTargetInfo()
- .getTypeWidth(Context
- .getTargetInfo()
- .getIntPtrType()) == 64;
+ bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64;
bool IsBltinExtDiv = BuiltinID == PPC::BI__builtin_divwe ||
BuiltinID == PPC::BI__builtin_divweu ||
BuiltinID == PPC::BI__builtin_divde ||
@@ -3371,14 +3083,13 @@ bool Sema::CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
<< TheCall->getSourceRange();
- if ((IsBltinExtDiv && !Context.getTargetInfo().hasFeature("extdiv")) ||
- (BuiltinID == PPC::BI__builtin_bpermd &&
- !Context.getTargetInfo().hasFeature("bpermd")))
+ if ((IsBltinExtDiv && !TI.hasFeature("extdiv")) ||
+ (BuiltinID == PPC::BI__builtin_bpermd && !TI.hasFeature("bpermd")))
return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7)
<< TheCall->getSourceRange();
auto SemaVSXCheck = [&](CallExpr *TheCall) -> bool {
- if (!Context.getTargetInfo().hasFeature("vsx"))
+ if (!TI.hasFeature("vsx"))
return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7)
<< TheCall->getSourceRange();
return false;
@@ -3414,10 +3125,75 @@ bool Sema::CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
case PPC::BI__builtin_pack_vector_int128:
return SemaVSXCheck(TheCall);
+ case PPC::BI__builtin_altivec_vgnb:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7);
+ case PPC::BI__builtin_vsx_xxeval:
+ return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255);
+ case PPC::BI__builtin_altivec_vsldbi:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
+ case PPC::BI__builtin_altivec_vsrdbi:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
+ case PPC::BI__builtin_vsx_xxpermx:
+ return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
}
return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}
+bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
+ CallExpr *TheCall) {
+  // Position of the memory-order and synchronization-scope arguments in the
+ unsigned OrderIndex, ScopeIndex;
+ switch (BuiltinID) {
+ case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
+ case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
+ case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
+ case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
+ OrderIndex = 2;
+ ScopeIndex = 3;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_fence:
+ OrderIndex = 0;
+ ScopeIndex = 1;
+ break;
+ default:
+ return false;
+ }
+
+ ExprResult Arg = TheCall->getArg(OrderIndex);
+ auto ArgExpr = Arg.get();
+ Expr::EvalResult ArgResult;
+
+ if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
+ return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
+ << ArgExpr->getType();
+ int ord = ArgResult.Val.getInt().getZExtValue();
+
+  // Check validity of memory ordering as per C11 / C++11's memory model.
+ switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
+ case llvm::AtomicOrderingCABI::acquire:
+ case llvm::AtomicOrderingCABI::release:
+ case llvm::AtomicOrderingCABI::acq_rel:
+ case llvm::AtomicOrderingCABI::seq_cst:
+ break;
+ default: {
+ return Diag(ArgExpr->getBeginLoc(),
+ diag::warn_atomic_op_has_invalid_memory_order)
+ << ArgExpr->getSourceRange();
+ }
+ }
+
+ Arg = TheCall->getArg(ScopeIndex);
+ ArgExpr = Arg.get();
+ Expr::EvalResult ArgResult1;
+ // Check that sync scope is a constant literal
+ if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Expr::EvaluateForCodeGen,
+ Context))
+ return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
+ << ArgExpr->getType();
+
+ return false;
+}
+
bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
CallExpr *TheCall) {
if (BuiltinID == SystemZ::BI__builtin_tabort) {
@@ -3486,7 +3262,8 @@ bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
/// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *).
/// This checks that the target supports __builtin_cpu_supports and
/// that the string argument is constant and valid.
-static bool SemaBuiltinCpuSupports(Sema &S, CallExpr *TheCall) {
+static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI,
+ CallExpr *TheCall) {
Expr *Arg = TheCall->getArg(0);
// Check if the argument is a string literal.
@@ -3497,7 +3274,7 @@ static bool SemaBuiltinCpuSupports(Sema &S, CallExpr *TheCall) {
// Check the contents of the string.
StringRef Feature =
cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
- if (!S.Context.getTargetInfo().validateCpuSupports(Feature))
+ if (!TI.validateCpuSupports(Feature))
return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports)
<< Arg->getSourceRange();
return false;
@@ -3506,7 +3283,7 @@ static bool SemaBuiltinCpuSupports(Sema &S, CallExpr *TheCall) {
/// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *).
/// This checks that the target supports __builtin_cpu_is and
/// that the string argument is constant and valid.
-static bool SemaBuiltinCpuIs(Sema &S, CallExpr *TheCall) {
+static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) {
Expr *Arg = TheCall->getArg(0);
// Check if the argument is a string literal.
@@ -3517,7 +3294,7 @@ static bool SemaBuiltinCpuIs(Sema &S, CallExpr *TheCall) {
// Check the contents of the string.
StringRef Feature =
cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
- if (!S.Context.getTargetInfo().validateCpuIs(Feature))
+ if (!TI.validateCpuIs(Feature))
return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is)
<< Arg->getSourceRange();
return false;
@@ -3831,6 +3608,64 @@ bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
<< Arg->getSourceRange();
}
+enum { TileRegLow = 0, TileRegHigh = 7 };
+
+bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
+ ArrayRef<int> ArgNums) {
+ for (int ArgNum : ArgNums) {
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
+ return true;
+ }
+ return false;
+}
+
+bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, int ArgNum) {
+ return SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh);
+}
+
+bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
+ ArrayRef<int> ArgNums) {
+  // The maximum number of tile registers is TileRegHigh + 1, so we use a
+  // bitset in which each bit represents whether a tile register is in use.
+ std::bitset<TileRegHigh + 1> ArgValues;
+ for (int ArgNum : ArgNums) {
+ llvm::APSInt Arg;
+ SemaBuiltinConstantArg(TheCall, ArgNum, Arg);
+ int ArgExtValue = Arg.getExtValue();
+ assert((ArgExtValue >= TileRegLow || ArgExtValue <= TileRegHigh) &&
+ "Incorrect tile register num.");
+ if (ArgValues.test(ArgExtValue))
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_x86_builtin_tile_arg_duplicate)
+ << TheCall->getArg(ArgNum)->getSourceRange();
+ ArgValues.set(ArgExtValue);
+ }
+ return false;
+}
+
+bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
+ ArrayRef<int> ArgNums) {
+ return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
+ CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
+}
+
+bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
+ switch (BuiltinID) {
+ default:
+ return false;
+ case X86::BI__builtin_ia32_tileloadd64:
+ case X86::BI__builtin_ia32_tileloaddt164:
+ case X86::BI__builtin_ia32_tilestored64:
+ case X86::BI__builtin_ia32_tilezero:
+ return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
+ case X86::BI__builtin_ia32_tdpbssd:
+ case X86::BI__builtin_ia32_tdpbsud:
+ case X86::BI__builtin_ia32_tdpbusd:
+ case X86::BI__builtin_ia32_tdpbuud:
+ case X86::BI__builtin_ia32_tdpbf16ps:
+ return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
+ }
+}
static bool isX86_32Builtin(unsigned BuiltinID) {
// These builtins only work on x86-32 targets.
switch (BuiltinID) {
@@ -3842,15 +3677,16 @@ static bool isX86_32Builtin(unsigned BuiltinID) {
return false;
}
-bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall) {
if (BuiltinID == X86::BI__builtin_cpu_supports)
- return SemaBuiltinCpuSupports(*this, TheCall);
+ return SemaBuiltinCpuSupports(*this, TI, TheCall);
if (BuiltinID == X86::BI__builtin_cpu_is)
- return SemaBuiltinCpuIs(*this, TheCall);
+ return SemaBuiltinCpuIs(*this, TI, TheCall);
// Check for 32-bit only builtins on a 64-bit target.
- const llvm::Triple &TT = Context.getTargetInfo().getTriple();
+ const llvm::Triple &TT = TI.getTriple();
if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
return Diag(TheCall->getCallee()->getBeginLoc(),
diag::err_32_bit_builtin_64_bit_tgt);
@@ -3863,6 +3699,10 @@ bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
return true;
+ // If the intrinsic has tile arguments, make sure they are valid.
+ if (CheckX86BuiltinTileArguments(BuiltinID, TheCall))
+ return true;
+
// For intrinsics which take an immediate value as part of the instruction,
// range check them here.
int i = 0, l = 0, u = 0;
@@ -4473,6 +4313,24 @@ void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
}
}
+ if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) {
+ auto *AA = FDecl->getAttr<AllocAlignAttr>();
+ const Expr *Arg = Args[AA->getParamIndex().getASTIndex()];
+ if (!Arg->isValueDependent()) {
+ Expr::EvalResult Align;
+ if (Arg->EvaluateAsInt(Align, Context)) {
+ const llvm::APSInt &I = Align.Val.getInt();
+ if (!I.isPowerOf2())
+ Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two)
+ << Arg->getSourceRange();
+
+ if (I > Sema::MaximumAlignment)
+ Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great)
+ << Arg->getSourceRange() << Sema::MaximumAlignment;
+ }
+ }
+ }
+
if (FD)
diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc);
}
@@ -5491,6 +5349,15 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
// gracefully.
TheCall->setType(ResultType);
+ // Prohibit use of _ExtInt with atomic builtins.
+ // The arguments would have already been converted to the first argument's
+ // type, so only need to check the first argument.
+ const auto *ExtIntValType = ValType->getAs<ExtIntType>();
+ if (ExtIntValType && !llvm::isPowerOf2_64(ExtIntValType->getNumBits())) {
+ Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size);
+ return ExprError();
+ }
+
return TheCallResult;
}
@@ -6193,11 +6060,9 @@ bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
<< Arg->getSourceRange();
- // Alignment calculations can wrap around if it's greater than 2**29.
- unsigned MaximumAlignment = 536870912;
- if (Result > MaximumAlignment)
+ if (Result > Sema::MaximumAlignment)
Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great)
- << Arg->getSourceRange() << MaximumAlignment;
+ << Arg->getSourceRange() << Sema::MaximumAlignment;
}
if (NumArgs > 2) {
@@ -6412,7 +6277,8 @@ static bool IsShiftedByte(llvm::APSInt Value) {
/// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is
/// a constant expression representing an arbitrary byte value shifted left by
/// a multiple of 8 bits.
-bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum) {
+bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
+ unsigned ArgBits) {
llvm::APSInt Result;
// We can't check the value of a dependent argument.
@@ -6424,6 +6290,10 @@ bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum) {
if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
return true;
+ // Truncate to the given size.
+ Result = Result.getLoBits(ArgBits);
+ Result.setIsUnsigned(true);
+
if (IsShiftedByte(Result))
return false;
@@ -6437,7 +6307,8 @@ bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum) {
/// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some
/// Arm MVE intrinsics.
bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall,
- int ArgNum) {
+ int ArgNum,
+ unsigned ArgBits) {
llvm::APSInt Result;
// We can't check the value of a dependent argument.
@@ -6449,6 +6320,10 @@ bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall,
if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
return true;
+ // Truncate to the given size.
+ Result = Result.getLoBits(ArgBits);
+ Result.setIsUnsigned(true);
+
// Check to see if it's in either of the required forms.
if (IsShiftedByte(Result) ||
(Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF))
@@ -10228,6 +10103,9 @@ struct IntRange {
false/*NonNegative*/);
}
+ if (const auto *EIT = dyn_cast<ExtIntType>(T))
+ return IntRange(EIT->getNumBits(), EIT->isUnsigned());
+
const BuiltinType *BT = cast<BuiltinType>(T);
assert(BT->isInteger());
@@ -10251,6 +10129,9 @@ struct IntRange {
if (const EnumType *ET = dyn_cast<EnumType>(T))
T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr();
+ if (const auto *EIT = dyn_cast<ExtIntType>(T))
+ return IntRange(EIT->getNumBits(), EIT->isUnsigned());
+
const BuiltinType *BT = cast<BuiltinType>(T);
assert(BT->isInteger());
@@ -12064,27 +11945,31 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
}
}
-static void CheckConditionalOperator(Sema &S, ConditionalOperator *E,
+static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
SourceLocation CC, QualType T);
static void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
SourceLocation CC, bool &ICContext) {
E = E->IgnoreParenImpCasts();
- if (isa<ConditionalOperator>(E))
- return CheckConditionalOperator(S, cast<ConditionalOperator>(E), CC, T);
+ if (auto *CO = dyn_cast<AbstractConditionalOperator>(E))
+ return CheckConditionalOperator(S, CO, CC, T);
AnalyzeImplicitConversions(S, E, CC);
if (E->getType() != T)
return CheckImplicitConversion(S, E, T, CC, &ICContext);
}
-static void CheckConditionalOperator(Sema &S, ConditionalOperator *E,
+static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
SourceLocation CC, QualType T) {
AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc());
+ Expr *TrueExpr = E->getTrueExpr();
+ if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E))
+ TrueExpr = BCO->getCommon();
+
bool Suspicious = false;
- CheckConditionalOperand(S, E->getTrueExpr(), T, CC, Suspicious);
+ CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious);
CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious);
if (T->isBooleanType())
@@ -12103,7 +11988,7 @@ static void CheckConditionalOperator(Sema &S, ConditionalOperator *E,
if (E->getType() == T) return;
Suspicious = false;
- CheckImplicitConversion(S, E->getTrueExpr()->IgnoreParenImpCasts(),
+ CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(),
E->getType(), CC, &Suspicious);
if (!Suspicious)
CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(),
@@ -12120,24 +12005,44 @@ static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) {
CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC);
}
-/// AnalyzeImplicitConversions - Find and report any interesting
-/// implicit conversions in the given expression. There are a couple
-/// of competing diagnostics here, -Wconversion and -Wsign-compare.
-static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
- bool IsListInit/*= false*/) {
+namespace {
+// One pending expression for the worklist-driven (data recursive) variant of
+// AnalyzeImplicitConversions below.
+struct AnalyzeImplicitConversionsWorkItem {
+ Expr *E;
+ // Location the conversion diagnostics are attributed to.
+ SourceLocation CC;
+ // True inside a C++ list-initialization, where C++11 narrowing rules
+ // already diagnose int-to-float precision loss.
+ bool IsListInit;
+};
+}
+
+/// Data recursive variant of AnalyzeImplicitConversions. Subexpressions
+/// that should be visited are added to WorkList.
+static void AnalyzeImplicitConversions(
+ Sema &S, AnalyzeImplicitConversionsWorkItem Item,
+ llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) {
+ Expr *OrigE = Item.E;
+ SourceLocation CC = Item.CC;
+
QualType T = OrigE->getType();
Expr *E = OrigE->IgnoreParenImpCasts();
// Propagate whether we are in a C++ list initialization expression.
// If so, we do not issue warnings for implicit int-float conversion
// precision loss, because C++11 narrowing already handles it.
- IsListInit =
- IsListInit || (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus);
+ bool IsListInit = Item.IsListInit ||
+ (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus);
if (E->isTypeDependent() || E->isValueDependent())
return;
- if (const auto *UO = dyn_cast<UnaryOperator>(E))
+ Expr *SourceExpr = E;
+ // Examine, but don't traverse into the source expression of an
+ // OpaqueValueExpr, since it may have multiple parents and we don't want to
+ // emit duplicate diagnostics. It's fine to examine the form or attempt to
+ // evaluate it in the context of checking the specific conversion to T though.
+ if (auto *OVE = dyn_cast<OpaqueValueExpr>(E))
+ if (auto *Src = OVE->getSourceExpr())
+ SourceExpr = Src;
+
+ if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr))
if (UO->getOpcode() == UO_Not &&
UO->getSubExpr()->isKnownToHaveBooleanValue())
S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool)
@@ -12146,21 +12051,20 @@ static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
// For conditional operators, we analyze the arguments as if they
// were being fed directly into the output.
- if (isa<ConditionalOperator>(E)) {
- ConditionalOperator *CO = cast<ConditionalOperator>(E);
+ if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) {
CheckConditionalOperator(S, CO, CC, T);
return;
}
// Check implicit argument conversions for function calls.
- if (CallExpr *Call = dyn_cast<CallExpr>(E))
+ if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr))
CheckImplicitArgumentConversions(S, Call, CC);
// Go ahead and check any implicit conversions we might have skipped.
// The non-canonical typecheck is just an optimization;
// CheckImplicitConversion will filter out dead implicit conversions.
- if (E->getType() != T)
- CheckImplicitConversion(S, E, T, CC, nullptr, IsListInit);
+ if (SourceExpr->getType() != T)
+ CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit);
// Now continue drilling into this expression.
@@ -12170,7 +12074,7 @@ static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
// FIXME: Use a more uniform representation for this.
for (auto *SE : POE->semantics())
if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE))
- AnalyzeImplicitConversions(S, OVE->getSourceExpr(), CC, IsListInit);
+ WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit});
}
// Skip past explicit casts.
@@ -12178,7 +12082,8 @@ static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
E = CE->getSubExpr()->IgnoreParenImpCasts();
if (!CE->getType()->isVoidType() && E->getType()->isAtomicType())
S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
- return AnalyzeImplicitConversions(S, E, CC, IsListInit);
+ WorkList.push_back({E, CC, IsListInit});
+ return;
}
if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
@@ -12217,7 +12122,7 @@ static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
// Ignore checking string literals that are in logical and operators.
// This is a common pattern for asserts.
continue;
- AnalyzeImplicitConversions(S, ChildExpr, CC, IsListInit);
+ WorkList.push_back({ChildExpr, CC, IsListInit});
}
if (BO && BO->isLogicalOp()) {
@@ -12241,6 +12146,17 @@ static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
}
}
+/// AnalyzeImplicitConversions - Find and report any interesting
+/// implicit conversions in the given expression. There are a couple
+/// of competing diagnostics here, -Wconversion and -Wsign-compare.
+///
+/// Implemented with an explicit LIFO worklist rather than recursion so that
+/// deeply nested expressions do not overflow the stack; the helper above
+/// pushes subexpressions that still need to be visited onto WorkList.
+static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
+ bool IsListInit/*= false*/) {
+ llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList;
+ WorkList.push_back({OrigE, CC, IsListInit});
+ while (!WorkList.empty())
+ AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList);
+}
+
/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
// Taking into account implicit conversions,
@@ -13182,6 +13098,11 @@ public:
}
void VisitCallExpr(const CallExpr *CE) {
+ // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.
+
+ if (CE->isUnevaluatedBuiltinCall(Context))
+ return;
+
// C++11 [intro.execution]p15:
// When calling a function [...], every value computation and side effect
// associated with any argument expression, or with the postfix expression
@@ -13189,10 +13110,165 @@ public:
// expression or statement in the body of the function [and thus before
// the value computation of its result].
SequencedSubexpression Sequenced(*this);
- SemaRef.runWithSufficientStackSpace(CE->getExprLoc(),
- [&] { Base::VisitCallExpr(CE); });
+ SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] {
+ // C++17 [expr.call]p5
+ // The postfix-expression is sequenced before each expression in the
+ // expression-list and any default argument. [...]
+ SequenceTree::Seq CalleeRegion;
+ SequenceTree::Seq OtherRegion;
+ if (SemaRef.getLangOpts().CPlusPlus17) {
+ CalleeRegion = Tree.allocate(Region);
+ OtherRegion = Tree.allocate(Region);
+ } else {
+ CalleeRegion = Region;
+ OtherRegion = Region;
+ }
+ SequenceTree::Seq OldRegion = Region;
- // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.
+ // Visit the callee expression first.
+ Region = CalleeRegion;
+ if (SemaRef.getLangOpts().CPlusPlus17) {
+ SequencedSubexpression Sequenced(*this);
+ Visit(CE->getCallee());
+ } else {
+ Visit(CE->getCallee());
+ }
+
+ // Then visit the argument expressions.
+ Region = OtherRegion;
+ for (const Expr *Argument : CE->arguments())
+ Visit(Argument);
+
+ Region = OldRegion;
+ if (SemaRef.getLangOpts().CPlusPlus17) {
+ Tree.merge(CalleeRegion);
+ Tree.merge(OtherRegion);
+ }
+ });
+ }
+
+ void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) {
+ // C++17 [over.match.oper]p2:
+ // [...] the operator notation is first transformed to the equivalent
+ // function-call notation as summarized in Table 12 (where @ denotes one
+ // of the operators covered in the specified subclause). However, the
+ // operands are sequenced in the order prescribed for the built-in
+ // operator (Clause 8).
+ //
+ // From the above only overloaded binary operators and overloaded call
+ // operators have sequencing rules in C++17 that we need to handle
+ // separately.
+ if (!SemaRef.getLangOpts().CPlusPlus17 ||
+ (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call))
+ return VisitCallExpr(CXXOCE);
+
+ enum {
+ NoSequencing,
+ LHSBeforeRHS,
+ RHSBeforeLHS,
+ LHSBeforeRest
+ } SequencingKind;
+ switch (CXXOCE->getOperator()) {
+ case OO_Equal:
+ case OO_PlusEqual:
+ case OO_MinusEqual:
+ case OO_StarEqual:
+ case OO_SlashEqual:
+ case OO_PercentEqual:
+ case OO_CaretEqual:
+ case OO_AmpEqual:
+ case OO_PipeEqual:
+ case OO_LessLessEqual:
+ case OO_GreaterGreaterEqual:
+ SequencingKind = RHSBeforeLHS;
+ break;
+
+ case OO_LessLess:
+ case OO_GreaterGreater:
+ case OO_AmpAmp:
+ case OO_PipePipe:
+ case OO_Comma:
+ case OO_ArrowStar:
+ case OO_Subscript:
+ SequencingKind = LHSBeforeRHS;
+ break;
+
+ case OO_Call:
+ SequencingKind = LHSBeforeRest;
+ break;
+
+ default:
+ SequencingKind = NoSequencing;
+ break;
+ }
+
+ if (SequencingKind == NoSequencing)
+ return VisitCallExpr(CXXOCE);
+
+ // This is a call, so all subexpressions are sequenced before the result.
+ SequencedSubexpression Sequenced(*this);
+
+ SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] {
+ assert(SemaRef.getLangOpts().CPlusPlus17 &&
+ "Should only get there with C++17 and above!");
+ assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) &&
+ "Should only get there with an overloaded binary operator"
+ " or an overloaded call operator!");
+
+ if (SequencingKind == LHSBeforeRest) {
+ assert(CXXOCE->getOperator() == OO_Call &&
+ "We should only have an overloaded call operator here!");
+
+ // This is very similar to VisitCallExpr, except that we only have the
+ // C++17 case. The postfix-expression is the first argument of the
+ // CXXOperatorCallExpr. The expressions in the expression-list, if any,
+ // are in the following arguments.
+ //
+ // Note that we intentionally do not visit the callee expression since
+ // it is just a decayed reference to a function.
+ SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region);
+ SequenceTree::Seq ArgsRegion = Tree.allocate(Region);
+ SequenceTree::Seq OldRegion = Region;
+
+ assert(CXXOCE->getNumArgs() >= 1 &&
+ "An overloaded call operator must have at least one argument"
+ " for the postfix-expression!");
+ const Expr *PostfixExpr = CXXOCE->getArgs()[0];
+ llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1,
+ CXXOCE->getNumArgs() - 1);
+
+ // Visit the postfix-expression first.
+ {
+ Region = PostfixExprRegion;
+ SequencedSubexpression Sequenced(*this);
+ Visit(PostfixExpr);
+ }
+
+ // Then visit the argument expressions.
+ Region = ArgsRegion;
+ for (const Expr *Arg : Args)
+ Visit(Arg);
+
+ Region = OldRegion;
+ Tree.merge(PostfixExprRegion);
+ Tree.merge(ArgsRegion);
+ } else {
+ assert(CXXOCE->getNumArgs() == 2 &&
+ "Should only have two arguments here!");
+ assert((SequencingKind == LHSBeforeRHS ||
+ SequencingKind == RHSBeforeLHS) &&
+ "Unexpected sequencing kind!");
+
+ // We do not visit the callee expression since it is just a decayed
+ // reference to a function.
+ const Expr *E1 = CXXOCE->getArg(0);
+ const Expr *E2 = CXXOCE->getArg(1);
+ if (SequencingKind == RHSBeforeLHS)
+ std::swap(E1, E2);
+
+ return VisitSequencedExpressions(E1, E2);
+ }
+ });
}
void VisitCXXConstructExpr(const CXXConstructExpr *CCE) {
@@ -13323,11 +13399,12 @@ bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
// C99 6.9.1p5: If the declarator includes a parameter type list, the
// declaration of each parameter shall include an identifier.
- if (CheckParameterNames &&
- Param->getIdentifier() == nullptr &&
- !Param->isImplicit() &&
- !getLangOpts().CPlusPlus)
- Diag(Param->getLocation(), diag::err_parameter_name_omitted);
+ if (CheckParameterNames && Param->getIdentifier() == nullptr &&
+ !Param->isImplicit() && !getLangOpts().CPlusPlus) {
+ // Diagnose this as an extension in C17 and earlier.
+ if (!getLangOpts().C2x)
+ Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x);
+ }
// C99 6.7.5.3p12:
// If the function declarator is not part of a definition of that
@@ -13380,17 +13457,233 @@ bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
return HasInvalidParm;
}
-/// A helper function to get the alignment of a Decl referred to by DeclRefExpr
-/// or MemberExpr.
-static CharUnits getDeclAlign(Expr *E, CharUnits TypeAlign,
- ASTContext &Context) {
- if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
- return Context.getDeclAlign(DRE->getDecl());
+Optional<std::pair<CharUnits, CharUnits>>
+static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx);
+
+/// Compute the alignment and offset of the base class object given the
+/// derived-to-base cast expression and the alignment and offset of the derived
+/// class object.
+///
+/// Walks the cast path one base specifier at a time, accumulating the
+/// non-virtual base-class offsets from the record layout.
+static std::pair<CharUnits, CharUnits>
+getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType,
+ CharUnits BaseAlignment, CharUnits Offset,
+ ASTContext &Ctx) {
+ for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE;
+ ++PathI) {
+ const CXXBaseSpecifier *Base = *PathI;
+ const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl();
+ if (Base->isVirtual()) {
+ // The complete object may have a lower alignment than the non-virtual
+ // alignment of the base, in which case the base may be misaligned. Choose
+ // the smaller of the non-virtual alignment and BaseAlignment, which is a
+ // conservative lower bound of the complete object alignment.
+ CharUnits NonVirtualAlignment =
+ Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment();
+ BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment);
+ // A virtual base's placement within the complete object is unknown
+ // here, so the accumulated offset is reset.
+ Offset = CharUnits::Zero();
+ } else {
+ const ASTRecordLayout &RL =
+ Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl());
+ Offset += RL.getBaseClassOffset(BaseDecl);
+ }
+ DerivedType = Base->getType();
+ }
+
+ return std::make_pair(BaseAlignment, Offset);
+}
- if (const auto *ME = dyn_cast<MemberExpr>(E))
- return Context.getDeclAlign(ME->getMemberDecl());
+/// Compute the alignment and offset of a binary additive operator.
+/// PtrE is the pointer operand, IntE the integer operand; IsSub selects
+/// pointer subtraction. Returns None when nothing can be deduced.
+static Optional<std::pair<CharUnits, CharUnits>>
+getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE,
+ bool IsSub, ASTContext &Ctx) {
+ QualType PointeeType = PtrE->getType()->getPointeeType();
- return TypeAlign;
+ // Without a constant element size there is no way to turn an index into a
+ // byte offset.
+ if (!PointeeType->isConstantSizeType())
+ return llvm::None;
+
+ auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx);
+
+ if (!P)
+ return llvm::None;
+
+ llvm::APSInt IdxRes;
+ CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType);
+ if (IntE->isIntegerConstantExpr(IdxRes, Ctx)) {
+ CharUnits Offset = EltSize * IdxRes.getExtValue();
+ if (IsSub)
+ Offset = -Offset;
+ return std::make_pair(P->first, P->second + Offset);
+ }
+
+ // If the integer expression isn't a constant expression, compute the lower
+ // bound of the alignment using the alignment and offset of the pointer
+ // expression and the element size.
+ return std::make_pair(
+ P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize),
+ CharUnits::Zero());
+}
+
+/// This helper function takes an lvalue expression and returns the alignment of
+/// a VarDecl and a constant offset from the VarDecl.
+///
+/// Returns None for expression forms it does not understand; callers then fall
+/// back to the type's alignment.
+Optional<std::pair<CharUnits, CharUnits>>
+static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) {
+ E = E->IgnoreParens();
+ switch (E->getStmtClass()) {
+ default:
+ break;
+ case Stmt::CStyleCastExprClass:
+ case Stmt::CXXStaticCastExprClass:
+ case Stmt::ImplicitCastExprClass: {
+ auto *CE = cast<CastExpr>(E);
+ const Expr *From = CE->getSubExpr();
+ switch (CE->getCastKind()) {
+ default:
+ break;
+ case CK_NoOp:
+ return getBaseAlignmentAndOffsetFromLValue(From, Ctx);
+ case CK_UncheckedDerivedToBase:
+ case CK_DerivedToBase: {
+ auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx);
+ if (!P)
+ break;
+ return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first,
+ P->second, Ctx);
+ }
+ }
+ break;
+ }
+ case Stmt::ArraySubscriptExprClass: {
+ auto *ASE = cast<ArraySubscriptExpr>(E);
+ return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(),
+ false, Ctx);
+ }
+ case Stmt::DeclRefExprClass: {
+ if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) {
+ // FIXME: If VD is captured by copy or is an escaping __block variable,
+ // use the alignment of VD's type.
+ if (!VD->getType()->isReferenceType())
+ return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero());
+ // For a reference, look through to whatever it was bound to.
+ if (VD->hasInit())
+ return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx);
+ }
+ break;
+ }
+ case Stmt::MemberExprClass: {
+ auto *ME = cast<MemberExpr>(E);
+ auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
+ if (!FD || FD->getType()->isReferenceType())
+ break;
+ Optional<std::pair<CharUnits, CharUnits>> P;
+ if (ME->isArrow())
+ P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx);
+ else
+ P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx);
+ if (!P)
+ break;
+ const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent());
+ // ASTRecordLayout::getFieldOffset returns the offset in *bits*; it must
+ // be converted to CharUnits rather than treated as a byte count.
+ uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex());
+ return std::make_pair(P->first,
+ P->second + Ctx.toCharUnitsFromBits(Offset));
+ }
+ case Stmt::UnaryOperatorClass: {
+ auto *UO = cast<UnaryOperator>(E);
+ switch (UO->getOpcode()) {
+ default:
+ break;
+ case UO_Deref:
+ return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx);
+ }
+ break;
+ }
+ case Stmt::BinaryOperatorClass: {
+ auto *BO = cast<BinaryOperator>(E);
+ auto Opcode = BO->getOpcode();
+ switch (Opcode) {
+ default:
+ break;
+ case BO_Comma:
+ // The value of a comma expression is its right operand.
+ return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx);
+ }
+ break;
+ }
+ }
+ return llvm::None;
+}
+
+/// This helper function takes a pointer expression and returns the alignment of
+/// a VarDecl and a constant offset from the VarDecl.
+///
+/// Mirrors getBaseAlignmentAndOffsetFromLValue, but for rvalue pointer
+/// expressions; returns None for forms it cannot analyze.
+Optional<std::pair<CharUnits, CharUnits>>
+static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) {
+ E = E->IgnoreParens();
+ switch (E->getStmtClass()) {
+ default:
+ break;
+ case Stmt::CStyleCastExprClass:
+ case Stmt::CXXStaticCastExprClass:
+ case Stmt::ImplicitCastExprClass: {
+ auto *CE = cast<CastExpr>(E);
+ const Expr *From = CE->getSubExpr();
+ switch (CE->getCastKind()) {
+ default:
+ break;
+ case CK_NoOp:
+ return getBaseAlignmentAndOffsetFromPtr(From, Ctx);
+ case CK_ArrayToPointerDecay:
+ // The decayed pointer has the alignment of the array lvalue.
+ return getBaseAlignmentAndOffsetFromLValue(From, Ctx);
+ case CK_UncheckedDerivedToBase:
+ case CK_DerivedToBase: {
+ auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx);
+ if (!P)
+ break;
+ return getDerivedToBaseAlignmentAndOffset(
+ CE, From->getType()->getPointeeType(), P->first, P->second, Ctx);
+ }
+ }
+ break;
+ }
+ case Stmt::CXXThisExprClass: {
+ // 'this' is only guaranteed to be aligned to the class's non-virtual
+ // alignment (the object may be a base subobject).
+ auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl();
+ CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment();
+ return std::make_pair(Alignment, CharUnits::Zero());
+ }
+ case Stmt::UnaryOperatorClass: {
+ auto *UO = cast<UnaryOperator>(E);
+ if (UO->getOpcode() == UO_AddrOf)
+ return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx);
+ break;
+ }
+ case Stmt::BinaryOperatorClass: {
+ auto *BO = cast<BinaryOperator>(E);
+ auto Opcode = BO->getOpcode();
+ switch (Opcode) {
+ default:
+ break;
+ case BO_Add:
+ case BO_Sub: {
+ const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS();
+ // For 'int + ptr', normalize so LHS is the pointer operand.
+ if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType())
+ std::swap(LHS, RHS);
+ return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub,
+ Ctx);
+ }
+ case BO_Comma:
+ return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx);
+ }
+ break;
+ }
+ }
+ return llvm::None;
+}
+
+// Compute a conservative lower bound for the alignment of the pointer value
+// E, used by -Wcast-align below.
+static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) {
+ // See if we can compute the alignment of a VarDecl and an offset from it.
+ Optional<std::pair<CharUnits, CharUnits>> P =
+ getBaseAlignmentAndOffsetFromPtr(E, S.Context);
+
+ if (P)
+ return P->first.alignmentAtOffset(P->second);
+
+ // If that failed, return the type's alignment.
+ return S.Context.getTypeAlignInChars(E->getType()->getPointeeType());
}
/// CheckCastAlign - Implements -Wcast-align, which warns when a
@@ -13420,21 +13713,13 @@ void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
if (!SrcPtr) return;
QualType SrcPointee = SrcPtr->getPointeeType();
- // Whitelist casts from cv void*. We already implicitly
- // whitelisted casts to cv void*, since they have alignment 1.
- // Also whitelist casts involving incomplete types, which implicitly
+ // Explicitly allow casts from cv void*. We already implicitly
+ // allowed casts to cv void*, since they have alignment 1.
+ // Also allow casts involving incomplete types, which implicitly
// includes 'void'.
if (SrcPointee->isIncompleteType()) return;
- CharUnits SrcAlign = Context.getTypeAlignInChars(SrcPointee);
-
- if (auto *CE = dyn_cast<CastExpr>(Op)) {
- if (CE->getCastKind() == CK_ArrayToPointerDecay)
- SrcAlign = getDeclAlign(CE->getSubExpr(), SrcAlign, Context);
- } else if (auto *UO = dyn_cast<UnaryOperator>(Op)) {
- if (UO->getOpcode() == UO_AddrOf)
- SrcAlign = getDeclAlign(UO->getSubExpr(), SrcAlign, Context);
- }
+ CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this);
if (SrcAlign >= DestAlign) return;
@@ -13916,7 +14201,7 @@ static bool isSetterLikeSelector(Selector sel) {
if (str.startswith("set"))
str = str.substr(3);
else if (str.startswith("add")) {
- // Specially whitelist 'addOperationWithBlock:'.
+ // Specially allow 'addOperationWithBlock:'.
if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock"))
return false;
str = str.substr(3);
@@ -14242,12 +14527,12 @@ void Sema::checkUnsafeExprAssigns(SourceLocation Loc,
return;
unsigned Attributes = PD->getPropertyAttributes();
- if (Attributes & ObjCPropertyDecl::OBJC_PR_assign) {
+ if (Attributes & ObjCPropertyAttribute::kind_assign) {
// when 'assign' attribute was not explicitly specified
// by user, ignore it and rely on property type itself
// for lifetime info.
unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten();
- if (!(AsWrittenAttr & ObjCPropertyDecl::OBJC_PR_assign) &&
+ if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) &&
LHSType->isObjCRetainableType())
return;
@@ -14259,8 +14544,7 @@ void Sema::checkUnsafeExprAssigns(SourceLocation Loc,
}
RHS = cast->getSubExpr();
}
- }
- else if (Attributes & ObjCPropertyDecl::OBJC_PR_weak) {
+ } else if (Attributes & ObjCPropertyAttribute::kind_weak) {
if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true))
return;
}
@@ -15045,3 +15329,259 @@ void Sema::CheckAddressOfPackedMember(Expr *rhs) {
rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1,
_2, _3, _4));
}
+
+ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall,
+ ExprResult CallResult) {
+ if (checkArgCount(*this, TheCall, 1))
+ return ExprError();
+
+ ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0));
+ if (MatrixArg.isInvalid())
+ return MatrixArg;
+ Expr *Matrix = MatrixArg.get();
+
+ auto *MType = Matrix->getType()->getAs<ConstantMatrixType>();
+ if (!MType) {
+ Diag(Matrix->getBeginLoc(), diag::err_builtin_matrix_arg);
+ return ExprError();
+ }
+
+ // Create returned matrix type by swapping rows and columns of the argument
+ // matrix type.
+ QualType ResultType = Context.getConstantMatrixType(
+ MType->getElementType(), MType->getNumColumns(), MType->getNumRows());
+
+ // Change the return type to the type of the returned matrix.
+ TheCall->setType(ResultType);
+
+ // Update call argument to use the possibly converted matrix argument.
+ TheCall->setArg(0, Matrix);
+ return CallResult;
+}
+
+// Get and verify the matrix dimensions.
+static llvm::Optional<unsigned>
+getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) {
+ llvm::APSInt Value(64);
+ SourceLocation ErrorPos;
+ if (!Expr->isIntegerConstantExpr(Value, S.Context, &ErrorPos)) {
+ S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg)
+ << Name;
+ return {};
+ }
+ uint64_t Dim = Value.getZExtValue();
+ if (!ConstantMatrixType::isDimensionValid(Dim)) {
+ S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension)
+ << Name << ConstantMatrixType::getMaxElementsPerDimension();
+ return {};
+ }
+ return Dim;
+}
+
+ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
+ ExprResult CallResult) {
+ if (!getLangOpts().MatrixTypes) {
+ Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled);
+ return ExprError();
+ }
+
+ if (checkArgCount(*this, TheCall, 4))
+ return ExprError();
+
+ unsigned PtrArgIdx = 0;
+ Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
+ Expr *RowsExpr = TheCall->getArg(1);
+ Expr *ColumnsExpr = TheCall->getArg(2);
+ Expr *StrideExpr = TheCall->getArg(3);
+
+ bool ArgError = false;
+
+ // Check pointer argument.
+ {
+ ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
+ if (PtrConv.isInvalid())
+ return PtrConv;
+ PtrExpr = PtrConv.get();
+ TheCall->setArg(0, PtrExpr);
+ if (PtrExpr->isTypeDependent()) {
+ TheCall->setType(Context.DependentTy);
+ return TheCall;
+ }
+ }
+
+ auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
+ QualType ElementTy;
+ if (!PtrTy) {
+ Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg)
+ << PtrArgIdx + 1;
+ ArgError = true;
+ } else {
+ ElementTy = PtrTy->getPointeeType().getUnqualifiedType();
+
+ if (!ConstantMatrixType::isValidElementType(ElementTy)) {
+ Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg)
+ << PtrArgIdx + 1;
+ ArgError = true;
+ }
+ }
+
+ // Apply default Lvalue conversions and convert the expression to size_t.
+ auto ApplyArgumentConversions = [this](Expr *E) {
+ ExprResult Conv = DefaultLvalueConversion(E);
+ if (Conv.isInvalid())
+ return Conv;
+
+ return tryConvertExprToType(Conv.get(), Context.getSizeType());
+ };
+
+ // Apply conversion to row and column expressions.
+ ExprResult RowsConv = ApplyArgumentConversions(RowsExpr);
+ if (!RowsConv.isInvalid()) {
+ RowsExpr = RowsConv.get();
+ TheCall->setArg(1, RowsExpr);
+ } else
+ RowsExpr = nullptr;
+
+ ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr);
+ if (!ColumnsConv.isInvalid()) {
+ ColumnsExpr = ColumnsConv.get();
+ TheCall->setArg(2, ColumnsExpr);
+ } else
+ ColumnsExpr = nullptr;
+
+ // If any part of the result matrix type is still pending, just use
+ // Context.DependentTy, until all parts are resolved.
+ if ((RowsExpr && RowsExpr->isTypeDependent()) ||
+ (ColumnsExpr && ColumnsExpr->isTypeDependent())) {
+ TheCall->setType(Context.DependentTy);
+ return CallResult;
+ }
+
+ // Check row and column dimensions.
+ llvm::Optional<unsigned> MaybeRows;
+ if (RowsExpr)
+ MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this);
+
+ llvm::Optional<unsigned> MaybeColumns;
+ if (ColumnsExpr)
+ MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this);
+
+ // Check stride argument.
+ ExprResult StrideConv = ApplyArgumentConversions(StrideExpr);
+ if (StrideConv.isInvalid())
+ return ExprError();
+ StrideExpr = StrideConv.get();
+ TheCall->setArg(3, StrideExpr);
+
+ llvm::APSInt Value(64);
+ if (MaybeRows && StrideExpr->isIntegerConstantExpr(Value, Context)) {
+ uint64_t Stride = Value.getZExtValue();
+ if (Stride < *MaybeRows) {
+ Diag(StrideExpr->getBeginLoc(),
+ diag::err_builtin_matrix_stride_too_small);
+ ArgError = true;
+ }
+ }
+
+ if (ArgError || !MaybeRows || !MaybeColumns)
+ return ExprError();
+
+ TheCall->setType(
+ Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns));
+ return CallResult;
+}
+
+ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
+ ExprResult CallResult) {
+ if (checkArgCount(*this, TheCall, 3))
+ return ExprError();
+
+ unsigned PtrArgIdx = 1;
+ Expr *MatrixExpr = TheCall->getArg(0);
+ Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
+ Expr *StrideExpr = TheCall->getArg(2);
+
+ bool ArgError = false;
+
+ {
+ ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr);
+ if (MatrixConv.isInvalid())
+ return MatrixConv;
+ MatrixExpr = MatrixConv.get();
+ TheCall->setArg(0, MatrixExpr);
+ }
+ if (MatrixExpr->isTypeDependent()) {
+ TheCall->setType(Context.DependentTy);
+ return TheCall;
+ }
+
+ auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>();
+ if (!MatrixTy) {
+ Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_matrix_arg) << 0;
+ ArgError = true;
+ }
+
+ {
+ ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
+ if (PtrConv.isInvalid())
+ return PtrConv;
+ PtrExpr = PtrConv.get();
+ TheCall->setArg(1, PtrExpr);
+ if (PtrExpr->isTypeDependent()) {
+ TheCall->setType(Context.DependentTy);
+ return TheCall;
+ }
+ }
+
+ // Check pointer argument.
+ auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
+ if (!PtrTy) {
+ Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg)
+ << PtrArgIdx + 1;
+ ArgError = true;
+ } else {
+ QualType ElementTy = PtrTy->getPointeeType();
+ if (ElementTy.isConstQualified()) {
+ Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const);
+ ArgError = true;
+ }
+ ElementTy = ElementTy.getUnqualifiedType().getCanonicalType();
+ if (MatrixTy &&
+ !Context.hasSameType(ElementTy, MatrixTy->getElementType())) {
+ Diag(PtrExpr->getBeginLoc(),
+ diag::err_builtin_matrix_pointer_arg_mismatch)
+ << ElementTy << MatrixTy->getElementType();
+ ArgError = true;
+ }
+ }
+
+ // Apply default Lvalue conversions and convert the stride expression to
+ // size_t.
+ {
+ ExprResult StrideConv = DefaultLvalueConversion(StrideExpr);
+ if (StrideConv.isInvalid())
+ return StrideConv;
+
+ StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType());
+ if (StrideConv.isInvalid())
+ return StrideConv;
+ StrideExpr = StrideConv.get();
+ TheCall->setArg(2, StrideExpr);
+ }
+
+ // Check stride argument.
+ llvm::APSInt Value(64);
+ if (MatrixTy && StrideExpr->isIntegerConstantExpr(Value, Context)) {
+ uint64_t Stride = Value.getZExtValue();
+ if (Stride < MatrixTy->getNumRows()) {
+ Diag(StrideExpr->getBeginLoc(),
+ diag::err_builtin_matrix_stride_too_small);
+ ArgError = true;
+ }
+ }
+
+ if (ArgError)
+ return ExprError();
+
+ return CallResult;
+}
diff --git a/clang/lib/Sema/SemaCodeComplete.cpp b/clang/lib/Sema/SemaCodeComplete.cpp
index 7260977c634d..0a8a27068ebf 100644
--- a/clang/lib/Sema/SemaCodeComplete.cpp
+++ b/clang/lib/Sema/SemaCodeComplete.cpp
@@ -9,25 +9,36 @@
// This file defines the code-completion semantic actions.
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTConcept.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/QualTypeNames.h"
+#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/Type.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/CodeCompleteConsumer.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Designator.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Overload.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -36,7 +47,9 @@
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
#include <list>
#include <map>
#include <string>
@@ -1676,11 +1689,9 @@ static void AddTypeSpecifierResults(const LangOptions &LangOpts,
Results.AddResult(Result("class", CCP_Type));
Results.AddResult(Result("wchar_t", CCP_Type));
- // typename qualified-id
+ // typename name
Builder.AddTypedTextChunk("typename");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Builder.AddPlaceholderChunk("qualifier");
- Builder.AddTextChunk("::");
Builder.AddPlaceholderChunk("name");
Results.AddResult(Result(Builder.TakeString()));
@@ -1807,6 +1818,18 @@ static void AddTypedefResult(ResultBuilder &Results) {
Results.AddResult(CodeCompletionResult(Builder.TakeString()));
}
+// using name = type
+static void AddUsingAliasResult(CodeCompletionBuilder &Builder,
+ ResultBuilder &Results) {
+ Builder.AddTypedTextChunk("using");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("name");
+ Builder.AddChunk(CodeCompletionString::CK_Equal);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
+ Results.AddResult(CodeCompletionResult(Builder.TakeString()));
+}
+
static bool WantTypesInContext(Sema::ParserCompletionContext CCC,
const LangOptions &LangOpts) {
switch (CCC) {
@@ -2050,6 +2073,9 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
Builder.AddChunk(CodeCompletionString::CK_SemiColon);
Results.AddResult(Result(Builder.TakeString()));
+ if (SemaRef.getLangOpts().CPlusPlus11)
+ AddUsingAliasResult(Builder, Results);
+
// using typename qualifier::name (only in a dependent context)
if (SemaRef.CurContext->isDependentContext()) {
Builder.AddTypedTextChunk("using typename");
@@ -2130,6 +2156,9 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
case Sema::PCC_RecoveryInFunction:
case Sema::PCC_Statement: {
+ if (SemaRef.getLangOpts().CPlusPlus11)
+ AddUsingAliasResult(Builder, Results);
+
AddTypedefResult(Results);
if (SemaRef.getLangOpts().CPlusPlus && Results.includeCodePatterns() &&
@@ -2748,7 +2777,7 @@ FormatFunctionParameter(const PrintingPolicy &Policy, const ParmVarDecl *Param,
std::string Result;
if (Param->getIdentifier() && !ObjCMethodParam && !SuppressName)
- Result = Param->getIdentifier()->getName();
+ Result = std::string(Param->getIdentifier()->getName());
QualType Type = Param->getType();
if (ObjCSubsts)
@@ -2787,7 +2816,7 @@ FormatFunctionParameter(const PrintingPolicy &Policy, const ParmVarDecl *Param,
// for the block; just use the parameter type as a placeholder.
std::string Result;
if (!ObjCMethodParam && Param->getIdentifier())
- Result = Param->getIdentifier()->getName();
+ Result = std::string(Param->getIdentifier()->getName());
QualType Type = Param->getType().getUnqualifiedType();
@@ -3002,7 +3031,7 @@ static void AddTemplateParameterChunks(
} else if (NonTypeTemplateParmDecl *NTTP =
dyn_cast<NonTypeTemplateParmDecl>(*P)) {
if (NTTP->getIdentifier())
- PlaceholderStr = NTTP->getIdentifier()->getName();
+ PlaceholderStr = std::string(NTTP->getIdentifier()->getName());
NTTP->getType().getAsStringInternal(PlaceholderStr, Policy);
HasDefaultArg = NTTP->hasDefaultArgument();
} else {
@@ -3705,8 +3734,11 @@ CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
Result.addBriefComment(RC->getBriefText(S.getASTContext()));
}
AddResultTypeChunk(S.Context, Policy, FDecl, QualType(), Result);
- Result.AddTextChunk(
- Result.getAllocator().CopyString(FDecl->getNameAsString()));
+
+ std::string Name;
+ llvm::raw_string_ostream OS(Name);
+ FDecl->getDeclName().print(OS, Policy);
+ Result.AddTextChunk(Result.getAllocator().CopyString(OS.str()));
} else {
Result.AddResultTypeChunk(Result.getAllocator().CopyString(
Proto->getReturnType().getAsString(Policy)));
@@ -4329,7 +4361,7 @@ static void AddLambdaCompletion(ResultBuilder &Results,
First = false;
constexpr llvm::StringLiteral NamePlaceholder = "!#!NAME_GOES_HERE!#!";
- std::string Type = NamePlaceholder;
+ std::string Type = std::string(NamePlaceholder);
Parameter.getAsStringInternal(Type, PrintingPolicy(LangOpts));
llvm::StringRef Prefix, Suffix;
std::tie(Prefix, Suffix) = llvm::StringRef(Type).split(NamePlaceholder);
@@ -4719,6 +4751,386 @@ static void AddRecordMembersCompletionResults(
}
}
+// Returns the RecordDecl inside the BaseType, falling back to primary template
+// in case of specializations. Since we might not have a decl for the
+// instantiation/specialization yet, e.g. dependent code.
+static RecordDecl *getAsRecordDecl(const QualType BaseType) {
+ if (auto *RD = BaseType->getAsRecordDecl())
+ return RD;
+
+ if (const auto *TST = BaseType->getAs<TemplateSpecializationType>()) {
+ if (const auto *TD = dyn_cast_or_null<ClassTemplateDecl>(
+ TST->getTemplateName().getAsTemplateDecl())) {
+ return TD->getTemplatedDecl();
+ }
+ }
+
+ return nullptr;
+}
+
+namespace {
+// Collects completion-relevant information about a concept-constrained type T.
+// In particular, examines the constraint expressions to find members of T.
+//
+// The design is very simple: we walk down each constraint looking for
+// expressions of the form T.foo().
+// If we're extra lucky, the return type is specified.
+// We don't do any clever handling of && or || in constraint expressions, we
+// take members from both branches.
+//
+// For example, given:
+// template <class T> concept X = requires (T t, string& s) { t.print(s); };
+// template <X U> void foo(U u) { u.^ }
+// We want to suggest the inferred member function 'print(string)'.
+// We see that u has type U, so X<U> holds.
+// X<U> requires t.print(s) to be valid, where t has type U (substituted for T).
+// By looking at the CallExpr we find the signature of print().
+//
+// While we tend to know in advance which kind of members (access via . -> ::)
+// we want, it's simpler just to gather them all and post-filter.
+//
+// FIXME: some of this machinery could be used for non-concept type-parms too,
+// enabling completion for type parameters based on other uses of that param.
+//
+// FIXME: there are other cases where a type can be constrained by a concept,
+// e.g. inside `if constexpr(ConceptSpecializationExpr) { ... }`
+class ConceptInfo {
+public:
+ // Describes a likely member of a type, inferred by concept constraints.
+ // Offered as a code completion in T., T-> and T:: contexts.
+ struct Member {
+ // Always non-null: we only handle members with ordinary identifier names.
+ const IdentifierInfo *Name = nullptr;
+ // Set for functions we've seen called.
+ // We don't have the declared parameter types, only the actual types of
+ // arguments we've seen. These are still valuable, as it's hard to render
+ // a useful function completion with neither parameter types nor names!
+ llvm::Optional<SmallVector<QualType, 1>> ArgTypes;
+ // Whether this is accessed as T.member, T->member, or T::member.
+ enum AccessOperator {
+ Colons,
+ Arrow,
+ Dot,
+ } Operator = Dot;
+ // What's known about the type of a variable or return type of a function.
+ const TypeConstraint *ResultType = nullptr;
+ // FIXME: also track:
+ // - kind of entity (function/variable/type), to expose structured results
+ // - template args kinds/types, as a proxy for template params
+
+ // For now we simply return these results as "pattern" strings.
+ CodeCompletionString *render(Sema &S, CodeCompletionAllocator &Alloc,
+ CodeCompletionTUInfo &Info) const {
+ CodeCompletionBuilder B(Alloc, Info);
+ // Result type
+ if (ResultType) {
+ std::string AsString;
+ {
+ llvm::raw_string_ostream OS(AsString);
+ QualType ExactType = deduceType(*ResultType);
+ if (!ExactType.isNull())
+ ExactType.print(OS, getCompletionPrintingPolicy(S));
+ else
+ ResultType->print(OS, getCompletionPrintingPolicy(S));
+ }
+ B.AddResultTypeChunk(Alloc.CopyString(AsString));
+ }
+ // Member name
+ B.AddTypedTextChunk(Alloc.CopyString(Name->getName()));
+ // Function argument list
+ if (ArgTypes) {
+ B.AddChunk(clang::CodeCompletionString::CK_LeftParen);
+ bool First = true;
+ for (QualType Arg : *ArgTypes) {
+ if (First)
+ First = false;
+ else {
+ B.AddChunk(clang::CodeCompletionString::CK_Comma);
+ B.AddChunk(clang::CodeCompletionString::CK_HorizontalSpace);
+ }
+ B.AddPlaceholderChunk(Alloc.CopyString(
+ Arg.getAsString(getCompletionPrintingPolicy(S))));
+ }
+ B.AddChunk(clang::CodeCompletionString::CK_RightParen);
+ }
+ return B.TakeString();
+ }
+ };
+
+ // BaseType is the type parameter T to infer members from.
+ // T must be accessible within S, as we use it to find the template entity
+ // that T is attached to in order to gather the relevant constraints.
+ ConceptInfo(const TemplateTypeParmType &BaseType, Scope *S) {
+ auto *TemplatedEntity = getTemplatedEntity(BaseType.getDecl(), S);
+ for (const Expr *E : constraintsForTemplatedEntity(TemplatedEntity))
+ believe(E, &BaseType);
+ }
+
+ std::vector<Member> members() {
+ std::vector<Member> Results;
+ for (const auto &E : this->Results)
+ Results.push_back(E.second);
+ llvm::sort(Results, [](const Member &L, const Member &R) {
+ return L.Name->getName() < R.Name->getName();
+ });
+ return Results;
+ }
+
+private:
+ // Infer members of T, given that the expression E (dependent on T) is true.
+ void believe(const Expr *E, const TemplateTypeParmType *T) {
+ if (!E || !T)
+ return;
+ if (auto *CSE = dyn_cast<ConceptSpecializationExpr>(E)) {
+ // If the concept is
+ // template <class A, class B> concept CD = f<A, B>();
+ // And the concept specialization is
+ // CD<int, T>
+ // Then we're substituting T for B, so we want to make f<A, B>() true
+ // by adding members to B - i.e. believe(f<A, B>(), B);
+ //
+ // For simplicity:
+ // - we don't attempt to substitute int for A
+ // - when T is used in other ways (like CD<T*>) we ignore it
+ ConceptDecl *CD = CSE->getNamedConcept();
+ TemplateParameterList *Params = CD->getTemplateParameters();
+ unsigned Index = 0;
+ for (const auto &Arg : CSE->getTemplateArguments()) {
+ if (Index >= Params->size())
+ break; // Won't happen in valid code.
+ if (isApprox(Arg, T)) {
+ auto *TTPD = dyn_cast<TemplateTypeParmDecl>(Params->getParam(Index));
+ if (!TTPD)
+ continue;
+ // T was used as an argument, and bound to the parameter TT.
+ auto *TT = cast<TemplateTypeParmType>(TTPD->getTypeForDecl());
+ // So now we know the constraint as a function of TT is true.
+ believe(CD->getConstraintExpr(), TT);
+ // (concepts themselves have no associated constraints to require)
+ }
+
+ ++Index;
+ }
+ } else if (auto *BO = dyn_cast<BinaryOperator>(E)) {
+ // For A && B, we can infer members from both branches.
+ // For A || B, the union is still more useful than the intersection.
+ if (BO->getOpcode() == BO_LAnd || BO->getOpcode() == BO_LOr) {
+ believe(BO->getLHS(), T);
+ believe(BO->getRHS(), T);
+ }
+ } else if (auto *RE = dyn_cast<RequiresExpr>(E)) {
+ // A requires(){...} lets us infer members from each requirement.
+ for (const concepts::Requirement *Req : RE->getRequirements()) {
+ if (!Req->isDependent())
+ continue; // Can't tell us anything about T.
+ // Now Req cannot be a substitution-error: those aren't dependent.
+
+ if (auto *TR = dyn_cast<concepts::TypeRequirement>(Req)) {
+ // Do a full traversal so we get `foo` from `typename T::foo::bar`.
+ QualType AssertedType = TR->getType()->getType();
+ ValidVisitor(this, T).TraverseType(AssertedType);
+ } else if (auto *ER = dyn_cast<concepts::ExprRequirement>(Req)) {
+ ValidVisitor Visitor(this, T);
+ // If we have a type constraint on the value of the expression,
+ // AND the whole outer expression describes a member, then we'll
+ // be able to use the constraint to provide the return type.
+ if (ER->getReturnTypeRequirement().isTypeConstraint()) {
+ Visitor.OuterType =
+ ER->getReturnTypeRequirement().getTypeConstraint();
+ Visitor.OuterExpr = ER->getExpr();
+ }
+ Visitor.TraverseStmt(ER->getExpr());
+ } else if (auto *NR = dyn_cast<concepts::NestedRequirement>(Req)) {
+ believe(NR->getConstraintExpr(), T);
+ }
+ }
+ }
+ }
+
+ // This visitor infers members of T based on traversing expressions/types
+ // that involve T. It is invoked with code known to be valid for T.
+ class ValidVisitor : public RecursiveASTVisitor<ValidVisitor> {
+ ConceptInfo *Outer;
+ const TemplateTypeParmType *T;
+
+ CallExpr *Caller = nullptr;
+ Expr *Callee = nullptr;
+
+ public:
+ // If set, OuterExpr is constrained by OuterType.
+ Expr *OuterExpr = nullptr;
+ const TypeConstraint *OuterType = nullptr;
+
+ ValidVisitor(ConceptInfo *Outer, const TemplateTypeParmType *T)
+ : Outer(Outer), T(T) {
+ assert(T);
+ }
+
+ // In T.foo or T->foo, `foo` is a member function/variable.
+ bool VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E) {
+ const Type *Base = E->getBaseType().getTypePtr();
+ bool IsArrow = E->isArrow();
+ if (Base->isPointerType() && IsArrow) {
+ IsArrow = false;
+ Base = Base->getPointeeType().getTypePtr();
+ }
+ if (isApprox(Base, T))
+ addValue(E, E->getMember(), IsArrow ? Member::Arrow : Member::Dot);
+ return true;
+ }
+
+ // In T::foo, `foo` is a static member function/variable.
+ bool VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
+ if (E->getQualifier() && isApprox(E->getQualifier()->getAsType(), T))
+ addValue(E, E->getDeclName(), Member::Colons);
+ return true;
+ }
+
+ // In T::typename foo, `foo` is a type.
+ bool VisitDependentNameType(DependentNameType *DNT) {
+ const auto *Q = DNT->getQualifier();
+ if (Q && isApprox(Q->getAsType(), T))
+ addType(DNT->getIdentifier());
+ return true;
+ }
+
+ // In T::foo::bar, `foo` must be a type.
+ // VisitNNS() doesn't exist, and TraverseNNS isn't always called :-(
+ bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNSL) {
+ if (NNSL) {
+ NestedNameSpecifier *NNS = NNSL.getNestedNameSpecifier();
+ const auto *Q = NNS->getPrefix();
+ if (Q && isApprox(Q->getAsType(), T))
+ addType(NNS->getAsIdentifier());
+ }
+ // FIXME: also handle T::foo<X>::bar
+ return RecursiveASTVisitor::TraverseNestedNameSpecifierLoc(NNSL);
+ }
+
+ // FIXME also handle T::foo<X>
+
+ // Track the innermost caller/callee relationship so we can tell if a
+ // nested expr is being called as a function.
+ bool VisitCallExpr(CallExpr *CE) {
+ Caller = CE;
+ Callee = CE->getCallee();
+ return true;
+ }
+
+ private:
+ void addResult(Member &&M) {
+ auto R = Outer->Results.try_emplace(M.Name);
+ Member &O = R.first->second;
+ // Overwrite existing if the new member has more info.
+ // The preference of . vs :: vs -> is fairly arbitrary.
+ if (/*Inserted*/ R.second ||
+ std::make_tuple(M.ArgTypes.hasValue(), M.ResultType != nullptr,
+ M.Operator) > std::make_tuple(O.ArgTypes.hasValue(),
+ O.ResultType != nullptr,
+ O.Operator))
+ O = std::move(M);
+ }
+
+ void addType(const IdentifierInfo *Name) {
+ if (!Name)
+ return;
+ Member M;
+ M.Name = Name;
+ M.Operator = Member::Colons;
+ addResult(std::move(M));
+ }
+
+ void addValue(Expr *E, DeclarationName Name,
+ Member::AccessOperator Operator) {
+ if (!Name.isIdentifier())
+ return;
+ Member Result;
+ Result.Name = Name.getAsIdentifierInfo();
+ Result.Operator = Operator;
+ // If this is the callee of an immediately-enclosing CallExpr, then
+ // treat it as a method, otherwise it's a variable.
+ if (Caller != nullptr && Callee == E) {
+ Result.ArgTypes.emplace();
+ for (const auto *Arg : Caller->arguments())
+ Result.ArgTypes->push_back(Arg->getType());
+ if (Caller == OuterExpr) {
+ Result.ResultType = OuterType;
+ }
+ } else {
+ if (E == OuterExpr)
+ Result.ResultType = OuterType;
+ }
+ addResult(std::move(Result));
+ }
+ };
+
+ static bool isApprox(const TemplateArgument &Arg, const Type *T) {
+ return Arg.getKind() == TemplateArgument::Type &&
+ isApprox(Arg.getAsType().getTypePtr(), T);
+ }
+
+ static bool isApprox(const Type *T1, const Type *T2) {
+ return T1 && T2 &&
+ T1->getCanonicalTypeUnqualified() ==
+ T2->getCanonicalTypeUnqualified();
+ }
+
+ // Returns the DeclContext immediately enclosed by the template parameter
+ // scope. For primary templates, this is the templated (e.g.) CXXRecordDecl.
+ // For specializations, this is e.g. ClassTemplatePartialSpecializationDecl.
+ static DeclContext *getTemplatedEntity(const TemplateTypeParmDecl *D,
+ Scope *S) {
+ if (D == nullptr)
+ return nullptr;
+ Scope *Inner = nullptr;
+ while (S) {
+ if (S->isTemplateParamScope() && S->isDeclScope(D))
+ return Inner ? Inner->getEntity() : nullptr;
+ Inner = S;
+ S = S->getParent();
+ }
+ return nullptr;
+ }
+
+ // Gets all the type constraint expressions that might apply to the type
+ // variables associated with DC (as returned by getTemplatedEntity()).
+ static SmallVector<const Expr *, 1>
+ constraintsForTemplatedEntity(DeclContext *DC) {
+ SmallVector<const Expr *, 1> Result;
+ if (DC == nullptr)
+ return Result;
+ // Primary templates can have constraints.
+ if (const auto *TD = cast<Decl>(DC)->getDescribedTemplate())
+ TD->getAssociatedConstraints(Result);
+ // Partial specializations may have constraints.
+ if (const auto *CTPSD =
+ dyn_cast<ClassTemplatePartialSpecializationDecl>(DC))
+ CTPSD->getAssociatedConstraints(Result);
+ if (const auto *VTPSD = dyn_cast<VarTemplatePartialSpecializationDecl>(DC))
+ VTPSD->getAssociatedConstraints(Result);
+ return Result;
+ }
+
+ // Attempt to find the unique type satisfying a constraint.
+ // This lets us show e.g. `int` instead of `std::same_as<int>`.
+ static QualType deduceType(const TypeConstraint &T) {
+ // Assume a same_as<T> return type constraint is std::same_as or equivalent.
+ // In this case the return type is T.
+ DeclarationName DN = T.getNamedConcept()->getDeclName();
+ if (DN.isIdentifier() && DN.getAsIdentifierInfo()->isStr("same_as"))
+ if (const auto *Args = T.getTemplateArgsAsWritten())
+ if (Args->getNumTemplateArgs() == 1) {
+ const auto &Arg = Args->arguments().front().getArgument();
+ if (Arg.getKind() == TemplateArgument::Type)
+ return Arg.getAsType();
+ }
+ return {};
+ }
+
+ llvm::DenseMap<const IdentifierInfo *, Member> Results;
+};
+} // namespace
+
void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
@@ -4767,37 +5179,46 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
Base = ConvertedBase.get();
QualType BaseType = Base->getType();
+ if (BaseType.isNull())
+ return false;
ExprValueKind BaseKind = Base->getValueKind();
if (IsArrow) {
if (const PointerType *Ptr = BaseType->getAs<PointerType>()) {
BaseType = Ptr->getPointeeType();
BaseKind = VK_LValue;
- } else if (BaseType->isObjCObjectPointerType())
- /*Do nothing*/;
- else
+ } else if (BaseType->isObjCObjectPointerType() ||
+ BaseType->isTemplateTypeParmType()) {
+ // Both cases (dot/arrow) handled below.
+ } else {
return false;
+ }
}
- if (const RecordType *Record = BaseType->getAs<RecordType>()) {
+ if (RecordDecl *RD = getAsRecordDecl(BaseType)) {
AddRecordMembersCompletionResults(*this, Results, S, BaseType, BaseKind,
- Record->getDecl(),
- std::move(AccessOpFixIt));
- } else if (const auto *TST =
- BaseType->getAs<TemplateSpecializationType>()) {
- TemplateName TN = TST->getTemplateName();
- if (const auto *TD =
- dyn_cast_or_null<ClassTemplateDecl>(TN.getAsTemplateDecl())) {
- CXXRecordDecl *RD = TD->getTemplatedDecl();
- AddRecordMembersCompletionResults(*this, Results, S, BaseType, BaseKind,
- RD, std::move(AccessOpFixIt));
+ RD, std::move(AccessOpFixIt));
+ } else if (const auto *TTPT =
+ dyn_cast<TemplateTypeParmType>(BaseType.getTypePtr())) {
+ auto Operator =
+ IsArrow ? ConceptInfo::Member::Arrow : ConceptInfo::Member::Dot;
+ for (const auto &R : ConceptInfo(*TTPT, S).members()) {
+ if (R.Operator != Operator)
+ continue;
+ CodeCompletionResult Result(
+ R.render(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo()));
+ if (AccessOpFixIt)
+ Result.FixIts.push_back(*AccessOpFixIt);
+ Results.AddResult(std::move(Result));
}
- } else if (const auto *ICNT = BaseType->getAs<InjectedClassNameType>()) {
- if (auto *RD = ICNT->getDecl())
- AddRecordMembersCompletionResults(*this, Results, S, BaseType, BaseKind,
- RD, std::move(AccessOpFixIt));
} else if (!IsArrow && BaseType->isObjCObjectPointerType()) {
- // Objective-C property reference.
+ // Objective-C property reference. Bail if we're performing fix-it code
+ // completion since Objective-C properties are normally backed by ivars,
+ // most Objective-C fix-its here would have little value.
+ if (AccessOpFixIt.hasValue()) {
+ return false;
+ }
AddedPropertiesSet AddedProperties;
if (const ObjCObjectPointerType *ObjCPtr =
@@ -4817,7 +5238,12 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
/*InOriginalClass*/ false);
} else if ((IsArrow && BaseType->isObjCObjectPointerType()) ||
(!IsArrow && BaseType->isObjCObjectType())) {
- // Objective-C instance variable access.
+ // Objective-C instance variable access. Bail if we're performing fix-it
+ // code completion since Objective-C properties are normally backed by
+ // ivars, most Objective-C fix-its here would have little value.
+ if (AccessOpFixIt.hasValue()) {
+ return false;
+ }
ObjCInterfaceDecl *Class = nullptr;
if (const ObjCObjectPointerType *ObjCPtr =
BaseType->getAs<ObjCObjectPointerType>())
@@ -5282,6 +5708,44 @@ QualType Sema::ProduceCtorInitMemberSignatureHelp(
return QualType();
}
+void Sema::CodeCompleteDesignator(const QualType BaseType,
+ llvm::ArrayRef<Expr *> InitExprs,
+ const Designation &D) {
+ if (BaseType.isNull())
+ return;
+ // FIXME: Handle nested designations, e.g. : .x.^
+ if (!D.empty())
+ return;
+
+ const auto *RD = getAsRecordDecl(BaseType);
+ if (!RD)
+ return;
+ if (const auto *CTSD = llvm::dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
+ // Template might not be instantiated yet, fall back to primary template in
+ // such cases.
+ if (CTSD->getTemplateSpecializationKind() == TSK_Undeclared)
+ RD = CTSD->getSpecializedTemplate()->getTemplatedDecl();
+ }
+ if (RD->fields().empty())
+ return;
+
+ CodeCompletionContext CCC(CodeCompletionContext::CCC_DotMemberAccess,
+ BaseType);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(), CCC);
+
+ Results.EnterNewScope();
+ for (const auto *FD : RD->fields()) {
+ // FIXME: Make use of previous designators to mark any fields before those
+ // inaccessible, and also compute the next initializer priority.
+ ResultBuilder::Result Result(FD, Results.getBasePriority(FD));
+ Results.AddResult(Result, CurContext, /*Hiding=*/nullptr);
+ }
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
+}
+
void Sema::CodeCompleteInitializer(Scope *S, Decl *D) {
ValueDecl *VD = dyn_cast_or_null<ValueDecl>(D);
if (!VD) {
@@ -5297,7 +5761,7 @@ void Sema::CodeCompleteInitializer(Scope *S, Decl *D) {
CodeCompleteExpression(S, Data);
}
-void Sema::CodeCompleteAfterIf(Scope *S) {
+void Sema::CodeCompleteAfterIf(Scope *S, bool IsBracedThen) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
mapCodeCompletionContext(*this, PCC_Statement));
@@ -5314,15 +5778,25 @@ void Sema::CodeCompleteAfterIf(Scope *S) {
// "else" block
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
+
+ auto AddElseBodyPattern = [&] {
+ if (IsBracedThen) {
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ } else {
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("statement");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
+ }
+ };
Builder.AddTypedTextChunk("else");
- if (Results.includeCodePatterns()) {
- Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
- Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
- Builder.AddPlaceholderChunk("statements");
- Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
- Builder.AddChunk(CodeCompletionString::CK_RightBrace);
- }
+ if (Results.includeCodePatterns())
+ AddElseBodyPattern();
Results.AddResult(Builder.TakeString());
// "else if" block
@@ -5335,12 +5809,7 @@ void Sema::CodeCompleteAfterIf(Scope *S) {
Builder.AddPlaceholderChunk("expression");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
if (Results.includeCodePatterns()) {
- Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
- Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
- Builder.AddPlaceholderChunk("statements");
- Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
- Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ AddElseBodyPattern();
}
Results.AddResult(Builder.TakeString());
@@ -5393,13 +5862,14 @@ void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
// Always pretend to enter a context to ensure that a dependent type
// resolves to a dependent record.
DeclContext *Ctx = computeDeclContext(SS, /*EnteringContext=*/true);
- if (!Ctx)
- return;
// Try to instantiate any non-dependent declaration contexts before
- // we look in them.
- if (!isDependentScopeSpecifier(SS) && RequireCompleteDeclContext(SS, Ctx))
- return;
+ // we look in them. Bail out if we fail.
+ NestedNameSpecifier *NNS = SS.getScopeRep();
+ if (NNS != nullptr && SS.isValid() && !NNS->isDependent()) {
+ if (Ctx == nullptr || RequireCompleteDeclContext(SS, Ctx))
+ return;
+ }
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(), CC);
@@ -5409,21 +5879,34 @@ void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
// The "template" keyword can follow "::" in the grammar, but only
// put it into the grammar if the nested-name-specifier is dependent.
- NestedNameSpecifier *NNS = SS.getScopeRep();
+ // FIXME: results is always empty, this appears to be dead.
if (!Results.empty() && NNS->isDependent())
Results.AddResult("template");
+ // If the scope is a concept-constrained type parameter, infer nested
+ // members based on the constraints.
+ if (const auto *TTPT =
+ dyn_cast_or_null<TemplateTypeParmType>(NNS->getAsType())) {
+ for (const auto &R : ConceptInfo(*TTPT, S).members()) {
+ if (R.Operator != ConceptInfo::Member::Colons)
+ continue;
+ Results.AddResult(CodeCompletionResult(
+ R.render(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo())));
+ }
+ }
+
// Add calls to overridden virtual functions, if there are any.
//
// FIXME: This isn't wonderful, because we don't know whether we're actually
// in a context that permits expressions. This is a general issue with
// qualified-id completions.
- if (!EnteringContext)
+ if (Ctx && !EnteringContext)
MaybeAddOverrideCalls(*this, Ctx, Results);
Results.ExitScope();
- if (CodeCompleter->includeNamespaceLevelDecls() ||
- (!Ctx->isNamespace() && !Ctx->isTranslationUnit())) {
+ if (Ctx &&
+ (CodeCompleter->includeNamespaceLevelDecls() || !Ctx->isFileContext())) {
CodeCompletionDeclConsumer Consumer(Results, Ctx, BaseType);
LookupVisibleDecls(Ctx, LookupOrdinaryName, Consumer,
/*IncludeGlobalScope=*/true,
@@ -5785,6 +6268,53 @@ void Sema::CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
Results.data(), Results.size());
}
+void Sema::CodeCompleteAfterFunctionEquals(Declarator &D) {
+ if (!LangOpts.CPlusPlus11)
+ return;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ auto ShouldAddDefault = [&D, this]() {
+ if (!D.isFunctionDeclarator())
+ return false;
+ auto &Id = D.getName();
+ if (Id.getKind() == UnqualifiedIdKind::IK_DestructorName)
+ return true;
+ // FIXME(liuhui): Ideally, we should check the constructor parameter list to
+ // verify that it is the default, copy or move constructor?
+ if (Id.getKind() == UnqualifiedIdKind::IK_ConstructorName &&
+ D.getFunctionTypeInfo().NumParams <= 1)
+ return true;
+ if (Id.getKind() == UnqualifiedIdKind::IK_OperatorFunctionId) {
+ auto Op = Id.OperatorFunctionId.Operator;
+ // FIXME(liuhui): Ideally, we should check the function parameter list to
+ // verify that it is the copy or move assignment?
+ if (Op == OverloadedOperatorKind::OO_Equal)
+ return true;
+ if (LangOpts.CPlusPlus20 &&
+ (Op == OverloadedOperatorKind::OO_EqualEqual ||
+ Op == OverloadedOperatorKind::OO_ExclaimEqual ||
+ Op == OverloadedOperatorKind::OO_Less ||
+ Op == OverloadedOperatorKind::OO_LessEqual ||
+ Op == OverloadedOperatorKind::OO_Greater ||
+ Op == OverloadedOperatorKind::OO_GreaterEqual ||
+ Op == OverloadedOperatorKind::OO_Spaceship))
+ return true;
+ }
+ return false;
+ };
+
+ Results.EnterNewScope();
+ if (ShouldAddDefault())
+ Results.AddResult("default");
+ // FIXME(liuhui): Ideally, we should only provide `delete` completion for the
+ // first function declaration.
+ Results.AddResult("delete");
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
+}
+
/// Macro that optionally prepends an "@" to the string literal passed in via
/// Keyword, depending on whether NeedAt is true or false.
#define OBJC_AT_KEYWORD_NAME(NeedAt, Keyword) ((NeedAt) ? "@" Keyword : Keyword)
@@ -6063,22 +6593,24 @@ static bool ObjCPropertyFlagConflicts(unsigned Attributes, unsigned NewFlag) {
Attributes |= NewFlag;
// Check for collisions with "readonly".
- if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
- (Attributes & ObjCDeclSpec::DQ_PR_readwrite))
+ if ((Attributes & ObjCPropertyAttribute::kind_readonly) &&
+ (Attributes & ObjCPropertyAttribute::kind_readwrite))
return true;
// Check for more than one of { assign, copy, retain, strong, weak }.
unsigned AssignCopyRetMask =
Attributes &
- (ObjCDeclSpec::DQ_PR_assign | ObjCDeclSpec::DQ_PR_unsafe_unretained |
- ObjCDeclSpec::DQ_PR_copy | ObjCDeclSpec::DQ_PR_retain |
- ObjCDeclSpec::DQ_PR_strong | ObjCDeclSpec::DQ_PR_weak);
- if (AssignCopyRetMask && AssignCopyRetMask != ObjCDeclSpec::DQ_PR_assign &&
- AssignCopyRetMask != ObjCDeclSpec::DQ_PR_unsafe_unretained &&
- AssignCopyRetMask != ObjCDeclSpec::DQ_PR_copy &&
- AssignCopyRetMask != ObjCDeclSpec::DQ_PR_retain &&
- AssignCopyRetMask != ObjCDeclSpec::DQ_PR_strong &&
- AssignCopyRetMask != ObjCDeclSpec::DQ_PR_weak)
+ (ObjCPropertyAttribute::kind_assign |
+ ObjCPropertyAttribute::kind_unsafe_unretained |
+ ObjCPropertyAttribute::kind_copy | ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_strong | ObjCPropertyAttribute::kind_weak);
+ if (AssignCopyRetMask &&
+ AssignCopyRetMask != ObjCPropertyAttribute::kind_assign &&
+ AssignCopyRetMask != ObjCPropertyAttribute::kind_unsafe_unretained &&
+ AssignCopyRetMask != ObjCPropertyAttribute::kind_copy &&
+ AssignCopyRetMask != ObjCPropertyAttribute::kind_retain &&
+ AssignCopyRetMask != ObjCPropertyAttribute::kind_strong &&
+ AssignCopyRetMask != ObjCPropertyAttribute::kind_weak)
return true;
return false;
@@ -6094,32 +6626,41 @@ void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
- if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_readonly))
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCPropertyAttribute::kind_readonly))
Results.AddResult(CodeCompletionResult("readonly"));
- if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_assign))
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCPropertyAttribute::kind_assign))
Results.AddResult(CodeCompletionResult("assign"));
if (!ObjCPropertyFlagConflicts(Attributes,
- ObjCDeclSpec::DQ_PR_unsafe_unretained))
+ ObjCPropertyAttribute::kind_unsafe_unretained))
Results.AddResult(CodeCompletionResult("unsafe_unretained"));
- if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_readwrite))
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCPropertyAttribute::kind_readwrite))
Results.AddResult(CodeCompletionResult("readwrite"));
- if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_retain))
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCPropertyAttribute::kind_retain))
Results.AddResult(CodeCompletionResult("retain"));
- if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_strong))
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCPropertyAttribute::kind_strong))
Results.AddResult(CodeCompletionResult("strong"));
- if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_copy))
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCPropertyAttribute::kind_copy))
Results.AddResult(CodeCompletionResult("copy"));
- if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_nonatomic))
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCPropertyAttribute::kind_nonatomic))
Results.AddResult(CodeCompletionResult("nonatomic"));
- if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_atomic))
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCPropertyAttribute::kind_atomic))
Results.AddResult(CodeCompletionResult("atomic"));
// Only suggest "weak" if we're compiling for ARC-with-weak-references or GC.
if (getLangOpts().ObjCWeak || getLangOpts().getGC() != LangOptions::NonGC)
- if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_weak))
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCPropertyAttribute::kind_weak))
Results.AddResult(CodeCompletionResult("weak"));
- if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_setter)) {
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCPropertyAttribute::kind_setter)) {
CodeCompletionBuilder Setter(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
Setter.AddTypedTextChunk("setter");
@@ -6127,7 +6668,8 @@ void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
Setter.AddPlaceholderChunk("method");
Results.AddResult(CodeCompletionResult(Setter.TakeString()));
}
- if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_getter)) {
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCPropertyAttribute::kind_getter)) {
CodeCompletionBuilder Getter(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
Getter.AddTypedTextChunk("getter");
@@ -6135,7 +6677,8 @@ void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
Getter.AddPlaceholderChunk("method");
Results.AddResult(CodeCompletionResult(Getter.TakeString()));
}
- if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_nullability)) {
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCPropertyAttribute::kind_nullability)) {
Results.AddResult(CodeCompletionResult("nonnull"));
Results.AddResult(CodeCompletionResult("nullable"));
Results.AddResult(CodeCompletionResult("null_unspecified"));
@@ -7602,7 +8145,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
} Key(Allocator, PropName->getName());
// The uppercased name of the property name.
- std::string UpperKey = PropName->getName();
+ std::string UpperKey = std::string(PropName->getName());
if (!UpperKey.empty())
UpperKey[0] = toUppercase(UpperKey[0]);
@@ -7660,8 +8203,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
- Builder.AddTypedTextChunk(Allocator.CopyString(SelectorId->getName()));
- Builder.AddTypedTextChunk(":");
+ Builder.AddTypedTextChunk(
+ Allocator.CopyString(SelectorId->getName() + ":"));
AddObjCPassingTypeChunk(Property->getType(), /*Quals=*/0, Context, Policy,
Builder);
Builder.AddTextChunk(Key);
@@ -8249,39 +8792,43 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
Selector Sel = Method->getSelector();
- // Add the first part of the selector to the pattern.
- Builder.AddTypedTextChunk(
- Builder.getAllocator().CopyString(Sel.getNameForSlot(0)));
-
- // Add parameters to the pattern.
- unsigned I = 0;
- for (ObjCMethodDecl::param_iterator P = Method->param_begin(),
- PEnd = Method->param_end();
- P != PEnd; (void)++P, ++I) {
- // Add the part of the selector name.
- if (I == 0)
- Builder.AddTypedTextChunk(":");
- else if (I < Sel.getNumArgs()) {
- Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Builder.AddTypedTextChunk(
- Builder.getAllocator().CopyString(Sel.getNameForSlot(I) + ":"));
- } else
- break;
-
- // Add the parameter type.
- QualType ParamType;
- if ((*P)->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability)
- ParamType = (*P)->getType();
- else
- ParamType = (*P)->getOriginalType();
- ParamType = ParamType.substObjCTypeArgs(
- Context, {}, ObjCSubstitutionContext::Parameter);
- AttributedType::stripOuterNullability(ParamType);
- AddObjCPassingTypeChunk(ParamType, (*P)->getObjCDeclQualifier(), Context,
- Policy, Builder);
+ if (Sel.isUnarySelector()) {
+ // Unary selectors have no arguments.
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(0)));
+ } else {
+ // Add all parameters to the pattern.
+ unsigned I = 0;
+ for (ObjCMethodDecl::param_iterator P = Method->param_begin(),
+ PEnd = Method->param_end();
+ P != PEnd; (void)++P, ++I) {
+ // Add the part of the selector name.
+ if (I == 0)
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(I) + ":"));
+ else if (I < Sel.getNumArgs()) {
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(I) + ":"));
+ } else
+ break;
- if (IdentifierInfo *Id = (*P)->getIdentifier())
- Builder.AddTextChunk(Builder.getAllocator().CopyString(Id->getName()));
+ // Add the parameter type.
+ QualType ParamType;
+ if ((*P)->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability)
+ ParamType = (*P)->getType();
+ else
+ ParamType = (*P)->getOriginalType();
+ ParamType = ParamType.substObjCTypeArgs(
+ Context, {}, ObjCSubstitutionContext::Parameter);
+ AttributedType::stripOuterNullability(ParamType);
+ AddObjCPassingTypeChunk(ParamType, (*P)->getObjCDeclQualifier(),
+ Context, Policy, Builder);
+
+ if (IdentifierInfo *Id = (*P)->getIdentifier())
+ Builder.AddTextChunk(
+ Builder.getAllocator().CopyString(Id->getName()));
+ }
}
if (Method->isVariadic()) {
@@ -8723,7 +9270,16 @@ void Sema::CodeCompleteIncludedFile(llvm::StringRef Dir, bool Angled) {
if (++Count == 2500) // If we happen to hit a huge directory,
break; // bail out early so we're not too slow.
StringRef Filename = llvm::sys::path::filename(It->path());
- switch (It->type()) {
+
+ // To know whether a symlink should be treated as file or a directory, we
+ // have to stat it. This should be cheap enough as there shouldn't be many
+ // symlinks.
+ llvm::sys::fs::file_type Type = It->type();
+ if (Type == llvm::sys::fs::file_type::symlink_file) {
+ if (auto FileStatus = FS.status(It->path()))
+ Type = FileStatus->getType();
+ }
+ switch (Type) {
case llvm::sys::fs::file_type::directory_file:
// All entries in a framework directory must have a ".framework" suffix,
// but the suffix does not appear in the source code's include/import.
diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp
index 018ac2d7dc9d..ddd95faebe99 100644
--- a/clang/lib/Sema/SemaConcept.cpp
+++ b/clang/lib/Sema/SemaConcept.cpp
@@ -17,7 +17,10 @@
#include "clang/Sema/SemaDiagnostic.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/Template.h"
-#include "clang/AST/ExprCXX.h"
+#include "clang/Sema/Overload.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/SemaInternal.h"
+#include "clang/AST/ExprConcepts.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "llvm/ADT/DenseMap.h"
@@ -25,21 +28,47 @@
using namespace clang;
using namespace sema;
-bool
-Sema::CheckConstraintExpression(Expr *ConstraintExpression, Token NextToken,
- bool *PossibleNonPrimary,
- bool IsTrailingRequiresClause) {
+namespace {
+class LogicalBinOp {
+ OverloadedOperatorKind Op = OO_None;
+ const Expr *LHS = nullptr;
+ const Expr *RHS = nullptr;
+
+public:
+ LogicalBinOp(const Expr *E) {
+ if (auto *BO = dyn_cast<BinaryOperator>(E)) {
+ Op = BinaryOperator::getOverloadedOperator(BO->getOpcode());
+ LHS = BO->getLHS();
+ RHS = BO->getRHS();
+ } else if (auto *OO = dyn_cast<CXXOperatorCallExpr>(E)) {
+ Op = OO->getOperator();
+ LHS = OO->getArg(0);
+ RHS = OO->getArg(1);
+ }
+ }
+
+ bool isAnd() const { return Op == OO_AmpAmp; }
+ bool isOr() const { return Op == OO_PipePipe; }
+ explicit operator bool() const { return isAnd() || isOr(); }
+
+ const Expr *getLHS() const { return LHS; }
+ const Expr *getRHS() const { return RHS; }
+};
+}
+
+bool Sema::CheckConstraintExpression(const Expr *ConstraintExpression,
+ Token NextToken, bool *PossibleNonPrimary,
+ bool IsTrailingRequiresClause) {
// C++2a [temp.constr.atomic]p1
// ..E shall be a constant expression of type bool.
ConstraintExpression = ConstraintExpression->IgnoreParenImpCasts();
- if (auto *BinOp = dyn_cast<BinaryOperator>(ConstraintExpression)) {
- if (BinOp->getOpcode() == BO_LAnd || BinOp->getOpcode() == BO_LOr)
- return CheckConstraintExpression(BinOp->getLHS(), NextToken,
- PossibleNonPrimary) &&
- CheckConstraintExpression(BinOp->getRHS(), NextToken,
- PossibleNonPrimary);
+ if (LogicalBinOp BO = ConstraintExpression) {
+ return CheckConstraintExpression(BO.getLHS(), NextToken,
+ PossibleNonPrimary) &&
+ CheckConstraintExpression(BO.getRHS(), NextToken,
+ PossibleNonPrimary);
} else if (auto *C = dyn_cast<ExprWithCleanups>(ConstraintExpression))
return CheckConstraintExpression(C->getSubExpr(), NextToken,
PossibleNonPrimary);
@@ -57,7 +86,7 @@ Sema::CheckConstraintExpression(Expr *ConstraintExpression, Token NextToken,
(NextToken.is(tok::l_paren) &&
(IsTrailingRequiresClause ||
(Type->isDependentType() &&
- IsDependentFunctionNameExpr(ConstraintExpression)) ||
+ isa<UnresolvedLookupExpr>(ConstraintExpression)) ||
Type->isFunctionType() ||
Type->isSpecificBuiltinType(BuiltinType::Overload))) ||
// We have the following case:
@@ -96,39 +125,37 @@ calculateConstraintSatisfaction(Sema &S, const Expr *ConstraintExpr,
AtomicEvaluator &&Evaluator) {
ConstraintExpr = ConstraintExpr->IgnoreParenImpCasts();
- if (auto *BO = dyn_cast<BinaryOperator>(ConstraintExpr)) {
- if (BO->getOpcode() == BO_LAnd || BO->getOpcode() == BO_LOr) {
- if (calculateConstraintSatisfaction(S, BO->getLHS(), Satisfaction,
- Evaluator))
- return true;
+ if (LogicalBinOp BO = ConstraintExpr) {
+ if (calculateConstraintSatisfaction(S, BO.getLHS(), Satisfaction,
+ Evaluator))
+ return true;
- bool IsLHSSatisfied = Satisfaction.IsSatisfied;
+ bool IsLHSSatisfied = Satisfaction.IsSatisfied;
- if (BO->getOpcode() == BO_LOr && IsLHSSatisfied)
- // [temp.constr.op] p3
- // A disjunction is a constraint taking two operands. To determine if
- // a disjunction is satisfied, the satisfaction of the first operand
- // is checked. If that is satisfied, the disjunction is satisfied.
- // Otherwise, the disjunction is satisfied if and only if the second
- // operand is satisfied.
- return false;
+ if (BO.isOr() && IsLHSSatisfied)
+ // [temp.constr.op] p3
+ // A disjunction is a constraint taking two operands. To determine if
+ // a disjunction is satisfied, the satisfaction of the first operand
+ // is checked. If that is satisfied, the disjunction is satisfied.
+ // Otherwise, the disjunction is satisfied if and only if the second
+ // operand is satisfied.
+ return false;
- if (BO->getOpcode() == BO_LAnd && !IsLHSSatisfied)
- // [temp.constr.op] p2
- // A conjunction is a constraint taking two operands. To determine if
- // a conjunction is satisfied, the satisfaction of the first operand
- // is checked. If that is not satisfied, the conjunction is not
- // satisfied. Otherwise, the conjunction is satisfied if and only if
- // the second operand is satisfied.
- return false;
+ if (BO.isAnd() && !IsLHSSatisfied)
+ // [temp.constr.op] p2
+ // A conjunction is a constraint taking two operands. To determine if
+ // a conjunction is satisfied, the satisfaction of the first operand
+ // is checked. If that is not satisfied, the conjunction is not
+ // satisfied. Otherwise, the conjunction is satisfied if and only if
+ // the second operand is satisfied.
+ return false;
- return calculateConstraintSatisfaction(S, BO->getRHS(), Satisfaction,
- std::forward<AtomicEvaluator>(Evaluator));
- }
- }
- else if (auto *C = dyn_cast<ExprWithCleanups>(ConstraintExpr))
+ return calculateConstraintSatisfaction(
+ S, BO.getRHS(), Satisfaction, std::forward<AtomicEvaluator>(Evaluator));
+ } else if (auto *C = dyn_cast<ExprWithCleanups>(ConstraintExpr)) {
return calculateConstraintSatisfaction(S, C->getSubExpr(), Satisfaction,
std::forward<AtomicEvaluator>(Evaluator));
+ }
// An atomic constraint expression
ExprResult SubstitutedAtomicExpr = Evaluator(ConstraintExpr);
@@ -164,9 +191,8 @@ calculateConstraintSatisfaction(Sema &S, const Expr *ConstraintExpr,
return false;
}
-template <typename TemplateDeclT>
static bool calculateConstraintSatisfaction(
- Sema &S, TemplateDeclT *Template, ArrayRef<TemplateArgument> TemplateArgs,
+ Sema &S, const NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs,
SourceLocation TemplateNameLoc, MultiLevelTemplateArgumentList &MLTAL,
const Expr *ConstraintExpr, ConstraintSatisfaction &Satisfaction) {
return calculateConstraintSatisfaction(
@@ -179,8 +205,9 @@ static bool calculateConstraintSatisfaction(
{
TemplateDeductionInfo Info(TemplateNameLoc);
Sema::InstantiatingTemplate Inst(S, AtomicExpr->getBeginLoc(),
- Sema::InstantiatingTemplate::ConstraintSubstitution{}, Template,
- Info, AtomicExpr->getSourceRange());
+ Sema::InstantiatingTemplate::ConstraintSubstitution{},
+ const_cast<NamedDecl *>(Template), Info,
+ AtomicExpr->getSourceRange());
if (Inst.isInvalid())
return ExprError();
// We do not want error diagnostics escaping here.
@@ -227,8 +254,7 @@ static bool calculateConstraintSatisfaction(
});
}
-template<typename TemplateDeclT>
-static bool CheckConstraintSatisfaction(Sema &S, TemplateDeclT *Template,
+static bool CheckConstraintSatisfaction(Sema &S, const NamedDecl *Template,
ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange,
@@ -246,8 +272,8 @@ static bool CheckConstraintSatisfaction(Sema &S, TemplateDeclT *Template,
}
Sema::InstantiatingTemplate Inst(S, TemplateIDRange.getBegin(),
- Sema::InstantiatingTemplate::ConstraintsCheck{}, Template, TemplateArgs,
- TemplateIDRange);
+ Sema::InstantiatingTemplate::ConstraintsCheck{},
+ const_cast<NamedDecl *>(Template), TemplateArgs, TemplateIDRange);
if (Inst.isInvalid())
return true;
@@ -269,36 +295,45 @@ static bool CheckConstraintSatisfaction(Sema &S, TemplateDeclT *Template,
return false;
}
-bool Sema::CheckConstraintSatisfaction(TemplateDecl *Template,
- ArrayRef<const Expr *> ConstraintExprs,
- ArrayRef<TemplateArgument> TemplateArgs,
- SourceRange TemplateIDRange,
- ConstraintSatisfaction &Satisfaction) {
- return ::CheckConstraintSatisfaction(*this, Template, ConstraintExprs,
- TemplateArgs, TemplateIDRange,
- Satisfaction);
-}
+bool Sema::CheckConstraintSatisfaction(
+ const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
+ ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange,
+ ConstraintSatisfaction &OutSatisfaction) {
+ if (ConstraintExprs.empty()) {
+ OutSatisfaction.IsSatisfied = true;
+ return false;
+ }
-bool
-Sema::CheckConstraintSatisfaction(ClassTemplatePartialSpecializationDecl* Part,
- ArrayRef<const Expr *> ConstraintExprs,
- ArrayRef<TemplateArgument> TemplateArgs,
- SourceRange TemplateIDRange,
- ConstraintSatisfaction &Satisfaction) {
- return ::CheckConstraintSatisfaction(*this, Part, ConstraintExprs,
- TemplateArgs, TemplateIDRange,
- Satisfaction);
-}
+ llvm::FoldingSetNodeID ID;
+ void *InsertPos;
+ ConstraintSatisfaction *Satisfaction = nullptr;
+ bool ShouldCache = LangOpts.ConceptSatisfactionCaching && Template;
+ if (ShouldCache) {
+ ConstraintSatisfaction::Profile(ID, Context, Template, TemplateArgs);
+ Satisfaction = SatisfactionCache.FindNodeOrInsertPos(ID, InsertPos);
+ if (Satisfaction) {
+ OutSatisfaction = *Satisfaction;
+ return false;
+ }
+ Satisfaction = new ConstraintSatisfaction(Template, TemplateArgs);
+ } else {
+ Satisfaction = &OutSatisfaction;
+ }
+ if (::CheckConstraintSatisfaction(*this, Template, ConstraintExprs,
+ TemplateArgs, TemplateIDRange,
+ *Satisfaction)) {
+ if (ShouldCache)
+ delete Satisfaction;
+ return true;
+ }
-bool
-Sema::CheckConstraintSatisfaction(VarTemplatePartialSpecializationDecl* Partial,
- ArrayRef<const Expr *> ConstraintExprs,
- ArrayRef<TemplateArgument> TemplateArgs,
- SourceRange TemplateIDRange,
- ConstraintSatisfaction &Satisfaction) {
- return ::CheckConstraintSatisfaction(*this, Partial, ConstraintExprs,
- TemplateArgs, TemplateIDRange,
- Satisfaction);
+ if (ShouldCache) {
+ // We cannot use InsertNode here because CheckConstraintSatisfaction might
+ // have invalidated it.
+ SatisfactionCache.InsertNode(Satisfaction);
+ OutSatisfaction = *Satisfaction;
+ }
+ return false;
}
bool Sema::CheckConstraintSatisfaction(const Expr *ConstraintExpr,
@@ -310,6 +345,30 @@ bool Sema::CheckConstraintSatisfaction(const Expr *ConstraintExpr,
});
}
+bool Sema::CheckFunctionConstraints(const FunctionDecl *FD,
+ ConstraintSatisfaction &Satisfaction,
+ SourceLocation UsageLoc) {
+ const Expr *RC = FD->getTrailingRequiresClause();
+ if (RC->isInstantiationDependent()) {
+ Satisfaction.IsSatisfied = true;
+ return false;
+ }
+ Qualifiers ThisQuals;
+ CXXRecordDecl *Record = nullptr;
+ if (auto *Method = dyn_cast<CXXMethodDecl>(FD)) {
+ ThisQuals = Method->getMethodQualifiers();
+ Record = const_cast<CXXRecordDecl *>(Method->getParent());
+ }
+ CXXThisScopeRAII ThisScope(*this, Record, ThisQuals, Record != nullptr);
+ // We substitute with empty arguments in order to rebuild the atomic
+ // constraint in a constant-evaluated context.
+ // FIXME: Should this be a dedicated TreeTransform?
+ return CheckConstraintSatisfaction(
+ FD, {RC}, /*TemplateArgs=*/{},
+ SourceRange(UsageLoc.isValid() ? UsageLoc : FD->getLocation()),
+ Satisfaction);
+}
+
bool Sema::EnsureTemplateArgumentListConstraints(
TemplateDecl *TD, ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange) {
@@ -336,6 +395,118 @@ bool Sema::EnsureTemplateArgumentListConstraints(
return false;
}
+static void diagnoseUnsatisfiedRequirement(Sema &S,
+ concepts::ExprRequirement *Req,
+ bool First) {
+ assert(!Req->isSatisfied()
+ && "Diagnose() can only be used on an unsatisfied requirement");
+ switch (Req->getSatisfactionStatus()) {
+ case concepts::ExprRequirement::SS_Dependent:
+ llvm_unreachable("Diagnosing a dependent requirement");
+ break;
+ case concepts::ExprRequirement::SS_ExprSubstitutionFailure: {
+ auto *SubstDiag = Req->getExprSubstitutionDiagnostic();
+ if (!SubstDiag->DiagMessage.empty())
+ S.Diag(SubstDiag->DiagLoc,
+ diag::note_expr_requirement_expr_substitution_error)
+ << (int)First << SubstDiag->SubstitutedEntity
+ << SubstDiag->DiagMessage;
+ else
+ S.Diag(SubstDiag->DiagLoc,
+ diag::note_expr_requirement_expr_unknown_substitution_error)
+ << (int)First << SubstDiag->SubstitutedEntity;
+ break;
+ }
+ case concepts::ExprRequirement::SS_NoexceptNotMet:
+ S.Diag(Req->getNoexceptLoc(),
+ diag::note_expr_requirement_noexcept_not_met)
+ << (int)First << Req->getExpr();
+ break;
+ case concepts::ExprRequirement::SS_TypeRequirementSubstitutionFailure: {
+ auto *SubstDiag =
+ Req->getReturnTypeRequirement().getSubstitutionDiagnostic();
+ if (!SubstDiag->DiagMessage.empty())
+ S.Diag(SubstDiag->DiagLoc,
+ diag::note_expr_requirement_type_requirement_substitution_error)
+ << (int)First << SubstDiag->SubstitutedEntity
+ << SubstDiag->DiagMessage;
+ else
+ S.Diag(SubstDiag->DiagLoc,
+ diag::note_expr_requirement_type_requirement_unknown_substitution_error)
+ << (int)First << SubstDiag->SubstitutedEntity;
+ break;
+ }
+ case concepts::ExprRequirement::SS_ConstraintsNotSatisfied: {
+ ConceptSpecializationExpr *ConstraintExpr =
+ Req->getReturnTypeRequirementSubstitutedConstraintExpr();
+ if (ConstraintExpr->getTemplateArgsAsWritten()->NumTemplateArgs == 1)
+ // A simple case - expr type is the type being constrained and the concept
+ // was not provided arguments.
+ S.Diag(ConstraintExpr->getBeginLoc(),
+ diag::note_expr_requirement_constraints_not_satisfied_simple)
+ << (int)First << S.BuildDecltypeType(Req->getExpr(),
+ Req->getExpr()->getBeginLoc())
+ << ConstraintExpr->getNamedConcept();
+ else
+ S.Diag(ConstraintExpr->getBeginLoc(),
+ diag::note_expr_requirement_constraints_not_satisfied)
+ << (int)First << ConstraintExpr;
+ S.DiagnoseUnsatisfiedConstraint(ConstraintExpr->getSatisfaction());
+ break;
+ }
+ case concepts::ExprRequirement::SS_Satisfied:
+ llvm_unreachable("We checked this above");
+ }
+}
+
+static void diagnoseUnsatisfiedRequirement(Sema &S,
+ concepts::TypeRequirement *Req,
+ bool First) {
+ assert(!Req->isSatisfied()
+ && "Diagnose() can only be used on an unsatisfied requirement");
+ switch (Req->getSatisfactionStatus()) {
+ case concepts::TypeRequirement::SS_Dependent:
+ llvm_unreachable("Diagnosing a dependent requirement");
+ return;
+ case concepts::TypeRequirement::SS_SubstitutionFailure: {
+ auto *SubstDiag = Req->getSubstitutionDiagnostic();
+ if (!SubstDiag->DiagMessage.empty())
+ S.Diag(SubstDiag->DiagLoc,
+ diag::note_type_requirement_substitution_error) << (int)First
+ << SubstDiag->SubstitutedEntity << SubstDiag->DiagMessage;
+ else
+ S.Diag(SubstDiag->DiagLoc,
+ diag::note_type_requirement_unknown_substitution_error)
+ << (int)First << SubstDiag->SubstitutedEntity;
+ return;
+ }
+ default:
+ llvm_unreachable("Unknown satisfaction status");
+ return;
+ }
+}
+
+static void diagnoseUnsatisfiedRequirement(Sema &S,
+ concepts::NestedRequirement *Req,
+ bool First) {
+ if (Req->isSubstitutionFailure()) {
+ concepts::Requirement::SubstitutionDiagnostic *SubstDiag =
+ Req->getSubstitutionDiagnostic();
+ if (!SubstDiag->DiagMessage.empty())
+ S.Diag(SubstDiag->DiagLoc,
+ diag::note_nested_requirement_substitution_error)
+ << (int)First << SubstDiag->SubstitutedEntity
+ << SubstDiag->DiagMessage;
+ else
+ S.Diag(SubstDiag->DiagLoc,
+ diag::note_nested_requirement_unknown_substitution_error)
+ << (int)First << SubstDiag->SubstitutedEntity;
+ return;
+ }
+ S.DiagnoseUnsatisfiedConstraint(Req->getConstraintSatisfaction(), First);
+}
+
+
static void diagnoseWellFormedUnsatisfiedConstraintExpr(Sema &S,
Expr *SubstExpr,
bool First = true) {
@@ -412,6 +583,19 @@ static void diagnoseWellFormedUnsatisfiedConstraintExpr(Sema &S,
}
S.DiagnoseUnsatisfiedConstraint(CSE->getSatisfaction());
return;
+ } else if (auto *RE = dyn_cast<RequiresExpr>(SubstExpr)) {
+ for (concepts::Requirement *Req : RE->getRequirements())
+ if (!Req->isDependent() && !Req->isSatisfied()) {
+ if (auto *E = dyn_cast<concepts::ExprRequirement>(Req))
+ diagnoseUnsatisfiedRequirement(S, E, First);
+ else if (auto *T = dyn_cast<concepts::TypeRequirement>(Req))
+ diagnoseUnsatisfiedRequirement(S, T, First);
+ else
+ diagnoseUnsatisfiedRequirement(
+ S, cast<concepts::NestedRequirement>(Req), First);
+ break;
+ }
+ return;
}
S.Diag(SubstExpr->getSourceRange().getBegin(),
@@ -434,11 +618,11 @@ static void diagnoseUnsatisfiedConstraintExpr(
Record.template get<Expr *>(), First);
}
-void Sema::DiagnoseUnsatisfiedConstraint(
- const ConstraintSatisfaction& Satisfaction) {
+void
+Sema::DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction& Satisfaction,
+ bool First) {
assert(!Satisfaction.IsSatisfied &&
"Attempted to diagnose a satisfied constraint");
- bool First = true;
for (auto &Pair : Satisfaction.Details) {
diagnoseUnsatisfiedConstraintExpr(*this, Pair.first, Pair.second, First);
First = false;
@@ -446,10 +630,10 @@ void Sema::DiagnoseUnsatisfiedConstraint(
}
void Sema::DiagnoseUnsatisfiedConstraint(
- const ASTConstraintSatisfaction &Satisfaction) {
+ const ASTConstraintSatisfaction &Satisfaction,
+ bool First) {
assert(!Satisfaction.IsSatisfied &&
"Attempted to diagnose a satisfied constraint");
- bool First = true;
for (auto &Pair : Satisfaction) {
diagnoseUnsatisfiedConstraintExpr(*this, Pair.first, Pair.second, First);
First = false;
@@ -523,6 +707,10 @@ static bool substituteParameterMappings(Sema &S, NormalizedConstraint &N,
ArgsAsWritten->arguments().back().getSourceRange().getEnd()));
if (S.SubstTemplateArguments(*Atomic.ParameterMapping, MLTAL, SubstArgs))
return true;
+ Atomic.ParameterMapping.emplace(
+ MutableArrayRef<TemplateArgumentLoc>(
+ new (S.Context) TemplateArgumentLoc[SubstArgs.size()],
+ SubstArgs.size()));
std::copy(SubstArgs.arguments().begin(), SubstArgs.arguments().end(),
N.getAtomicConstraint()->ParameterMapping->begin());
return false;
@@ -561,19 +749,16 @@ NormalizedConstraint::fromConstraintExpr(Sema &S, NamedDecl *D, const Expr *E) {
// - The normal form of an expression (E) is the normal form of E.
// [...]
E = E->IgnoreParenImpCasts();
- if (auto *BO = dyn_cast<const BinaryOperator>(E)) {
- if (BO->getOpcode() == BO_LAnd || BO->getOpcode() == BO_LOr) {
- auto LHS = fromConstraintExpr(S, D, BO->getLHS());
- if (!LHS)
- return None;
- auto RHS = fromConstraintExpr(S, D, BO->getRHS());
- if (!RHS)
- return None;
+ if (LogicalBinOp BO = E) {
+ auto LHS = fromConstraintExpr(S, D, BO.getLHS());
+ if (!LHS)
+ return None;
+ auto RHS = fromConstraintExpr(S, D, BO.getRHS());
+ if (!RHS)
+ return None;
- return NormalizedConstraint(
- S.Context, std::move(*LHS), std::move(*RHS),
- BO->getOpcode() == BO_LAnd ? CCK_Conjunction : CCK_Disjunction);
- }
+ return NormalizedConstraint(S.Context, std::move(*LHS), std::move(*RHS),
+ BO.isAnd() ? CCK_Conjunction : CCK_Disjunction);
} else if (auto *CSE = dyn_cast<const ConceptSpecializationExpr>(E)) {
const NormalizedConstraint *SubNF;
{
@@ -826,3 +1011,67 @@ bool Sema::MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
<< AmbiguousAtomic2->getSourceRange();
return true;
}
+
+// Construct an expression requirement (simple or compound) whose expression
+// parsed/substituted successfully. The base Requirement's dependence and
+// unexpanded-pack flags are derived from Status and from E/Req; satisfaction
+// is simply Status == SS_Satisfied.
+concepts::ExprRequirement::ExprRequirement(
+ Expr *E, bool IsSimple, SourceLocation NoexceptLoc,
+ ReturnTypeRequirement Req, SatisfactionStatus Status,
+ ConceptSpecializationExpr *SubstitutedConstraintExpr) :
+ Requirement(IsSimple ? RK_Simple : RK_Compound, Status == SS_Dependent,
+ Status == SS_Dependent &&
+ (E->containsUnexpandedParameterPack() ||
+ Req.containsUnexpandedParameterPack()),
+ Status == SS_Satisfied), Value(E), NoexceptLoc(NoexceptLoc),
+ TypeReq(Req), SubstitutedConstraintExpr(SubstitutedConstraintExpr),
+ Status(Status) {
+ assert((!IsSimple || (Req.isEmpty() && NoexceptLoc.isInvalid())) &&
+ "Simple requirement must not have a return type requirement or a "
+ "noexcept specification");
+ // A substituted constraint expression is present exactly when we got past
+ // type-requirement substitution and the return-type requirement is a
+ // type-constraint.
+ assert((Status > SS_TypeRequirementSubstitutionFailure && Req.isTypeConstraint()) ==
+ (SubstitutedConstraintExpr != nullptr));
+}
+
+// Construct an expression requirement whose expression failed substitution.
+// Such a requirement is always unsatisfied (IsSatisfied=false) and records
+// the substitution diagnostic in place of the expression.
+concepts::ExprRequirement::ExprRequirement(
+ SubstitutionDiagnostic *ExprSubstDiag, bool IsSimple,
+ SourceLocation NoexceptLoc, ReturnTypeRequirement Req) :
+ Requirement(IsSimple ? RK_Simple : RK_Compound, Req.isDependent(),
+ Req.containsUnexpandedParameterPack(), /*IsSatisfied=*/false),
+ Value(ExprSubstDiag), NoexceptLoc(NoexceptLoc), TypeReq(Req),
+ Status(SS_ExprSubstitutionFailure) {
+ assert((!IsSimple || (Req.isEmpty() && NoexceptLoc.isInvalid())) &&
+ "Simple requirement must not have a return type requirement or a "
+ "noexcept specification");
+}
+
+// Construct a type-constraint return-type requirement from the invented
+// template parameter list (one constrained template type parameter). The
+// pointer/int pair stores TPL plus a "dependent" bit computed from the
+// constraint's written template arguments (skipping the first, invented one).
+concepts::ExprRequirement::ReturnTypeRequirement::
+ReturnTypeRequirement(TemplateParameterList *TPL) :
+ TypeConstraintInfo(TPL, 0) {
+ assert(TPL->size() == 1);
+ const TypeConstraint *TC =
+ cast<TemplateTypeParmDecl>(TPL->getParam(0))->getTypeConstraint();
+ assert(TC &&
+ "TPL must have a template type parameter with a type constraint");
+ auto *Constraint =
+ cast_or_null<ConceptSpecializationExpr>(
+ TC->getImmediatelyDeclaredConstraint());
+ // NOTE(review): Constraint is obtained via cast_or_null (so it may be
+ // null) yet is dereferenced unconditionally below — confirm that
+ // getImmediatelyDeclaredConstraint() cannot return null on this path, or
+ // guard the dereference.
+ bool Dependent = false;
+ if (Constraint->getTemplateArgsAsWritten()) {
+ for (auto &ArgLoc :
+ Constraint->getTemplateArgsAsWritten()->arguments().drop_front(1)) {
+ if (ArgLoc.getArgument().isDependent()) {
+ Dependent = true;
+ break;
+ }
+ }
+ }
+ TypeConstraintInfo.setInt(Dependent ? 1 : 0);
+}
+
+// Construct a type requirement from a successfully-built TypeSourceInfo.
+// Dependence and the unexpanded-pack flag come straight from the type;
+// status is SS_Dependent for dependent types and SS_Satisfied otherwise.
+concepts::TypeRequirement::TypeRequirement(TypeSourceInfo *T) :
+ Requirement(RK_Type, T->getType()->isDependentType(),
+ T->getType()->containsUnexpandedParameterPack(),
+ // We reach this ctor with either dependent types (in which
+ // IsSatisfied doesn't matter) or with non-dependent type in
+ // which the existence of the type indicates satisfaction.
+ /*IsSatisfied=*/true
+ ), Value(T),
+ Status(T->getType()->isDependentType() ? SS_Dependent : SS_Satisfied) {}
diff --git a/clang/lib/Sema/SemaCoroutine.cpp b/clang/lib/Sema/SemaCoroutine.cpp
index 6dc9e342beb9..992cccac6405 100644
--- a/clang/lib/Sema/SemaCoroutine.cpp
+++ b/clang/lib/Sema/SemaCoroutine.cpp
@@ -24,6 +24,7 @@
#include "clang/Sema/Overload.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
+#include "llvm/ADT/SmallSet.h"
using namespace clang;
using namespace sema;
@@ -390,7 +391,13 @@ static Expr *maybeTailCall(Sema &S, QualType RetType, Expr *E,
return nullptr;
Expr *JustAddress = AddressExpr.get();
- // FIXME: Check that the type of AddressExpr is void*
+
+ // Check that the type of AddressExpr is void*
+ if (!JustAddress->getType().getTypePtr()->isVoidPointerType())
+ S.Diag(cast<CallExpr>(JustAddress)->getCalleeDecl()->getLocation(),
+ diag::warn_coroutine_handle_address_invalid_return_type)
+ << JustAddress->getType();
+
return buildBuiltinCall(S, Loc, Builtin::BI__builtin_coro_resume,
JustAddress);
}
@@ -502,8 +509,9 @@ VarDecl *Sema::buildCoroutinePromise(SourceLocation Loc) {
return nullptr;
auto *ScopeInfo = getCurFunction();
- // Build a list of arguments, based on the coroutine functions arguments,
- // that will be passed to the promise type's constructor.
+
+ // Build a list of arguments, based on the coroutine function's arguments,
+ // that if present will be passed to the promise type's constructor.
llvm::SmallVector<Expr *, 4> CtorArgExprs;
// Add implicit object parameter.
@@ -519,6 +527,7 @@ VarDecl *Sema::buildCoroutinePromise(SourceLocation Loc) {
}
}
+ // Add the coroutine function's parameters.
auto &Moves = ScopeInfo->CoroutineParameterMoves;
for (auto *PD : FD->parameters()) {
if (PD->getType()->isDependentType())
@@ -540,28 +549,33 @@ VarDecl *Sema::buildCoroutinePromise(SourceLocation Loc) {
CtorArgExprs.push_back(RefExpr.get());
}
- // Create an initialization sequence for the promise type using the
- // constructor arguments, wrapped in a parenthesized list expression.
- Expr *PLE = ParenListExpr::Create(Context, FD->getLocation(),
- CtorArgExprs, FD->getLocation());
- InitializedEntity Entity = InitializedEntity::InitializeVariable(VD);
- InitializationKind Kind = InitializationKind::CreateForInit(
- VD->getLocation(), /*DirectInit=*/true, PLE);
- InitializationSequence InitSeq(*this, Entity, Kind, CtorArgExprs,
- /*TopLevelOfInitList=*/false,
- /*TreatUnavailableAsInvalid=*/false);
-
- // Attempt to initialize the promise type with the arguments.
- // If that fails, fall back to the promise type's default constructor.
- if (InitSeq) {
- ExprResult Result = InitSeq.Perform(*this, Entity, Kind, CtorArgExprs);
- if (Result.isInvalid()) {
- VD->setInvalidDecl();
- } else if (Result.get()) {
- VD->setInit(MaybeCreateExprWithCleanups(Result.get()));
- VD->setInitStyle(VarDecl::CallInit);
- CheckCompleteVariableDeclaration(VD);
- }
+ // If we have a non-zero number of constructor arguments, try to use them.
+ // Otherwise, fall back to the promise type's default constructor.
+ if (!CtorArgExprs.empty()) {
+ // Create an initialization sequence for the promise type using the
+ // constructor arguments, wrapped in a parenthesized list expression.
+ Expr *PLE = ParenListExpr::Create(Context, FD->getLocation(),
+ CtorArgExprs, FD->getLocation());
+ InitializedEntity Entity = InitializedEntity::InitializeVariable(VD);
+ InitializationKind Kind = InitializationKind::CreateForInit(
+ VD->getLocation(), /*DirectInit=*/true, PLE);
+ InitializationSequence InitSeq(*this, Entity, Kind, CtorArgExprs,
+ /*TopLevelOfInitList=*/false,
+ /*TreatUnavailableAsInvalid=*/false);
+
+ // Attempt to initialize the promise type with the arguments.
+ // If that fails, fall back to the promise type's default constructor.
+ if (InitSeq) {
+ ExprResult Result = InitSeq.Perform(*this, Entity, Kind, CtorArgExprs);
+ if (Result.isInvalid()) {
+ VD->setInvalidDecl();
+ } else if (Result.get()) {
+ VD->setInit(MaybeCreateExprWithCleanups(Result.get()));
+ VD->setInitStyle(VarDecl::CallInit);
+ CheckCompleteVariableDeclaration(VD);
+ }
+ } else
+ ActOnUninitializedDecl(VD);
} else
ActOnUninitializedDecl(VD);
@@ -597,6 +611,80 @@ static FunctionScopeInfo *checkCoroutineContext(Sema &S, SourceLocation Loc,
return ScopeInfo;
}
+/// Recursively check \p E and all its children to see if any call target
+/// (including constructor call) is declared noexcept. Also any value returned
+/// from the call has a noexcept destructor.
+///
+/// Every declaration found to be potentially-throwing is collected into
+/// \p ThrowingDecls; the error diagnostic itself is emitted only once, on the
+/// first offending declaration encountered.
+static void checkNoThrow(Sema &S, const Stmt *E,
+ llvm::SmallPtrSetImpl<const Decl *> &ThrowingDecls) {
+ auto checkDeclNoexcept = [&](const Decl *D, bool IsDtor = false) {
+ // In the case of dtor, the call to dtor is implicit and hence we should
+ // pass nullptr to canCalleeThrow.
+ if (Sema::canCalleeThrow(S, IsDtor ? nullptr : cast<Expr>(E), D)) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ // co_await promise.final_suspend() could end up calling
+ // __builtin_coro_resume for symmetric transfer if await_suspend()
+ // returns a handle. In that case, even __builtin_coro_resume is not
+ // declared as noexcept and may throw, it does not throw _into_ the
+ // coroutine that just suspended, but rather throws back out from
+ // whoever called coroutine_handle::resume(), hence we claim that
+ // logically it does not throw.
+ if (FD->getBuiltinID() == Builtin::BI__builtin_coro_resume)
+ return;
+ }
+ if (ThrowingDecls.empty()) {
+ // First time seeing an error, emit the error message.
+ S.Diag(cast<FunctionDecl>(S.CurContext)->getLocation(),
+ diag::err_coroutine_promise_final_suspend_requires_nothrow);
+ }
+ ThrowingDecls.insert(D);
+ }
+ };
+ auto SC = E->getStmtClass();
+ if (SC == Expr::CXXConstructExprClass) {
+ auto const *Ctor = cast<CXXConstructExpr>(E)->getConstructor();
+ checkDeclNoexcept(Ctor);
+ // Check the corresponding destructor of the constructor.
+ checkDeclNoexcept(Ctor->getParent()->getDestructor(), true);
+ } else if (SC == Expr::CallExprClass || SC == Expr::CXXMemberCallExprClass ||
+ SC == Expr::CXXOperatorCallExprClass) {
+ if (!cast<CallExpr>(E)->isTypeDependent()) {
+ // NOTE(review): CallExpr::getCalleeDecl() can return null for callees
+ // that are not declarations — confirm canCalleeThrow accepts a null D
+ // on this path.
+ checkDeclNoexcept(cast<CallExpr>(E)->getCalleeDecl());
+ auto ReturnType = cast<CallExpr>(E)->getCallReturnType(S.getASTContext());
+ // Check the destructor of the call return type, if any.
+ if (ReturnType.isDestructedType() ==
+ QualType::DestructionKind::DK_cxx_destructor) {
+ const auto *T =
+ cast<RecordType>(ReturnType.getCanonicalType().getTypePtr());
+ // NOTE(review): the dyn_cast result is dereferenced unconditionally;
+ // if the record is guaranteed to be a CXXRecordDecl here (implied by
+ // DK_cxx_destructor), cast<> would state that intent better.
+ checkDeclNoexcept(
+ dyn_cast<CXXRecordDecl>(T->getDecl())->getDestructor(), true);
+ }
+ }
+ }
+ // Recurse into all non-null children so every reachable call site and
+ // temporary is inspected.
+ for (const auto *Child : E->children()) {
+ if (!Child)
+ continue;
+ checkNoThrow(S, Child, ThrowingDecls);
+ }
+}
+
+/// Check that the co_await promise.final_suspend() expression tree reaches
+/// only non-throwing call targets, as required for coroutines.
+/// Returns true when nothing potentially-throwing was found; otherwise the
+/// error was already emitted by checkNoThrow and a note is attached to each
+/// offending declaration, in a deterministic (source-location) order.
+bool Sema::checkFinalSuspendNoThrow(const Stmt *FinalSuspend) {
+ llvm::SmallPtrSet<const Decl *, 4> ThrowingDecls;
+ // We first collect all declarations that should not throw but not declared
+ // with noexcept. We then sort them based on the location before printing.
+ // This is to avoid emitting the same note multiple times on the same
+ // declaration, and also provide a deterministic order for the messages.
+ checkNoThrow(*this, FinalSuspend, ThrowingDecls);
+ auto SortedDecls = llvm::SmallVector<const Decl *, 4>{ThrowingDecls.begin(),
+ ThrowingDecls.end()};
+ sort(SortedDecls, [](const Decl *A, const Decl *B) {
+ return A->getEndLoc() < B->getEndLoc();
+ });
+ for (const auto *D : SortedDecls) {
+ Diag(D->getEndLoc(), diag::note_coroutine_function_declare_noexcept);
+ }
+ return ThrowingDecls.empty();
+}
+
bool Sema::ActOnCoroutineBodyStart(Scope *SC, SourceLocation KWLoc,
StringRef Keyword) {
if (!checkCoroutineContext(*this, KWLoc, Keyword))
@@ -639,7 +727,7 @@ bool Sema::ActOnCoroutineBodyStart(Scope *SC, SourceLocation KWLoc,
return true;
StmtResult FinalSuspend = buildSuspends("final_suspend");
- if (FinalSuspend.isInvalid())
+ if (FinalSuspend.isInvalid() || !checkFinalSuspendNoThrow(FinalSuspend.get()))
return true;
ScopeInfo->setCoroutineSuspends(InitSuspend.get(), FinalSuspend.get());
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index 507e4a6cd436..3e2b61ae8cdf 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -21,6 +21,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/StmtCXX.h"
@@ -47,6 +48,7 @@
#include <algorithm>
#include <cstring>
#include <functional>
+#include <unordered_map>
using namespace clang;
using namespace sema;
@@ -136,6 +138,7 @@ bool Sema::isSimpleTypeSpecifier(tok::TokenKind Kind) const {
case tok::kw_half:
case tok::kw_float:
case tok::kw_double:
+ case tok::kw___bf16:
case tok::kw__Float16:
case tok::kw___float128:
case tok::kw_wchar_t:
@@ -747,7 +750,10 @@ void Sema::DiagnoseUnknownTypeName(IdentifierInfo *&II,
Diag(IILoc, IsTemplateName ? diag::err_no_member_template
: diag::err_typename_nested_not_found)
<< II << DC << SS->getRange();
- else if (isDependentScopeSpecifier(*SS)) {
+ else if (SS->isValid() && SS->getScopeRep()->containsErrors()) {
+ SuggestedType =
+ ActOnTypenameType(S, SourceLocation(), *SS, *II, IILoc).get();
+ } else if (isDependentScopeSpecifier(*SS)) {
unsigned DiagID = diag::err_typename_missing;
if (getLangOpts().MSVCCompat && isMicrosoftMissingTypename(SS, S))
DiagID = diag::ext_typename_missing;
@@ -924,7 +930,7 @@ Corrected:
return NameClassification::NonType(D);
}
- if (getLangOpts().CPlusPlus2a && SS.isEmpty() && NextToken.is(tok::less)) {
+ if (getLangOpts().CPlusPlus20 && SS.isEmpty() && NextToken.is(tok::less)) {
// In C++20 onwards, this could be an ADL-only call to a function
// template, and we're required to assume that this is a template name.
//
@@ -1067,7 +1073,7 @@ Corrected:
Result, /*AllowFunctionTemplates=*/true,
/*AllowDependent=*/false,
/*AllowNonTemplateFunctions*/ SS.isEmpty() &&
- getLangOpts().CPlusPlus2a))) {
+ getLangOpts().CPlusPlus20))) {
// C++ [temp.names]p3:
// After name lookup (3.4) finds that a name is a template-name or that
// an operator-function-id or a literal- operator-id refers to a set of
@@ -1153,6 +1159,10 @@ Corrected:
return ParsedType::make(T);
}
+ if (isa<ConceptDecl>(FirstDecl))
+ return NameClassification::Concept(
+ TemplateName(cast<TemplateDecl>(FirstDecl)));
+
// We can have a type template here if we're classifying a template argument.
if (isa<TemplateDecl>(FirstDecl) && !isa<FunctionTemplateDecl>(FirstDecl) &&
!isa<VarTemplateDecl>(FirstDecl))
@@ -1250,47 +1260,8 @@ Sema::getTemplateNameKindForDiagnostics(TemplateName Name) {
return TemplateNameKindForDiagnostics::DependentTemplate;
}
-// Determines the context to return to after temporarily entering a
-// context. This depends in an unnecessarily complicated way on the
-// exact ordering of callbacks from the parser.
-DeclContext *Sema::getContainingDC(DeclContext *DC) {
-
- // Functions defined inline within classes aren't parsed until we've
- // finished parsing the top-level class, so the top-level class is
- // the context we'll need to return to.
- // A Lambda call operator whose parent is a class must not be treated
- // as an inline member function. A Lambda can be used legally
- // either as an in-class member initializer or a default argument. These
- // are parsed once the class has been marked complete and so the containing
- // context would be the nested class (when the lambda is defined in one);
- // If the class is not complete, then the lambda is being used in an
- // ill-formed fashion (such as to specify the width of a bit-field, or
- // in an array-bound) - in which case we still want to return the
- // lexically containing DC (which could be a nested class).
- if (isa<FunctionDecl>(DC) && !isLambdaCallOperator(DC)) {
- DC = DC->getLexicalParent();
-
- // A function not defined within a class will always return to its
- // lexical context.
- if (!isa<CXXRecordDecl>(DC))
- return DC;
-
- // A C++ inline method/friend is parsed *after* the topmost class
- // it was declared in is fully parsed ("complete"); the topmost
- // class is the context we need to return to.
- while (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC->getLexicalParent()))
- DC = RD;
-
- // Return the declaration context of the topmost class the inline method is
- // declared in.
- return DC;
- }
-
- return DC->getLexicalParent();
-}
-
void Sema::PushDeclContext(Scope *S, DeclContext *DC) {
- assert(getContainingDC(DC) == CurContext &&
+ assert(DC->getLexicalParent() == CurContext &&
"The next DeclContext should be lexically contained in the current one.");
CurContext = DC;
S->setEntity(DC);
@@ -1299,7 +1270,7 @@ void Sema::PushDeclContext(Scope *S, DeclContext *DC) {
void Sema::PopDeclContext() {
assert(CurContext && "DeclContext imbalance!");
- CurContext = getContainingDC(CurContext);
+ CurContext = CurContext->getLexicalParent();
assert(CurContext && "Popped translation unit!");
}
@@ -1351,6 +1322,12 @@ void Sema::EnterDeclaratorContext(Scope *S, DeclContext *DC) {
CurContext = DC;
S->setEntity(DC);
+
+ if (S->getParent()->isTemplateParamScope()) {
+ // Also set the corresponding entities for all immediately-enclosing
+ // template parameter scopes.
+ EnterTemplatedContext(S->getParent(), DC);
+ }
}
void Sema::ExitDeclaratorContext(Scope *S) {
@@ -1366,6 +1343,49 @@ void Sema::ExitDeclaratorContext(Scope *S) {
// disappear.
}
+/// Set the lookup entities of the template parameter scopes enclosing \p S so
+/// that name lookup interleaves template-parameter scopes with the semantic
+/// DeclContexts they parameterize (see the standard wording quoted below).
+void Sema::EnterTemplatedContext(Scope *S, DeclContext *DC) {
+ assert(S->isTemplateParamScope() &&
+ "expected to be initializing a template parameter scope");
+
+ // C++20 [temp.local]p7:
+ // In the definition of a member of a class template that appears outside
+ // of the class template definition, the name of a member of the class
+ // template hides the name of a template-parameter of any enclosing class
+ // templates (but not a template-parameter of the member if the member is a
+ // class or function template).
+ // C++20 [temp.local]p9:
+ // In the definition of a class template or in the definition of a member
+ // of such a template that appears outside of the template definition, for
+ // each non-dependent base class (13.8.2.1), if the name of the base class
+ // or the name of a member of the base class is the same as the name of a
+ // template-parameter, the base class name or member name hides the
+ // template-parameter name (6.4.10).
+ //
+ // This means that a template parameter scope should be searched immediately
+ // after searching the DeclContext for which it is a template parameter
+ // scope. For example, for
+ // template<typename T> template<typename U> template<typename V>
+ // void N::A<T>::B<U>::f(...)
+ // we search V then B<U> (and base classes) then U then A<T> (and base
+ // classes) then T then N then ::.
+ // Walk outward one template-parameter scope at a time; ScopeDepth tracks
+ // the template nesting depth the current scope corresponds to.
+ unsigned ScopeDepth = getTemplateDepth(S);
+ for (; S && S->isTemplateParamScope(); S = S->getParent(), --ScopeDepth) {
+ DeclContext *SearchDCAfterScope = DC;
+ for (; DC; DC = DC->getLookupParent()) {
+ if (const TemplateParameterList *TPL =
+ cast<Decl>(DC)->getDescribedTemplateParams()) {
+ unsigned DCDepth = TPL->getDepth() + 1;
+ if (DCDepth > ScopeDepth)
+ continue;
+ if (ScopeDepth == DCDepth)
+ SearchDCAfterScope = DC = DC->getLookupParent();
+ break;
+ }
+ }
+ // After this scope's parameters are searched, continue lookup in the
+ // context computed above.
+ S->setLookupEntity(SearchDCAfterScope);
+ }
+}
+
void Sema::ActOnReenterFunctionContext(Scope* S, Decl *D) {
// We assume that the caller has already called
// ActOnReenterTemplateScope so getTemplatedDecl() works.
@@ -2586,11 +2606,15 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
AMK == Sema::AMK_ProtocolImplementation))
NewAttr = nullptr;
else if (const auto *UA = dyn_cast<UuidAttr>(Attr))
- NewAttr = S.mergeUuidAttr(D, *UA, UA->getGuid());
+ NewAttr = S.mergeUuidAttr(D, *UA, UA->getGuid(), UA->getGuidDecl());
else if (const auto *SLHA = dyn_cast<SpeculativeLoadHardeningAttr>(Attr))
NewAttr = S.mergeSpeculativeLoadHardeningAttr(D, *SLHA);
else if (const auto *SLHA = dyn_cast<NoSpeculativeLoadHardeningAttr>(Attr))
NewAttr = S.mergeNoSpeculativeLoadHardeningAttr(D, *SLHA);
+ else if (const auto *IMA = dyn_cast<WebAssemblyImportModuleAttr>(Attr))
+ NewAttr = S.mergeImportModuleAttr(D, *IMA);
+ else if (const auto *INA = dyn_cast<WebAssemblyImportNameAttr>(Attr))
+ NewAttr = S.mergeImportNameAttr(D, *INA);
else if (Attr->shouldInheritEvenIfAlreadyPresent() || !DeclHasAttr(D, Attr))
NewAttr = cast<InheritableAttr>(Attr->clone(S.Context));
@@ -2707,6 +2731,18 @@ static void checkNewAttributesAfterDef(Sema &S, Decl *New, const Decl *Old) {
--E;
continue;
}
+ } else if (isa<LoaderUninitializedAttr>(NewAttribute)) {
+ // If there is a C definition followed by a redeclaration with this
+ // attribute then there are two different definitions. In C++, prefer the
+ // standard diagnostics.
+ if (!S.getLangOpts().CPlusPlus) {
+ S.Diag(NewAttribute->getLocation(),
+ diag::err_loader_uninitialized_redeclaration);
+ S.Diag(Def->getLocation(), diag::note_previous_definition);
+ NewAttributes.erase(NewAttributes.begin() + I);
+ --E;
+ continue;
+ }
} else if (isa<SelectAnyAttr>(NewAttribute) &&
cast<VarDecl>(New)->isInline() &&
!cast<VarDecl>(New)->isInlineSpecified()) {
@@ -2716,6 +2752,11 @@ static void checkNewAttributesAfterDef(Sema &S, Decl *New, const Decl *Old) {
// honored it.
++I;
continue;
+ } else if (isa<OMPDeclareVariantAttr>(NewAttribute)) {
+ // We allow to add OMP[Begin]DeclareVariantAttr to be added to
+ // declarations after defintions.
+ ++I;
+ continue;
}
S.Diag(NewAttribute->getLocation(),
@@ -2736,23 +2777,21 @@ static void diagnoseMissingConstinit(Sema &S, const VarDecl *InitDecl,
// enough of the attribute list spelling information to extract that without
// heroics.
std::string SuitableSpelling;
- if (S.getLangOpts().CPlusPlus2a)
- SuitableSpelling =
- S.PP.getLastMacroWithSpelling(InsertLoc, {tok::kw_constinit});
+ if (S.getLangOpts().CPlusPlus20)
+ SuitableSpelling = std::string(
+ S.PP.getLastMacroWithSpelling(InsertLoc, {tok::kw_constinit}));
if (SuitableSpelling.empty() && S.getLangOpts().CPlusPlus11)
- SuitableSpelling = S.PP.getLastMacroWithSpelling(
- InsertLoc,
- {tok::l_square, tok::l_square, S.PP.getIdentifierInfo("clang"),
- tok::coloncolon,
- S.PP.getIdentifierInfo("require_constant_initialization"),
- tok::r_square, tok::r_square});
+ SuitableSpelling = std::string(S.PP.getLastMacroWithSpelling(
+ InsertLoc, {tok::l_square, tok::l_square,
+ S.PP.getIdentifierInfo("clang"), tok::coloncolon,
+ S.PP.getIdentifierInfo("require_constant_initialization"),
+ tok::r_square, tok::r_square}));
if (SuitableSpelling.empty())
- SuitableSpelling = S.PP.getLastMacroWithSpelling(
- InsertLoc,
- {tok::kw___attribute, tok::l_paren, tok::r_paren,
- S.PP.getIdentifierInfo("require_constant_initialization"),
- tok::r_paren, tok::r_paren});
- if (SuitableSpelling.empty() && S.getLangOpts().CPlusPlus2a)
+ SuitableSpelling = std::string(S.PP.getLastMacroWithSpelling(
+ InsertLoc, {tok::kw___attribute, tok::l_paren, tok::r_paren,
+ S.PP.getIdentifierInfo("require_constant_initialization"),
+ tok::r_paren, tok::r_paren}));
+ if (SuitableSpelling.empty() && S.getLangOpts().CPlusPlus20)
SuitableSpelling = "constinit";
if (SuitableSpelling.empty() && S.getLangOpts().CPlusPlus11)
SuitableSpelling = "[[clang::require_constant_initialization]]";
@@ -3884,11 +3923,11 @@ void Sema::MergeVarDeclTypes(VarDecl *New, VarDecl *Old,
if (!NewArray->isIncompleteArrayType() && !NewArray->isDependentType()) {
for (VarDecl *PrevVD = Old->getMostRecentDecl(); PrevVD;
PrevVD = PrevVD->getPreviousDecl()) {
- const ArrayType *PrevVDTy = Context.getAsArrayType(PrevVD->getType());
+ QualType PrevVDTy = PrevVD->getType();
if (PrevVDTy->isIncompleteArrayType() || PrevVDTy->isDependentType())
continue;
- if (!Context.hasSameType(NewArray, PrevVDTy))
+ if (!Context.hasSameType(New->getType(), PrevVDTy))
return diagnoseVarDeclTypeMismatch(*this, New, PrevVD);
}
}
@@ -4344,6 +4383,87 @@ void Sema::handleTagNumbering(const TagDecl *Tag, Scope *TagScope) {
}
}
+namespace {
+// Records why an anonymous class is not "C-like" for the purposes of
+// C++ [dcl.typedef] (typedef names for linkage): which offending construct
+// was found, and its source range for use in diagnostics. Converts to true
+// when a non-C-like construct (or an invalid decl) was recorded.
+struct NonCLikeKind {
+ enum {
+ None,
+ BaseClass,
+ DefaultMemberInit,
+ Lambda,
+ Friend,
+ OtherMember,
+ Invalid,
+ } Kind = None;
+ // Source range of the offending construct (empty for None/Invalid).
+ SourceRange Range;
+
+ explicit operator bool() { return Kind != None; }
+};
+}
+
+/// Determine whether a class is C-like, according to the rules of C++
+/// [dcl.typedef] for anonymous classes with typedef names for linkage.
+/// Returns the first non-C-like construct found (with its source range), or
+/// a None/Invalid result; Invalid suppresses further diagnostics for classes
+/// that already contain diagnosed errors.
+static NonCLikeKind getNonCLikeKindForAnonymousStruct(const CXXRecordDecl *RD) {
+ if (RD->isInvalidDecl())
+ return {NonCLikeKind::Invalid, {}};
+
+ // C++ [dcl.typedef]p9: [P1766R1]
+ // An unnamed class with a typedef name for linkage purposes shall not
+ //
+ // -- have any base classes
+ if (RD->getNumBases())
+ return {NonCLikeKind::BaseClass,
+ SourceRange(RD->bases_begin()->getBeginLoc(),
+ RD->bases_end()[-1].getEndLoc())};
+ bool Invalid = false;
+ for (Decl *D : RD->decls()) {
+ // Don't complain about things we already diagnosed.
+ if (D->isInvalidDecl()) {
+ Invalid = true;
+ continue;
+ }
+
+ // -- have any [...] default member initializers
+ if (auto *FD = dyn_cast<FieldDecl>(D)) {
+ if (FD->hasInClassInitializer()) {
+ auto *Init = FD->getInClassInitializer();
+ return {NonCLikeKind::DefaultMemberInit,
+ Init ? Init->getSourceRange() : D->getSourceRange()};
+ }
+ continue;
+ }
+
+ // FIXME: We don't allow friend declarations. This violates the wording of
+ // P1766, but not the intent.
+ if (isa<FriendDecl>(D))
+ return {NonCLikeKind::Friend, D->getSourceRange()};
+
+ // -- declare any members other than non-static data members, member
+ // enumerations, or member classes,
+ if (isa<StaticAssertDecl>(D) || isa<IndirectFieldDecl>(D) ||
+ isa<EnumDecl>(D))
+ continue;
+ auto *MemberRD = dyn_cast<CXXRecordDecl>(D);
+ if (!MemberRD) {
+ // Implicit declarations (e.g. injected class names) don't make the
+ // class non-C-like.
+ if (D->isImplicit())
+ continue;
+ return {NonCLikeKind::OtherMember, D->getSourceRange()};
+ }
+
+ // -- contain a lambda-expression,
+ if (MemberRD->isLambda())
+ return {NonCLikeKind::Lambda, MemberRD->getSourceRange()};
+
+ // and all member classes shall also satisfy these requirements
+ // (recursively).
+ if (MemberRD->isThisDeclarationADefinition()) {
+ if (auto Kind = getNonCLikeKindForAnonymousStruct(MemberRD))
+ return Kind;
+ }
+ }
+
+ // No offending construct; report Invalid if anything was skipped as
+ // already-diagnosed so callers stay quiet about this class.
+ return {Invalid ? NonCLikeKind::Invalid : NonCLikeKind::None, {}};
+}
+
void Sema::setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD) {
if (TagFromDeclSpec->isInvalidDecl())
@@ -4364,27 +4484,51 @@ void Sema::setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
return;
}
- // If we've already computed linkage for the anonymous tag, then
- // adding a typedef name for the anonymous decl can change that
- // linkage, which might be a serious problem. Diagnose this as
- // unsupported and ignore the typedef name. TODO: we should
- // pursue this as a language defect and establish a formal rule
- // for how to handle it.
- if (TagFromDeclSpec->hasLinkageBeenComputed()) {
- Diag(NewTD->getLocation(), diag::err_typedef_changes_linkage);
+ // C++ [dcl.typedef]p9: [P1766R1, applied as DR]
+ // An unnamed class with a typedef name for linkage purposes shall [be
+ // C-like].
+ //
+ // FIXME: Also diagnose if we've already computed the linkage. That ideally
+ // shouldn't happen, but there are constructs that the language rule doesn't
+ // disallow for which we can't reasonably avoid computing linkage early.
+ const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(TagFromDeclSpec);
+ NonCLikeKind NonCLike = RD ? getNonCLikeKindForAnonymousStruct(RD)
+ : NonCLikeKind();
+ bool ChangesLinkage = TagFromDeclSpec->hasLinkageBeenComputed();
+ if (NonCLike || ChangesLinkage) {
+ if (NonCLike.Kind == NonCLikeKind::Invalid)
+ return;
+
+ unsigned DiagID = diag::ext_non_c_like_anon_struct_in_typedef;
+ if (ChangesLinkage) {
+ // If the linkage changes, we can't accept this as an extension.
+ if (NonCLike.Kind == NonCLikeKind::None)
+ DiagID = diag::err_typedef_changes_linkage;
+ else
+ DiagID = diag::err_non_c_like_anon_struct_in_typedef;
+ }
- SourceLocation tagLoc = TagFromDeclSpec->getInnerLocStart();
- tagLoc = getLocForEndOfToken(tagLoc);
+ SourceLocation FixitLoc =
+ getLocForEndOfToken(TagFromDeclSpec->getInnerLocStart());
+ llvm::SmallString<40> TextToInsert;
+ TextToInsert += ' ';
+ TextToInsert += NewTD->getIdentifier()->getName();
- llvm::SmallString<40> textToInsert;
- textToInsert += ' ';
- textToInsert += NewTD->getIdentifier()->getName();
- Diag(tagLoc, diag::note_typedef_changes_linkage)
- << FixItHint::CreateInsertion(tagLoc, textToInsert);
- return;
+ Diag(FixitLoc, DiagID)
+ << isa<TypeAliasDecl>(NewTD)
+ << FixItHint::CreateInsertion(FixitLoc, TextToInsert);
+ if (NonCLike.Kind != NonCLikeKind::None) {
+ Diag(NonCLike.Range.getBegin(), diag::note_non_c_like_anon_struct)
+ << NonCLike.Kind - 1 << NonCLike.Range;
+ }
+ Diag(NewTD->getLocation(), diag::note_typedef_for_linkage_here)
+ << NewTD << isa<TypeAliasDecl>(NewTD);
+
+ if (ChangesLinkage)
+ return;
}
- // Otherwise, set this is the anon-decl typedef for the tag.
+ // Otherwise, set this as the anon-decl typedef for the tag.
TagFromDeclSpec->setTypedefNameForAnonDecl(NewTD);
}
@@ -4915,6 +5059,10 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
// define non-static data members. [Note: nested types and
// functions cannot be declared within an anonymous union. ]
for (auto *Mem : Record->decls()) {
+ // Ignore invalid declarations; we already diagnosed them.
+ if (Mem->isInvalidDecl())
+ continue;
+
if (auto *FD = dyn_cast<FieldDecl>(Mem)) {
// C++ [class.union]p3:
// An anonymous union shall not have private or protected
@@ -5138,8 +5286,8 @@ Decl *Sema::BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
Chain.push_back(Anon);
RecordDecl *RecordDef = Record->getDefinition();
- if (RequireCompleteType(Anon->getLocation(), RecTy,
- diag::err_field_incomplete) ||
+ if (RequireCompleteSizedType(Anon->getLocation(), RecTy,
+ diag::err_field_incomplete_or_sizeless) ||
InjectAnonymousStructOrUnionMembers(*this, S, CurContext, RecordDef,
AS_none, Chain)) {
Anon->setInvalidDecl();
@@ -6142,6 +6290,8 @@ bool Sema::inferObjCARCLifetime(ValueDecl *decl) {
void Sema::deduceOpenCLAddressSpace(ValueDecl *Decl) {
if (Decl->getType().hasAddressSpace())
return;
+ if (Decl->getType()->isDependentType())
+ return;
if (VarDecl *Var = dyn_cast<VarDecl>(Decl)) {
QualType Type = Var->getType();
if (Type->isSamplerT() || Type->isVoidType())
@@ -6468,6 +6618,8 @@ static bool shouldConsiderLinkage(const VarDecl *VD) {
return true;
if (DC->isRecord())
return false;
+ if (isa<RequiresExprBodyDecl>(DC))
+ return false;
llvm_unreachable("Unexpected context");
}
@@ -6753,28 +6905,49 @@ NamedDecl *Sema::ActOnVariableDeclarator(
if (SC == SC_Static && CurContext->isRecord()) {
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC)) {
- if (RD->isLocalClass())
+ // Walk up the enclosing DeclContexts to check for any that are
+ // incompatible with static data members.
+ const DeclContext *FunctionOrMethod = nullptr;
+ const CXXRecordDecl *AnonStruct = nullptr;
+ for (DeclContext *Ctxt = DC; Ctxt; Ctxt = Ctxt->getParent()) {
+ if (Ctxt->isFunctionOrMethod()) {
+ FunctionOrMethod = Ctxt;
+ break;
+ }
+ const CXXRecordDecl *ParentDecl = dyn_cast<CXXRecordDecl>(Ctxt);
+ if (ParentDecl && !ParentDecl->getDeclName()) {
+ AnonStruct = ParentDecl;
+ break;
+ }
+ }
+ if (FunctionOrMethod) {
+ // C++ [class.static.data]p5: A local class shall not have static data
+ // members.
Diag(D.getIdentifierLoc(),
diag::err_static_data_member_not_allowed_in_local_class)
- << Name << RD->getDeclName();
-
- // C++98 [class.union]p1: If a union contains a static data member,
- // the program is ill-formed. C++11 drops this restriction.
- if (RD->isUnion())
+ << Name << RD->getDeclName() << RD->getTagKind();
+ } else if (AnonStruct) {
+ // C++ [class.static.data]p4: Unnamed classes and classes contained
+ // directly or indirectly within unnamed classes shall not contain
+ // static data members.
+ Diag(D.getIdentifierLoc(),
+ diag::err_static_data_member_not_allowed_in_anon_struct)
+ << Name << AnonStruct->getTagKind();
+ Invalid = true;
+ } else if (RD->isUnion()) {
+ // C++98 [class.union]p1: If a union contains a static data member,
+ // the program is ill-formed. C++11 drops this restriction.
Diag(D.getIdentifierLoc(),
getLangOpts().CPlusPlus11
? diag::warn_cxx98_compat_static_data_member_in_union
: diag::ext_static_data_member_in_union) << Name;
- // We conservatively disallow static data members in anonymous structs.
- else if (!RD->getDeclName())
- Diag(D.getIdentifierLoc(),
- diag::err_static_data_member_not_allowed_in_anon_struct)
- << Name << RD->isUnion();
+ }
}
}
// Match up the template parameter lists with the scope specifier, then
// determine whether we have a template or a template specialization.
+ bool InvalidScope = false;
TemplateParams = MatchTemplateParametersToScopeSpecifier(
D.getDeclSpec().getBeginLoc(), D.getIdentifierLoc(),
D.getCXXScopeSpec(),
@@ -6782,7 +6955,8 @@ NamedDecl *Sema::ActOnVariableDeclarator(
? D.getName().TemplateId
: nullptr,
TemplateParamLists,
- /*never a friend*/ false, IsMemberSpecialization, Invalid);
+ /*never a friend*/ false, IsMemberSpecialization, InvalidScope);
+ Invalid |= InvalidScope;
if (TemplateParams) {
if (!TemplateParams->size() &&
@@ -6918,7 +7092,8 @@ NamedDecl *Sema::ActOnVariableDeclarator(
diag::err_thread_non_global)
<< DeclSpec::getSpecifierName(TSCS);
else if (!Context.getTargetInfo().isTLSSupported()) {
- if (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice) {
+ if (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice ||
+ getLangOpts().SYCLIsDevice) {
// Postpone error emission until we've collected attributes required to
// figure out whether it's a host or device variable and whether the
// error should be ignored.
@@ -6946,6 +7121,7 @@ NamedDecl *Sema::ActOnVariableDeclarator(
case CSK_constexpr:
NewVD->setConstexpr(true);
+ MaybeAddCUDAConstantAttr(NewVD);
// C++1z [dcl.spec.constexpr]p1:
// A static data member declared with the constexpr specifier is
// implicitly an inline variable.
@@ -7019,13 +7195,18 @@ NamedDecl *Sema::ActOnVariableDeclarator(
// Handle attributes prior to checking for duplicates in MergeVarDecl
ProcessDeclAttributes(S, NewVD, D);
- if (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice) {
+ if (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice ||
+ getLangOpts().SYCLIsDevice) {
if (EmitTLSUnsupportedError &&
((getLangOpts().CUDA && DeclAttrsMatchCUDAMode(getLangOpts(), NewVD)) ||
(getLangOpts().OpenMPIsDevice &&
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(NewVD))))
Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
diag::err_thread_unsupported);
+
+ if (EmitTLSUnsupportedError &&
+ (LangOpts.SYCLIsDevice || (LangOpts.OpenMP && LangOpts.OpenMPIsDevice)))
+ targetDiag(D.getIdentifierLoc(), diag::err_thread_unsupported);
// CUDA B.2.5: "__shared__ and __constant__ variables have implied static
// storage [duration]."
if (SC == SC_None && S->getFnParent() != nullptr &&
@@ -7680,6 +7861,7 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
if (NewVD->isFileVarDecl() || NewVD->isStaticLocal() ||
NewVD->hasExternalStorage()) {
if (!T->isSamplerT() &&
+ !T->isDependentType() &&
!(T.getAddressSpace() == LangAS::opencl_constant ||
(T.getAddressSpace() == LangAS::opencl_global &&
(getLangOpts().OpenCLVersion == 200 ||
@@ -7822,6 +8004,12 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
return;
}
+ if (!NewVD->hasLocalStorage() && T->isSizelessType()) {
+ Diag(NewVD->getLocation(), diag::err_sizeless_nonlocal) << T;
+ NewVD->setInvalidDecl();
+ return;
+ }
+
if (isVM && NewVD->hasAttr<BlocksAttr>()) {
Diag(NewVD->getLocation(), diag::err_block_on_vm);
NewVD->setInvalidDecl();
@@ -7909,30 +8097,8 @@ struct FindOverriddenMethod {
return false;
}
};
-
-enum OverrideErrorKind { OEK_All, OEK_NonDeleted, OEK_Deleted };
} // end anonymous namespace
-/// Report an error regarding overriding, along with any relevant
-/// overridden methods.
-///
-/// \param DiagID the primary error to report.
-/// \param MD the overriding method.
-/// \param OEK which overrides to include as notes.
-static void ReportOverrides(Sema& S, unsigned DiagID, const CXXMethodDecl *MD,
- OverrideErrorKind OEK = OEK_All) {
- S.Diag(MD->getLocation(), DiagID) << MD->getDeclName();
- for (const CXXMethodDecl *O : MD->overridden_methods()) {
- // This check (& the OEK parameter) could be replaced by a predicate, but
- // without lambdas that would be overkill. This is still nicer than writing
- // out the diag loop 3 times.
- if ((OEK == OEK_All) ||
- (OEK == OEK_NonDeleted && !O->isDeleted()) ||
- (OEK == OEK_Deleted && O->isDeleted()))
- S.Diag(O->getLocation(), diag::note_overridden_virtual_function);
- }
-}
-
/// AddOverriddenMethods - See if a method overrides any in the base classes,
/// and if so, check that it's a valid override and remember it.
bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
@@ -7941,8 +8107,6 @@ bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
FindOverriddenMethod FOM;
FOM.Method = MD;
FOM.S = this;
- bool hasDeletedOverridenMethods = false;
- bool hasNonDeletedOverridenMethods = false;
bool AddedAny = false;
if (DC->lookupInBases(FOM, Paths)) {
for (auto *I : Paths.found_decls()) {
@@ -7952,21 +8116,12 @@ bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
!CheckOverridingFunctionAttributes(MD, OldMD) &&
!CheckOverridingFunctionExceptionSpec(MD, OldMD) &&
!CheckIfOverriddenFunctionIsMarkedFinal(MD, OldMD)) {
- hasDeletedOverridenMethods |= OldMD->isDeleted();
- hasNonDeletedOverridenMethods |= !OldMD->isDeleted();
AddedAny = true;
}
}
}
}
- if (hasDeletedOverridenMethods && !MD->isDeleted()) {
- ReportOverrides(*this, diag::err_non_deleted_override, MD, OEK_Deleted);
- }
- if (hasNonDeletedOverridenMethods && MD->isDeleted()) {
- ReportOverrides(*this, diag::err_deleted_override, MD, OEK_NonDeleted);
- }
-
return AddedAny;
}
@@ -8654,11 +8809,24 @@ static Scope *getTagInjectionScope(Scope *S, const LangOptions &LangOpts) {
NamedDecl*
Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo, LookupResult &Previous,
- MultiTemplateParamsArg TemplateParamLists,
+ MultiTemplateParamsArg TemplateParamListsRef,
bool &AddToScope) {
QualType R = TInfo->getType();
assert(R->isFunctionType());
+ if (R.getCanonicalType()->castAs<FunctionType>()->getCmseNSCallAttr())
+ Diag(D.getIdentifierLoc(), diag::err_function_decl_cmse_ns_call);
+
+ SmallVector<TemplateParameterList *, 4> TemplateParamLists;
+ for (TemplateParameterList *TPL : TemplateParamListsRef)
+ TemplateParamLists.push_back(TPL);
+ if (TemplateParameterList *Invented = D.getInventedTemplateParameterList()) {
+ if (!TemplateParamLists.empty() &&
+ Invented->getDepth() == TemplateParamLists.back()->getDepth())
+ TemplateParamLists.back() = Invented;
+ else
+ TemplateParamLists.push_back(Invented);
+ }
// TODO: consider using NameInfo for diagnostic.
DeclarationNameInfo NameInfo = GetNameForDeclarator(D);
@@ -8738,15 +8906,16 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// Match up the template parameter lists with the scope specifier, then
// determine whether we have a template or a template specialization.
bool Invalid = false;
- if (TemplateParameterList *TemplateParams =
- MatchTemplateParametersToScopeSpecifier(
- D.getDeclSpec().getBeginLoc(), D.getIdentifierLoc(),
- D.getCXXScopeSpec(),
- D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId
- ? D.getName().TemplateId
- : nullptr,
- TemplateParamLists, isFriend, isMemberSpecialization,
- Invalid)) {
+ TemplateParameterList *TemplateParams =
+ MatchTemplateParametersToScopeSpecifier(
+ D.getDeclSpec().getBeginLoc(), D.getIdentifierLoc(),
+ D.getCXXScopeSpec(),
+ D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId
+ ? D.getName().TemplateId
+ : nullptr,
+ TemplateParamLists, isFriend, isMemberSpecialization,
+ Invalid);
+ if (TemplateParams) {
if (TemplateParams->size() > 0) {
// This is a function template
@@ -8779,7 +8948,8 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// For source fidelity, store the other template param lists.
if (TemplateParamLists.size() > 1) {
NewFD->setTemplateParameterListsInfo(Context,
- TemplateParamLists.drop_back(1));
+ ArrayRef<TemplateParameterList *>(TemplateParamLists)
+ .drop_back(1));
}
} else {
// This is a function template specialization.
@@ -8914,9 +9084,24 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// C++11 [dcl.constexpr]p3: functions declared constexpr are required to
// be either constructors or to return a literal type. Therefore,
// destructors cannot be declared constexpr.
- if (isa<CXXDestructorDecl>(NewFD) && !getLangOpts().CPlusPlus2a) {
+ if (isa<CXXDestructorDecl>(NewFD) &&
+ (!getLangOpts().CPlusPlus20 || ConstexprKind == CSK_consteval)) {
Diag(D.getDeclSpec().getConstexprSpecLoc(), diag::err_constexpr_dtor)
<< ConstexprKind;
+ NewFD->setConstexprKind(getLangOpts().CPlusPlus20 ? CSK_unspecified : CSK_constexpr);
+ }
+ // C++20 [dcl.constexpr]p2: An allocation function, or a
+ // deallocation function shall not be declared with the consteval
+ // specifier.
+ if (ConstexprKind == CSK_consteval &&
+ (NewFD->getOverloadedOperator() == OO_New ||
+ NewFD->getOverloadedOperator() == OO_Array_New ||
+ NewFD->getOverloadedOperator() == OO_Delete ||
+ NewFD->getOverloadedOperator() == OO_Array_Delete)) {
+ Diag(D.getDeclSpec().getConstexprSpecLoc(),
+ diag::err_invalid_consteval_decl_kind)
+ << NewFD;
+ NewFD->setConstexprKind(CSK_constexpr);
}
}
@@ -8945,8 +9130,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
// If a function is defined as defaulted or deleted, mark it as such now.
- // FIXME: Does this ever happen? ActOnStartOfFunctionDef forces the function
- // definition kind to FDK_Definition.
+ // We'll do the relevant checks on defaulted / deleted functions later.
switch (D.getFunctionDefinitionKind()) {
case FDK_Declaration:
case FDK_Definition:
@@ -9808,6 +9992,18 @@ static bool CheckMultiVersionValue(Sema &S, const FunctionDecl *FD) {
return false;
}
+// Provide a white-list of attributes that are allowed to be combined with
+// multiversion functions.
+static bool AttrCompatibleWithMultiVersion(attr::Kind Kind,
+ MultiVersionKind MVType) {
+ switch (Kind) {
+ default:
+ return false;
+ case attr::Used:
+ return MVType == MultiVersionKind::Target;
+ }
+}
+
static bool HasNonMultiVersionAttributes(const FunctionDecl *FD,
MultiVersionKind MVType) {
for (const Attr *A : FD->attrs()) {
@@ -9823,7 +10019,9 @@ static bool HasNonMultiVersionAttributes(const FunctionDecl *FD,
return true;
break;
default:
- return true;
+ if (!AttrCompatibleWithMultiVersion(A->getKind(), MVType))
+ return true;
+ break;
}
}
return false;
@@ -10562,9 +10760,6 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
return Redeclaration;
}
}
- } else if (CXXConversionDecl *Conversion
- = dyn_cast<CXXConversionDecl>(NewFD)) {
- ActOnConversionDeclarator(Conversion);
} else if (auto *Guide = dyn_cast<CXXDeductionGuideDecl>(NewFD)) {
if (auto *TD = Guide->getDescribedFunctionTemplate())
CheckDeductionGuideTemplate(TD);
@@ -10581,12 +10776,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
if (!Method->isFunctionTemplateSpecialization() &&
!Method->getDescribedFunctionTemplate() &&
Method->isCanonicalDecl()) {
- if (AddOverriddenMethods(Method->getParent(), Method)) {
- // If the function was marked as "static", we have a problem.
- if (NewFD->getStorageClass() == SC_Static) {
- ReportOverrides(*this, diag::err_static_overrides_virtual, Method);
- }
- }
+ AddOverriddenMethods(Method->getParent(), Method);
}
if (Method->isVirtual() && NewFD->getTrailingRequiresClause())
// C++2a [class.virtual]p6
@@ -10598,6 +10788,9 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
checkThisInStaticMemberFunctionType(Method);
}
+ if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(NewFD))
+ ActOnConversionDeclarator(Conversion);
+
// Extra checking for C++ overloaded operators (C++ [over.oper]).
if (NewFD->isOverloadedOperator() &&
CheckOverloadedOperatorDeclaration(NewFD)) {
@@ -11363,6 +11556,7 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
bool Sema::DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init) {
+ assert(!Init || !Init->containsErrors());
QualType DeducedType = deduceVarTypeFromInitializer(
VDecl, VDecl->getDeclName(), VDecl->getType(), VDecl->getTypeSourceInfo(),
VDecl->getSourceRange(), DirectInit, Init);
@@ -11396,6 +11590,9 @@ bool Sema::DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
void Sema::checkNonTrivialCUnionInInitializer(const Expr *Init,
SourceLocation Loc) {
+ if (auto *EWC = dyn_cast<ExprWithCleanups>(Init))
+ Init = EWC->getSubExpr();
+
if (auto *CE = dyn_cast<ConstantExpr>(Init))
Init = CE->getSubExpr();
@@ -11698,7 +11895,15 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
// TypoExpr.
ExprResult Res = CorrectDelayedTyposInExpr(Init, VDecl);
if (!Res.isUsable()) {
+ // There are unresolved typos in Init, just drop them.
+ // FIXME: improve the recovery strategy to preserve the Init.
+ RealDecl->setInvalidDecl();
+ return;
+ }
+ if (Res.get()->containsErrors()) {
+ // Invalidate the decl as we don't know the type for recovery-expr yet.
RealDecl->setInvalidDecl();
+ VDecl->setInit(Res.get());
return;
}
Init = Res.get();
@@ -11790,6 +11995,13 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
return;
}
+ // The LoaderUninitialized attribute acts as a definition (of undef).
+ if (VDecl->hasAttr<LoaderUninitializedAttr>()) {
+ Diag(VDecl->getLocation(), diag::err_loader_uninitialized_cant_init);
+ VDecl->setInvalidDecl();
+ return;
+ }
+
// Get the decls type and save a reference for later, since
// CheckInitializerTypes may change it.
QualType DclT = VDecl->getType(), SavT = DclT;
@@ -11821,7 +12033,8 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
// Try to correct any TypoExprs in the initialization arguments.
for (size_t Idx = 0; Idx < Args.size(); ++Idx) {
ExprResult Res = CorrectDelayedTyposInExpr(
- Args[Idx], VDecl, [this, Entity, Kind](Expr *E) {
+ Args[Idx], VDecl, /*RecoverUncorrectedTypos=*/false,
+ [this, Entity, Kind](Expr *E) {
InitializationSequence Init(*this, Entity, Kind, MultiExprArg(E));
return Init.Failed() ? ExprError() : E;
});
@@ -11839,7 +12052,12 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
/*TreatUnavailableAsInvalid=*/false);
ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Args, &DclT);
if (Result.isInvalid()) {
- VDecl->setInvalidDecl();
+ // If the provided initializer fails to initialize the var decl,
+ // we attach a recovery expr for better recovery.
+ auto RecoveryExpr =
+ CreateRecoveryExpr(Init->getBeginLoc(), Init->getEndLoc(), Args);
+ if (RecoveryExpr.get())
+ VDecl->setInit(RecoveryExpr.get());
return;
}
@@ -12100,6 +12318,8 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
VDecl->setInitStyle(VarDecl::ListInit);
}
+ if (LangOpts.OpenMP && VDecl->isFileVarDecl())
+ DeclsToCheckForDeferredDiags.push_back(VDecl);
CheckCompleteVariableDeclaration(VDecl);
}
@@ -12120,7 +12340,7 @@ void Sema::ActOnInitializerError(Decl *D) {
BD->setInvalidDecl();
// Auto types are meaningless if we can't make sense of the initializer.
- if (ParsingInitForAutoVars.count(D)) {
+ if (VD->getType()->isUndeducedType()) {
D->setInvalidDecl();
return;
}
@@ -12203,6 +12423,22 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
return;
}
+ if (!Var->isInvalidDecl() && RealDecl->hasAttr<LoaderUninitializedAttr>()) {
+ if (CXXRecordDecl *RD = Var->getType()->getAsCXXRecordDecl()) {
+ if (!RD->hasTrivialDefaultConstructor()) {
+ Diag(Var->getLocation(), diag::err_loader_uninitialized_trivial_ctor);
+ Var->setInvalidDecl();
+ return;
+ }
+ }
+ if (Var->getStorageClass() == SC_Extern) {
+ Diag(Var->getLocation(), diag::err_loader_uninitialized_extern_decl)
+ << Var;
+ Var->setInvalidDecl();
+ return;
+ }
+ }
+
VarDecl::DefinitionKind DefKind = Var->isThisDeclarationADefinition();
if (!Var->isInvalidDecl() && DefKind != VarDecl::DeclarationOnly &&
Var->getType().hasNonTrivialToPrimitiveDefaultInitializeCUnion())
@@ -12260,9 +12496,9 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
if (!Var->isInvalidDecl()) {
if (const IncompleteArrayType *ArrayT
= Context.getAsIncompleteArrayType(Type)) {
- if (RequireCompleteType(Var->getLocation(),
- ArrayT->getElementType(),
- diag::err_illegal_decl_array_incomplete_type))
+ if (RequireCompleteSizedType(
+ Var->getLocation(), ArrayT->getElementType(),
+ diag::err_array_incomplete_or_sizeless_type))
Var->setInvalidDecl();
} else if (Var->getStorageClass() == SC_Static) {
// C99 6.9.2p3: If the declaration of an identifier for an object is
@@ -12378,12 +12614,18 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
InitializationSequence InitSeq(*this, Entity, Kind, None);
ExprResult Init = InitSeq.Perform(*this, Entity, Kind, None);
- if (Init.isInvalid())
- Var->setInvalidDecl();
- else if (Init.get()) {
+
+ if (Init.get()) {
Var->setInit(MaybeCreateExprWithCleanups(Init.get()));
// This is important for template substitution.
Var->setInitStyle(VarDecl::CallInit);
+ } else if (Init.isInvalid()) {
+ // If default-init fails, attach a recovery-expr initializer to track
+ // that initialization was attempted and failed.
+ auto RecoveryExpr =
+ CreateRecoveryExpr(Var->getLocation(), Var->getLocation(), {});
+ if (RecoveryExpr.get())
+ Var->setInit(RecoveryExpr.get());
}
CheckCompleteVariableDeclaration(Var);
@@ -12507,6 +12749,7 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
var->getDeclContext()->getRedeclContext()->isFileContext() &&
var->isExternallyVisible() && var->hasLinkage() &&
!var->isInline() && !var->getDescribedVarTemplate() &&
+ !isa<VarTemplatePartialSpecializationDecl>(var) &&
!isTemplateInstantiation(var->getTemplateSpecializationKind()) &&
!getDiagnostics().isIgnored(diag::warn_missing_variable_declarations,
var->getLocation())) {
@@ -12559,7 +12802,7 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
if (GlobalStorage && var->isThisDeclarationADefinition() &&
!inTemplateInstantiation()) {
PragmaStack<StringLiteral *> *Stack = nullptr;
- int SectionFlags = ASTContext::PSF_Implicit | ASTContext::PSF_Read;
+ int SectionFlags = ASTContext::PSF_Read;
if (var->getType().isConstQualified())
Stack = &ConstSegStack;
else if (!var->getInit()) {
@@ -12569,14 +12812,19 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
Stack = &DataSegStack;
SectionFlags |= ASTContext::PSF_Write;
}
- if (Stack->CurrentValue && !var->hasAttr<SectionAttr>())
+ if (const SectionAttr *SA = var->getAttr<SectionAttr>()) {
+ if (SA->getSyntax() == AttributeCommonInfo::AS_Declspec)
+ SectionFlags |= ASTContext::PSF_Implicit;
+ UnifySection(SA->getName(), SectionFlags, var);
+ } else if (Stack->CurrentValue) {
+ SectionFlags |= ASTContext::PSF_Implicit;
+ auto SectionName = Stack->CurrentValue->getString();
var->addAttr(SectionAttr::CreateImplicit(
- Context, Stack->CurrentValue->getString(),
- Stack->CurrentPragmaLocation, AttributeCommonInfo::AS_Pragma,
- SectionAttr::Declspec_allocate));
- if (const SectionAttr *SA = var->getAttr<SectionAttr>())
- if (UnifySection(SA->getName(), SectionFlags, var))
+ Context, SectionName, Stack->CurrentPragmaLocation,
+ AttributeCommonInfo::AS_Pragma, SectionAttr::Declspec_allocate));
+ if (UnifySection(SectionName, SectionFlags, var))
var->dropAttr<SectionAttr>();
+ }
// Apply the init_seg attribute if this has an initializer. If the
// initializer turns out to not be dynamic, we'll end up ignoring this
@@ -13013,13 +13261,15 @@ Sema::BuildDeclaratorGroup(MutableArrayRef<Decl *> Group) {
DeducedDecl = D;
} else if (!Context.hasSameType(DT->getDeducedType(), Deduced)) {
auto *AT = dyn_cast<AutoType>(DT);
- Diag(D->getTypeSourceInfo()->getTypeLoc().getBeginLoc(),
- diag::err_auto_different_deductions)
- << (AT ? (unsigned)AT->getKeyword() : 3)
- << Deduced << DeducedDecl->getDeclName()
- << DT->getDeducedType() << D->getDeclName()
- << DeducedDecl->getInit()->getSourceRange()
- << D->getInit()->getSourceRange();
+ auto Dia = Diag(D->getTypeSourceInfo()->getTypeLoc().getBeginLoc(),
+ diag::err_auto_different_deductions)
+ << (AT ? (unsigned)AT->getKeyword() : 3) << Deduced
+ << DeducedDecl->getDeclName() << DT->getDeducedType()
+ << D->getDeclName();
+ if (DeducedDecl->hasInit())
+ Dia << DeducedDecl->getInit()->getSourceRange();
+ if (D->getInit())
+ Dia << D->getInit()->getSourceRange();
D->setInvalidDecl();
break;
}
@@ -13398,9 +13648,28 @@ Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Declarator &D,
assert(D.isFunctionDeclarator() && "Not a function declarator!");
Scope *ParentScope = FnBodyScope->getParent();
+ // Check if we are in an `omp begin/end declare variant` scope. If we are, and
+ // we define a non-templated function definition, we will create a declaration
+ // instead (=BaseFD), and emit the definition with a mangled name afterwards.
+ // The base function declaration will have the equivalent of an `omp declare
+ // variant` annotation which specifies the mangled definition as a
+ // specialization function under the OpenMP context defined as part of the
+ // `omp begin declare variant`.
+ FunctionDecl *BaseFD = nullptr;
+ if (LangOpts.OpenMP && isInOpenMPDeclareVariantScope() &&
+ TemplateParameterLists.empty())
+ BaseFD = ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
+ ParentScope, D);
+
D.setFunctionDefinitionKind(FDK_Definition);
Decl *DP = HandleDeclarator(ParentScope, D, TemplateParameterLists);
- return ActOnStartOfFunctionDef(FnBodyScope, DP, SkipBody);
+ Decl *Dcl = ActOnStartOfFunctionDef(FnBodyScope, DP, SkipBody);
+
+ if (BaseFD)
+ ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
+ cast<FunctionDecl>(Dcl), BaseFD);
+
+ return Dcl;
}
void Sema::ActOnFinishInlineFunctionDef(FunctionDecl *D) {
@@ -13593,13 +13862,12 @@ static void RebuildLambdaScopeInfo(CXXMethodDecl *CallOperator,
VarDecl *VD = C.getCapturedVar();
if (VD->isInitCapture())
S.CurrentInstantiationScope->InstantiatedLocal(VD, VD);
- QualType CaptureType = VD->getType();
const bool ByRef = C.getCaptureKind() == LCK_ByRef;
LSI->addCapture(VD, /*IsBlock*/false, ByRef,
/*RefersToEnclosingVariableOrCapture*/true, C.getLocation(),
/*EllipsisLoc*/C.isPackExpansion()
? C.getEllipsisLoc() : SourceLocation(),
- CaptureType, /*Invalid*/false);
+ I->getType(), /*Invalid*/false);
} else if (C.capturesThis()) {
LSI->addThisCapture(/*Nested*/ false, C.getLocation(), I->getType(),
@@ -13632,7 +13900,9 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
// Do not push if it is a lambda because one is already pushed when building
// the lambda in ActOnStartOfLambdaDefinition().
if (!isLambdaCallOperator(FD))
- PushExpressionEvaluationContext(ExprEvalContexts.back().Context);
+ PushExpressionEvaluationContext(
+ FD->isConsteval() ? ExpressionEvaluationContext::ConstantEvaluated
+ : ExprEvalContexts.back().Context);
// Check for defining attributes before the check for redefinition.
if (const auto *Attr = FD->getAttr<AliasAttr>()) {
@@ -13996,11 +14266,48 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
: FixItHint{});
}
} else {
+ // Returns true if the token beginning at this Loc is `const`.
+ auto isLocAtConst = [&](SourceLocation Loc, const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
+ if (LocInfo.first.isInvalid())
+ return false;
+
+ bool Invalid = false;
+ StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
+ if (Invalid)
+ return false;
+
+ if (LocInfo.second > Buffer.size())
+ return false;
+
+ const char *LexStart = Buffer.data() + LocInfo.second;
+ StringRef StartTok(LexStart, Buffer.size() - LocInfo.second);
+
+ return StartTok.consume_front("const") &&
+ (StartTok.empty() || isWhitespace(StartTok[0]) ||
+ StartTok.startswith("/*") || StartTok.startswith("//"));
+ };
+
+ auto findBeginLoc = [&]() {
+ // If the return type has `const` qualifier, we want to insert
+ // `static` before `const` (and not before the typename).
+ if ((FD->getReturnType()->isAnyPointerType() &&
+ FD->getReturnType()->getPointeeType().isConstQualified()) ||
+ FD->getReturnType().isConstQualified()) {
+ // But only do this if we can determine where the `const` is.
+
+ if (isLocAtConst(FD->getBeginLoc(), getSourceManager(),
+ getLangOpts()))
+
+ return FD->getBeginLoc();
+ }
+ return FD->getTypeSpecStartLoc();
+ };
Diag(FD->getTypeSpecStartLoc(), diag::note_static_for_internal_linkage)
<< /* function */ 1
<< (FD->getStorageClass() == SC_None
- ? FixItHint::CreateInsertion(FD->getTypeSpecStartLoc(),
- "static ")
+ ? FixItHint::CreateInsertion(findBeginLoc(), "static ")
: FixItHint{});
}
@@ -14008,11 +14315,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
// Warn if K&R function is defined without a previous declaration.
// This warning is issued only if the definition itself does not provide
// a prototype. Only K&R definitions do not provide a prototype.
- // An empty list in a function declarator that is part of a definition
- // of that function specifies that the function has no parameters
- // (C99 6.7.5.3p14)
- if (!FD->hasWrittenPrototype() && FD->getNumParams() > 0 &&
- !LangOpts.CPlusPlus) {
+ if (!FD->hasWrittenPrototype()) {
TypeSourceInfo *TI = FD->getTypeSourceInfo();
TypeLoc TL = TI->getTypeLoc();
FunctionTypeLoc FTL = TL.getAsAdjusted<FunctionTypeLoc>();
@@ -14142,7 +14445,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
// If any errors have occurred, clear out any temporaries that may have
// been leftover. This ensures that these temporaries won't be picked up for
// deletion in some later function.
- if (getDiagnostics().hasErrorOccurred() ||
+ if (getDiagnostics().hasUncompilableErrorOccurred() ||
getDiagnostics().getSuppressAllDiagnostics()) {
DiscardCleanupsInEvaluationContext();
}
@@ -14198,10 +14501,17 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
// If any errors have occurred, clear out any temporaries that may have
// been leftover. This ensures that these temporaries won't be picked up for
// deletion in some later function.
- if (getDiagnostics().hasErrorOccurred()) {
+ if (getDiagnostics().hasUncompilableErrorOccurred()) {
DiscardCleanupsInEvaluationContext();
}
+ if (LangOpts.OpenMP || LangOpts.CUDA || LangOpts.SYCLIsDevice) {
+ auto ES = getEmissionStatus(FD);
+ if (ES == Sema::FunctionEmissionStatus::Emitted ||
+ ES == Sema::FunctionEmissionStatus::Unknown)
+ DeclsToCheckForDeferredDiags.push_back(FD);
+ }
+
return dcl;
}
@@ -14333,6 +14643,77 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
return FD;
}
+/// If this function is a C++ replaceable global allocation function
+/// (C++2a [basic.stc.dynamic.allocation], C++2a [new.delete]),
+/// adds any function attributes that we know a priori based on the standard.
+///
+/// We need to check for duplicate attributes both here and where user-written
+/// attributes are applied to declarations.
+void Sema::AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
+ FunctionDecl *FD) {
+ if (FD->isInvalidDecl())
+ return;
+
+ if (FD->getDeclName().getCXXOverloadedOperator() != OO_New &&
+ FD->getDeclName().getCXXOverloadedOperator() != OO_Array_New)
+ return;
+
+ Optional<unsigned> AlignmentParam;
+ bool IsNothrow = false;
+ if (!FD->isReplaceableGlobalAllocationFunction(&AlignmentParam, &IsNothrow))
+ return;
+
+ // C++2a [basic.stc.dynamic.allocation]p4:
+ // An allocation function that has a non-throwing exception specification
+ // indicates failure by returning a null pointer value. Any other allocation
+ // function never returns a null pointer value and indicates failure only by
+ // throwing an exception [...]
+ if (!IsNothrow && !FD->hasAttr<ReturnsNonNullAttr>())
+ FD->addAttr(ReturnsNonNullAttr::CreateImplicit(Context, FD->getLocation()));
+
+ // C++2a [basic.stc.dynamic.allocation]p2:
+ // An allocation function attempts to allocate the requested amount of
+ // storage. [...] If the request succeeds, the value returned by a
+ // replaceable allocation function is a [...] pointer value p0 different
+ // from any previously returned value p1 [...]
+ //
+ // However, this particular information is being added in codegen,
+ // because there is an opt-out switch for it (-fno-assume-sane-operator-new)
+
+ // C++2a [basic.stc.dynamic.allocation]p2:
+ // An allocation function attempts to allocate the requested amount of
+ // storage. If it is successful, it returns the address of the start of a
+ // block of storage whose length in bytes is at least as large as the
+ // requested size.
+ if (!FD->hasAttr<AllocSizeAttr>()) {
+ FD->addAttr(AllocSizeAttr::CreateImplicit(
+ Context, /*ElemSizeParam=*/ParamIdx(1, FD),
+ /*NumElemsParam=*/ParamIdx(), FD->getLocation()));
+ }
+
+ // C++2a [basic.stc.dynamic.allocation]p3:
+ // For an allocation function [...], the pointer returned on a successful
+ // call shall represent the address of storage that is aligned as follows:
+ // (3.1) If the allocation function takes an argument of type
+ //         std::align_val_t, the storage will have the alignment
+ // specified by the value of this argument.
+ if (AlignmentParam.hasValue() && !FD->hasAttr<AllocAlignAttr>()) {
+ FD->addAttr(AllocAlignAttr::CreateImplicit(
+ Context, ParamIdx(AlignmentParam.getValue(), FD), FD->getLocation()));
+ }
+
+ // FIXME:
+ // C++2a [basic.stc.dynamic.allocation]p3:
+ // For an allocation function [...], the pointer returned on a successful
+ // call shall represent the address of storage that is aligned as follows:
+ // (3.2) Otherwise, if the allocation function is named operator new[],
+ // the storage is aligned for any object that does not have
+ // new-extended alignment ([basic.align]) and is no larger than the
+ // requested size.
+ // (3.3) Otherwise, the storage is aligned for any object that does not
+ // have new-extended alignment and is of the requested size.
+}
+
/// Adds any function attributes that we know a priori based on
/// the declaration of this function.
///
@@ -14433,6 +14814,8 @@ void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
}
}
+ AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(FD);
+
// If C++ exceptions are enabled but we are told extern "C" functions cannot
// throw, add an implicit nothrow attribute to any extern "C" function we come
// across.
@@ -14538,12 +14921,16 @@ bool Sema::CheckEnumUnderlyingType(TypeSourceInfo *TI) {
if (T->isDependentType())
return false;
+ // This doesn't use 'isIntegralType' despite the error message mentioning
+ // integral type because isIntegralType would also allow enum types in C.
if (const BuiltinType *BT = T->getAs<BuiltinType>())
if (BT->isInteger())
return false;
- Diag(UnderlyingLoc, diag::err_enum_invalid_underlying) << T;
- return true;
+ if (T->isExtIntType())
+ return false;
+
+ return Diag(UnderlyingLoc, diag::err_enum_invalid_underlying) << T;
}
/// Check whether this is a valid redeclaration of a previous enumeration.
@@ -15302,16 +15689,8 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
if (Kind == TTK_Enum && PrevTagDecl->getTagKind() == TTK_Enum) {
const EnumDecl *PrevEnum = cast<EnumDecl>(PrevTagDecl);
-
- // If this is an elaborated-type-specifier for a scoped enumeration,
- // the 'class' keyword is not necessary and not permitted.
- if (TUK == TUK_Reference || TUK == TUK_Friend) {
- if (ScopedEnum)
- Diag(ScopedEnumKWLoc, diag::err_enum_class_reference)
- << PrevEnum->isScoped()
- << FixItHint::CreateRemoval(ScopedEnumKWLoc);
+ if (TUK == TUK_Reference || TUK == TUK_Friend)
return PrevTagDecl;
- }
QualType EnumUnderlyingTy;
if (TypeSourceInfo *TI = EnumUnderlying.dyn_cast<TypeSourceInfo*>())
@@ -15789,7 +16168,7 @@ Decl *Sema::ActOnObjCContainerStartDefinition(Decl *IDecl) {
assert(isa<ObjCContainerDecl>(IDecl) &&
"ActOnObjCContainerStartDefinition - Not ObjCContainerDecl");
DeclContext *OCD = cast<DeclContext>(IDecl);
- assert(getContainingDC(OCD) == CurContext &&
+ assert(OCD->getLexicalParent() == CurContext &&
"The next DeclContext should be lexically contained in the current one.");
CurContext = OCD;
return IDecl;
@@ -15900,6 +16279,10 @@ ExprResult Sema::VerifyBitField(SourceLocation FieldLoc,
IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth) {
+ assert(BitWidth);
+ if (BitWidth->containsErrors())
+ return ExprError();
+
// Default to true; that shouldn't confuse checks for emptiness
if (ZeroWidth)
*ZeroWidth = true;
@@ -15907,8 +16290,9 @@ ExprResult Sema::VerifyBitField(SourceLocation FieldLoc,
// C99 6.7.2.1p4 - verify the field type.
// C++ 9.6p3: A bit-field shall have integral or enumeration type.
if (!FieldTy->isDependentType() && !FieldTy->isIntegralOrEnumerationType()) {
- // Handle incomplete types with specific error.
- if (RequireCompleteType(FieldLoc, FieldTy, diag::err_field_incomplete))
+ // Handle incomplete and sizeless types with a specific error.
+ if (RequireCompleteSizedType(FieldLoc, FieldTy,
+ diag::err_field_incomplete_or_sizeless))
return ExprError();
if (FieldName)
return Diag(FieldLoc, diag::err_not_integral_type_bitfield)
@@ -16118,14 +16502,15 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
// If we receive a broken type, recover by assuming 'int' and
// marking this declaration as invalid.
- if (T.isNull()) {
+ if (T.isNull() || T->containsErrors()) {
InvalidDecl = true;
T = Context.IntTy;
}
QualType EltTy = Context.getBaseElementType(T);
- if (!EltTy->isDependentType()) {
- if (RequireCompleteType(Loc, EltTy, diag::err_field_incomplete)) {
+ if (!EltTy->isDependentType() && !EltTy->containsErrors()) {
+ if (RequireCompleteSizedType(Loc, EltTy,
+ diag::err_field_incomplete_or_sizeless)) {
// Fields of incomplete type force their record to be invalid.
Record->setInvalidDecl();
InvalidDecl = true;
@@ -16214,6 +16599,14 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
BitWidth = nullptr;
ZeroWidth = false;
}
+
+ // Only data members can have in-class initializers.
+ if (BitWidth && !II && InitStyle) {
+ Diag(Loc, diag::err_anon_bitfield_init);
+ InvalidDecl = true;
+ BitWidth = nullptr;
+ ZeroWidth = false;
+ }
}
// Check that 'mutable' is consistent with the type of the declaration.
@@ -16669,8 +17062,9 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
// elsewhere, after synthesized ivars are known.
}
} else if (!FDTy->isDependentType() &&
- RequireCompleteType(FD->getLocation(), FD->getType(),
- diag::err_field_incomplete)) {
+ RequireCompleteSizedType(
+ FD->getLocation(), FD->getType(),
+ diag::err_field_incomplete_or_sizeless)) {
// Incomplete type
FD->setInvalidDecl();
EnclosingDecl->setInvalidDecl();
@@ -16728,8 +17122,8 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
Context, "", UnavailableAttr::IR_ARCFieldWithOwnership,
FD->getLocation()));
} else if (getLangOpts().ObjC &&
- getLangOpts().getGC() != LangOptions::NonGC &&
- Record && !Record->hasObjectMember()) {
+ getLangOpts().getGC() != LangOptions::NonGC && Record &&
+ !Record->hasObjectMember()) {
if (FD->getType()->isObjCObjectPointerType() ||
FD->getType().isObjCGCStrong())
Record->setHasObjectMember(true);
@@ -16793,10 +17187,10 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
I.setAccess((*I)->getAccess());
}
- if (!CXXRecord->isDependentType()) {
- // Add any implicitly-declared members to this class.
- AddImplicitlyDeclaredMembersToClass(CXXRecord);
+ // Add any implicitly-declared members to this class.
+ AddImplicitlyDeclaredMembersToClass(CXXRecord);
+ if (!CXXRecord->isDependentType()) {
if (!CXXRecord->isInvalidDecl()) {
// If we have virtual base classes, we may end up finding multiple
// final overriders for a given virtual function. Check for this
@@ -17355,9 +17749,11 @@ static void CheckForDuplicateEnumValues(Sema &S, ArrayRef<Decl *> Elements,
typedef SmallVector<std::unique_ptr<ECDVector>, 3> DuplicatesVector;
typedef llvm::PointerUnion<EnumConstantDecl*, ECDVector*> DeclOrVector;
+
+ // DenseMaps cannot contain the all ones int64_t value, so use unordered_map.
typedef std::unordered_map<int64_t, DeclOrVector> ValueToVectorMap;
- // Use int64_t as a key to avoid needing special handling for DenseMap keys.
+ // Use int64_t as a key to avoid needing special handling for map keys.
auto EnumConstantToKey = [](const EnumConstantDecl *D) {
llvm::APSInt Val = D->getInitVal();
return Val.isSigned() ? Val.getSExtValue() : Val.getZExtValue();
@@ -17787,7 +18183,13 @@ Decl *Sema::getObjCDeclContext() const {
return (dyn_cast_or_null<ObjCContainerDecl>(CurContext));
}
-Sema::FunctionEmissionStatus Sema::getEmissionStatus(FunctionDecl *FD) {
+Sema::FunctionEmissionStatus Sema::getEmissionStatus(FunctionDecl *FD,
+ bool Final) {
+ // SYCL functions can be template, so we check if they have appropriate
+ // attribute prior to checking if it is a template.
+ if (LangOpts.SYCLIsDevice && FD->hasAttr<SYCLKernelAttr>())
+ return FunctionEmissionStatus::Emitted;
+
// Templates are emitted when they're instantiated.
if (FD->isDependentContext())
return FunctionEmissionStatus::TemplateDiscarded;
@@ -17799,8 +18201,10 @@ Sema::FunctionEmissionStatus Sema::getEmissionStatus(FunctionDecl *FD) {
if (DevTy.hasValue()) {
if (*DevTy == OMPDeclareTargetDeclAttr::DT_Host)
OMPES = FunctionEmissionStatus::OMPDiscarded;
- else if (DeviceKnownEmittedFns.count(FD) > 0)
+ else if (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost ||
+ *DevTy == OMPDeclareTargetDeclAttr::DT_Any) {
OMPES = FunctionEmissionStatus::Emitted;
+ }
}
} else if (LangOpts.OpenMP) {
// In OpenMP 4.5 all the functions are host functions.
@@ -17816,10 +18220,11 @@ Sema::FunctionEmissionStatus Sema::getEmissionStatus(FunctionDecl *FD) {
if (DevTy.hasValue()) {
if (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) {
OMPES = FunctionEmissionStatus::OMPDiscarded;
- } else if (DeviceKnownEmittedFns.count(FD) > 0) {
+ } else if (*DevTy == OMPDeclareTargetDeclAttr::DT_Host ||
+ *DevTy == OMPDeclareTargetDeclAttr::DT_Any)
OMPES = FunctionEmissionStatus::Emitted;
- }
- }
+ } else if (Final)
+ OMPES = FunctionEmissionStatus::Emitted;
}
}
if (OMPES == FunctionEmissionStatus::OMPDiscarded ||
@@ -17854,9 +18259,7 @@ Sema::FunctionEmissionStatus Sema::getEmissionStatus(FunctionDecl *FD) {
// Otherwise, the function is known-emitted if it's in our set of
// known-emitted functions.
- return (DeviceKnownEmittedFns.count(FD) > 0)
- ? FunctionEmissionStatus::Emitted
- : FunctionEmissionStatus::Unknown;
+ return FunctionEmissionStatus::Unknown;
}
bool Sema::shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee) {
diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp
index 5c51b0f9b8cb..1a0594512a60 100644
--- a/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/clang/lib/Sema/SemaDeclAttr.cpp
@@ -225,8 +225,7 @@ static bool checkAttributeAtMostNumArgs(Sema &S, const ParsedAttr &AL,
/// A helper function to provide Attribute Location for the Attr types
/// AND the ParsedAttr.
template <typename AttrInfo>
-static typename std::enable_if<std::is_base_of<Attr, AttrInfo>::value,
- SourceLocation>::type
+static std::enable_if_t<std::is_base_of<Attr, AttrInfo>::value, SourceLocation>
getAttrLoc(const AttrInfo &AL) {
return AL.getLocation();
}
@@ -1100,7 +1099,7 @@ static void handleNoBuiltinAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
AddBuiltinName(BuiltinName);
else
S.Diag(LiteralLoc, diag::warn_attribute_no_builtin_invalid_builtin_name)
- << BuiltinName << AL.getAttrName()->getName();
+ << BuiltinName << AL;
}
// Repeating the same attribute is fine.
@@ -1111,7 +1110,7 @@ static void handleNoBuiltinAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (HasWildcard && Names.size() > 1)
S.Diag(D->getLocation(),
diag::err_attribute_no_builtin_wildcard_or_builtin_name)
- << AL.getAttrName()->getName();
+ << AL;
if (D->hasAttr<NoBuiltinAttr>())
D->dropAttr<NoBuiltinAttr>();
@@ -1177,8 +1176,7 @@ static bool checkForConsumableClass(Sema &S, const CXXMethodDecl *MD,
if (const CXXRecordDecl *RD = ThisType->getAsCXXRecordDecl()) {
if (!RD->hasAttr<ConsumableAttr>()) {
- S.Diag(AL.getLoc(), diag::warn_attr_on_unconsumable_class) <<
- RD->getNameAsString();
+ S.Diag(AL.getLoc(), diag::warn_attr_on_unconsumable_class) << RD;
return false;
}
@@ -1625,6 +1623,10 @@ void Sema::AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
<< E->getSourceRange();
return;
}
+
+ if (I > Sema::MaximumAlignment)
+ Diag(CI.getLoc(), diag::warn_assume_aligned_too_great)
+ << CI.getRange() << Sema::MaximumAlignment;
}
if (OE) {
@@ -1663,7 +1665,8 @@ void Sema::AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
return;
QualType Ty = getFunctionOrMethodParamType(D, Idx.getASTIndex());
- if (!Ty->isDependentType() && !Ty->isIntegralType(Context)) {
+ if (!Ty->isDependentType() && !Ty->isIntegralType(Context) &&
+ !Ty->isAlignValT()) {
Diag(ParamExpr->getBeginLoc(), diag::err_attribute_integers_only)
<< &TmpAttr
<< FuncDecl->getParamDecl(Idx.getASTIndex())->getSourceRange();
@@ -1989,6 +1992,21 @@ static void handleCommonAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(CA);
}
+static void handleCmseNSEntryAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (S.LangOpts.CPlusPlus && !D->getDeclContext()->isExternCContext()) {
+ S.Diag(AL.getLoc(), diag::err_attribute_not_clinkage) << AL;
+ return;
+ }
+
+ const auto *FD = cast<FunctionDecl>(D);
+ if (!FD->isExternallyVisible()) {
+ S.Diag(AL.getLoc(), diag::warn_attribute_cmse_entry_static);
+ return;
+ }
+
+ D->addAttr(::new (S.Context) CmseNSEntryAttr(S.Context, AL));
+}
+
static void handleNakedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (checkAttrMutualExclusion<DisableTailCallsAttr>(S, D, AL))
return;
@@ -2809,6 +2827,12 @@ static void handleWarnUnusedResult(Sema &S, Decl *D, const ParsedAttr &AL) {
StringRef Str;
if ((AL.isCXX11Attribute() || AL.isC2xAttribute()) && !AL.getScopeName()) {
+ // The standard attribute cannot be applied to variable declarations such
+ // as a function pointer.
+ if (isa<VarDecl>(D))
+ S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type_str)
+ << AL << "functions, classes, or enumerations";
+
// If this is spelled as the standard C++17 attribute, but not in C++17,
// warn about using it as an extension. If there are attribute arguments,
// then claim it's a C++2a extension instead.
@@ -2816,8 +2840,8 @@ static void handleWarnUnusedResult(Sema &S, Decl *D, const ParsedAttr &AL) {
// extension warning for C2x mode.
const LangOptions &LO = S.getLangOpts();
if (AL.getNumArgs() == 1) {
- if (LO.CPlusPlus && !LO.CPlusPlus2a)
- S.Diag(AL.getLoc(), diag::ext_cxx2a_attr) << AL;
+ if (LO.CPlusPlus && !LO.CPlusPlus20)
+ S.Diag(AL.getLoc(), diag::ext_cxx20_attr) << AL;
// Since this this is spelled [[nodiscard]], get the optional string
// literal. If in C++ mode, but not in C++2a mode, diagnose as an
@@ -3672,7 +3696,7 @@ void Sema::AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E) {
if (!T->isDependentType() && !T->isAnyPointerType() &&
!T->isReferenceType() && !T->isMemberPointerType()) {
Diag(AttrLoc, diag::warn_attribute_pointer_or_reference_only)
- << &TmpAttr /*TmpAttr.getName()*/ << T << D->getSourceRange();
+ << &TmpAttr << T << D->getSourceRange();
return;
}
@@ -3809,13 +3833,12 @@ void Sema::AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
}
}
- // Alignment calculations can wrap around if it's greater than 2**28.
- unsigned MaxValidAlignment =
- Context.getTargetInfo().getTriple().isOSBinFormatCOFF() ? 8192
- : 268435456;
- if (AlignVal > MaxValidAlignment) {
- Diag(AttrLoc, diag::err_attribute_aligned_too_great) << MaxValidAlignment
- << E->getSourceRange();
+ unsigned MaximumAlignment = Sema::MaximumAlignment;
+ if (Context.getTargetInfo().getTriple().isOSBinFormatCOFF())
+ MaximumAlignment = std::min(MaximumAlignment, 8192u);
+ if (AlignVal > MaximumAlignment) {
+ Diag(AttrLoc, diag::err_attribute_aligned_too_great)
+ << MaximumAlignment << E->getSourceRange();
return;
}
@@ -3865,6 +3888,7 @@ void Sema::CheckAlignasUnderalignment(Decl *D) {
// not specify an alignment that is less strict than the alignment that
// would otherwise be required for the entity being declared.
AlignedAttr *AlignasAttr = nullptr;
+ AlignedAttr *LastAlignedAttr = nullptr;
unsigned Align = 0;
for (auto *I : D->specific_attrs<AlignedAttr>()) {
if (I->isAlignmentDependent())
@@ -3872,9 +3896,13 @@ void Sema::CheckAlignasUnderalignment(Decl *D) {
if (I->isAlignas())
AlignasAttr = I;
Align = std::max(Align, I->getAlignment(Context));
+ LastAlignedAttr = I;
}
- if (AlignasAttr && Align) {
+ if (Align && DiagTy->isSizelessType()) {
+ Diag(LastAlignedAttr->getLocation(), diag::err_attribute_sizeless_type)
+ << LastAlignedAttr << DiagTy;
+ } else if (AlignasAttr && Align) {
CharUnits RequestedAlign = Context.toCharUnitsFromBits(Align);
CharUnits NaturalAlign = Context.getTypeAlignInChars(UnderlyingTy);
if (NaturalAlign > RequestedAlign)
@@ -3907,15 +3935,15 @@ bool Sema::checkMSInheritanceAttrOnDefinition(
Diag(Range.getBegin(), diag::err_mismatched_ms_inheritance)
<< 0 /*definition*/;
- Diag(RD->getDefinition()->getLocation(), diag::note_defined_here)
- << RD->getNameAsString();
+ Diag(RD->getDefinition()->getLocation(), diag::note_defined_here) << RD;
return true;
}
/// parseModeAttrArg - Parses attribute mode string and returns parsed type
/// attribute.
static void parseModeAttrArg(Sema &S, StringRef Str, unsigned &DestWidth,
- bool &IntegerMode, bool &ComplexMode) {
+ bool &IntegerMode, bool &ComplexMode,
+ bool &ExplicitIEEE) {
IntegerMode = true;
ComplexMode = false;
switch (Str.size()) {
@@ -3936,7 +3964,12 @@ static void parseModeAttrArg(Sema &S, StringRef Str, unsigned &DestWidth,
case 'X':
DestWidth = 96;
break;
+ case 'K': // KFmode - IEEE quad precision (__float128)
+ ExplicitIEEE = true;
+ DestWidth = Str[1] == 'I' ? 0 : 128;
+ break;
case 'T':
+ ExplicitIEEE = false;
DestWidth = 128;
break;
}
@@ -3997,6 +4030,7 @@ void Sema::AddModeAttr(Decl *D, const AttributeCommonInfo &CI,
unsigned DestWidth = 0;
bool IntegerMode = true;
bool ComplexMode = false;
+ bool ExplicitIEEE = false;
llvm::APInt VectorSize(64, 0);
if (Str.size() >= 4 && Str[0] == 'V') {
// Minimal length of vector mode is 4: 'V' + NUMBER(>=1) + TYPE(>=2).
@@ -4009,7 +4043,7 @@ void Sema::AddModeAttr(Decl *D, const AttributeCommonInfo &CI,
!Str.substr(1, VectorStringLength).getAsInteger(10, VectorSize) &&
VectorSize.isPowerOf2()) {
parseModeAttrArg(*this, Str.substr(VectorStringLength + 1), DestWidth,
- IntegerMode, ComplexMode);
+ IntegerMode, ComplexMode, ExplicitIEEE);
// Avoid duplicate warning from template instantiation.
if (!InInstantiation)
Diag(AttrLoc, diag::warn_vector_mode_deprecated);
@@ -4019,7 +4053,8 @@ void Sema::AddModeAttr(Decl *D, const AttributeCommonInfo &CI,
}
if (!VectorSize)
- parseModeAttrArg(*this, Str, DestWidth, IntegerMode, ComplexMode);
+ parseModeAttrArg(*this, Str, DestWidth, IntegerMode, ComplexMode,
+ ExplicitIEEE);
// FIXME: Sync this with InitializePredefinedMacros; we need to match int8_t
// and friends, at least with glibc.
@@ -4061,8 +4096,9 @@ void Sema::AddModeAttr(Decl *D, const AttributeCommonInfo &CI,
Diag(AttrLoc, diag::err_enum_mode_vector_type) << Name << CI.getRange();
return;
}
- bool IntegralOrAnyEnumType =
- OldElemTy->isIntegralOrEnumerationType() || OldElemTy->getAs<EnumType>();
+ bool IntegralOrAnyEnumType = (OldElemTy->isIntegralOrEnumerationType() &&
+ !OldElemTy->isExtIntType()) ||
+ OldElemTy->getAs<EnumType>();
if (!OldElemTy->getAs<BuiltinType>() && !OldElemTy->isComplexType() &&
!IntegralOrAnyEnumType)
@@ -4084,7 +4120,7 @@ void Sema::AddModeAttr(Decl *D, const AttributeCommonInfo &CI,
NewElemTy = Context.getIntTypeForBitwidth(DestWidth,
OldElemTy->isSignedIntegerType());
else
- NewElemTy = Context.getRealTypeForBitwidth(DestWidth);
+ NewElemTy = Context.getRealTypeForBitwidth(DestWidth, ExplicitIEEE);
if (NewElemTy.isNull()) {
Diag(AttrLoc, diag::err_machine_mode) << 1 /*Unsupported*/ << Name;
@@ -4333,6 +4369,12 @@ static void handleGlobalAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
S.Diag(FD->getBeginLoc(), diag::warn_kern_is_inline) << FD;
D->addAttr(::new (S.Context) CUDAGlobalAttr(S.Context, AL));
+ // In host compilation the kernel is emitted as a stub function, which is
+ // a helper function for launching the kernel. The instructions in the helper
+ // function has nothing to do with the source code of the kernel. Do not emit
+ // debug info for the stub function to avoid confusing the debugger.
+ if (S.LangOpts.HIP && !S.LangOpts.CUDAIsDevice)
+ D->addAttr(NoDebugAttr::CreateImplicit(S.Context));
}
static void handleGNUInlineAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
@@ -4924,9 +4966,9 @@ static void handlePatchableFunctionEntryAttr(Sema &S, Decl *D,
Expr *Arg = AL.getArgAsExpr(1);
if (!checkUInt32Argument(S, AL, Arg, Offset, 1, true))
return;
- if (Offset) {
+ if (Count < Offset) {
S.Diag(getAttrLoc(AL), diag::err_attribute_argument_out_of_range)
- << &AL << 0 << 0 << Arg->getBeginLoc();
+ << &AL << 0 << Count << Arg->getBeginLoc();
return;
}
}
@@ -4934,17 +4976,58 @@ static void handlePatchableFunctionEntryAttr(Sema &S, Decl *D,
PatchableFunctionEntryAttr(S.Context, AL, Count, Offset));
}
-static bool ArmMveAliasValid(unsigned BuiltinID, StringRef AliasName) {
+namespace {
+struct IntrinToName {
+ uint32_t Id;
+ int32_t FullName;
+ int32_t ShortName;
+};
+} // unnamed namespace
+
+static bool ArmBuiltinAliasValid(unsigned BuiltinID, StringRef AliasName,
+ ArrayRef<IntrinToName> Map,
+ const char *IntrinNames) {
if (AliasName.startswith("__arm_"))
AliasName = AliasName.substr(6);
- switch (BuiltinID) {
+ const IntrinToName *It = std::lower_bound(
+ Map.begin(), Map.end(), BuiltinID,
+ [](const IntrinToName &L, unsigned Id) { return L.Id < Id; });
+ if (It == Map.end() || It->Id != BuiltinID)
+ return false;
+ StringRef FullName(&IntrinNames[It->FullName]);
+ if (AliasName == FullName)
+ return true;
+ if (It->ShortName == -1)
+ return false;
+ StringRef ShortName(&IntrinNames[It->ShortName]);
+ return AliasName == ShortName;
+}
+
+static bool ArmMveAliasValid(unsigned BuiltinID, StringRef AliasName) {
#include "clang/Basic/arm_mve_builtin_aliases.inc"
+ // The included file defines:
+ // - ArrayRef<IntrinToName> Map
+ // - const char IntrinNames[]
+ return ArmBuiltinAliasValid(BuiltinID, AliasName, Map, IntrinNames);
+}
+
+static bool ArmCdeAliasValid(unsigned BuiltinID, StringRef AliasName) {
+#include "clang/Basic/arm_cde_builtin_aliases.inc"
+ return ArmBuiltinAliasValid(BuiltinID, AliasName, Map, IntrinNames);
+}
+
+static bool ArmSveAliasValid(unsigned BuiltinID, StringRef AliasName) {
+ switch (BuiltinID) {
default:
return false;
+#define GET_SVE_BUILTINS
+#define BUILTIN(name, types, attr) case SVE::BI##name:
+#include "clang/Basic/arm_sve_builtins.inc"
+ return true;
}
}
-static void handleArmMveAliasAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+static void handleArmBuiltinAliasAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!AL.isArgIdent(0)) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
<< AL << 1 << AANT_ArgumentIdentifier;
@@ -4953,14 +5036,17 @@ static void handleArmMveAliasAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
IdentifierInfo *Ident = AL.getArgAsIdent(0)->Ident;
unsigned BuiltinID = Ident->getBuiltinID();
+ StringRef AliasName = cast<FunctionDecl>(D)->getIdentifier()->getName();
- if (!ArmMveAliasValid(BuiltinID,
- cast<FunctionDecl>(D)->getIdentifier()->getName())) {
- S.Diag(AL.getLoc(), diag::err_attribute_arm_mve_alias);
+ bool IsAArch64 = S.Context.getTargetInfo().getTriple().isAArch64();
+ if ((IsAArch64 && !ArmSveAliasValid(BuiltinID, AliasName)) ||
+ (!IsAArch64 && !ArmMveAliasValid(BuiltinID, AliasName) &&
+ !ArmCdeAliasValid(BuiltinID, AliasName))) {
+ S.Diag(AL.getLoc(), diag::err_attribute_arm_builtin_alias);
return;
}
- D->addAttr(::new (S.Context) ArmMveAliasAttr(S.Context, AL, Ident));
+ D->addAttr(::new (S.Context) ArmBuiltinAliasAttr(S.Context, AL, Ident));
}
//===----------------------------------------------------------------------===//
@@ -5407,9 +5493,9 @@ static void handleObjCPreciseLifetimeAttr(Sema &S, Decl *D,
//===----------------------------------------------------------------------===//
UuidAttr *Sema::mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
- StringRef Uuid) {
+ StringRef UuidAsWritten, MSGuidDecl *GuidDecl) {
if (const auto *UA = D->getAttr<UuidAttr>()) {
- if (UA->getGuid().equals_lower(Uuid))
+ if (declaresSameEntity(UA->getGuidDecl(), GuidDecl))
return nullptr;
if (!UA->getGuid().empty()) {
Diag(UA->getLocation(), diag::err_mismatched_uuid);
@@ -5418,7 +5504,7 @@ UuidAttr *Sema::mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
}
}
- return ::new (Context) UuidAttr(Context, CI, Uuid);
+ return ::new (Context) UuidAttr(Context, CI, UuidAsWritten, GuidDecl);
}
static void handleUuidAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
@@ -5428,13 +5514,14 @@ static void handleUuidAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
}
- StringRef StrRef;
+ StringRef OrigStrRef;
SourceLocation LiteralLoc;
- if (!S.checkStringLiteralArgumentAttr(AL, 0, StrRef, &LiteralLoc))
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, OrigStrRef, &LiteralLoc))
return;
// GUID format is "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" or
// "{XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}", normalize to the former.
+ StringRef StrRef = OrigStrRef;
if (StrRef.size() == 38 && StrRef.front() == '{' && StrRef.back() == '}')
StrRef = StrRef.drop_front().drop_back();
@@ -5456,6 +5543,16 @@ static void handleUuidAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
}
+ // Convert to our parsed format and canonicalize.
+ MSGuidDecl::Parts Parsed;
+ StrRef.substr(0, 8).getAsInteger(16, Parsed.Part1);
+ StrRef.substr(9, 4).getAsInteger(16, Parsed.Part2);
+ StrRef.substr(14, 4).getAsInteger(16, Parsed.Part3);
+ for (unsigned i = 0; i != 8; ++i)
+ StrRef.substr(19 + 2 * i + (i >= 2 ? 1 : 0), 2)
+ .getAsInteger(16, Parsed.Part4And5[i]);
+ MSGuidDecl *Guid = S.Context.getMSGuidDecl(Parsed);
+
// FIXME: It'd be nice to also emit a fixit removing uuid(...) (and, if it's
// the only thing in the [] list, the [] too), and add an insertion of
// __declspec(uuid(...)). But sadly, neither the SourceLocs of the commas
@@ -5465,7 +5562,7 @@ static void handleUuidAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (AL.isMicrosoftAttribute()) // Check for [uuid(...)] spelling.
S.Diag(AL.getLoc(), diag::warn_atl_uuid_deprecated);
- UuidAttr *UA = S.mergeUuidAttr(D, AL, StrRef);
+ UuidAttr *UA = S.mergeUuidAttr(D, AL, OrigStrRef, Guid);
if (UA)
D->addAttr(UA);
}
@@ -5795,45 +5892,75 @@ static void handleWebAssemblyExportNameAttr(Sema &S, Decl *D, const ParsedAttr &
D->addAttr(UsedAttr::CreateImplicit(S.Context));
}
-static void handleWebAssemblyImportModuleAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!isFunctionOrMethod(D)) {
- S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
- << "'import_module'" << ExpectedFunction;
- return;
+WebAssemblyImportModuleAttr *
+Sema::mergeImportModuleAttr(Decl *D, const WebAssemblyImportModuleAttr &AL) {
+ auto *FD = cast<FunctionDecl>(D);
+
+ if (const auto *ExistingAttr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
+ if (ExistingAttr->getImportModule() == AL.getImportModule())
+ return nullptr;
+ Diag(ExistingAttr->getLocation(), diag::warn_mismatched_import) << 0
+ << ExistingAttr->getImportModule() << AL.getImportModule();
+ Diag(AL.getLoc(), diag::note_previous_attribute);
+ return nullptr;
}
+ if (FD->hasBody()) {
+ Diag(AL.getLoc(), diag::warn_import_on_definition) << 0;
+ return nullptr;
+ }
+ return ::new (Context) WebAssemblyImportModuleAttr(Context, AL,
+ AL.getImportModule());
+}
+WebAssemblyImportNameAttr *
+Sema::mergeImportNameAttr(Decl *D, const WebAssemblyImportNameAttr &AL) {
auto *FD = cast<FunctionDecl>(D);
- if (FD->isThisDeclarationADefinition()) {
- S.Diag(D->getLocation(), diag::err_alias_is_definition) << FD << 0;
- return;
+
+ if (const auto *ExistingAttr = FD->getAttr<WebAssemblyImportNameAttr>()) {
+ if (ExistingAttr->getImportName() == AL.getImportName())
+ return nullptr;
+ Diag(ExistingAttr->getLocation(), diag::warn_mismatched_import) << 1
+ << ExistingAttr->getImportName() << AL.getImportName();
+ Diag(AL.getLoc(), diag::note_previous_attribute);
+ return nullptr;
+ }
+ if (FD->hasBody()) {
+ Diag(AL.getLoc(), diag::warn_import_on_definition) << 1;
+ return nullptr;
}
+ return ::new (Context) WebAssemblyImportNameAttr(Context, AL,
+ AL.getImportName());
+}
+
+static void
+handleWebAssemblyImportModuleAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ auto *FD = cast<FunctionDecl>(D);
StringRef Str;
SourceLocation ArgLoc;
if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
return;
+ if (FD->hasBody()) {
+ S.Diag(AL.getLoc(), diag::warn_import_on_definition) << 0;
+ return;
+ }
FD->addAttr(::new (S.Context)
WebAssemblyImportModuleAttr(S.Context, AL, Str));
}
-static void handleWebAssemblyImportNameAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!isFunctionOrMethod(D)) {
- S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
- << "'import_name'" << ExpectedFunction;
- return;
- }
-
+static void
+handleWebAssemblyImportNameAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
auto *FD = cast<FunctionDecl>(D);
- if (FD->isThisDeclarationADefinition()) {
- S.Diag(D->getLocation(), diag::err_alias_is_definition) << FD << 0;
- return;
- }
StringRef Str;
SourceLocation ArgLoc;
if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
return;
+ if (FD->hasBody()) {
+ S.Diag(AL.getLoc(), diag::warn_import_on_definition) << 1;
+ return;
+ }
FD->addAttr(::new (S.Context) WebAssemblyImportNameAttr(S.Context, AL, Str));
}
@@ -6199,11 +6326,6 @@ static void handleCapabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
!S.checkStringLiteralArgumentAttr(AL, 0, N, &LiteralLoc))
return;
- // Currently, there are only two names allowed for a capability: role and
- // mutex (case insensitive). Diagnose other capability names.
- if (!N.equals_lower("mutex") && !N.equals_lower("role"))
- S.Diag(LiteralLoc, diag::warn_invalid_capability_name) << N;
-
D->addAttr(::new (S.Context) CapabilityAttr(S.Context, AL, N));
}
@@ -6567,7 +6689,9 @@ static void handleObjCExternallyRetainedAttr(Sema &S, Decl *D,
// If D is a function-like declaration (method, block, or function), then we
// make every parameter psuedo-strong.
- for (unsigned I = 0, E = getFunctionOrMethodNumParams(D); I != E; ++I) {
+ unsigned NumParams =
+ hasFunctionProto(D) ? getFunctionOrMethodNumParams(D) : 0;
+ for (unsigned I = 0; I != NumParams; ++I) {
auto *PVD = const_cast<ParmVarDecl *>(getFunctionOrMethodParam(D, I));
QualType Ty = PVD->getType();
@@ -6620,7 +6744,7 @@ static void handleMSAllocatorAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
handleSimpleAttribute<MSAllocatorAttr>(S, D, AL);
}
-static void handeAcquireHandleAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+static void handleAcquireHandleAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (AL.isUsedAsTypeAttr())
return;
// Warn if the parameter is definitely not an output parameter.
@@ -6700,6 +6824,8 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
switch (AL.getKind()) {
default:
+ if (AL.getInfo().handleDeclAttribute(S, D, AL) != ParsedAttrInfo::NotHandled)
+ break;
if (!AL.isStmtAttr()) {
// Type attributes are handled elsewhere; silently move on.
assert(AL.isTypeAttr() && "Non-type attribute not handled");
@@ -6722,15 +6848,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleSimpleAttributeWithExclusions<Mips16Attr, MicroMipsAttr,
MipsInterruptAttr>(S, D, AL);
break;
- case ParsedAttr::AT_NoMips16:
- handleSimpleAttribute<NoMips16Attr>(S, D, AL);
- break;
case ParsedAttr::AT_MicroMips:
handleSimpleAttributeWithExclusions<MicroMipsAttr, Mips16Attr>(S, D, AL);
break;
- case ParsedAttr::AT_NoMicroMips:
- handleSimpleAttribute<NoMicroMipsAttr>(S, D, AL);
- break;
case ParsedAttr::AT_MipsLongCall:
handleSimpleAttributeWithExclusions<MipsLongCallAttr, MipsShortCallAttr>(
S, D, AL);
@@ -6766,9 +6886,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_WebAssemblyImportName:
handleWebAssemblyImportNameAttr(S, D, AL);
break;
- case ParsedAttr::AT_IBAction:
- handleSimpleAttribute<IBActionAttr>(S, D, AL);
- break;
case ParsedAttr::AT_IBOutlet:
handleIBOutlet(S, D, AL);
break;
@@ -6793,9 +6910,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_AlwaysInline:
handleAlwaysInlineAttr(S, D, AL);
break;
- case ParsedAttr::AT_Artificial:
- handleSimpleAttribute<ArtificialAttr>(S, D, AL);
- break;
case ParsedAttr::AT_AnalyzerNoReturn:
handleAnalyzerNoReturnAttr(S, D, AL);
break;
@@ -6825,16 +6939,20 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handlePassObjectSizeAttr(S, D, AL);
break;
case ParsedAttr::AT_Constructor:
- handleConstructorAttr(S, D, AL);
- break;
- case ParsedAttr::AT_CXX11NoReturn:
- handleSimpleAttribute<CXX11NoReturnAttr>(S, D, AL);
+ if (S.Context.getTargetInfo().getTriple().isOSAIX())
+ llvm::report_fatal_error(
+ "'constructor' attribute is not yet supported on AIX");
+ else
+ handleConstructorAttr(S, D, AL);
break;
case ParsedAttr::AT_Deprecated:
handleDeprecatedAttr(S, D, AL);
break;
case ParsedAttr::AT_Destructor:
- handleDestructorAttr(S, D, AL);
+ if (S.Context.getTargetInfo().getTriple().isOSAIX())
+ llvm::report_fatal_error("'destructor' attribute is not yet supported on AIX");
+ else
+ handleDestructorAttr(S, D, AL);
break;
case ParsedAttr::AT_EnableIf:
handleEnableIfAttr(S, D, AL);
@@ -6857,15 +6975,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_OptimizeNone:
handleOptimizeNoneAttr(S, D, AL);
break;
- case ParsedAttr::AT_FlagEnum:
- handleSimpleAttribute<FlagEnumAttr>(S, D, AL);
- break;
case ParsedAttr::AT_EnumExtensibility:
handleEnumExtensibilityAttr(S, D, AL);
break;
- case ParsedAttr::AT_Flatten:
- handleSimpleAttribute<FlattenAttr>(S, D, AL);
- break;
case ParsedAttr::AT_SYCLKernel:
handleSYCLKernelAttr(S, D, AL);
break;
@@ -6888,9 +7000,15 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_CUDAHost:
handleSimpleAttributeWithExclusions<CUDAHostAttr, CUDAGlobalAttr>(S, D, AL);
break;
- case ParsedAttr::AT_HIPPinnedShadow:
- handleSimpleAttributeWithExclusions<HIPPinnedShadowAttr, CUDADeviceAttr,
- CUDAConstantAttr>(S, D, AL);
+ case ParsedAttr::AT_CUDADeviceBuiltinSurfaceType:
+ handleSimpleAttributeWithExclusions<CUDADeviceBuiltinSurfaceTypeAttr,
+ CUDADeviceBuiltinTextureTypeAttr>(S, D,
+ AL);
+ break;
+ case ParsedAttr::AT_CUDADeviceBuiltinTextureType:
+ handleSimpleAttributeWithExclusions<CUDADeviceBuiltinTextureTypeAttr,
+ CUDADeviceBuiltinSurfaceTypeAttr>(S, D,
+ AL);
break;
case ParsedAttr::AT_GNUInline:
handleGNUInlineAttr(S, D, AL);
@@ -6901,27 +7019,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_Restrict:
handleRestrictAttr(S, D, AL);
break;
- case ParsedAttr::AT_LifetimeBound:
- handleSimpleAttribute<LifetimeBoundAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_MayAlias:
- handleSimpleAttribute<MayAliasAttr>(S, D, AL);
- break;
case ParsedAttr::AT_Mode:
handleModeAttr(S, D, AL);
break;
- case ParsedAttr::AT_NoAlias:
- handleSimpleAttribute<NoAliasAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_NoCommon:
- handleSimpleAttribute<NoCommonAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_NoSplitStack:
- handleSimpleAttribute<NoSplitStackAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_NoUniqueAddress:
- handleSimpleAttribute<NoUniqueAddressAttr>(S, D, AL);
- break;
case ParsedAttr::AT_NonNull:
if (auto *PVD = dyn_cast<ParmVarDecl>(D))
handleNonNullAttrParameter(S, PVD, AL);
@@ -6940,9 +7040,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_AllocAlign:
handleAllocAlignAttr(S, D, AL);
break;
- case ParsedAttr::AT_Overloadable:
- handleSimpleAttribute<OverloadableAttr>(S, D, AL);
- break;
case ParsedAttr::AT_Ownership:
handleOwnershipAttr(S, D, AL);
break;
@@ -6998,9 +7095,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_ObjCRuntimeName:
handleObjCRuntimeName(S, D, AL);
break;
- case ParsedAttr::AT_ObjCRuntimeVisible:
- handleSimpleAttribute<ObjCRuntimeVisibleAttr>(S, D, AL);
- break;
case ParsedAttr::AT_ObjCBoxable:
handleObjCBoxable(S, D, AL);
break;
@@ -7018,12 +7112,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
S.AddXConsumedAttr(D, AL, parsedAttrToRetainOwnershipKind(AL),
/*IsTemplateInstantiation=*/false);
break;
- case ParsedAttr::AT_NSConsumesSelf:
- handleSimpleAttribute<NSConsumesSelfAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_OSConsumesThis:
- handleSimpleAttribute<OSConsumesThisAttr>(S, D, AL);
- break;
case ParsedAttr::AT_OSReturnsRetainedOnZero:
handleSimpleAttributeOrDiagnose<OSReturnsRetainedOnZeroAttr>(
S, D, AL, isValidOSObjectOutParameter(D),
@@ -7057,11 +7145,12 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_VecTypeHint:
handleVecTypeHint(S, D, AL);
break;
- case ParsedAttr::AT_ConstInit:
- handleSimpleAttribute<ConstInitAttr>(S, D, AL);
- break;
case ParsedAttr::AT_InitPriority:
- handleInitPriorityAttr(S, D, AL);
+ if (S.Context.getTargetInfo().getTriple().isOSAIX())
+ llvm::report_fatal_error(
+ "'init_priority' attribute is not yet supported on AIX");
+ else
+ handleInitPriorityAttr(S, D, AL);
break;
case ParsedAttr::AT_Packed:
handlePackedAttr(S, D, AL);
@@ -7090,12 +7179,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_Unavailable:
handleAttrWithMessage<UnavailableAttr>(S, D, AL);
break;
- case ParsedAttr::AT_ArcWeakrefUnavailable:
- handleSimpleAttribute<ArcWeakrefUnavailableAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_ObjCRootClass:
- handleSimpleAttribute<ObjCRootClassAttr>(S, D, AL);
- break;
case ParsedAttr::AT_ObjCDirect:
handleObjCDirectAttr(S, D, AL);
break;
@@ -7103,27 +7186,12 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleObjCDirectMembersAttr(S, D, AL);
handleSimpleAttribute<ObjCDirectMembersAttr>(S, D, AL);
break;
- case ParsedAttr::AT_ObjCNonLazyClass:
- handleSimpleAttribute<ObjCNonLazyClassAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_ObjCSubclassingRestricted:
- handleSimpleAttribute<ObjCSubclassingRestrictedAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_ObjCClassStub:
- handleSimpleAttribute<ObjCClassStubAttr>(S, D, AL);
- break;
case ParsedAttr::AT_ObjCExplicitProtocolImpl:
handleObjCSuppresProtocolAttr(S, D, AL);
break;
- case ParsedAttr::AT_ObjCRequiresPropertyDefs:
- handleSimpleAttribute<ObjCRequiresPropertyDefsAttr>(S, D, AL);
- break;
case ParsedAttr::AT_Unused:
handleUnusedAttr(S, D, AL);
break;
- case ParsedAttr::AT_ReturnsTwice:
- handleSimpleAttribute<ReturnsTwiceAttr>(S, D, AL);
- break;
case ParsedAttr::AT_NotTailCalled:
handleSimpleAttributeWithExclusions<NotTailCalledAttr, AlwaysInlineAttr>(
S, D, AL);
@@ -7132,24 +7200,15 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleSimpleAttributeWithExclusions<DisableTailCallsAttr, NakedAttr>(S, D,
AL);
break;
- case ParsedAttr::AT_Used:
- handleSimpleAttribute<UsedAttr>(S, D, AL);
- break;
case ParsedAttr::AT_Visibility:
handleVisibilityAttr(S, D, AL, false);
break;
case ParsedAttr::AT_TypeVisibility:
handleVisibilityAttr(S, D, AL, true);
break;
- case ParsedAttr::AT_WarnUnused:
- handleSimpleAttribute<WarnUnusedAttr>(S, D, AL);
- break;
case ParsedAttr::AT_WarnUnusedResult:
handleWarnUnusedResult(S, D, AL);
break;
- case ParsedAttr::AT_Weak:
- handleSimpleAttribute<WeakAttr>(S, D, AL);
- break;
case ParsedAttr::AT_WeakRef:
handleWeakRefAttr(S, D, AL);
break;
@@ -7159,9 +7218,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_TransparentUnion:
handleTransparentUnionAttr(S, D, AL);
break;
- case ParsedAttr::AT_ObjCException:
- handleSimpleAttribute<ObjCExceptionAttr>(S, D, AL);
- break;
case ParsedAttr::AT_ObjCMethodFamily:
handleObjCMethodFamilyAttr(S, D, AL);
break;
@@ -7177,36 +7233,14 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_Sentinel:
handleSentinelAttr(S, D, AL);
break;
- case ParsedAttr::AT_Const:
- handleSimpleAttribute<ConstAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_Pure:
- handleSimpleAttribute<PureAttr>(S, D, AL);
- break;
case ParsedAttr::AT_Cleanup:
handleCleanupAttr(S, D, AL);
break;
case ParsedAttr::AT_NoDebug:
handleNoDebugAttr(S, D, AL);
break;
- case ParsedAttr::AT_NoDuplicate:
- handleSimpleAttribute<NoDuplicateAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_Convergent:
- handleSimpleAttribute<ConvergentAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_NoInline:
- handleSimpleAttribute<NoInlineAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_NoInstrumentFunction: // Interacts with -pg.
- handleSimpleAttribute<NoInstrumentFunctionAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_NoStackProtector:
- // Interacts with -fstack-protector options.
- handleSimpleAttribute<NoStackProtectorAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_CFICanonicalJumpTable:
- handleSimpleAttribute<CFICanonicalJumpTableAttr>(S, D, AL);
+ case ParsedAttr::AT_CmseNSEntry:
+ handleCmseNSEntryAttr(S, D, AL);
break;
case ParsedAttr::AT_StdCall:
case ParsedAttr::AT_CDecl:
@@ -7232,9 +7266,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_Pointer:
handleLifetimeCategoryAttr(S, D, AL);
break;
- case ParsedAttr::AT_OpenCLKernel:
- handleSimpleAttribute<OpenCLKernelAttr>(S, D, AL);
- break;
case ParsedAttr::AT_OpenCLAccess:
handleOpenCLAccessAttr(S, D, AL);
break;
@@ -7253,38 +7284,17 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_InternalLinkage:
handleInternalLinkageAttr(S, D, AL);
break;
- case ParsedAttr::AT_ExcludeFromExplicitInstantiation:
- handleSimpleAttribute<ExcludeFromExplicitInstantiationAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_LTOVisibilityPublic:
- handleSimpleAttribute<LTOVisibilityPublicAttr>(S, D, AL);
- break;
// Microsoft attributes:
- case ParsedAttr::AT_EmptyBases:
- handleSimpleAttribute<EmptyBasesAttr>(S, D, AL);
- break;
case ParsedAttr::AT_LayoutVersion:
handleLayoutVersion(S, D, AL);
break;
- case ParsedAttr::AT_TrivialABI:
- handleSimpleAttribute<TrivialABIAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_MSNoVTable:
- handleSimpleAttribute<MSNoVTableAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_MSStruct:
- handleSimpleAttribute<MSStructAttr>(S, D, AL);
- break;
case ParsedAttr::AT_Uuid:
handleUuidAttr(S, D, AL);
break;
case ParsedAttr::AT_MSInheritance:
handleMSInheritanceAttr(S, D, AL);
break;
- case ParsedAttr::AT_SelectAny:
- handleSimpleAttribute<SelectAnyAttr>(S, D, AL);
- break;
case ParsedAttr::AT_Thread:
handleDeclspecThreadAttr(S, D, AL);
break;
@@ -7303,24 +7313,15 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_AssertSharedLock:
handleAssertSharedLockAttr(S, D, AL);
break;
- case ParsedAttr::AT_GuardedVar:
- handleSimpleAttribute<GuardedVarAttr>(S, D, AL);
- break;
case ParsedAttr::AT_PtGuardedVar:
handlePtGuardedVarAttr(S, D, AL);
break;
- case ParsedAttr::AT_ScopedLockable:
- handleSimpleAttribute<ScopedLockableAttr>(S, D, AL);
- break;
case ParsedAttr::AT_NoSanitize:
handleNoSanitizeAttr(S, D, AL);
break;
case ParsedAttr::AT_NoSanitizeSpecific:
handleNoSanitizeSpecificAttr(S, D, AL);
break;
- case ParsedAttr::AT_NoThreadSafetyAnalysis:
- handleSimpleAttribute<NoThreadSafetyAnalysisAttr>(S, D, AL);
- break;
case ParsedAttr::AT_GuardedBy:
handleGuardedByAttr(S, D, AL);
break;
@@ -7372,12 +7373,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_Consumable:
handleConsumableAttr(S, D, AL);
break;
- case ParsedAttr::AT_ConsumableAutoCast:
- handleSimpleAttribute<ConsumableAutoCastAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_ConsumableSetOnRead:
- handleSimpleAttribute<ConsumableSetOnReadAttr>(S, D, AL);
- break;
case ParsedAttr::AT_CallableWhen:
handleCallableWhenAttr(S, D, AL);
break;
@@ -7401,16 +7396,8 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_TypeTagForDatatype:
handleTypeTagForDatatypeAttr(S, D, AL);
break;
- case ParsedAttr::AT_AnyX86NoCallerSavedRegisters:
- handleSimpleAttribute<AnyX86NoCallerSavedRegistersAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_RenderScriptKernel:
- handleSimpleAttribute<RenderScriptKernelAttr>(S, D, AL);
- break;
+
// XRay attributes.
- case ParsedAttr::AT_XRayInstrument:
- handleSimpleAttribute<XRayInstrumentAttr>(S, D, AL);
- break;
case ParsedAttr::AT_XRayLogArgs:
handleXRayLogArgsAttr(S, D, AL);
break;
@@ -7419,11 +7406,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handlePatchableFunctionEntryAttr(S, D, AL);
break;
- // Move semantics attribute.
- case ParsedAttr::AT_Reinitializes:
- handleSimpleAttribute<ReinitializesAttr>(S, D, AL);
- break;
-
case ParsedAttr::AT_AlwaysDestroy:
case ParsedAttr::AT_NoDestroy:
handleDestroyAttr(S, D, AL);
@@ -7433,6 +7415,10 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleUninitializedAttr(S, D, AL);
break;
+ case ParsedAttr::AT_LoaderUninitialized:
+ handleSimpleAttribute<LoaderUninitializedAttr>(S, D, AL);
+ break;
+
case ParsedAttr::AT_ObjCExternallyRetained:
handleObjCExternallyRetainedAttr(S, D, AL);
break;
@@ -7445,12 +7431,12 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleMSAllocatorAttr(S, D, AL);
break;
- case ParsedAttr::AT_ArmMveAlias:
- handleArmMveAliasAttr(S, D, AL);
+ case ParsedAttr::AT_ArmBuiltinAlias:
+ handleArmBuiltinAliasAttr(S, D, AL);
break;
case ParsedAttr::AT_AcquireHandle:
- handeAcquireHandleAttr(S, D, AL);
+ handleAcquireHandleAttr(S, D, AL);
break;
case ParsedAttr::AT_ReleaseHandle:
@@ -7782,534 +7768,6 @@ static void handleDelayedForbiddenType(Sema &S, DelayedDiagnostic &DD,
DD.Triggered = true;
}
-static const AvailabilityAttr *getAttrForPlatform(ASTContext &Context,
- const Decl *D) {
- // Check each AvailabilityAttr to find the one for this platform.
- for (const auto *A : D->attrs()) {
- if (const auto *Avail = dyn_cast<AvailabilityAttr>(A)) {
- // FIXME: this is copied from CheckAvailability. We should try to
- // de-duplicate.
-
- // Check if this is an App Extension "platform", and if so chop off
- // the suffix for matching with the actual platform.
- StringRef ActualPlatform = Avail->getPlatform()->getName();
- StringRef RealizedPlatform = ActualPlatform;
- if (Context.getLangOpts().AppExt) {
- size_t suffix = RealizedPlatform.rfind("_app_extension");
- if (suffix != StringRef::npos)
- RealizedPlatform = RealizedPlatform.slice(0, suffix);
- }
-
- StringRef TargetPlatform = Context.getTargetInfo().getPlatformName();
-
- // Match the platform name.
- if (RealizedPlatform == TargetPlatform)
- return Avail;
- }
- }
- return nullptr;
-}
-
-/// The diagnostic we should emit for \c D, and the declaration that
-/// originated it, or \c AR_Available.
-///
-/// \param D The declaration to check.
-/// \param Message If non-null, this will be populated with the message from
-/// the availability attribute that is selected.
-/// \param ClassReceiver If we're checking the the method of a class message
-/// send, the class. Otherwise nullptr.
-static std::pair<AvailabilityResult, const NamedDecl *>
-ShouldDiagnoseAvailabilityOfDecl(Sema &S, const NamedDecl *D,
- std::string *Message,
- ObjCInterfaceDecl *ClassReceiver) {
- AvailabilityResult Result = D->getAvailability(Message);
-
- // For typedefs, if the typedef declaration appears available look
- // to the underlying type to see if it is more restrictive.
- while (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
- if (Result == AR_Available) {
- if (const auto *TT = TD->getUnderlyingType()->getAs<TagType>()) {
- D = TT->getDecl();
- Result = D->getAvailability(Message);
- continue;
- }
- }
- break;
- }
-
- // Forward class declarations get their attributes from their definition.
- if (const auto *IDecl = dyn_cast<ObjCInterfaceDecl>(D)) {
- if (IDecl->getDefinition()) {
- D = IDecl->getDefinition();
- Result = D->getAvailability(Message);
- }
- }
-
- if (const auto *ECD = dyn_cast<EnumConstantDecl>(D))
- if (Result == AR_Available) {
- const DeclContext *DC = ECD->getDeclContext();
- if (const auto *TheEnumDecl = dyn_cast<EnumDecl>(DC)) {
- Result = TheEnumDecl->getAvailability(Message);
- D = TheEnumDecl;
- }
- }
-
- // For +new, infer availability from -init.
- if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
- if (S.NSAPIObj && ClassReceiver) {
- ObjCMethodDecl *Init = ClassReceiver->lookupInstanceMethod(
- S.NSAPIObj->getInitSelector());
- if (Init && Result == AR_Available && MD->isClassMethod() &&
- MD->getSelector() == S.NSAPIObj->getNewSelector() &&
- MD->definedInNSObject(S.getASTContext())) {
- Result = Init->getAvailability(Message);
- D = Init;
- }
- }
- }
-
- return {Result, D};
-}
-
-
-/// whether we should emit a diagnostic for \c K and \c DeclVersion in
-/// the context of \c Ctx. For example, we should emit an unavailable diagnostic
-/// in a deprecated context, but not the other way around.
-static bool
-ShouldDiagnoseAvailabilityInContext(Sema &S, AvailabilityResult K,
- VersionTuple DeclVersion, Decl *Ctx,
- const NamedDecl *OffendingDecl) {
- assert(K != AR_Available && "Expected an unavailable declaration here!");
-
- // Checks if we should emit the availability diagnostic in the context of C.
- auto CheckContext = [&](const Decl *C) {
- if (K == AR_NotYetIntroduced) {
- if (const AvailabilityAttr *AA = getAttrForPlatform(S.Context, C))
- if (AA->getIntroduced() >= DeclVersion)
- return true;
- } else if (K == AR_Deprecated) {
- if (C->isDeprecated())
- return true;
- } else if (K == AR_Unavailable) {
- // It is perfectly fine to refer to an 'unavailable' Objective-C method
- // when it is referenced from within the @implementation itself. In this
- // context, we interpret unavailable as a form of access control.
- if (const auto *MD = dyn_cast<ObjCMethodDecl>(OffendingDecl)) {
- if (const auto *Impl = dyn_cast<ObjCImplDecl>(C)) {
- if (MD->getClassInterface() == Impl->getClassInterface())
- return true;
- }
- }
- }
-
- if (C->isUnavailable())
- return true;
- return false;
- };
-
- do {
- if (CheckContext(Ctx))
- return false;
-
- // An implementation implicitly has the availability of the interface.
- // Unless it is "+load" method.
- if (const auto *MethodD = dyn_cast<ObjCMethodDecl>(Ctx))
- if (MethodD->isClassMethod() &&
- MethodD->getSelector().getAsString() == "load")
- return true;
-
- if (const auto *CatOrImpl = dyn_cast<ObjCImplDecl>(Ctx)) {
- if (const ObjCInterfaceDecl *Interface = CatOrImpl->getClassInterface())
- if (CheckContext(Interface))
- return false;
- }
- // A category implicitly has the availability of the interface.
- else if (const auto *CatD = dyn_cast<ObjCCategoryDecl>(Ctx))
- if (const ObjCInterfaceDecl *Interface = CatD->getClassInterface())
- if (CheckContext(Interface))
- return false;
- } while ((Ctx = cast_or_null<Decl>(Ctx->getDeclContext())));
-
- return true;
-}
-
-static bool
-shouldDiagnoseAvailabilityByDefault(const ASTContext &Context,
- const VersionTuple &DeploymentVersion,
- const VersionTuple &DeclVersion) {
- const auto &Triple = Context.getTargetInfo().getTriple();
- VersionTuple ForceAvailabilityFromVersion;
- switch (Triple.getOS()) {
- case llvm::Triple::IOS:
- case llvm::Triple::TvOS:
- ForceAvailabilityFromVersion = VersionTuple(/*Major=*/11);
- break;
- case llvm::Triple::WatchOS:
- ForceAvailabilityFromVersion = VersionTuple(/*Major=*/4);
- break;
- case llvm::Triple::Darwin:
- case llvm::Triple::MacOSX:
- ForceAvailabilityFromVersion = VersionTuple(/*Major=*/10, /*Minor=*/13);
- break;
- default:
- // New targets should always warn about availability.
- return Triple.getVendor() == llvm::Triple::Apple;
- }
- return DeploymentVersion >= ForceAvailabilityFromVersion ||
- DeclVersion >= ForceAvailabilityFromVersion;
-}
-
-static NamedDecl *findEnclosingDeclToAnnotate(Decl *OrigCtx) {
- for (Decl *Ctx = OrigCtx; Ctx;
- Ctx = cast_or_null<Decl>(Ctx->getDeclContext())) {
- if (isa<TagDecl>(Ctx) || isa<FunctionDecl>(Ctx) || isa<ObjCMethodDecl>(Ctx))
- return cast<NamedDecl>(Ctx);
- if (auto *CD = dyn_cast<ObjCContainerDecl>(Ctx)) {
- if (auto *Imp = dyn_cast<ObjCImplDecl>(Ctx))
- return Imp->getClassInterface();
- return CD;
- }
- }
-
- return dyn_cast<NamedDecl>(OrigCtx);
-}
-
-namespace {
-
-struct AttributeInsertion {
- StringRef Prefix;
- SourceLocation Loc;
- StringRef Suffix;
-
- static AttributeInsertion createInsertionAfter(const NamedDecl *D) {
- return {" ", D->getEndLoc(), ""};
- }
- static AttributeInsertion createInsertionAfter(SourceLocation Loc) {
- return {" ", Loc, ""};
- }
- static AttributeInsertion createInsertionBefore(const NamedDecl *D) {
- return {"", D->getBeginLoc(), "\n"};
- }
-};
-
-} // end anonymous namespace
-
-/// Tries to parse a string as ObjC method name.
-///
-/// \param Name The string to parse. Expected to originate from availability
-/// attribute argument.
-/// \param SlotNames The vector that will be populated with slot names. In case
-/// of unsuccessful parsing can contain invalid data.
-/// \returns A number of method parameters if parsing was successful, None
-/// otherwise.
-static Optional<unsigned>
-tryParseObjCMethodName(StringRef Name, SmallVectorImpl<StringRef> &SlotNames,
- const LangOptions &LangOpts) {
- // Accept replacements starting with - or + as valid ObjC method names.
- if (!Name.empty() && (Name.front() == '-' || Name.front() == '+'))
- Name = Name.drop_front(1);
- if (Name.empty())
- return None;
- Name.split(SlotNames, ':');
- unsigned NumParams;
- if (Name.back() == ':') {
- // Remove an empty string at the end that doesn't represent any slot.
- SlotNames.pop_back();
- NumParams = SlotNames.size();
- } else {
- if (SlotNames.size() != 1)
- // Not a valid method name, just a colon-separated string.
- return None;
- NumParams = 0;
- }
- // Verify all slot names are valid.
- bool AllowDollar = LangOpts.DollarIdents;
- for (StringRef S : SlotNames) {
- if (S.empty())
- continue;
- if (!isValidIdentifier(S, AllowDollar))
- return None;
- }
- return NumParams;
-}
-
-/// Returns a source location in which it's appropriate to insert a new
-/// attribute for the given declaration \D.
-static Optional<AttributeInsertion>
-createAttributeInsertion(const NamedDecl *D, const SourceManager &SM,
- const LangOptions &LangOpts) {
- if (isa<ObjCPropertyDecl>(D))
- return AttributeInsertion::createInsertionAfter(D);
- if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
- if (MD->hasBody())
- return None;
- return AttributeInsertion::createInsertionAfter(D);
- }
- if (const auto *TD = dyn_cast<TagDecl>(D)) {
- SourceLocation Loc =
- Lexer::getLocForEndOfToken(TD->getInnerLocStart(), 0, SM, LangOpts);
- if (Loc.isInvalid())
- return None;
- // Insert after the 'struct'/whatever keyword.
- return AttributeInsertion::createInsertionAfter(Loc);
- }
- return AttributeInsertion::createInsertionBefore(D);
-}
-
-/// Actually emit an availability diagnostic for a reference to an unavailable
-/// decl.
-///
-/// \param Ctx The context that the reference occurred in
-/// \param ReferringDecl The exact declaration that was referenced.
-/// \param OffendingDecl A related decl to \c ReferringDecl that has an
-/// availability attribute corresponding to \c K attached to it. Note that this
-/// may not be the same as ReferringDecl, i.e. if an EnumDecl is annotated and
-/// we refer to a member EnumConstantDecl, ReferringDecl is the EnumConstantDecl
-/// and OffendingDecl is the EnumDecl.
-static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
- Decl *Ctx, const NamedDecl *ReferringDecl,
- const NamedDecl *OffendingDecl,
- StringRef Message,
- ArrayRef<SourceLocation> Locs,
- const ObjCInterfaceDecl *UnknownObjCClass,
- const ObjCPropertyDecl *ObjCProperty,
- bool ObjCPropertyAccess) {
- // Diagnostics for deprecated or unavailable.
- unsigned diag, diag_message, diag_fwdclass_message;
- unsigned diag_available_here = diag::note_availability_specified_here;
- SourceLocation NoteLocation = OffendingDecl->getLocation();
-
- // Matches 'diag::note_property_attribute' options.
- unsigned property_note_select;
-
- // Matches diag::note_availability_specified_here.
- unsigned available_here_select_kind;
-
- VersionTuple DeclVersion;
- if (const AvailabilityAttr *AA = getAttrForPlatform(S.Context, OffendingDecl))
- DeclVersion = AA->getIntroduced();
-
- if (!ShouldDiagnoseAvailabilityInContext(S, K, DeclVersion, Ctx,
- OffendingDecl))
- return;
-
- SourceLocation Loc = Locs.front();
-
- // The declaration can have multiple availability attributes, we are looking
- // at one of them.
- const AvailabilityAttr *A = getAttrForPlatform(S.Context, OffendingDecl);
- if (A && A->isInherited()) {
- for (const Decl *Redecl = OffendingDecl->getMostRecentDecl(); Redecl;
- Redecl = Redecl->getPreviousDecl()) {
- const AvailabilityAttr *AForRedecl =
- getAttrForPlatform(S.Context, Redecl);
- if (AForRedecl && !AForRedecl->isInherited()) {
- // If D is a declaration with inherited attributes, the note should
- // point to the declaration with actual attributes.
- NoteLocation = Redecl->getLocation();
- break;
- }
- }
- }
-
- switch (K) {
- case AR_NotYetIntroduced: {
- // We would like to emit the diagnostic even if -Wunguarded-availability is
- // not specified for deployment targets >= to iOS 11 or equivalent or
- // for declarations that were introduced in iOS 11 (macOS 10.13, ...) or
- // later.
- const AvailabilityAttr *AA =
- getAttrForPlatform(S.getASTContext(), OffendingDecl);
- VersionTuple Introduced = AA->getIntroduced();
-
- bool UseNewWarning = shouldDiagnoseAvailabilityByDefault(
- S.Context, S.Context.getTargetInfo().getPlatformMinVersion(),
- Introduced);
- unsigned Warning = UseNewWarning ? diag::warn_unguarded_availability_new
- : diag::warn_unguarded_availability;
-
- std::string PlatformName = AvailabilityAttr::getPrettyPlatformName(
- S.getASTContext().getTargetInfo().getPlatformName());
-
- S.Diag(Loc, Warning) << OffendingDecl << PlatformName
- << Introduced.getAsString();
-
- S.Diag(OffendingDecl->getLocation(),
- diag::note_partial_availability_specified_here)
- << OffendingDecl << PlatformName << Introduced.getAsString()
- << S.Context.getTargetInfo().getPlatformMinVersion().getAsString();
-
- if (const auto *Enclosing = findEnclosingDeclToAnnotate(Ctx)) {
- if (const auto *TD = dyn_cast<TagDecl>(Enclosing))
- if (TD->getDeclName().isEmpty()) {
- S.Diag(TD->getLocation(),
- diag::note_decl_unguarded_availability_silence)
- << /*Anonymous*/ 1 << TD->getKindName();
- return;
- }
- auto FixitNoteDiag =
- S.Diag(Enclosing->getLocation(),
- diag::note_decl_unguarded_availability_silence)
- << /*Named*/ 0 << Enclosing;
- // Don't offer a fixit for declarations with availability attributes.
- if (Enclosing->hasAttr<AvailabilityAttr>())
- return;
- if (!S.getPreprocessor().isMacroDefined("API_AVAILABLE"))
- return;
- Optional<AttributeInsertion> Insertion = createAttributeInsertion(
- Enclosing, S.getSourceManager(), S.getLangOpts());
- if (!Insertion)
- return;
- std::string PlatformName =
- AvailabilityAttr::getPlatformNameSourceSpelling(
- S.getASTContext().getTargetInfo().getPlatformName())
- .lower();
- std::string Introduced =
- OffendingDecl->getVersionIntroduced().getAsString();
- FixitNoteDiag << FixItHint::CreateInsertion(
- Insertion->Loc,
- (llvm::Twine(Insertion->Prefix) + "API_AVAILABLE(" + PlatformName +
- "(" + Introduced + "))" + Insertion->Suffix)
- .str());
- }
- return;
- }
- case AR_Deprecated:
- diag = !ObjCPropertyAccess ? diag::warn_deprecated
- : diag::warn_property_method_deprecated;
- diag_message = diag::warn_deprecated_message;
- diag_fwdclass_message = diag::warn_deprecated_fwdclass_message;
- property_note_select = /* deprecated */ 0;
- available_here_select_kind = /* deprecated */ 2;
- if (const auto *AL = OffendingDecl->getAttr<DeprecatedAttr>())
- NoteLocation = AL->getLocation();
- break;
-
- case AR_Unavailable:
- diag = !ObjCPropertyAccess ? diag::err_unavailable
- : diag::err_property_method_unavailable;
- diag_message = diag::err_unavailable_message;
- diag_fwdclass_message = diag::warn_unavailable_fwdclass_message;
- property_note_select = /* unavailable */ 1;
- available_here_select_kind = /* unavailable */ 0;
-
- if (auto AL = OffendingDecl->getAttr<UnavailableAttr>()) {
- if (AL->isImplicit() && AL->getImplicitReason()) {
- // Most of these failures are due to extra restrictions in ARC;
- // reflect that in the primary diagnostic when applicable.
- auto flagARCError = [&] {
- if (S.getLangOpts().ObjCAutoRefCount &&
- S.getSourceManager().isInSystemHeader(
- OffendingDecl->getLocation()))
- diag = diag::err_unavailable_in_arc;
- };
-
- switch (AL->getImplicitReason()) {
- case UnavailableAttr::IR_None: break;
-
- case UnavailableAttr::IR_ARCForbiddenType:
- flagARCError();
- diag_available_here = diag::note_arc_forbidden_type;
- break;
-
- case UnavailableAttr::IR_ForbiddenWeak:
- if (S.getLangOpts().ObjCWeakRuntime)
- diag_available_here = diag::note_arc_weak_disabled;
- else
- diag_available_here = diag::note_arc_weak_no_runtime;
- break;
-
- case UnavailableAttr::IR_ARCForbiddenConversion:
- flagARCError();
- diag_available_here = diag::note_performs_forbidden_arc_conversion;
- break;
-
- case UnavailableAttr::IR_ARCInitReturnsUnrelated:
- flagARCError();
- diag_available_here = diag::note_arc_init_returns_unrelated;
- break;
-
- case UnavailableAttr::IR_ARCFieldWithOwnership:
- flagARCError();
- diag_available_here = diag::note_arc_field_with_ownership;
- break;
- }
- }
- }
- break;
-
- case AR_Available:
- llvm_unreachable("Warning for availability of available declaration?");
- }
-
- SmallVector<FixItHint, 12> FixIts;
- if (K == AR_Deprecated) {
- StringRef Replacement;
- if (auto AL = OffendingDecl->getAttr<DeprecatedAttr>())
- Replacement = AL->getReplacement();
- if (auto AL = getAttrForPlatform(S.Context, OffendingDecl))
- Replacement = AL->getReplacement();
-
- CharSourceRange UseRange;
- if (!Replacement.empty())
- UseRange =
- CharSourceRange::getCharRange(Loc, S.getLocForEndOfToken(Loc));
- if (UseRange.isValid()) {
- if (const auto *MethodDecl = dyn_cast<ObjCMethodDecl>(ReferringDecl)) {
- Selector Sel = MethodDecl->getSelector();
- SmallVector<StringRef, 12> SelectorSlotNames;
- Optional<unsigned> NumParams = tryParseObjCMethodName(
- Replacement, SelectorSlotNames, S.getLangOpts());
- if (NumParams && NumParams.getValue() == Sel.getNumArgs()) {
- assert(SelectorSlotNames.size() == Locs.size());
- for (unsigned I = 0; I < Locs.size(); ++I) {
- if (!Sel.getNameForSlot(I).empty()) {
- CharSourceRange NameRange = CharSourceRange::getCharRange(
- Locs[I], S.getLocForEndOfToken(Locs[I]));
- FixIts.push_back(FixItHint::CreateReplacement(
- NameRange, SelectorSlotNames[I]));
- } else
- FixIts.push_back(
- FixItHint::CreateInsertion(Locs[I], SelectorSlotNames[I]));
- }
- } else
- FixIts.push_back(FixItHint::CreateReplacement(UseRange, Replacement));
- } else
- FixIts.push_back(FixItHint::CreateReplacement(UseRange, Replacement));
- }
- }
-
- if (!Message.empty()) {
- S.Diag(Loc, diag_message) << ReferringDecl << Message << FixIts;
- if (ObjCProperty)
- S.Diag(ObjCProperty->getLocation(), diag::note_property_attribute)
- << ObjCProperty->getDeclName() << property_note_select;
- } else if (!UnknownObjCClass) {
- S.Diag(Loc, diag) << ReferringDecl << FixIts;
- if (ObjCProperty)
- S.Diag(ObjCProperty->getLocation(), diag::note_property_attribute)
- << ObjCProperty->getDeclName() << property_note_select;
- } else {
- S.Diag(Loc, diag_fwdclass_message) << ReferringDecl << FixIts;
- S.Diag(UnknownObjCClass->getLocation(), diag::note_forward_class);
- }
-
- S.Diag(NoteLocation, diag_available_here)
- << OffendingDecl << available_here_select_kind;
-}
-
-static void handleDelayedAvailabilityCheck(Sema &S, DelayedDiagnostic &DD,
- Decl *Ctx) {
- assert(DD.Kind == DelayedDiagnostic::Availability &&
- "Expected an availability diagnostic here");
-
- DD.Triggered = true;
- DoEmitAvailabilityWarning(
- S, DD.getAvailabilityResult(), Ctx, DD.getAvailabilityReferringDecl(),
- DD.getAvailabilityOffendingDecl(), DD.getAvailabilityMessage(),
- DD.getAvailabilitySelectorLocs(), DD.getUnknownObjCClass(),
- DD.getObjCProperty(), false);
-}
void Sema::PopParsingDeclaration(ParsingDeclState state, Decl *decl) {
assert(DelayedDiagnostics.getCurrentPool());
@@ -8343,7 +7801,7 @@ void Sema::PopParsingDeclaration(ParsingDeclState state, Decl *decl) {
// Don't bother giving deprecation/unavailable diagnostics if
// the decl is invalid.
if (!decl->isInvalidDecl())
- handleDelayedAvailabilityCheck(*this, diag, decl);
+ handleDelayedAvailabilityCheck(diag, decl);
break;
case DelayedDiagnostic::Access:
@@ -8373,415 +7831,3 @@ void Sema::redelayDiagnostics(DelayedDiagnosticPool &pool) {
assert(curPool && "re-emitting in undelayed context not supported");
curPool->steal(pool);
}
-
-static void EmitAvailabilityWarning(Sema &S, AvailabilityResult AR,
- const NamedDecl *ReferringDecl,
- const NamedDecl *OffendingDecl,
- StringRef Message,
- ArrayRef<SourceLocation> Locs,
- const ObjCInterfaceDecl *UnknownObjCClass,
- const ObjCPropertyDecl *ObjCProperty,
- bool ObjCPropertyAccess) {
- // Delay if we're currently parsing a declaration.
- if (S.DelayedDiagnostics.shouldDelayDiagnostics()) {
- S.DelayedDiagnostics.add(
- DelayedDiagnostic::makeAvailability(
- AR, Locs, ReferringDecl, OffendingDecl, UnknownObjCClass,
- ObjCProperty, Message, ObjCPropertyAccess));
- return;
- }
-
- Decl *Ctx = cast<Decl>(S.getCurLexicalContext());
- DoEmitAvailabilityWarning(S, AR, Ctx, ReferringDecl, OffendingDecl,
- Message, Locs, UnknownObjCClass, ObjCProperty,
- ObjCPropertyAccess);
-}
-
-namespace {
-
-/// Returns true if the given statement can be a body-like child of \p Parent.
-bool isBodyLikeChildStmt(const Stmt *S, const Stmt *Parent) {
- switch (Parent->getStmtClass()) {
- case Stmt::IfStmtClass:
- return cast<IfStmt>(Parent)->getThen() == S ||
- cast<IfStmt>(Parent)->getElse() == S;
- case Stmt::WhileStmtClass:
- return cast<WhileStmt>(Parent)->getBody() == S;
- case Stmt::DoStmtClass:
- return cast<DoStmt>(Parent)->getBody() == S;
- case Stmt::ForStmtClass:
- return cast<ForStmt>(Parent)->getBody() == S;
- case Stmt::CXXForRangeStmtClass:
- return cast<CXXForRangeStmt>(Parent)->getBody() == S;
- case Stmt::ObjCForCollectionStmtClass:
- return cast<ObjCForCollectionStmt>(Parent)->getBody() == S;
- case Stmt::CaseStmtClass:
- case Stmt::DefaultStmtClass:
- return cast<SwitchCase>(Parent)->getSubStmt() == S;
- default:
- return false;
- }
-}
-
-class StmtUSEFinder : public RecursiveASTVisitor<StmtUSEFinder> {
- const Stmt *Target;
-
-public:
- bool VisitStmt(Stmt *S) { return S != Target; }
-
- /// Returns true if the given statement is present in the given declaration.
- static bool isContained(const Stmt *Target, const Decl *D) {
- StmtUSEFinder Visitor;
- Visitor.Target = Target;
- return !Visitor.TraverseDecl(const_cast<Decl *>(D));
- }
-};
-
-/// Traverses the AST and finds the last statement that used a given
-/// declaration.
-class LastDeclUSEFinder : public RecursiveASTVisitor<LastDeclUSEFinder> {
- const Decl *D;
-
-public:
- bool VisitDeclRefExpr(DeclRefExpr *DRE) {
- if (DRE->getDecl() == D)
- return false;
- return true;
- }
-
- static const Stmt *findLastStmtThatUsesDecl(const Decl *D,
- const CompoundStmt *Scope) {
- LastDeclUSEFinder Visitor;
- Visitor.D = D;
- for (auto I = Scope->body_rbegin(), E = Scope->body_rend(); I != E; ++I) {
- const Stmt *S = *I;
- if (!Visitor.TraverseStmt(const_cast<Stmt *>(S)))
- return S;
- }
- return nullptr;
- }
-};
-
-/// This class implements -Wunguarded-availability.
-///
-/// This is done with a traversal of the AST of a function that makes reference
-/// to a partially available declaration. Whenever we encounter an \c if of the
-/// form: \c if(@available(...)), we use the version from the condition to visit
-/// the then statement.
-class DiagnoseUnguardedAvailability
- : public RecursiveASTVisitor<DiagnoseUnguardedAvailability> {
- typedef RecursiveASTVisitor<DiagnoseUnguardedAvailability> Base;
-
- Sema &SemaRef;
- Decl *Ctx;
-
- /// Stack of potentially nested 'if (@available(...))'s.
- SmallVector<VersionTuple, 8> AvailabilityStack;
- SmallVector<const Stmt *, 16> StmtStack;
-
- void DiagnoseDeclAvailability(NamedDecl *D, SourceRange Range,
- ObjCInterfaceDecl *ClassReceiver = nullptr);
-
-public:
- DiagnoseUnguardedAvailability(Sema &SemaRef, Decl *Ctx)
- : SemaRef(SemaRef), Ctx(Ctx) {
- AvailabilityStack.push_back(
- SemaRef.Context.getTargetInfo().getPlatformMinVersion());
- }
-
- bool TraverseDecl(Decl *D) {
- // Avoid visiting nested functions to prevent duplicate warnings.
- if (!D || isa<FunctionDecl>(D))
- return true;
- return Base::TraverseDecl(D);
- }
-
- bool TraverseStmt(Stmt *S) {
- if (!S)
- return true;
- StmtStack.push_back(S);
- bool Result = Base::TraverseStmt(S);
- StmtStack.pop_back();
- return Result;
- }
-
- void IssueDiagnostics(Stmt *S) { TraverseStmt(S); }
-
- bool TraverseIfStmt(IfStmt *If);
-
- bool TraverseLambdaExpr(LambdaExpr *E) { return true; }
-
- // for 'case X:' statements, don't bother looking at the 'X'; it can't lead
- // to any useful diagnostics.
- bool TraverseCaseStmt(CaseStmt *CS) { return TraverseStmt(CS->getSubStmt()); }
-
- bool VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *PRE) {
- if (PRE->isClassReceiver())
- DiagnoseDeclAvailability(PRE->getClassReceiver(), PRE->getReceiverLocation());
- return true;
- }
-
- bool VisitObjCMessageExpr(ObjCMessageExpr *Msg) {
- if (ObjCMethodDecl *D = Msg->getMethodDecl()) {
- ObjCInterfaceDecl *ID = nullptr;
- QualType ReceiverTy = Msg->getClassReceiver();
- if (!ReceiverTy.isNull() && ReceiverTy->getAsObjCInterfaceType())
- ID = ReceiverTy->getAsObjCInterfaceType()->getInterface();
-
- DiagnoseDeclAvailability(
- D, SourceRange(Msg->getSelectorStartLoc(), Msg->getEndLoc()), ID);
- }
- return true;
- }
-
- bool VisitDeclRefExpr(DeclRefExpr *DRE) {
- DiagnoseDeclAvailability(DRE->getDecl(),
- SourceRange(DRE->getBeginLoc(), DRE->getEndLoc()));
- return true;
- }
-
- bool VisitMemberExpr(MemberExpr *ME) {
- DiagnoseDeclAvailability(ME->getMemberDecl(),
- SourceRange(ME->getBeginLoc(), ME->getEndLoc()));
- return true;
- }
-
- bool VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
- SemaRef.Diag(E->getBeginLoc(), diag::warn_at_available_unchecked_use)
- << (!SemaRef.getLangOpts().ObjC);
- return true;
- }
-
- bool VisitTypeLoc(TypeLoc Ty);
-};
-
-void DiagnoseUnguardedAvailability::DiagnoseDeclAvailability(
- NamedDecl *D, SourceRange Range, ObjCInterfaceDecl *ReceiverClass) {
- AvailabilityResult Result;
- const NamedDecl *OffendingDecl;
- std::tie(Result, OffendingDecl) =
- ShouldDiagnoseAvailabilityOfDecl(SemaRef, D, nullptr, ReceiverClass);
- if (Result != AR_Available) {
- // All other diagnostic kinds have already been handled in
- // DiagnoseAvailabilityOfDecl.
- if (Result != AR_NotYetIntroduced)
- return;
-
- const AvailabilityAttr *AA =
- getAttrForPlatform(SemaRef.getASTContext(), OffendingDecl);
- VersionTuple Introduced = AA->getIntroduced();
-
- if (AvailabilityStack.back() >= Introduced)
- return;
-
- // If the context of this function is less available than D, we should not
- // emit a diagnostic.
- if (!ShouldDiagnoseAvailabilityInContext(SemaRef, Result, Introduced, Ctx,
- OffendingDecl))
- return;
-
- // We would like to emit the diagnostic even if -Wunguarded-availability is
- // not specified for deployment targets >= to iOS 11 or equivalent or
- // for declarations that were introduced in iOS 11 (macOS 10.13, ...) or
- // later.
- unsigned DiagKind =
- shouldDiagnoseAvailabilityByDefault(
- SemaRef.Context,
- SemaRef.Context.getTargetInfo().getPlatformMinVersion(), Introduced)
- ? diag::warn_unguarded_availability_new
- : diag::warn_unguarded_availability;
-
- std::string PlatformName = AvailabilityAttr::getPrettyPlatformName(
- SemaRef.getASTContext().getTargetInfo().getPlatformName());
-
- SemaRef.Diag(Range.getBegin(), DiagKind)
- << Range << D << PlatformName << Introduced.getAsString();
-
- SemaRef.Diag(OffendingDecl->getLocation(),
- diag::note_partial_availability_specified_here)
- << OffendingDecl << PlatformName << Introduced.getAsString()
- << SemaRef.Context.getTargetInfo()
- .getPlatformMinVersion()
- .getAsString();
-
- auto FixitDiag =
- SemaRef.Diag(Range.getBegin(), diag::note_unguarded_available_silence)
- << Range << D
- << (SemaRef.getLangOpts().ObjC ? /*@available*/ 0
- : /*__builtin_available*/ 1);
-
- // Find the statement which should be enclosed in the if @available check.
- if (StmtStack.empty())
- return;
- const Stmt *StmtOfUse = StmtStack.back();
- const CompoundStmt *Scope = nullptr;
- for (const Stmt *S : llvm::reverse(StmtStack)) {
- if (const auto *CS = dyn_cast<CompoundStmt>(S)) {
- Scope = CS;
- break;
- }
- if (isBodyLikeChildStmt(StmtOfUse, S)) {
- // The declaration won't be seen outside of the statement, so we don't
- // have to wrap the uses of any declared variables in if (@available).
- // Therefore we can avoid setting Scope here.
- break;
- }
- StmtOfUse = S;
- }
- const Stmt *LastStmtOfUse = nullptr;
- if (isa<DeclStmt>(StmtOfUse) && Scope) {
- for (const Decl *D : cast<DeclStmt>(StmtOfUse)->decls()) {
- if (StmtUSEFinder::isContained(StmtStack.back(), D)) {
- LastStmtOfUse = LastDeclUSEFinder::findLastStmtThatUsesDecl(D, Scope);
- break;
- }
- }
- }
-
- const SourceManager &SM = SemaRef.getSourceManager();
- SourceLocation IfInsertionLoc =
- SM.getExpansionLoc(StmtOfUse->getBeginLoc());
- SourceLocation StmtEndLoc =
- SM.getExpansionRange(
- (LastStmtOfUse ? LastStmtOfUse : StmtOfUse)->getEndLoc())
- .getEnd();
- if (SM.getFileID(IfInsertionLoc) != SM.getFileID(StmtEndLoc))
- return;
-
- StringRef Indentation = Lexer::getIndentationForLine(IfInsertionLoc, SM);
- const char *ExtraIndentation = " ";
- std::string FixItString;
- llvm::raw_string_ostream FixItOS(FixItString);
- FixItOS << "if (" << (SemaRef.getLangOpts().ObjC ? "@available"
- : "__builtin_available")
- << "("
- << AvailabilityAttr::getPlatformNameSourceSpelling(
- SemaRef.getASTContext().getTargetInfo().getPlatformName())
- << " " << Introduced.getAsString() << ", *)) {\n"
- << Indentation << ExtraIndentation;
- FixitDiag << FixItHint::CreateInsertion(IfInsertionLoc, FixItOS.str());
- SourceLocation ElseInsertionLoc = Lexer::findLocationAfterToken(
- StmtEndLoc, tok::semi, SM, SemaRef.getLangOpts(),
- /*SkipTrailingWhitespaceAndNewLine=*/false);
- if (ElseInsertionLoc.isInvalid())
- ElseInsertionLoc =
- Lexer::getLocForEndOfToken(StmtEndLoc, 0, SM, SemaRef.getLangOpts());
- FixItOS.str().clear();
- FixItOS << "\n"
- << Indentation << "} else {\n"
- << Indentation << ExtraIndentation
- << "// Fallback on earlier versions\n"
- << Indentation << "}";
- FixitDiag << FixItHint::CreateInsertion(ElseInsertionLoc, FixItOS.str());
- }
-}
-
-bool DiagnoseUnguardedAvailability::VisitTypeLoc(TypeLoc Ty) {
- const Type *TyPtr = Ty.getTypePtr();
- SourceRange Range{Ty.getBeginLoc(), Ty.getEndLoc()};
-
- if (Range.isInvalid())
- return true;
-
- if (const auto *TT = dyn_cast<TagType>(TyPtr)) {
- TagDecl *TD = TT->getDecl();
- DiagnoseDeclAvailability(TD, Range);
-
- } else if (const auto *TD = dyn_cast<TypedefType>(TyPtr)) {
- TypedefNameDecl *D = TD->getDecl();
- DiagnoseDeclAvailability(D, Range);
-
- } else if (const auto *ObjCO = dyn_cast<ObjCObjectType>(TyPtr)) {
- if (NamedDecl *D = ObjCO->getInterface())
- DiagnoseDeclAvailability(D, Range);
- }
-
- return true;
-}
-
-bool DiagnoseUnguardedAvailability::TraverseIfStmt(IfStmt *If) {
- VersionTuple CondVersion;
- if (auto *E = dyn_cast<ObjCAvailabilityCheckExpr>(If->getCond())) {
- CondVersion = E->getVersion();
-
- // If we're using the '*' case here or if this check is redundant, then we
- // use the enclosing version to check both branches.
- if (CondVersion.empty() || CondVersion <= AvailabilityStack.back())
- return TraverseStmt(If->getThen()) && TraverseStmt(If->getElse());
- } else {
- // This isn't an availability checking 'if', we can just continue.
- return Base::TraverseIfStmt(If);
- }
-
- AvailabilityStack.push_back(CondVersion);
- bool ShouldContinue = TraverseStmt(If->getThen());
- AvailabilityStack.pop_back();
-
- return ShouldContinue && TraverseStmt(If->getElse());
-}
-
-} // end anonymous namespace
-
-void Sema::DiagnoseUnguardedAvailabilityViolations(Decl *D) {
- Stmt *Body = nullptr;
-
- if (auto *FD = D->getAsFunction()) {
- // FIXME: We only examine the pattern decl for availability violations now,
- // but we should also examine instantiated templates.
- if (FD->isTemplateInstantiation())
- return;
-
- Body = FD->getBody();
- } else if (auto *MD = dyn_cast<ObjCMethodDecl>(D))
- Body = MD->getBody();
- else if (auto *BD = dyn_cast<BlockDecl>(D))
- Body = BD->getBody();
-
- assert(Body && "Need a body here!");
-
- DiagnoseUnguardedAvailability(*this, D).IssueDiagnostics(Body);
-}
-
-void Sema::DiagnoseAvailabilityOfDecl(NamedDecl *D,
- ArrayRef<SourceLocation> Locs,
- const ObjCInterfaceDecl *UnknownObjCClass,
- bool ObjCPropertyAccess,
- bool AvoidPartialAvailabilityChecks,
- ObjCInterfaceDecl *ClassReceiver) {
- std::string Message;
- AvailabilityResult Result;
- const NamedDecl* OffendingDecl;
- // See if this declaration is unavailable, deprecated, or partial.
- std::tie(Result, OffendingDecl) =
- ShouldDiagnoseAvailabilityOfDecl(*this, D, &Message, ClassReceiver);
- if (Result == AR_Available)
- return;
-
- if (Result == AR_NotYetIntroduced) {
- if (AvoidPartialAvailabilityChecks)
- return;
-
- // We need to know the @available context in the current function to
- // diagnose this use, let DiagnoseUnguardedAvailabilityViolations do that
- // when we're done parsing the current function.
- if (getCurFunctionOrMethodDecl()) {
- getEnclosingFunction()->HasPotentialAvailabilityViolations = true;
- return;
- } else if (getCurBlock() || getCurLambda()) {
- getCurFunction()->HasPotentialAvailabilityViolations = true;
- return;
- }
- }
-
- const ObjCPropertyDecl *ObjCPDecl = nullptr;
- if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
- if (const ObjCPropertyDecl *PD = MD->findPropertyDecl()) {
- AvailabilityResult PDeclResult = PD->getAvailability(nullptr);
- if (PDeclResult == Result)
- ObjCPDecl = PD;
- }
- }
-
- EmitAvailabilityWarning(*this, Result, D, OffendingDecl, Message, Locs,
- UnknownObjCClass, ObjCPDecl, ObjCPropertyAccess);
-}
diff --git a/clang/lib/Sema/SemaDeclCXX.cpp b/clang/lib/Sema/SemaDeclCXX.cpp
index 9916d3be77e1..22bf35dbd0cb 100644
--- a/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/clang/lib/Sema/SemaDeclCXX.cpp
@@ -38,8 +38,9 @@
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include <map>
#include <set>
@@ -51,102 +52,109 @@ using namespace clang;
//===----------------------------------------------------------------------===//
namespace {
- /// CheckDefaultArgumentVisitor - C++ [dcl.fct.default] Traverses
- /// the default argument of a parameter to determine whether it
- /// contains any ill-formed subexpressions. For example, this will
- /// diagnose the use of local variables or parameters within the
- /// default argument expression.
- class CheckDefaultArgumentVisitor
- : public StmtVisitor<CheckDefaultArgumentVisitor, bool> {
- Expr *DefaultArg;
- Sema *S;
+/// CheckDefaultArgumentVisitor - C++ [dcl.fct.default] Traverses
+/// the default argument of a parameter to determine whether it
+/// contains any ill-formed subexpressions. For example, this will
+/// diagnose the use of local variables or parameters within the
+/// default argument expression.
+class CheckDefaultArgumentVisitor
+ : public ConstStmtVisitor<CheckDefaultArgumentVisitor, bool> {
+ Sema &S;
+ const Expr *DefaultArg;
- public:
- CheckDefaultArgumentVisitor(Expr *defarg, Sema *s)
- : DefaultArg(defarg), S(s) {}
-
- bool VisitExpr(Expr *Node);
- bool VisitDeclRefExpr(DeclRefExpr *DRE);
- bool VisitCXXThisExpr(CXXThisExpr *ThisE);
- bool VisitLambdaExpr(LambdaExpr *Lambda);
- bool VisitPseudoObjectExpr(PseudoObjectExpr *POE);
- };
+public:
+ CheckDefaultArgumentVisitor(Sema &S, const Expr *DefaultArg)
+ : S(S), DefaultArg(DefaultArg) {}
+
+ bool VisitExpr(const Expr *Node);
+ bool VisitDeclRefExpr(const DeclRefExpr *DRE);
+ bool VisitCXXThisExpr(const CXXThisExpr *ThisE);
+ bool VisitLambdaExpr(const LambdaExpr *Lambda);
+ bool VisitPseudoObjectExpr(const PseudoObjectExpr *POE);
+};
- /// VisitExpr - Visit all of the children of this expression.
- bool CheckDefaultArgumentVisitor::VisitExpr(Expr *Node) {
- bool IsInvalid = false;
- for (Stmt *SubStmt : Node->children())
- IsInvalid |= Visit(SubStmt);
- return IsInvalid;
- }
-
- /// VisitDeclRefExpr - Visit a reference to a declaration, to
- /// determine whether this declaration can be used in the default
- /// argument expression.
- bool CheckDefaultArgumentVisitor::VisitDeclRefExpr(DeclRefExpr *DRE) {
- NamedDecl *Decl = DRE->getDecl();
- if (ParmVarDecl *Param = dyn_cast<ParmVarDecl>(Decl)) {
- // C++ [dcl.fct.default]p9
- // Default arguments are evaluated each time the function is
- // called. The order of evaluation of function arguments is
- // unspecified. Consequently, parameters of a function shall not
- // be used in default argument expressions, even if they are not
- // evaluated. Parameters of a function declared before a default
- // argument expression are in scope and can hide namespace and
- // class member names.
- return S->Diag(DRE->getBeginLoc(),
- diag::err_param_default_argument_references_param)
+/// VisitExpr - Visit all of the children of this expression.
+bool CheckDefaultArgumentVisitor::VisitExpr(const Expr *Node) {
+ bool IsInvalid = false;
+ for (const Stmt *SubStmt : Node->children())
+ IsInvalid |= Visit(SubStmt);
+ return IsInvalid;
+}
+
+/// VisitDeclRefExpr - Visit a reference to a declaration, to
+/// determine whether this declaration can be used in the default
+/// argument expression.
+bool CheckDefaultArgumentVisitor::VisitDeclRefExpr(const DeclRefExpr *DRE) {
+ const NamedDecl *Decl = DRE->getDecl();
+ if (const auto *Param = dyn_cast<ParmVarDecl>(Decl)) {
+ // C++ [dcl.fct.default]p9:
+ // [...] parameters of a function shall not be used in default
+ // argument expressions, even if they are not evaluated. [...]
+ //
+ // C++17 [dcl.fct.default]p9 (by CWG 2082):
+ // [...] A parameter shall not appear as a potentially-evaluated
+ // expression in a default argument. [...]
+ //
+ if (DRE->isNonOdrUse() != NOUR_Unevaluated)
+ return S.Diag(DRE->getBeginLoc(),
+ diag::err_param_default_argument_references_param)
<< Param->getDeclName() << DefaultArg->getSourceRange();
- } else if (VarDecl *VDecl = dyn_cast<VarDecl>(Decl)) {
- // C++ [dcl.fct.default]p7
- // Local variables shall not be used in default argument
- // expressions.
- if (VDecl->isLocalVarDecl())
- return S->Diag(DRE->getBeginLoc(),
- diag::err_param_default_argument_references_local)
- << VDecl->getDeclName() << DefaultArg->getSourceRange();
- }
-
- return false;
- }
-
- /// VisitCXXThisExpr - Visit a C++ "this" expression.
- bool CheckDefaultArgumentVisitor::VisitCXXThisExpr(CXXThisExpr *ThisE) {
- // C++ [dcl.fct.default]p8:
- // The keyword this shall not be used in a default argument of a
- // member function.
- return S->Diag(ThisE->getBeginLoc(),
- diag::err_param_default_argument_references_this)
- << ThisE->getSourceRange();
+ } else if (const auto *VDecl = dyn_cast<VarDecl>(Decl)) {
+ // C++ [dcl.fct.default]p7:
+ // Local variables shall not be used in default argument
+ // expressions.
+ //
+ // C++17 [dcl.fct.default]p7 (by CWG 2082):
+ // A local variable shall not appear as a potentially-evaluated
+ // expression in a default argument.
+ //
+ // C++20 [dcl.fct.default]p7 (DR as part of P0588R1, see also CWG 2346):
+ // Note: A local variable cannot be odr-used (6.3) in a default argument.
+ //
+ if (VDecl->isLocalVarDecl() && !DRE->isNonOdrUse())
+ return S.Diag(DRE->getBeginLoc(),
+ diag::err_param_default_argument_references_local)
+ << VDecl->getDeclName() << DefaultArg->getSourceRange();
}
- bool CheckDefaultArgumentVisitor::VisitPseudoObjectExpr(PseudoObjectExpr *POE) {
- bool Invalid = false;
- for (PseudoObjectExpr::semantics_iterator
- i = POE->semantics_begin(), e = POE->semantics_end(); i != e; ++i) {
- Expr *E = *i;
+ return false;
+}
- // Look through bindings.
- if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) {
- E = OVE->getSourceExpr();
- assert(E && "pseudo-object binding without source expression?");
- }
+/// VisitCXXThisExpr - Visit a C++ "this" expression.
+bool CheckDefaultArgumentVisitor::VisitCXXThisExpr(const CXXThisExpr *ThisE) {
+ // C++ [dcl.fct.default]p8:
+ // The keyword this shall not be used in a default argument of a
+ // member function.
+ return S.Diag(ThisE->getBeginLoc(),
+ diag::err_param_default_argument_references_this)
+ << ThisE->getSourceRange();
+}
- Invalid |= Visit(E);
+bool CheckDefaultArgumentVisitor::VisitPseudoObjectExpr(
+ const PseudoObjectExpr *POE) {
+ bool Invalid = false;
+ for (const Expr *E : POE->semantics()) {
+ // Look through bindings.
+ if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) {
+ E = OVE->getSourceExpr();
+ assert(E && "pseudo-object binding without source expression?");
}
- return Invalid;
+
+ Invalid |= Visit(E);
}
+ return Invalid;
+}
- bool CheckDefaultArgumentVisitor::VisitLambdaExpr(LambdaExpr *Lambda) {
- // C++11 [expr.lambda.prim]p13:
- // A lambda-expression appearing in a default argument shall not
- // implicitly or explicitly capture any entity.
- if (Lambda->capture_begin() == Lambda->capture_end())
- return false;
+bool CheckDefaultArgumentVisitor::VisitLambdaExpr(const LambdaExpr *Lambda) {
+ // C++11 [expr.lambda.prim]p13:
+ // A lambda-expression appearing in a default argument shall not
+ // implicitly or explicitly capture any entity.
+ if (Lambda->capture_begin() == Lambda->capture_end())
+ return false;
- return S->Diag(Lambda->getBeginLoc(), diag::err_lambda_capture_default_arg);
- }
+ return S.Diag(Lambda->getBeginLoc(), diag::err_lambda_capture_default_arg);
}
+} // namespace
void
Sema::ImplicitExceptionSpecification::CalledDecl(SourceLocation CallLoc,
@@ -246,14 +254,12 @@ void Sema::ImplicitExceptionSpecification::CalledStmt(Stmt *S) {
ComputedEST = EST_None;
}
-bool
-Sema::SetParamDefaultArgument(ParmVarDecl *Param, Expr *Arg,
- SourceLocation EqualLoc) {
+ExprResult Sema::ConvertParamDefaultArgument(const ParmVarDecl *Param,
+ Expr *Arg,
+ SourceLocation EqualLoc) {
if (RequireCompleteType(Param->getLocation(), Param->getType(),
- diag::err_typecheck_decl_incomplete_type)) {
- Param->setInvalidDecl();
+ diag::err_typecheck_decl_incomplete_type))
return true;
- }
// C++ [dcl.fct.default]p5
// A default argument expression is implicitly converted (clause
@@ -274,7 +280,12 @@ Sema::SetParamDefaultArgument(ParmVarDecl *Param, Expr *Arg,
CheckCompletedExpr(Arg, EqualLoc);
Arg = MaybeCreateExprWithCleanups(Arg);
- // Okay: add the default argument to the parameter
+ return Arg;
+}
+
+void Sema::SetParamDefaultArgument(ParmVarDecl *Param, Expr *Arg,
+ SourceLocation EqualLoc) {
+ // Add the default argument to the parameter
Param->setDefaultArg(Arg);
// We have already instantiated this parameter; provide each of the
@@ -288,8 +299,6 @@ Sema::SetParamDefaultArgument(ParmVarDecl *Param, Expr *Arg,
// We're done tracking this parameter's instantiations.
UnparsedDefaultArgInstantiations.erase(InstPos);
}
-
- return false;
}
/// ActOnParamDefaultArgument - Check whether the default argument
@@ -304,18 +313,22 @@ Sema::ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc,
ParmVarDecl *Param = cast<ParmVarDecl>(param);
UnparsedDefaultArgLocs.erase(Param);
+ auto Fail = [&] {
+ Param->setInvalidDecl();
+ Param->setDefaultArg(new (Context) OpaqueValueExpr(
+ EqualLoc, Param->getType().getNonReferenceType(), VK_RValue));
+ };
+
// Default arguments are only permitted in C++
if (!getLangOpts().CPlusPlus) {
Diag(EqualLoc, diag::err_param_default_argument)
<< DefaultArg->getSourceRange();
- Param->setInvalidDecl();
- return;
+ return Fail();
}
// Check for unexpanded parameter packs.
if (DiagnoseUnexpandedParameterPack(DefaultArg, UPPC_DefaultArgument)) {
- Param->setInvalidDecl();
- return;
+ return Fail();
}
// C++11 [dcl.fct.default]p3
@@ -324,15 +337,21 @@ Sema::ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc,
if (Param->isParameterPack()) {
Diag(EqualLoc, diag::err_param_default_argument_on_parameter_pack)
<< DefaultArg->getSourceRange();
+ // Recover by discarding the default argument.
+ Param->setDefaultArg(nullptr);
return;
}
+ ExprResult Result = ConvertParamDefaultArgument(Param, DefaultArg, EqualLoc);
+ if (Result.isInvalid())
+ return Fail();
+
+ DefaultArg = Result.getAs<Expr>();
+
// Check that the default argument is well-formed
- CheckDefaultArgumentVisitor DefaultArgChecker(DefaultArg, this);
- if (DefaultArgChecker.Visit(DefaultArg)) {
- Param->setInvalidDecl();
- return;
- }
+ CheckDefaultArgumentVisitor DefaultArgChecker(*this, DefaultArg);
+ if (DefaultArgChecker.Visit(DefaultArg))
+ return Fail();
SetParamDefaultArgument(Param, DefaultArg, EqualLoc);
}
@@ -419,14 +438,9 @@ void Sema::CheckExtraCXXDefaultArguments(Declarator &D) {
}
static bool functionDeclHasDefaultArgument(const FunctionDecl *FD) {
- for (unsigned NumParams = FD->getNumParams(); NumParams > 0; --NumParams) {
- const ParmVarDecl *PVD = FD->getParamDecl(NumParams-1);
- if (!PVD->hasDefaultArg())
- return false;
- if (!PVD->hasInheritedDefaultArg())
- return true;
- }
- return false;
+ return std::any_of(FD->param_begin(), FD->param_end(), [](ParmVarDecl *P) {
+ return P->hasDefaultArg() && !P->hasInheritedDefaultArg();
+ });
}
/// MergeCXXFunctionDecl - Merge two declarations of the same C++
@@ -664,7 +678,7 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old,
// for the same class template shall not have equivalent
// parameter-declaration-clauses.
if (isa<CXXDeductionGuideDecl>(New) &&
- !New->isFunctionTemplateSpecialization()) {
+ !New->isFunctionTemplateSpecialization() && isVisible(Old)) {
Diag(New->getLocation(), diag::err_deduction_guide_redeclared);
Diag(Old->getLocation(), diag::note_previous_declaration);
}
@@ -761,7 +775,7 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D,
Err << SourceRange(Loc, Loc);
} else if (!CPlusPlus20Specifiers.empty()) {
auto &&Warn = Diag(CPlusPlus20SpecifierLocs.front(),
- getLangOpts().CPlusPlus2a
+ getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_decomp_decl_spec
: diag::ext_decomp_decl_spec);
Warn << (int)CPlusPlus20Specifiers.size()
@@ -778,7 +792,7 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D,
// C++2a [dcl.struct.bind]p1:
// A cv that includes volatile is deprecated
if ((DS.getTypeQualifiers() & DeclSpec::TQ_volatile) &&
- getLangOpts().CPlusPlus2a)
+ getLangOpts().CPlusPlus20)
Diag(DS.getVolatileSpecLoc(),
diag::warn_deprecated_volatile_structured_binding);
@@ -952,7 +966,7 @@ static std::string printTemplateArgs(const PrintingPolicy &PrintingPolicy,
Arg.getArgument().print(PrintingPolicy, OS);
First = false;
}
- return OS.str();
+ return std::string(OS.str());
}
static bool lookupStdTypeTraitMember(Sema &S, LookupResult &TraitMemberLookup,
@@ -1052,7 +1066,7 @@ static IsTupleLike isTupleLike(Sema &S, SourceLocation Loc, QualType T,
TemplateArgumentListInfo &Args;
ICEDiagnoser(LookupResult &R, TemplateArgumentListInfo &Args)
: R(R), Args(Args) {}
- void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) {
+ void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) override {
S.Diag(Loc, diag::err_decomp_decl_std_tuple_size_not_constant)
<< printTemplateArgs(S.Context.getPrintingPolicy(), Args);
}
@@ -1100,16 +1114,17 @@ static QualType getTupleLikeElementType(Sema &S, SourceLocation Loc,
}
namespace {
-struct BindingDiagnosticTrap {
+struct InitializingBinding {
Sema &S;
- DiagnosticErrorTrap Trap;
- BindingDecl *BD;
-
- BindingDiagnosticTrap(Sema &S, BindingDecl *BD)
- : S(S), Trap(S.Diags), BD(BD) {}
- ~BindingDiagnosticTrap() {
- if (Trap.hasErrorOccurred())
- S.Diag(BD->getLocation(), diag::note_in_binding_decl_init) << BD;
+ InitializingBinding(Sema &S, BindingDecl *BD) : S(S) {
+ Sema::CodeSynthesisContext Ctx;
+ Ctx.Kind = Sema::CodeSynthesisContext::InitializingStructuredBinding;
+ Ctx.PointOfInstantiation = BD->getLocation();
+ Ctx.Entity = BD;
+ S.pushCodeSynthesisContext(Ctx);
+ }
+ ~InitializingBinding() {
+ S.popCodeSynthesisContext();
}
};
}
@@ -1158,7 +1173,7 @@ static bool checkTupleLikeDecomposition(Sema &S,
unsigned I = 0;
for (auto *B : Bindings) {
- BindingDiagnosticTrap Trap(S, B);
+ InitializingBinding InitContext(S, B);
SourceLocation Loc = B->getLocation();
ExprResult E = S.BuildDeclRefExpr(Src, DecompType, VK_LValue, Loc);
@@ -1528,25 +1543,34 @@ void Sema::MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old) {
/// [dcl.fct.default].
void Sema::CheckCXXDefaultArguments(FunctionDecl *FD) {
unsigned NumParams = FD->getNumParams();
- unsigned p;
+ unsigned ParamIdx = 0;
+
+ // This checking doesn't make sense for explicit specializations; their
+ // default arguments are determined by the declaration we're specializing,
+ // not by FD.
+ if (FD->getTemplateSpecializationKind() == TSK_ExplicitSpecialization)
+ return;
+ if (auto *FTD = FD->getDescribedFunctionTemplate())
+ if (FTD->isMemberSpecialization())
+ return;
// Find first parameter with a default argument
- for (p = 0; p < NumParams; ++p) {
- ParmVarDecl *Param = FD->getParamDecl(p);
+ for (; ParamIdx < NumParams; ++ParamIdx) {
+ ParmVarDecl *Param = FD->getParamDecl(ParamIdx);
if (Param->hasDefaultArg())
break;
}
- // C++11 [dcl.fct.default]p4:
+ // C++20 [dcl.fct.default]p4:
// In a given function declaration, each parameter subsequent to a parameter
// with a default argument shall have a default argument supplied in this or
- // a previous declaration or shall be a function parameter pack. A default
- // argument shall not be redefined by a later declaration (not even to the
- // same value).
- unsigned LastMissingDefaultArg = 0;
- for (; p < NumParams; ++p) {
- ParmVarDecl *Param = FD->getParamDecl(p);
- if (!Param->hasDefaultArg() && !Param->isParameterPack()) {
+ // a previous declaration, unless the parameter was expanded from a
+ // parameter pack, or shall be a function parameter pack.
+ for (; ParamIdx < NumParams; ++ParamIdx) {
+ ParmVarDecl *Param = FD->getParamDecl(ParamIdx);
+ if (!Param->hasDefaultArg() && !Param->isParameterPack() &&
+ !(CurrentInstantiationScope &&
+ CurrentInstantiationScope->isLocalPackExpansion(Param))) {
if (Param->isInvalidDecl())
/* We already complained about this parameter. */;
else if (Param->getIdentifier())
@@ -1556,21 +1580,6 @@ void Sema::CheckCXXDefaultArguments(FunctionDecl *FD) {
else
Diag(Param->getLocation(),
diag::err_param_default_argument_missing);
-
- LastMissingDefaultArg = p;
- }
- }
-
- if (LastMissingDefaultArg > 0) {
- // Some default arguments were missing. Clear out all of the
- // default arguments up to (and including) the last missing
- // default argument, so that we leave the function parameters
- // in a semantically valid state.
- for (p = 0; p <= LastMissingDefaultArg; ++p) {
- ParmVarDecl *Param = FD->getParamDecl(p);
- if (Param->hasDefaultArg()) {
- Param->setDefaultArg(nullptr);
- }
}
}
}
@@ -1716,7 +1725,7 @@ bool Sema::CheckConstexprFunctionDefinition(const FunctionDecl *NewFD,
// - it shall not be virtual; (removed in C++20)
const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(NewFD);
if (Method && Method->isVirtual()) {
- if (getLangOpts().CPlusPlus2a) {
+ if (getLangOpts().CPlusPlus20) {
if (Kind == CheckConstexprKind::Diagnose)
Diag(Method->getLocation(), diag::warn_cxx17_compat_constexpr_virtual);
} else {
@@ -1856,11 +1865,11 @@ static bool CheckConstexprDeclStmt(Sema &SemaRef, const FunctionDecl *Dcl,
if (Kind == Sema::CheckConstexprKind::Diagnose) {
SemaRef.Diag(
VD->getLocation(),
- SemaRef.getLangOpts().CPlusPlus2a
+ SemaRef.getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_constexpr_local_var_no_init
: diag::ext_constexpr_local_var_no_init)
<< isa<CXXConstructorDecl>(Dcl);
- } else if (!SemaRef.getLangOpts().CPlusPlus2a) {
+ } else if (!SemaRef.getLangOpts().CPlusPlus20) {
return false;
}
continue;
@@ -1919,7 +1928,7 @@ static bool CheckConstexprCtorInitializer(Sema &SemaRef,
Sema::CheckConstexprKind Kind) {
// In C++20 onwards, there's nothing to check for validity.
if (Kind == Sema::CheckConstexprKind::CheckValid &&
- SemaRef.getLangOpts().CPlusPlus2a)
+ SemaRef.getLangOpts().CPlusPlus20)
return true;
if (Field->isInvalidDecl())
@@ -1941,14 +1950,14 @@ static bool CheckConstexprCtorInitializer(Sema &SemaRef,
if (Kind == Sema::CheckConstexprKind::Diagnose) {
if (!Diagnosed) {
SemaRef.Diag(Dcl->getLocation(),
- SemaRef.getLangOpts().CPlusPlus2a
+ SemaRef.getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_constexpr_ctor_missing_init
: diag::ext_constexpr_ctor_missing_init);
Diagnosed = true;
}
SemaRef.Diag(Field->getLocation(),
diag::note_constexpr_ctor_missing_init);
- } else if (!SemaRef.getLangOpts().CPlusPlus2a) {
+ } else if (!SemaRef.getLangOpts().CPlusPlus20) {
return false;
}
} else if (Field->isAnonymousStructOrUnion()) {
@@ -2132,14 +2141,14 @@ static bool CheckConstexprFunctionBody(Sema &SemaRef, const FunctionDecl *Dcl,
// apply the general constexpr rules.
switch (Kind) {
case Sema::CheckConstexprKind::CheckValid:
- if (!SemaRef.getLangOpts().CPlusPlus2a)
+ if (!SemaRef.getLangOpts().CPlusPlus20)
return false;
break;
case Sema::CheckConstexprKind::Diagnose:
SemaRef.Diag(Body->getBeginLoc(),
- !SemaRef.getLangOpts().CPlusPlus2a
- ? diag::ext_constexpr_function_try_block_cxx2a
+ !SemaRef.getLangOpts().CPlusPlus20
+ ? diag::ext_constexpr_function_try_block_cxx20
: diag::warn_cxx17_compat_constexpr_function_try_block)
<< isa<CXXConstructorDecl>(Dcl);
break;
@@ -2162,14 +2171,14 @@ static bool CheckConstexprFunctionBody(Sema &SemaRef, const FunctionDecl *Dcl,
if (Kind == Sema::CheckConstexprKind::CheckValid) {
// If this is only valid as an extension, report that we don't satisfy the
// constraints of the current language.
- if ((Cxx2aLoc.isValid() && !SemaRef.getLangOpts().CPlusPlus2a) ||
+ if ((Cxx2aLoc.isValid() && !SemaRef.getLangOpts().CPlusPlus20) ||
(Cxx1yLoc.isValid() && !SemaRef.getLangOpts().CPlusPlus17))
return false;
} else if (Cxx2aLoc.isValid()) {
SemaRef.Diag(Cxx2aLoc,
- SemaRef.getLangOpts().CPlusPlus2a
+ SemaRef.getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_constexpr_body_invalid_stmt
- : diag::ext_constexpr_body_invalid_stmt_cxx2a)
+ : diag::ext_constexpr_body_invalid_stmt_cxx20)
<< isa<CXXConstructorDecl>(Dcl);
} else if (Cxx1yLoc.isValid()) {
SemaRef.Diag(Cxx1yLoc,
@@ -2194,10 +2203,10 @@ static bool CheckConstexprFunctionBody(Sema &SemaRef, const FunctionDecl *Dcl,
if (Kind == Sema::CheckConstexprKind::Diagnose) {
SemaRef.Diag(
Dcl->getLocation(),
- SemaRef.getLangOpts().CPlusPlus2a
+ SemaRef.getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_constexpr_union_ctor_no_init
: diag::ext_constexpr_union_ctor_no_init);
- } else if (!SemaRef.getLangOpts().CPlusPlus2a) {
+ } else if (!SemaRef.getLangOpts().CPlusPlus20) {
return false;
}
}
@@ -2306,7 +2315,7 @@ static bool CheckConstexprFunctionBody(Sema &SemaRef, const FunctionDecl *Dcl,
!Expr::isPotentialConstantExpr(Dcl, Diags)) {
SemaRef.Diag(Dcl->getLocation(),
diag::ext_constexpr_function_never_constant_expr)
- << isa<CXXConstructorDecl>(Dcl);
+ << isa<CXXConstructorDecl>(Dcl) << Dcl->isConsteval();
for (size_t I = 0, N = Diags.size(); I != N; ++I)
SemaRef.Diag(Diags[I].first, Diags[I].second);
// Don't return false here: we allow this for compatibility in
@@ -2417,7 +2426,10 @@ Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc) {
QualType BaseType = TInfo->getType();
-
+ if (BaseType->containsErrors()) {
+ // Already emitted a diagnostic when parsing the error type.
+ return nullptr;
+ }
// C++ [class.union]p1:
// A union shall not have base classes.
if (Class->isUnion()) {
@@ -2821,13 +2833,13 @@ void Sema::BuildBasePathArray(const CXXBasePaths &Paths,
/// if there is an error, and Range is the source range to highlight
/// if there is an error.
///
-/// If either InaccessibleBaseID or AmbigiousBaseConvID are 0, then the
+/// If either InaccessibleBaseID or AmbiguousBaseConvID are 0, then the
/// diagnostic for the respective type of error will be suppressed, but the
/// check for ill-formed code will still be performed.
bool
Sema::CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
- unsigned AmbigiousBaseConvID,
+ unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
@@ -2853,7 +2865,7 @@ Sema::CheckDerivedToBaseConversion(QualType Derived, QualType Base,
for (const CXXBasePath &PossiblePath : Paths) {
if (PossiblePath.size() == 1) {
Path = &PossiblePath;
- if (AmbigiousBaseConvID)
+ if (AmbiguousBaseConvID)
Diag(Loc, diag::ext_ms_ambiguous_direct_base)
<< Base << Derived << Range;
break;
@@ -2881,7 +2893,7 @@ Sema::CheckDerivedToBaseConversion(QualType Derived, QualType Base,
return false;
}
- if (AmbigiousBaseConvID) {
+ if (AmbiguousBaseConvID) {
// We know that the derived-to-base conversion is ambiguous, and
// we're going to produce a diagnostic. Perform the derived-to-base
// search just one more time to compute all of the possible paths so
@@ -2900,7 +2912,7 @@ Sema::CheckDerivedToBaseConversion(QualType Derived, QualType Base,
// to each base class subobject.
std::string PathDisplayStr = getAmbiguousPathsDisplayString(Paths);
- Diag(Loc, AmbigiousBaseConvID)
+ Diag(Loc, AmbiguousBaseConvID)
<< Derived << Base << PathDisplayStr << Range << Name;
}
return true;
@@ -3033,7 +3045,7 @@ void Sema::CheckOverrideControl(NamedDecl *D) {
<< MD->getDeclName();
}
-void Sema::DiagnoseAbsenceOfOverrideControl(NamedDecl *D) {
+void Sema::DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent) {
if (D->isInvalidDecl() || D->hasAttr<OverrideAttr>())
return;
CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D);
@@ -3049,12 +3061,22 @@ void Sema::DiagnoseAbsenceOfOverrideControl(NamedDecl *D) {
return;
if (MD->size_overridden_methods() > 0) {
- unsigned DiagID = isa<CXXDestructorDecl>(MD)
- ? diag::warn_destructor_marked_not_override_overriding
- : diag::warn_function_marked_not_override_overriding;
- Diag(MD->getLocation(), DiagID) << MD->getDeclName();
- const CXXMethodDecl *OMD = *MD->begin_overridden_methods();
- Diag(OMD->getLocation(), diag::note_overridden_virtual_function);
+ auto EmitDiag = [&](unsigned DiagInconsistent, unsigned DiagSuggest) {
+ unsigned DiagID =
+ Inconsistent && !Diags.isIgnored(DiagInconsistent, MD->getLocation())
+ ? DiagInconsistent
+ : DiagSuggest;
+ Diag(MD->getLocation(), DiagID) << MD->getDeclName();
+ const CXXMethodDecl *OMD = *MD->begin_overridden_methods();
+ Diag(OMD->getLocation(), diag::note_overridden_virtual_function);
+ };
+ if (isa<CXXDestructorDecl>(MD))
+ EmitDiag(
+ diag::warn_inconsistent_destructor_marked_not_override_overriding,
+ diag::warn_suggest_destructor_marked_not_override_overriding);
+ else
+ EmitDiag(diag::warn_inconsistent_function_marked_not_override_overriding,
+ diag::warn_suggest_function_marked_not_override_overriding);
}
}
@@ -5443,6 +5465,15 @@ Sema::MarkBaseAndMemberDestructorsReferenced(SourceLocation Location,
// subobjects.
bool VisitVirtualBases = !ClassDecl->isAbstract();
+ // If the destructor exists and has already been marked used in the MS ABI,
+ // then virtual base destructors have already been checked and marked used.
+ // Skip checking them again to avoid duplicate diagnostics.
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ CXXDestructorDecl *Dtor = ClassDecl->getDestructor();
+ if (Dtor && Dtor->isUsed())
+ VisitVirtualBases = false;
+ }
+
llvm::SmallPtrSet<const RecordType *, 8> DirectVirtualBases;
// Bases.
@@ -5477,16 +5508,21 @@ Sema::MarkBaseAndMemberDestructorsReferenced(SourceLocation Location,
DiagnoseUseOfDecl(Dtor, Location);
}
- if (!VisitVirtualBases)
- return;
+ if (VisitVirtualBases)
+ MarkVirtualBaseDestructorsReferenced(Location, ClassDecl,
+ &DirectVirtualBases);
+}
+void Sema::MarkVirtualBaseDestructorsReferenced(
+ SourceLocation Location, CXXRecordDecl *ClassDecl,
+ llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases) {
// Virtual bases.
for (const auto &VBase : ClassDecl->vbases()) {
// Bases are always records in a well-formed non-dependent class.
const RecordType *RT = VBase.getType()->castAs<RecordType>();
- // Ignore direct virtual bases.
- if (DirectVirtualBases.count(RT))
+ // Ignore already visited direct virtual bases.
+ if (DirectVirtualBases && DirectVirtualBases->count(RT))
continue;
CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(RT->getDecl());
@@ -5788,6 +5824,23 @@ static void ReferenceDllExportedMembers(Sema &S, CXXRecordDecl *Class) {
// declaration.
return;
+ // Add a context note to explain how we got to any diagnostics produced below.
+ struct MarkingClassDllexported {
+ Sema &S;
+ MarkingClassDllexported(Sema &S, CXXRecordDecl *Class,
+ SourceLocation AttrLoc)
+ : S(S) {
+ Sema::CodeSynthesisContext Ctx;
+ Ctx.Kind = Sema::CodeSynthesisContext::MarkingClassDllexported;
+ Ctx.PointOfInstantiation = AttrLoc;
+ Ctx.Entity = Class;
+ S.pushCodeSynthesisContext(Ctx);
+ }
+ ~MarkingClassDllexported() {
+ S.popCodeSynthesisContext();
+ }
+ } MarkingDllexportedContext(S, Class, ClassAttr->getLocation());
+
if (S.Context.getTargetInfo().getTriple().isWindowsGNUEnvironment())
S.MarkVTableUsed(Class->getLocation(), Class, true);
@@ -5823,13 +5876,7 @@ static void ReferenceDllExportedMembers(Sema &S, CXXRecordDecl *Class) {
// defaulted methods, and the copy and move assignment operators. The
// latter are exported even if they are trivial, because the address of
// an operator can be taken and should compare equal across libraries.
- DiagnosticErrorTrap Trap(S.Diags);
S.MarkFunctionReferenced(Class->getLocation(), MD);
- if (Trap.hasErrorOccurred()) {
- S.Diag(ClassAttr->getLocation(), diag::note_due_to_dllexported_class)
- << Class << !S.getLangOpts().CPlusPlus11;
- break;
- }
// There is no later point when we will see the definition of this
// function, so pass it to the consumer now.
@@ -5877,6 +5924,123 @@ static void checkForMultipleExportedDefaultConstructors(Sema &S,
}
}
+static void checkCUDADeviceBuiltinSurfaceClassTemplate(Sema &S,
+ CXXRecordDecl *Class) {
+ bool ErrorReported = false;
+ auto reportIllegalClassTemplate = [&ErrorReported](Sema &S,
+ ClassTemplateDecl *TD) {
+ if (ErrorReported)
+ return;
+ S.Diag(TD->getLocation(),
+ diag::err_cuda_device_builtin_surftex_cls_template)
+ << /*surface*/ 0 << TD;
+ ErrorReported = true;
+ };
+
+ ClassTemplateDecl *TD = Class->getDescribedClassTemplate();
+ if (!TD) {
+ auto *SD = dyn_cast<ClassTemplateSpecializationDecl>(Class);
+ if (!SD) {
+ S.Diag(Class->getLocation(),
+ diag::err_cuda_device_builtin_surftex_ref_decl)
+ << /*surface*/ 0 << Class;
+ S.Diag(Class->getLocation(),
+ diag::note_cuda_device_builtin_surftex_should_be_template_class)
+ << Class;
+ return;
+ }
+ TD = SD->getSpecializedTemplate();
+ }
+
+ TemplateParameterList *Params = TD->getTemplateParameters();
+ unsigned N = Params->size();
+
+ if (N != 2) {
+ reportIllegalClassTemplate(S, TD);
+ S.Diag(TD->getLocation(),
+ diag::note_cuda_device_builtin_surftex_cls_should_have_n_args)
+ << TD << 2;
+ }
+ if (N > 0 && !isa<TemplateTypeParmDecl>(Params->getParam(0))) {
+ reportIllegalClassTemplate(S, TD);
+ S.Diag(TD->getLocation(),
+ diag::note_cuda_device_builtin_surftex_cls_should_have_match_arg)
+ << TD << /*1st*/ 0 << /*type*/ 0;
+ }
+ if (N > 1) {
+ auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Params->getParam(1));
+ if (!NTTP || !NTTP->getType()->isIntegralOrEnumerationType()) {
+ reportIllegalClassTemplate(S, TD);
+ S.Diag(TD->getLocation(),
+ diag::note_cuda_device_builtin_surftex_cls_should_have_match_arg)
+ << TD << /*2nd*/ 1 << /*integer*/ 1;
+ }
+ }
+}
+
+static void checkCUDADeviceBuiltinTextureClassTemplate(Sema &S,
+ CXXRecordDecl *Class) {
+ bool ErrorReported = false;
+ auto reportIllegalClassTemplate = [&ErrorReported](Sema &S,
+ ClassTemplateDecl *TD) {
+ if (ErrorReported)
+ return;
+ S.Diag(TD->getLocation(),
+ diag::err_cuda_device_builtin_surftex_cls_template)
+ << /*texture*/ 1 << TD;
+ ErrorReported = true;
+ };
+
+ ClassTemplateDecl *TD = Class->getDescribedClassTemplate();
+ if (!TD) {
+ auto *SD = dyn_cast<ClassTemplateSpecializationDecl>(Class);
+ if (!SD) {
+ S.Diag(Class->getLocation(),
+ diag::err_cuda_device_builtin_surftex_ref_decl)
+ << /*texture*/ 1 << Class;
+ S.Diag(Class->getLocation(),
+ diag::note_cuda_device_builtin_surftex_should_be_template_class)
+ << Class;
+ return;
+ }
+ TD = SD->getSpecializedTemplate();
+ }
+
+ TemplateParameterList *Params = TD->getTemplateParameters();
+ unsigned N = Params->size();
+
+ if (N != 3) {
+ reportIllegalClassTemplate(S, TD);
+ S.Diag(TD->getLocation(),
+ diag::note_cuda_device_builtin_surftex_cls_should_have_n_args)
+ << TD << 3;
+ }
+ if (N > 0 && !isa<TemplateTypeParmDecl>(Params->getParam(0))) {
+ reportIllegalClassTemplate(S, TD);
+ S.Diag(TD->getLocation(),
+ diag::note_cuda_device_builtin_surftex_cls_should_have_match_arg)
+ << TD << /*1st*/ 0 << /*type*/ 0;
+ }
+ if (N > 1) {
+ auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Params->getParam(1));
+ if (!NTTP || !NTTP->getType()->isIntegralOrEnumerationType()) {
+ reportIllegalClassTemplate(S, TD);
+ S.Diag(TD->getLocation(),
+ diag::note_cuda_device_builtin_surftex_cls_should_have_match_arg)
+ << TD << /*2nd*/ 1 << /*integer*/ 1;
+ }
+ }
+ if (N > 2) {
+ auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Params->getParam(2));
+ if (!NTTP || !NTTP->getType()->isIntegralOrEnumerationType()) {
+ reportIllegalClassTemplate(S, TD);
+ S.Diag(TD->getLocation(),
+ diag::note_cuda_device_builtin_surftex_cls_should_have_match_arg)
+ << TD << /*3rd*/ 2 << /*integer*/ 1;
+ }
+ }
+}
+
void Sema::checkClassLevelCodeSegAttribute(CXXRecordDecl *Class) {
// Mark any compiler-generated routines with the implicit code_seg attribute.
for (auto *Method : Class->methods()) {
@@ -6151,7 +6315,7 @@ Sema::getDefaultedFunctionKind(const FunctionDecl *FD) {
case OO_Spaceship:
// No point allowing this if <=> doesn't exist in the current language mode.
- if (!getLangOpts().CPlusPlus2a)
+ if (!getLangOpts().CPlusPlus20)
break;
return DefaultedComparisonKind::ThreeWay;
@@ -6160,7 +6324,7 @@ Sema::getDefaultedFunctionKind(const FunctionDecl *FD) {
case OO_Greater:
case OO_GreaterEqual:
// No point allowing this if <=> doesn't exist in the current language mode.
- if (!getLangOpts().CPlusPlus2a)
+ if (!getLangOpts().CPlusPlus20)
break;
return DefaultedComparisonKind::Relational;
@@ -6172,27 +6336,31 @@ Sema::getDefaultedFunctionKind(const FunctionDecl *FD) {
return DefaultedFunctionKind();
}
-static void DefineImplicitSpecialMember(Sema &S, CXXMethodDecl *MD,
- SourceLocation DefaultLoc) {
- switch (S.getSpecialMember(MD)) {
+static void DefineDefaultedFunction(Sema &S, FunctionDecl *FD,
+ SourceLocation DefaultLoc) {
+ Sema::DefaultedFunctionKind DFK = S.getDefaultedFunctionKind(FD);
+ if (DFK.isComparison())
+ return S.DefineDefaultedComparison(DefaultLoc, FD, DFK.asComparison());
+
+ switch (DFK.asSpecialMember()) {
case Sema::CXXDefaultConstructor:
S.DefineImplicitDefaultConstructor(DefaultLoc,
- cast<CXXConstructorDecl>(MD));
+ cast<CXXConstructorDecl>(FD));
break;
case Sema::CXXCopyConstructor:
- S.DefineImplicitCopyConstructor(DefaultLoc, cast<CXXConstructorDecl>(MD));
+ S.DefineImplicitCopyConstructor(DefaultLoc, cast<CXXConstructorDecl>(FD));
break;
case Sema::CXXCopyAssignment:
- S.DefineImplicitCopyAssignment(DefaultLoc, MD);
+ S.DefineImplicitCopyAssignment(DefaultLoc, cast<CXXMethodDecl>(FD));
break;
case Sema::CXXDestructor:
- S.DefineImplicitDestructor(DefaultLoc, cast<CXXDestructorDecl>(MD));
+ S.DefineImplicitDestructor(DefaultLoc, cast<CXXDestructorDecl>(FD));
break;
case Sema::CXXMoveConstructor:
- S.DefineImplicitMoveConstructor(DefaultLoc, cast<CXXConstructorDecl>(MD));
+ S.DefineImplicitMoveConstructor(DefaultLoc, cast<CXXConstructorDecl>(FD));
break;
case Sema::CXXMoveAssignment:
- S.DefineImplicitMoveAssignment(DefaultLoc, MD);
+ S.DefineImplicitMoveAssignment(DefaultLoc, cast<CXXMethodDecl>(FD));
break;
case Sema::CXXInvalid:
llvm_unreachable("Invalid special member.");
@@ -6313,6 +6481,27 @@ static bool canPassInRegisters(Sema &S, CXXRecordDecl *D,
return HasNonDeletedCopyOrMove;
}
+/// Report an error regarding overriding, along with any relevant
+/// overridden methods.
+///
+/// \param DiagID the primary error to report.
+/// \param MD the overriding method.
+static bool
+ReportOverrides(Sema &S, unsigned DiagID, const CXXMethodDecl *MD,
+ llvm::function_ref<bool(const CXXMethodDecl *)> Report) {
+ bool IssuedDiagnostic = false;
+ for (const CXXMethodDecl *O : MD->overridden_methods()) {
+ if (Report(O)) {
+ if (!IssuedDiagnostic) {
+ S.Diag(MD->getLocation(), DiagID) << MD->getDeclName();
+ IssuedDiagnostic = true;
+ }
+ S.Diag(O->getLocation(), diag::note_overridden_virtual_function);
+ }
+ }
+ return IssuedDiagnostic;
+}
+
/// Perform semantic checks on a class definition that has been
/// completing, introducing implicitly-declared members, checking for
/// abstract types, etc.
@@ -6427,21 +6616,64 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
// primary comparison functions (==, <=>).
llvm::SmallVector<FunctionDecl*, 5> DefaultedSecondaryComparisons;
- auto CheckForDefaultedFunction = [&](FunctionDecl *FD) {
- if (!FD || FD->isInvalidDecl() || !FD->isExplicitlyDefaulted())
+ // Perform checks that can't be done until we know all the properties of a
+ // member function (whether it's defaulted, deleted, virtual, overriding,
+ // ...).
+ auto CheckCompletedMemberFunction = [&](CXXMethodDecl *MD) {
+ // A static function cannot override anything.
+ if (MD->getStorageClass() == SC_Static) {
+ if (ReportOverrides(*this, diag::err_static_overrides_virtual, MD,
+ [](const CXXMethodDecl *) { return true; }))
+ return;
+ }
+
+ // A deleted function cannot override a non-deleted function and vice
+ // versa.
+ if (ReportOverrides(*this,
+ MD->isDeleted() ? diag::err_deleted_override
+ : diag::err_non_deleted_override,
+ MD, [&](const CXXMethodDecl *V) {
+ return MD->isDeleted() != V->isDeleted();
+ })) {
+ if (MD->isDefaulted() && MD->isDeleted())
+ // Explain why this defaulted function was deleted.
+ DiagnoseDeletedDefaultedFunction(MD);
return;
+ }
+
+ // A consteval function cannot override a non-consteval function and vice
+ // versa.
+ if (ReportOverrides(*this,
+ MD->isConsteval() ? diag::err_consteval_override
+ : diag::err_non_consteval_override,
+ MD, [&](const CXXMethodDecl *V) {
+ return MD->isConsteval() != V->isConsteval();
+ })) {
+ if (MD->isDefaulted() && MD->isDeleted())
+ // Explain why this defaulted function was deleted.
+ DiagnoseDeletedDefaultedFunction(MD);
+ return;
+ }
+ };
+
+ auto CheckForDefaultedFunction = [&](FunctionDecl *FD) -> bool {
+ if (!FD || FD->isInvalidDecl() || !FD->isExplicitlyDefaulted())
+ return false;
DefaultedFunctionKind DFK = getDefaultedFunctionKind(FD);
if (DFK.asComparison() == DefaultedComparisonKind::NotEqual ||
- DFK.asComparison() == DefaultedComparisonKind::Relational)
+ DFK.asComparison() == DefaultedComparisonKind::Relational) {
DefaultedSecondaryComparisons.push_back(FD);
- else
- CheckExplicitlyDefaultedFunction(S, FD);
+ return true;
+ }
+
+ CheckExplicitlyDefaultedFunction(S, FD);
+ return false;
};
auto CompleteMemberFunction = [&](CXXMethodDecl *M) {
// Check whether the explicitly-defaulted members are valid.
- CheckForDefaultedFunction(M);
+ bool Incomplete = CheckForDefaultedFunction(M);
// Skip the rest of the checks for a member of a dependent class.
if (Record->isDependentType())
@@ -6488,7 +6720,10 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
// function right away.
// FIXME: We can defer doing this until the vtable is marked as used.
if (M->isDefaulted() && M->isConstexpr() && M->size_overridden_methods())
- DefineImplicitSpecialMember(*this, M, M->getLocation());
+ DefineDefaultedFunction(*this, M, M->getLocation());
+
+ if (!Incomplete)
+ CheckCompletedMemberFunction(M);
};
// Check the destructor before any other member function. We need to
@@ -6524,19 +6759,21 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
}
}
- if (HasMethodWithOverrideControl &&
- HasOverridingMethodWithoutOverrideControl) {
- // At least one method has the 'override' control declared.
- // Diagnose all other overridden methods which do not have 'override'
- // specified on them.
+ if (HasOverridingMethodWithoutOverrideControl) {
+ bool HasInconsistentOverrideControl = HasMethodWithOverrideControl;
for (auto *M : Record->methods())
- DiagnoseAbsenceOfOverrideControl(M);
+ DiagnoseAbsenceOfOverrideControl(M, HasInconsistentOverrideControl);
}
// Check the defaulted secondary comparisons after any other member functions.
- for (FunctionDecl *FD : DefaultedSecondaryComparisons)
+ for (FunctionDecl *FD : DefaultedSecondaryComparisons) {
CheckExplicitlyDefaultedFunction(S, FD);
+ // If this is a member function, we deferred checking it until now.
+ if (auto *MD = dyn_cast<CXXMethodDecl>(FD))
+ CheckCompletedMemberFunction(MD);
+ }
+
// ms_struct is a request to use the same ABI rules as MSVC. Check
// whether this class uses any C++ features that are implemented
// completely differently in MSVC, and if so, emit a diagnostic.
@@ -6546,7 +6783,11 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
// headers, sweeping up a bunch of types that the project doesn't
// really rely on MSVC-compatible layout for. We must therefore
// support "ms_struct except for C++ stuff" as a secondary ABI.
- if (Record->isMsStruct(Context) &&
+ // Don't emit this diagnostic if the feature was enabled as a
+ // language option (as opposed to via a pragma or attribute), as
+ // the option -mms-bitfields otherwise essentially makes it impossible
+ // to build C++ code, unless this diagnostic is turned off.
+ if (Record->isMsStruct(Context) && !Context.getLangOpts().MSBitfields &&
(Record->isPolymorphic() || Record->getNumBases())) {
Diag(Record->getLocation(), diag::warn_cxx_ms_struct);
}
@@ -6581,6 +6822,13 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
// is especially required for cases like vtable assumption loads.
MarkVTableUsed(Record->getInnerLocStart(), Record);
}
+
+ if (getLangOpts().CUDA) {
+ if (Record->hasAttr<CUDADeviceBuiltinSurfaceTypeAttr>())
+ checkCUDADeviceBuiltinSurfaceClassTemplate(*this, Record);
+ else if (Record->hasAttr<CUDADeviceBuiltinTextureTypeAttr>())
+ checkCUDADeviceBuiltinTextureClassTemplate(*this, Record);
+ }
}
/// Look up the special member function that would be called by a special
@@ -6955,7 +7203,7 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
// C++2a changes the second bullet to instead delete the function if it's
// defaulted on its first declaration, unless it's "an assignment operator,
// and its return type differs or its parameter type is not a reference".
- bool DeleteOnTypeMismatch = getLangOpts().CPlusPlus2a && First;
+ bool DeleteOnTypeMismatch = getLangOpts().CPlusPlus20 && First;
bool ShouldDeleteForTypeMismatch = false;
unsigned ExpectedParams = 1;
if (CSM == CXXDefaultConstructor || CSM == CXXDestructor)
@@ -7065,7 +7313,7 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
// FIXME: This should not apply if the member is deleted.
bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, RD, CSM,
HasConstParam);
- if ((getLangOpts().CPlusPlus2a ||
+ if ((getLangOpts().CPlusPlus20 ||
(getLangOpts().CPlusPlus14 ? !isa<CXXDestructorDecl>(MD)
: isa<CXXConstructorDecl>(MD))) &&
MD->isConstexpr() && !Constexpr &&
@@ -7083,7 +7331,9 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
// If a function is explicitly defaulted on its first declaration, it is
// implicitly considered to be constexpr if the implicit declaration
// would be.
- MD->setConstexprKind(Constexpr ? CSK_constexpr : CSK_unspecified);
+ MD->setConstexprKind(
+ Constexpr ? (MD->isConsteval() ? CSK_consteval : CSK_constexpr)
+ : CSK_unspecified);
if (!Type->hasExceptionSpec()) {
// C++2a [except.spec]p3:
@@ -7373,7 +7623,14 @@ private:
/// resolution [...]
CandidateSet.exclude(FD);
- S.LookupOverloadedBinOp(CandidateSet, OO, Fns, Args);
+ if (Args[0]->getType()->isOverloadableType())
+ S.LookupOverloadedBinOp(CandidateSet, OO, Fns, Args);
+ else {
+ // FIXME: We determine whether this is a valid expression by checking to
+ // see if there's a viable builtin operator candidate for it. That isn't
+ // really what the rules ask us to do, but should give the right results.
+ S.AddBuiltinOperatorCandidates(OO, FD->getLocation(), Args, CandidateSet);
+ }
Result R;
@@ -7438,6 +7695,31 @@ private:
if (OO == OO_Spaceship && FD->getReturnType()->isUndeducedAutoType()) {
if (auto *BestFD = Best->Function) {
+ // If any callee has an undeduced return type, deduce it now.
+ // FIXME: It's not clear how a failure here should be handled. For
+ // now, we produce an eager diagnostic, because that is forward
+ // compatible with most (all?) other reasonable options.
+ if (BestFD->getReturnType()->isUndeducedType() &&
+ S.DeduceReturnType(BestFD, FD->getLocation(),
+ /*Diagnose=*/false)) {
+ // Don't produce a duplicate error when asked to explain why the
+ // comparison is deleted: we diagnosed that when initially checking
+ // the defaulted operator.
+ if (Diagnose == NoDiagnostics) {
+ S.Diag(
+ FD->getLocation(),
+ diag::err_defaulted_comparison_cannot_deduce_undeduced_auto)
+ << Subobj.Kind << Subobj.Decl;
+ S.Diag(
+ Subobj.Loc,
+ diag::note_defaulted_comparison_cannot_deduce_undeduced_auto)
+ << Subobj.Kind << Subobj.Decl;
+ S.Diag(BestFD->getLocation(),
+ diag::note_defaulted_comparison_cannot_deduce_callee)
+ << Subobj.Kind << Subobj.Decl;
+ }
+ return Result::deleted();
+ }
if (auto *Info = S.Context.CompCategories.lookupInfoForType(
BestFD->getCallResultType())) {
R.Category = Info->Kind;
@@ -7826,10 +8108,14 @@ private:
return StmtError();
OverloadedOperatorKind OO = FD->getOverloadedOperator();
- ExprResult Op = S.CreateOverloadedBinOp(
- Loc, BinaryOperator::getOverloadedOpcode(OO), Fns,
- Obj.first.get(), Obj.second.get(), /*PerformADL=*/true,
- /*AllowRewrittenCandidates=*/true, FD);
+ BinaryOperatorKind Opc = BinaryOperator::getOverloadedOpcode(OO);
+ ExprResult Op;
+ if (Type->isOverloadableType())
+ Op = S.CreateOverloadedBinOp(Loc, Opc, Fns, Obj.first.get(),
+ Obj.second.get(), /*PerformADL=*/true,
+ /*AllowRewrittenCandidates=*/true, FD);
+ else
+ Op = S.CreateBuiltinBinOp(Loc, Opc, Obj.first.get(), Obj.second.get());
if (Op.isInvalid())
return StmtError();
@@ -7869,8 +8155,12 @@ private:
llvm::APInt ZeroVal(S.Context.getIntWidth(S.Context.IntTy), 0);
Expr *Zero =
IntegerLiteral::Create(S.Context, ZeroVal, S.Context.IntTy, Loc);
- ExprResult Comp = S.CreateOverloadedBinOp(Loc, BO_NE, Fns, VDRef.get(),
- Zero, true, true, FD);
+ ExprResult Comp;
+ if (VDRef.get()->getType()->isOverloadableType())
+ Comp = S.CreateOverloadedBinOp(Loc, BO_NE, Fns, VDRef.get(), Zero, true,
+ true, FD);
+ else
+ Comp = S.CreateBuiltinBinOp(Loc, BO_NE, VDRef.get(), Zero);
if (Comp.isInvalid())
return StmtError();
Sema::ConditionResult Cond = S.ActOnCondition(
@@ -9423,27 +9713,57 @@ void Sema::DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD) {
}
void Sema::checkIllFormedTrivialABIStruct(CXXRecordDecl &RD) {
- auto PrintDiagAndRemoveAttr = [&]() {
+ auto PrintDiagAndRemoveAttr = [&](unsigned N) {
// No diagnostics if this is a template instantiation.
- if (!isTemplateInstantiation(RD.getTemplateSpecializationKind()))
+ if (!isTemplateInstantiation(RD.getTemplateSpecializationKind())) {
Diag(RD.getAttr<TrivialABIAttr>()->getLocation(),
diag::ext_cannot_use_trivial_abi) << &RD;
+ Diag(RD.getAttr<TrivialABIAttr>()->getLocation(),
+ diag::note_cannot_use_trivial_abi_reason) << &RD << N;
+ }
RD.dropAttr<TrivialABIAttr>();
};
+ // Ill-formed if the copy and move constructors are deleted.
+ auto HasNonDeletedCopyOrMoveConstructor = [&]() {
+ // If the type is dependent, then assume it might have
+ // implicit copy or move ctor because we won't know yet at this point.
+ if (RD.isDependentType())
+ return true;
+ if (RD.needsImplicitCopyConstructor() &&
+ !RD.defaultedCopyConstructorIsDeleted())
+ return true;
+ if (RD.needsImplicitMoveConstructor() &&
+ !RD.defaultedMoveConstructorIsDeleted())
+ return true;
+ for (const CXXConstructorDecl *CD : RD.ctors())
+ if (CD->isCopyOrMoveConstructor() && !CD->isDeleted())
+ return true;
+ return false;
+ };
+
+ if (!HasNonDeletedCopyOrMoveConstructor()) {
+ PrintDiagAndRemoveAttr(0);
+ return;
+ }
+
// Ill-formed if the struct has virtual functions.
if (RD.isPolymorphic()) {
- PrintDiagAndRemoveAttr();
+ PrintDiagAndRemoveAttr(1);
return;
}
for (const auto &B : RD.bases()) {
// Ill-formed if the base class is non-trivial for the purpose of calls or a
// virtual base.
- if ((!B.getType()->isDependentType() &&
- !B.getType()->getAsCXXRecordDecl()->canPassInRegisters()) ||
- B.isVirtual()) {
- PrintDiagAndRemoveAttr();
+ if (!B.getType()->isDependentType() &&
+ !B.getType()->getAsCXXRecordDecl()->canPassInRegisters()) {
+ PrintDiagAndRemoveAttr(2);
+ return;
+ }
+
+ if (B.isVirtual()) {
+ PrintDiagAndRemoveAttr(3);
return;
}
}
@@ -9453,14 +9773,14 @@ void Sema::checkIllFormedTrivialABIStruct(CXXRecordDecl &RD) {
// non-trivial for the purpose of calls.
QualType FT = FD->getType();
if (FT.getObjCLifetime() == Qualifiers::OCL_Weak) {
- PrintDiagAndRemoveAttr();
+ PrintDiagAndRemoveAttr(4);
return;
}
if (const auto *RT = FT->getBaseElementTypeUnsafe()->getAs<RecordType>())
if (!RT->isDependentType() &&
!cast<CXXRecordDecl>(RT->getDecl())->canPassInRegisters()) {
- PrintDiagAndRemoveAttr();
+ PrintDiagAndRemoveAttr(5);
return;
}
}
@@ -9533,86 +9853,95 @@ static void findImplicitlyDeclaredEqualityComparisons(
/// [special]p1). This routine can only be executed just before the
/// definition of the class is complete.
void Sema::AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl) {
- if (ClassDecl->needsImplicitDefaultConstructor()) {
- ++getASTContext().NumImplicitDefaultConstructors;
+ // Don't add implicit special members to templated classes.
+ // FIXME: This means unqualified lookups for 'operator=' within a class
+ // template don't work properly.
+ if (!ClassDecl->isDependentType()) {
+ if (ClassDecl->needsImplicitDefaultConstructor()) {
+ ++getASTContext().NumImplicitDefaultConstructors;
- if (ClassDecl->hasInheritedConstructor())
- DeclareImplicitDefaultConstructor(ClassDecl);
- }
+ if (ClassDecl->hasInheritedConstructor())
+ DeclareImplicitDefaultConstructor(ClassDecl);
+ }
- if (ClassDecl->needsImplicitCopyConstructor()) {
- ++getASTContext().NumImplicitCopyConstructors;
+ if (ClassDecl->needsImplicitCopyConstructor()) {
+ ++getASTContext().NumImplicitCopyConstructors;
- // If the properties or semantics of the copy constructor couldn't be
- // determined while the class was being declared, force a declaration
- // of it now.
- if (ClassDecl->needsOverloadResolutionForCopyConstructor() ||
- ClassDecl->hasInheritedConstructor())
- DeclareImplicitCopyConstructor(ClassDecl);
- // For the MS ABI we need to know whether the copy ctor is deleted. A
- // prerequisite for deleting the implicit copy ctor is that the class has a
- // move ctor or move assignment that is either user-declared or whose
- // semantics are inherited from a subobject. FIXME: We should provide a more
- // direct way for CodeGen to ask whether the constructor was deleted.
- else if (Context.getTargetInfo().getCXXABI().isMicrosoft() &&
- (ClassDecl->hasUserDeclaredMoveConstructor() ||
- ClassDecl->needsOverloadResolutionForMoveConstructor() ||
- ClassDecl->hasUserDeclaredMoveAssignment() ||
- ClassDecl->needsOverloadResolutionForMoveAssignment()))
- DeclareImplicitCopyConstructor(ClassDecl);
- }
+ // If the properties or semantics of the copy constructor couldn't be
+ // determined while the class was being declared, force a declaration
+ // of it now.
+ if (ClassDecl->needsOverloadResolutionForCopyConstructor() ||
+ ClassDecl->hasInheritedConstructor())
+ DeclareImplicitCopyConstructor(ClassDecl);
+ // For the MS ABI we need to know whether the copy ctor is deleted. A
+ // prerequisite for deleting the implicit copy ctor is that the class has
+ // a move ctor or move assignment that is either user-declared or whose
+ // semantics are inherited from a subobject. FIXME: We should provide a
+ // more direct way for CodeGen to ask whether the constructor was deleted.
+ else if (Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ (ClassDecl->hasUserDeclaredMoveConstructor() ||
+ ClassDecl->needsOverloadResolutionForMoveConstructor() ||
+ ClassDecl->hasUserDeclaredMoveAssignment() ||
+ ClassDecl->needsOverloadResolutionForMoveAssignment()))
+ DeclareImplicitCopyConstructor(ClassDecl);
+ }
- if (getLangOpts().CPlusPlus11 && ClassDecl->needsImplicitMoveConstructor()) {
- ++getASTContext().NumImplicitMoveConstructors;
+ if (getLangOpts().CPlusPlus11 &&
+ ClassDecl->needsImplicitMoveConstructor()) {
+ ++getASTContext().NumImplicitMoveConstructors;
- if (ClassDecl->needsOverloadResolutionForMoveConstructor() ||
- ClassDecl->hasInheritedConstructor())
- DeclareImplicitMoveConstructor(ClassDecl);
- }
+ if (ClassDecl->needsOverloadResolutionForMoveConstructor() ||
+ ClassDecl->hasInheritedConstructor())
+ DeclareImplicitMoveConstructor(ClassDecl);
+ }
- if (ClassDecl->needsImplicitCopyAssignment()) {
- ++getASTContext().NumImplicitCopyAssignmentOperators;
+ if (ClassDecl->needsImplicitCopyAssignment()) {
+ ++getASTContext().NumImplicitCopyAssignmentOperators;
- // If we have a dynamic class, then the copy assignment operator may be
- // virtual, so we have to declare it immediately. This ensures that, e.g.,
- // it shows up in the right place in the vtable and that we diagnose
- // problems with the implicit exception specification.
- if (ClassDecl->isDynamicClass() ||
- ClassDecl->needsOverloadResolutionForCopyAssignment() ||
- ClassDecl->hasInheritedAssignment())
- DeclareImplicitCopyAssignment(ClassDecl);
- }
+ // If we have a dynamic class, then the copy assignment operator may be
+ // virtual, so we have to declare it immediately. This ensures that, e.g.,
+ // it shows up in the right place in the vtable and that we diagnose
+ // problems with the implicit exception specification.
+ if (ClassDecl->isDynamicClass() ||
+ ClassDecl->needsOverloadResolutionForCopyAssignment() ||
+ ClassDecl->hasInheritedAssignment())
+ DeclareImplicitCopyAssignment(ClassDecl);
+ }
- if (getLangOpts().CPlusPlus11 && ClassDecl->needsImplicitMoveAssignment()) {
- ++getASTContext().NumImplicitMoveAssignmentOperators;
+ if (getLangOpts().CPlusPlus11 && ClassDecl->needsImplicitMoveAssignment()) {
+ ++getASTContext().NumImplicitMoveAssignmentOperators;
- // Likewise for the move assignment operator.
- if (ClassDecl->isDynamicClass() ||
- ClassDecl->needsOverloadResolutionForMoveAssignment() ||
- ClassDecl->hasInheritedAssignment())
- DeclareImplicitMoveAssignment(ClassDecl);
- }
+ // Likewise for the move assignment operator.
+ if (ClassDecl->isDynamicClass() ||
+ ClassDecl->needsOverloadResolutionForMoveAssignment() ||
+ ClassDecl->hasInheritedAssignment())
+ DeclareImplicitMoveAssignment(ClassDecl);
+ }
- if (ClassDecl->needsImplicitDestructor()) {
- ++getASTContext().NumImplicitDestructors;
+ if (ClassDecl->needsImplicitDestructor()) {
+ ++getASTContext().NumImplicitDestructors;
- // If we have a dynamic class, then the destructor may be virtual, so we
- // have to declare the destructor immediately. This ensures that, e.g., it
- // shows up in the right place in the vtable and that we diagnose problems
- // with the implicit exception specification.
- if (ClassDecl->isDynamicClass() ||
- ClassDecl->needsOverloadResolutionForDestructor())
- DeclareImplicitDestructor(ClassDecl);
+ // If we have a dynamic class, then the destructor may be virtual, so we
+ // have to declare the destructor immediately. This ensures that, e.g., it
+ // shows up in the right place in the vtable and that we diagnose problems
+ // with the implicit exception specification.
+ if (ClassDecl->isDynamicClass() ||
+ ClassDecl->needsOverloadResolutionForDestructor())
+ DeclareImplicitDestructor(ClassDecl);
+ }
}
// C++2a [class.compare.default]p3:
// If the member-specification does not explicitly declare any member or
// friend named operator==, an == operator function is declared implicitly
- // for each defaulted three-way comparison operator function defined in the
- // member-specification
+ // for each defaulted three-way comparison operator function defined in
+ // the member-specification
// FIXME: Consider doing this lazily.
- if (getLangOpts().CPlusPlus2a) {
- llvm::SmallVector<FunctionDecl*, 4> DefaultedSpaceships;
+ // We do this during the initial parse for a class template, not during
+ // instantiation, so that we can handle unqualified lookups for 'operator=='
+ // when parsing the template.
+ if (getLangOpts().CPlusPlus20 && !inTemplateInstantiation()) {
+ llvm::SmallVector<FunctionDecl *, 4> DefaultedSpaceships;
findImplicitlyDeclaredEqualityComparisons(Context, ClassDecl,
DefaultedSpaceships);
for (auto *FD : DefaultedSpaceships)
@@ -9620,19 +9949,17 @@ void Sema::AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl) {
}
}
-unsigned Sema::ActOnReenterTemplateScope(Scope *S, Decl *D) {
+unsigned
+Sema::ActOnReenterTemplateScope(Decl *D,
+ llvm::function_ref<Scope *()> EnterScope) {
if (!D)
return 0;
+ AdjustDeclIfTemplate(D);
- // The order of template parameters is not important here. All names
- // get added to the same scope.
+ // In order to get name lookup right, reenter template scopes in order from
+ // outermost to innermost.
SmallVector<TemplateParameterList *, 4> ParameterLists;
-
- if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D))
- D = TD->getTemplatedDecl();
-
- if (auto *PSD = dyn_cast<ClassTemplatePartialSpecializationDecl>(D))
- ParameterLists.push_back(PSD->getTemplateParameters());
+ DeclContext *LookupDC = dyn_cast<DeclContext>(D);
if (DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(D)) {
for (unsigned i = 0; i < DD->getNumTemplateParameterLists(); ++i)
@@ -9641,31 +9968,49 @@ unsigned Sema::ActOnReenterTemplateScope(Scope *S, Decl *D) {
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
if (FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
ParameterLists.push_back(FTD->getTemplateParameters());
- }
- }
+ } else if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ LookupDC = VD->getDeclContext();
- if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ if (VarTemplateDecl *VTD = VD->getDescribedVarTemplate())
+ ParameterLists.push_back(VTD->getTemplateParameters());
+ else if (auto *PSD = dyn_cast<VarTemplatePartialSpecializationDecl>(D))
+ ParameterLists.push_back(PSD->getTemplateParameters());
+ }
+ } else if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
for (unsigned i = 0; i < TD->getNumTemplateParameterLists(); ++i)
ParameterLists.push_back(TD->getTemplateParameterList(i));
if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(TD)) {
if (ClassTemplateDecl *CTD = RD->getDescribedClassTemplate())
ParameterLists.push_back(CTD->getTemplateParameters());
+ else if (auto *PSD = dyn_cast<ClassTemplatePartialSpecializationDecl>(D))
+ ParameterLists.push_back(PSD->getTemplateParameters());
}
}
+ // FIXME: Alias declarations and concepts.
unsigned Count = 0;
+ Scope *InnermostTemplateScope = nullptr;
for (TemplateParameterList *Params : ParameterLists) {
- if (Params->size() > 0)
- // Ignore explicit specializations; they don't contribute to the template
- // depth.
- ++Count;
+ // Ignore explicit specializations; they don't contribute to the template
+ // depth.
+ if (Params->size() == 0)
+ continue;
+
+ InnermostTemplateScope = EnterScope();
for (NamedDecl *Param : *Params) {
if (Param->getDeclName()) {
- S->AddDecl(Param);
+ InnermostTemplateScope->AddDecl(Param);
IdResolver.AddDecl(Param);
}
}
+ ++Count;
+ }
+
+ // Associate the new template scopes with the corresponding entities.
+ if (InnermostTemplateScope) {
+ assert(LookupDC && "no enclosing DeclContext for template lookup");
+ EnterTemplatedContext(InnermostTemplateScope, LookupDC);
}
return Count;
@@ -9717,11 +10062,6 @@ void Sema::ActOnDelayedCXXMethodParameter(Scope *S, Decl *ParamD) {
ParmVarDecl *Param = cast<ParmVarDecl>(ParamD);
- // If this parameter has an unparsed default argument, clear it out
- // to make way for the parsed default argument.
- if (Param->hasUnparsedDefaultArg())
- Param->setDefaultArg(nullptr);
-
S->AddDecl(Param);
if (Param->getDeclName())
IdResolver.AddDecl(Param);
@@ -9855,11 +10195,9 @@ void Sema::CheckConstructor(CXXConstructorDecl *Constructor) {
// either there are no other parameters or else all other
// parameters have default arguments.
if (!Constructor->isInvalidDecl() &&
- ((Constructor->getNumParams() == 1) ||
- (Constructor->getNumParams() > 1 &&
- Constructor->getParamDecl(1)->hasDefaultArg())) &&
- Constructor->getTemplateSpecializationKind()
- != TSK_ImplicitInstantiation) {
+ Constructor->hasOneParamOrDefaultArgs() &&
+ Constructor->getTemplateSpecializationKind() !=
+ TSK_ImplicitInstantiation) {
QualType ParamType = Constructor->getParamDecl(0)->getType();
QualType ClassTy = Context.getTagDeclType(ClassDecl);
if (Context.getCanonicalType(ParamType).getUnqualifiedType() == ClassTy) {
@@ -9944,12 +10282,12 @@ QualType Sema::CheckDestructorDeclarator(Declarator &D, QualType R,
// declaration.
QualType DeclaratorType = GetTypeFromParser(D.getName().DestructorName);
if (const TypedefType *TT = DeclaratorType->getAs<TypedefType>())
- Diag(D.getIdentifierLoc(), diag::err_destructor_typedef_name)
+ Diag(D.getIdentifierLoc(), diag::ext_destructor_typedef_name)
<< DeclaratorType << isa<TypeAliasDecl>(TT->getDecl());
else if (const TemplateSpecializationType *TST =
DeclaratorType->getAs<TemplateSpecializationType>())
if (TST->isTypeAlias())
- Diag(D.getIdentifierLoc(), diag::err_destructor_typedef_name)
+ Diag(D.getIdentifierLoc(), diag::ext_destructor_typedef_name)
<< DeclaratorType << 1;
// C++ [class.dtor]p2:
@@ -10211,7 +10549,7 @@ void Sema::CheckConversionDeclarator(Declarator &D, QualType &R,
R = Context.getFunctionType(ConvType, None, Proto->getExtProtoInfo());
// C++0x explicit conversion operators.
- if (DS.hasExplicitSpecifier() && !getLangOpts().CPlusPlus2a)
+ if (DS.hasExplicitSpecifier() && !getLangOpts().CPlusPlus20)
Diag(DS.getExplicitSpecLoc(),
getLangOpts().CPlusPlus11
? diag::warn_cxx98_compat_explicit_conversion_functions
@@ -10230,15 +10568,12 @@ Decl *Sema::ActOnConversionDeclarator(CXXConversionDecl *Conversion) {
// Make sure we aren't redeclaring the conversion function.
QualType ConvType = Context.getCanonicalType(Conversion->getConversionType());
-
// C++ [class.conv.fct]p1:
// [...] A conversion function is never used to convert a
// (possibly cv-qualified) object to the (possibly cv-qualified)
// same object type (or a reference to it), to a (possibly
// cv-qualified) base class of that type (or a reference to it),
// or to (possibly cv-qualified) void.
- // FIXME: Suppress this warning if the conversion function ends up being a
- // virtual function that overrides a virtual function in a base class.
QualType ClassType
= Context.getCanonicalType(Context.getTypeDeclType(ClassDecl));
if (const ReferenceType *ConvTypeRef = ConvType->getAs<ReferenceType>())
@@ -10246,6 +10581,8 @@ Decl *Sema::ActOnConversionDeclarator(CXXConversionDecl *Conversion) {
if (Conversion->getTemplateSpecializationKind() != TSK_Undeclared &&
Conversion->getTemplateSpecializationKind() != TSK_ExplicitSpecialization)
/* Suppress diagnostics for instantiations. */;
+ else if (Conversion->size_overridden_methods() != 0)
+    /* Suppress diagnostics for overriding a virtual function in a base class. */;
else if (ConvType->isRecordType()) {
ConvType = Context.getCanonicalType(ConvType).getUnqualifiedType();
if (ConvType == ClassType)
@@ -10920,8 +11257,7 @@ bool Sema::isInitListConstructor(const FunctionDecl *Ctor) {
// is of type std::initializer_list<E> or reference to possibly cv-qualified
// std::initializer_list<E> for some type E, and either there are no other
// parameters or else all other parameters have default arguments.
- if (Ctor->getNumParams() < 1 ||
- (Ctor->getNumParams() > 1 && !Ctor->getParamDecl(1)->hasDefaultArg()))
+ if (!Ctor->hasOneParamOrDefaultArgs())
return false;
QualType ArgType = Ctor->getParamDecl(0)->getType();
@@ -12960,6 +13296,25 @@ void Sema::DefineImplicitDestructor(SourceLocation CurrentLocation,
}
}
+void Sema::CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
+ CXXDestructorDecl *Destructor) {
+ if (Destructor->isInvalidDecl())
+ return;
+
+ CXXRecordDecl *ClassDecl = Destructor->getParent();
+ assert(Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ "implicit complete dtors unneeded outside MS ABI");
+ assert(ClassDecl->getNumVBases() > 0 &&
+ "complete dtor only exists for classes with vbases");
+
+ SynthesizedFunctionScope Scope(*this, Destructor);
+
+ // Add a context note for diagnostics produced after this point.
+ Scope.addContextNote(CurrentLocation);
+
+ MarkVirtualBaseDestructorsReferenced(Destructor->getLocation(), ClassDecl);
+}
+
/// Perform any semantic analysis which needs to be delayed until all
/// pending class member declarations have been parsed.
void Sema::ActOnFinishCXXMemberDecls() {
@@ -12981,7 +13336,7 @@ void Sema::ActOnFinishCXXNonNestedClass() {
SmallVector<CXXMethodDecl*, 4> WorkList;
std::swap(DelayedDllExportMemberFunctions, WorkList);
for (CXXMethodDecl *M : WorkList) {
- DefineImplicitSpecialMember(*this, M, M->getLocation());
+ DefineDefaultedFunction(*this, M, M->getLocation());
// Pass the method to the consumer to get emitted. This is not necessary
// for explicit instantiation definitions, as they will get emitted
@@ -13180,13 +13535,13 @@ buildMemcpyForAssignmentOp(Sema &S, SourceLocation Loc, QualType T,
// directly construct UnaryOperators here because semantic analysis
// does not permit us to take the address of an xvalue.
Expr *From = FromB.build(S, Loc);
- From = new (S.Context) UnaryOperator(From, UO_AddrOf,
- S.Context.getPointerType(From->getType()),
- VK_RValue, OK_Ordinary, Loc, false);
+ From = UnaryOperator::Create(
+ S.Context, From, UO_AddrOf, S.Context.getPointerType(From->getType()),
+ VK_RValue, OK_Ordinary, Loc, false, S.CurFPFeatureOverrides());
Expr *To = ToB.build(S, Loc);
- To = new (S.Context) UnaryOperator(To, UO_AddrOf,
- S.Context.getPointerType(To->getType()),
- VK_RValue, OK_Ordinary, Loc, false);
+ To = UnaryOperator::Create(
+ S.Context, To, UO_AddrOf, S.Context.getPointerType(To->getType()),
+ VK_RValue, OK_Ordinary, Loc, false, S.CurFPFeatureOverrides());
const Type *E = T->getBaseElementTypeUnsafe();
bool NeedsCollectableMemCpy =
@@ -13420,18 +13775,17 @@ buildSingleCopyAssignRecursively(Sema &S, SourceLocation Loc, QualType T,
// Create the comparison against the array bound.
llvm::APInt Upper
= ArrayTy->getSize().zextOrTrunc(S.Context.getTypeSize(SizeType));
- Expr *Comparison
- = new (S.Context) BinaryOperator(IterationVarRefRVal.build(S, Loc),
- IntegerLiteral::Create(S.Context, Upper, SizeType, Loc),
- BO_NE, S.Context.BoolTy,
- VK_RValue, OK_Ordinary, Loc, FPOptions());
+ Expr *Comparison = BinaryOperator::Create(
+ S.Context, IterationVarRefRVal.build(S, Loc),
+ IntegerLiteral::Create(S.Context, Upper, SizeType, Loc), BO_NE,
+ S.Context.BoolTy, VK_RValue, OK_Ordinary, Loc, S.CurFPFeatureOverrides());
// Create the pre-increment of the iteration variable. We can determine
// whether the increment will overflow based on the value of the array
// bound.
- Expr *Increment = new (S.Context)
- UnaryOperator(IterationVarRef.build(S, Loc), UO_PreInc, SizeType,
- VK_LValue, OK_Ordinary, Loc, Upper.isMaxValue());
+ Expr *Increment = UnaryOperator::Create(
+ S.Context, IterationVarRef.build(S, Loc), UO_PreInc, SizeType, VK_LValue,
+ OK_Ordinary, Loc, Upper.isMaxValue(), S.CurFPFeatureOverrides());
// Construct the loop that copies all elements of this array.
return S.ActOnForStmt(
@@ -13529,8 +13883,10 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
Scope *S = getScopeForContext(ClassDecl);
CheckImplicitSpecialMemberDeclaration(S, CopyAssignment);
- if (ShouldDeleteSpecialMember(CopyAssignment, CXXCopyAssignment))
+ if (ShouldDeleteSpecialMember(CopyAssignment, CXXCopyAssignment)) {
+ ClassDecl->setImplicitCopyAssignmentIsDeleted();
SetDeclDeleted(CopyAssignment, ClassLoc);
+ }
if (S)
PushOnScopeChains(CopyAssignment, S, false);
@@ -14642,13 +14998,18 @@ Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
MarkFunctionReferenced(ConstructLoc, Constructor);
if (getLangOpts().CUDA && !CheckCUDACall(ConstructLoc, Constructor))
return ExprError();
+ if (getLangOpts().SYCLIsDevice &&
+ !checkSYCLDeviceFunction(ConstructLoc, Constructor))
+ return ExprError();
- return CXXConstructExpr::Create(
- Context, DeclInitType, ConstructLoc, Constructor, Elidable,
- ExprArgs, HadMultipleCandidates, IsListInitialization,
- IsStdInitListInitialization, RequiresZeroInit,
- static_cast<CXXConstructExpr::ConstructionKind>(ConstructKind),
- ParenRange);
+ return CheckForImmediateInvocation(
+ CXXConstructExpr::Create(
+ Context, DeclInitType, ConstructLoc, Constructor, Elidable, ExprArgs,
+ HadMultipleCandidates, IsListInitialization,
+ IsStdInitListInitialization, RequiresZeroInit,
+ static_cast<CXXConstructExpr::ConstructionKind>(ConstructKind),
+ ParenRange),
+ Constructor);
}
ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
@@ -14726,6 +15087,10 @@ ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
if (VD->isInvalidDecl()) return;
+ // If initializing the variable failed, don't also diagnose problems with
+  // the destructor, they're likely related.
+ if (VD->getInit() && VD->getInit()->containsErrors())
+ return;
CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(Record->getDecl());
if (ClassDecl->isInvalidDecl()) return;
@@ -14752,10 +15117,13 @@ void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
// If the destructor is constexpr, check whether the variable has constant
// destruction now.
- if (Destructor->isConstexpr() && VD->getInit() &&
- !VD->getInit()->isValueDependent() && VD->evaluateValue()) {
+ if (Destructor->isConstexpr()) {
+ bool HasConstantInit = false;
+ if (VD->getInit() && !VD->getInit()->isValueDependent())
+ HasConstantInit = VD->evaluateValue();
SmallVector<PartialDiagnosticAt, 8> Notes;
- if (!VD->evaluateDestruction(Notes) && VD->isConstexpr()) {
+ if (!VD->evaluateDestruction(Notes) && VD->isConstexpr() &&
+ HasConstantInit) {
Diag(VD->getLocation(),
diag::err_constexpr_var_requires_const_destruction) << VD;
for (unsigned I = 0, N = Notes.size(); I != N; ++I)
@@ -14855,12 +15223,6 @@ CheckOperatorNewDeleteTypes(Sema &SemaRef, const FunctionDecl *FnDecl,
QualType ResultType =
FnDecl->getType()->castAs<FunctionType>()->getReturnType();
- // Check that the result type is not dependent.
- if (ResultType->isDependentType())
- return SemaRef.Diag(FnDecl->getLocation(),
- diag::err_operator_new_delete_dependent_result_type)
- << FnDecl->getDeclName() << ExpectedResultType;
-
// The operator is valid on any address space for OpenCL.
if (SemaRef.getLangOpts().OpenCLCPlusPlus) {
if (auto *PtrTy = ResultType->getAs<PointerType>()) {
@@ -14869,10 +15231,16 @@ CheckOperatorNewDeleteTypes(Sema &SemaRef, const FunctionDecl *FnDecl,
}
// Check that the result type is what we expect.
- if (SemaRef.Context.getCanonicalType(ResultType) != ExpectedResultType)
- return SemaRef.Diag(FnDecl->getLocation(),
- diag::err_operator_new_delete_invalid_result_type)
- << FnDecl->getDeclName() << ExpectedResultType;
+ if (SemaRef.Context.getCanonicalType(ResultType) != ExpectedResultType) {
+ // Reject even if the type is dependent; an operator delete function is
+ // required to have a non-dependent result type.
+ return SemaRef.Diag(
+ FnDecl->getLocation(),
+ ResultType->isDependentType()
+ ? diag::err_operator_new_delete_dependent_result_type
+ : diag::err_operator_new_delete_invalid_result_type)
+ << FnDecl->getDeclName() << ExpectedResultType;
+ }
// A function template must have at least 2 parameters.
if (FnDecl->getDescribedFunctionTemplate() && FnDecl->getNumParams() < 2)
@@ -14886,13 +15254,7 @@ CheckOperatorNewDeleteTypes(Sema &SemaRef, const FunctionDecl *FnDecl,
diag::err_operator_new_delete_too_few_parameters)
<< FnDecl->getDeclName();
- // Check the first parameter type is not dependent.
QualType FirstParamType = FnDecl->getParamDecl(0)->getType();
- if (FirstParamType->isDependentType())
- return SemaRef.Diag(FnDecl->getLocation(), DependentParamTypeDiag)
- << FnDecl->getDeclName() << ExpectedFirstParamType;
-
- // Check that the first parameter type is what we expect.
if (SemaRef.getLangOpts().OpenCLCPlusPlus) {
// The operator is valid on any address space for OpenCL.
if (auto *PtrTy =
@@ -14900,10 +15262,18 @@ CheckOperatorNewDeleteTypes(Sema &SemaRef, const FunctionDecl *FnDecl,
FirstParamType = RemoveAddressSpaceFromPtr(SemaRef, PtrTy);
}
}
+
+ // Check that the first parameter type is what we expect.
if (SemaRef.Context.getCanonicalType(FirstParamType).getUnqualifiedType() !=
- ExpectedFirstParamType)
- return SemaRef.Diag(FnDecl->getLocation(), InvalidParamTypeDiag)
- << FnDecl->getDeclName() << ExpectedFirstParamType;
+ ExpectedFirstParamType) {
+ // The first parameter type is not allowed to be dependent. As a tentative
+ // DR resolution, we allow a dependent parameter type if it is the right
+ // type anyway, to allow destroying operator delete in class templates.
+ return SemaRef.Diag(FnDecl->getLocation(), FirstParamType->isDependentType()
+ ? DependentParamTypeDiag
+ : InvalidParamTypeDiag)
+ << FnDecl->getDeclName() << ExpectedFirstParamType;
+ }
return false;
}
@@ -15442,6 +15812,11 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S,
!BaseType->isDependentType() && RequireCompleteType(Loc, BaseType, DK))
Invalid = true;
+ if (!Invalid && Mode != 1 && BaseType->isSizelessType()) {
+ Diag(Loc, diag::err_catch_sizeless) << (Mode == 2 ? 1 : 0) << BaseType;
+ Invalid = true;
+ }
+
if (!Invalid && !ExDeclType->isDependentType() &&
RequireNonAbstractType(Loc, ExDeclType,
diag::err_abstract_type_in_decl,
@@ -16304,9 +16679,16 @@ void Sema::SetDeclDeleted(Decl *Dcl, SourceLocation DelLoc) {
Diag(Prev->getLocation().isInvalid() ? DelLoc : Prev->getLocation(),
Prev->isImplicit() ? diag::note_previous_implicit_declaration
: diag::note_previous_declaration);
+ // We can't recover from this; the declaration might have already
+ // been used.
+ Fn->setInvalidDecl();
+ return;
}
- // If the declaration wasn't the first, we delete the function anyway for
- // recovery.
+
+ // To maintain the invariant that functions are only deleted on their first
+ // declaration, mark the implicitly-instantiated declaration of the
+ // explicitly-specialized function as deleted instead of marking the
+ // instantiated redeclaration.
Fn = Fn->getCanonicalDecl();
}
@@ -16316,9 +16698,6 @@ void Sema::SetDeclDeleted(Decl *Dcl, SourceLocation DelLoc) {
Fn->setInvalidDecl();
}
- if (Fn->isDeleted())
- return;
-
// C++11 [basic.start.main]p3:
// A program that defines main as deleted [...] is ill-formed.
if (Fn->isMain())
@@ -16328,25 +16707,6 @@ void Sema::SetDeclDeleted(Decl *Dcl, SourceLocation DelLoc) {
// A deleted function is implicitly inline.
Fn->setImplicitlyInline();
Fn->setDeletedAsWritten();
-
- // See if we're deleting a function which is already known to override a
- // non-deleted virtual function.
- if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn)) {
- bool IssuedDiagnostic = false;
- for (const CXXMethodDecl *O : MD->overridden_methods()) {
- if (!(*MD->begin_overridden_methods())->isDeleted()) {
- if (!IssuedDiagnostic) {
- Diag(DelLoc, diag::err_deleted_override) << MD->getDeclName();
- IssuedDiagnostic = true;
- }
- Diag(O->getLocation(), diag::note_overridden_virtual_function);
- }
- }
- // If this function was implicitly deleted because it was defaulted,
- // explain why it was deleted.
- if (IssuedDiagnostic && MD->isDefaulted())
- DiagnoseDeletedDefaultedFunction(MD);
- }
}
void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
@@ -16363,7 +16723,7 @@ void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
}
Diag(DefaultLoc, diag::err_default_special_members)
- << getLangOpts().CPlusPlus2a;
+ << getLangOpts().CPlusPlus20;
return;
}
@@ -16377,7 +16737,7 @@ void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
(!isa<CXXConstructorDecl>(FD) &&
FD->getDeclName().getCXXOverloadedOperator() != OO_Equal))) {
Diag(DefaultLoc, diag::err_default_special_members)
- << getLangOpts().CPlusPlus2a;
+ << getLangOpts().CPlusPlus20;
return;
}
@@ -16392,7 +16752,7 @@ void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
// 'operator<=>' when parsing the '<=>' token.
if (DefKind.isComparison() &&
DefKind.asComparison() != DefaultedComparisonKind::ThreeWay) {
- Diag(DefaultLoc, getLangOpts().CPlusPlus2a
+ Diag(DefaultLoc, getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_defaulted_comparison
: diag::ext_defaulted_comparison);
}
@@ -16428,10 +16788,12 @@ void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
if (Primary->getCanonicalDecl()->isDefaulted())
return;
+ // FIXME: Once we support defining comparisons out of class, check for a
+ // defaulted comparison here.
if (CheckExplicitlyDefaultedSpecialMember(MD, DefKind.asSpecialMember()))
MD->setInvalidDecl();
else
- DefineImplicitSpecialMember(*this, MD, DefaultLoc);
+ DefineDefaultedFunction(*this, MD, DefaultLoc);
}
static void SearchForReturnInStmt(Sema &Self, Stmt *S) {
@@ -16743,7 +17105,7 @@ void Sema::MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
return;
// Do not mark as used if compiling for the device outside of the target
// region.
- if (LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
+ if (TUKind != TU_Prefix && LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
!isInOpenMPDeclareTargetContext() &&
!isInOpenMPTargetExecutionDirective()) {
if (!DefinitionRequired)
@@ -17386,3 +17748,50 @@ MSPropertyDecl *Sema::HandleMSProperty(Scope *S, RecordDecl *Record,
return NewPD;
}
+
+void Sema::ActOnStartFunctionDeclarationDeclarator(
+ Declarator &Declarator, unsigned TemplateParameterDepth) {
+ auto &Info = InventedParameterInfos.emplace_back();
+ TemplateParameterList *ExplicitParams = nullptr;
+ ArrayRef<TemplateParameterList *> ExplicitLists =
+ Declarator.getTemplateParameterLists();
+ if (!ExplicitLists.empty()) {
+ bool IsMemberSpecialization, IsInvalid;
+ ExplicitParams = MatchTemplateParametersToScopeSpecifier(
+ Declarator.getBeginLoc(), Declarator.getIdentifierLoc(),
+ Declarator.getCXXScopeSpec(), /*TemplateId=*/nullptr,
+ ExplicitLists, /*IsFriend=*/false, IsMemberSpecialization, IsInvalid,
+ /*SuppressDiagnostic=*/true);
+ }
+ if (ExplicitParams) {
+ Info.AutoTemplateParameterDepth = ExplicitParams->getDepth();
+ for (NamedDecl *Param : *ExplicitParams)
+ Info.TemplateParams.push_back(Param);
+ Info.NumExplicitTemplateParams = ExplicitParams->size();
+ } else {
+ Info.AutoTemplateParameterDepth = TemplateParameterDepth;
+ Info.NumExplicitTemplateParams = 0;
+ }
+}
+
+void Sema::ActOnFinishFunctionDeclarationDeclarator(Declarator &Declarator) {
+ auto &FSI = InventedParameterInfos.back();
+ if (FSI.TemplateParams.size() > FSI.NumExplicitTemplateParams) {
+ if (FSI.NumExplicitTemplateParams != 0) {
+ TemplateParameterList *ExplicitParams =
+ Declarator.getTemplateParameterLists().back();
+ Declarator.setInventedTemplateParameterList(
+ TemplateParameterList::Create(
+ Context, ExplicitParams->getTemplateLoc(),
+ ExplicitParams->getLAngleLoc(), FSI.TemplateParams,
+ ExplicitParams->getRAngleLoc(),
+ ExplicitParams->getRequiresClause()));
+ } else {
+ Declarator.setInventedTemplateParameterList(
+ TemplateParameterList::Create(
+ Context, SourceLocation(), SourceLocation(), FSI.TemplateParams,
+ SourceLocation(), /*RequiresClause=*/nullptr));
+ }
+ }
+ InventedParameterInfos.pop_back();
+}
diff --git a/clang/lib/Sema/SemaDeclObjC.cpp b/clang/lib/Sema/SemaDeclObjC.cpp
index 5fdf6aeed5b4..d376880a40e8 100644
--- a/clang/lib/Sema/SemaDeclObjC.cpp
+++ b/clang/lib/Sema/SemaDeclObjC.cpp
@@ -19,6 +19,7 @@
#include "clang/AST/ExprObjC.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
@@ -937,8 +938,7 @@ static bool checkTypeParamListConsistency(Sema &S,
// Override the new type parameter's bound type with the previous type,
// so that it's consistent.
- newTypeParam->setTypeSourceInfo(
- S.Context.getTrivialTypeSourceInfo(prevTypeParam->getUnderlyingType()));
+ S.Context.adjustObjCTypeParamBoundType(prevTypeParam, newTypeParam);
continue;
}
@@ -965,8 +965,7 @@ static bool checkTypeParamListConsistency(Sema &S,
}
// Update the new type parameter's bound to match the previous one.
- newTypeParam->setTypeSourceInfo(
- S.Context.getTrivialTypeSourceInfo(prevTypeParam->getUnderlyingType()));
+ S.Context.adjustObjCTypeParamBoundType(prevTypeParam, newTypeParam);
}
return false;
@@ -1273,7 +1272,8 @@ Decl *Sema::ActOnStartProtocolInterface(
static bool NestedProtocolHasNoDefinition(ObjCProtocolDecl *PDecl,
ObjCProtocolDecl *&UndefinedProtocol) {
- if (!PDecl->hasDefinition() || PDecl->getDefinition()->isHidden()) {
+ if (!PDecl->hasDefinition() ||
+ !PDecl->getDefinition()->isUnconditionallyVisible()) {
UndefinedProtocol = PDecl;
return true;
}
@@ -2360,7 +2360,7 @@ static bool CheckMethodOverrideReturn(Sema &S,
: diag::warn_conflicting_ret_types;
// Mismatches between ObjC pointers go into a different warning
- // category, and sometimes they're even completely whitelisted.
+ // category, and sometimes they're even completely explicitly allowed.
if (const ObjCObjectPointerType *ImplPtrTy =
MethodImpl->getReturnType()->getAs<ObjCObjectPointerType>()) {
if (const ObjCObjectPointerType *IfacePtrTy =
@@ -2444,7 +2444,7 @@ static bool CheckMethodOverrideParam(Sema &S,
: diag::warn_conflicting_param_types;
// Mismatches between ObjC pointers go into a different warning
- // category, and sometimes they're even completely whitelisted.
+ // category, and sometimes they're even completely explicitly allowed.
if (const ObjCObjectPointerType *ImplPtrTy =
ImplTy->getAs<ObjCObjectPointerType>()) {
if (const ObjCObjectPointerType *IfacePtrTy =
@@ -3236,7 +3236,7 @@ bool Sema::MatchTwoMethodDeclarations(const ObjCMethodDecl *left,
return false;
// If either is hidden, it is not considered to match.
- if (left->isHidden() || right->isHidden())
+ if (!left->isUnconditionallyVisible() || !right->isUnconditionallyVisible())
return false;
if (left->isDirectMethod() != right->isDirectMethod())
@@ -3495,7 +3495,7 @@ bool Sema::CollectMultipleMethodsInGlobalPool(
ObjCMethodList &MethList = InstanceFirst ? Pos->second.first :
Pos->second.second;
for (ObjCMethodList *M = &MethList; M; M = M->getNext())
- if (M->getMethod() && !M->getMethod()->isHidden()) {
+ if (M->getMethod() && M->getMethod()->isUnconditionallyVisible()) {
if (FilterMethodsByTypeBound(M->getMethod(), TypeBound))
Methods.push_back(M->getMethod());
}
@@ -3511,7 +3511,7 @@ bool Sema::CollectMultipleMethodsInGlobalPool(
ObjCMethodList &MethList2 = InstanceFirst ? Pos->second.second :
Pos->second.first;
for (ObjCMethodList *M = &MethList2; M; M = M->getNext())
- if (M->getMethod() && !M->getMethod()->isHidden()) {
+ if (M->getMethod() && M->getMethod()->isUnconditionallyVisible()) {
if (FilterMethodsByTypeBound(M->getMethod(), TypeBound))
Methods.push_back(M->getMethod());
}
@@ -3558,7 +3558,7 @@ ObjCMethodDecl *Sema::LookupMethodInGlobalPool(Selector Sel, SourceRange R,
ObjCMethodList &MethList = instance ? Pos->second.first : Pos->second.second;
SmallVector<ObjCMethodDecl *, 4> Methods;
for (ObjCMethodList *M = &MethList; M; M = M->getNext()) {
- if (M->getMethod() && !M->getMethod()->isHidden())
+ if (M->getMethod() && M->getMethod()->isUnconditionallyVisible())
return M->getMethod();
}
return nullptr;
@@ -4580,6 +4580,62 @@ static void checkObjCMethodX86VectorTypes(Sema &SemaRef,
<< (Triple.isMacOSX() ? "macOS 10.11" : "iOS 9");
}
+static void mergeObjCDirectMembers(Sema &S, Decl *CD, ObjCMethodDecl *Method) {
+ if (!Method->isDirectMethod() && !Method->hasAttr<UnavailableAttr>() &&
+ CD->hasAttr<ObjCDirectMembersAttr>()) {
+ Method->addAttr(
+ ObjCDirectAttr::CreateImplicit(S.Context, Method->getLocation()));
+ }
+}
+
+static void checkObjCDirectMethodClashes(Sema &S, ObjCInterfaceDecl *IDecl,
+ ObjCMethodDecl *Method,
+ ObjCImplDecl *ImpDecl = nullptr) {
+ auto Sel = Method->getSelector();
+ bool isInstance = Method->isInstanceMethod();
+ bool diagnosed = false;
+
+ auto diagClash = [&](const ObjCMethodDecl *IMD) {
+ if (diagnosed || IMD->isImplicit())
+ return;
+ if (Method->isDirectMethod() || IMD->isDirectMethod()) {
+ S.Diag(Method->getLocation(), diag::err_objc_direct_duplicate_decl)
+ << Method->isDirectMethod() << /* method */ 0 << IMD->isDirectMethod()
+ << Method->getDeclName();
+ S.Diag(IMD->getLocation(), diag::note_previous_declaration);
+ diagnosed = true;
+ }
+ };
+
+ // Look for any other declaration of this method anywhere we can see in this
+ // compilation unit.
+ //
+ // We do not use IDecl->lookupMethod() because we have specific needs:
+ //
+ // - we absolutely do not need to walk protocols, because
+ // diag::err_objc_direct_on_protocol has already been emitted
+ // during parsing if there's a conflict,
+ //
+ // - when we do not find a match in a given @interface container,
+ // we need to attempt looking it up in the @implementation block if the
+ // translation unit sees it to find more clashes.
+
+ if (auto *IMD = IDecl->getMethod(Sel, isInstance))
+ diagClash(IMD);
+ else if (auto *Impl = IDecl->getImplementation())
+ if (Impl != ImpDecl)
+ if (auto *IMD = IDecl->getImplementation()->getMethod(Sel, isInstance))
+ diagClash(IMD);
+
+ for (const auto *Cat : IDecl->visible_categories())
+ if (auto *IMD = Cat->getMethod(Sel, isInstance))
+ diagClash(IMD);
+ else if (auto CatImpl = Cat->getImplementation())
+ if (CatImpl != ImpDecl)
+ if (auto *IMD = Cat->getMethod(Sel, isInstance))
+ diagClash(IMD);
+}
+
Decl *Sema::ActOnMethodDeclaration(
Scope *S, SourceLocation MethodLoc, SourceLocation EndLoc,
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
@@ -4808,9 +4864,9 @@ Decl *Sema::ActOnMethodDeclaration(
Diag(ObjCMethod->getLocation(), diag::warn_dealloc_in_category)
<< ObjCMethod->getDeclName();
}
- } else if (ImpDecl->hasAttr<ObjCDirectMembersAttr>()) {
- ObjCMethod->addAttr(
- ObjCDirectAttr::CreateImplicit(Context, ObjCMethod->getLocation()));
+ } else {
+ mergeObjCDirectMembers(*this, ClassDecl, ObjCMethod);
+ checkObjCDirectMethodClashes(*this, IDecl, ObjCMethod, ImpDecl);
}
// Warn if a method declared in a protocol to which a category or
@@ -4831,39 +4887,16 @@ Decl *Sema::ActOnMethodDeclaration(
}
} else {
if (!isa<ObjCProtocolDecl>(ClassDecl)) {
- if (!ObjCMethod->isDirectMethod() &&
- ClassDecl->hasAttr<ObjCDirectMembersAttr>()) {
- ObjCMethod->addAttr(
- ObjCDirectAttr::CreateImplicit(Context, ObjCMethod->getLocation()));
- }
+ mergeObjCDirectMembers(*this, ClassDecl, ObjCMethod);
- // There can be a single declaration in any @interface container
- // for a given direct method, look for clashes as we add them.
- //
- // For valid code, we should always know the primary interface
- // declaration by now, however for invalid code we'll keep parsing
- // but we won't find the primary interface and IDecl will be nil.
ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(ClassDecl);
if (!IDecl)
IDecl = cast<ObjCCategoryDecl>(ClassDecl)->getClassInterface();
-
+ // For valid code, we should always know the primary interface
+ // declaration by now, however for invalid code we'll keep parsing
+ // but we won't find the primary interface and IDecl will be nil.
if (IDecl)
- if (auto *IMD = IDecl->lookupMethod(ObjCMethod->getSelector(),
- ObjCMethod->isInstanceMethod(),
- /*shallowCategoryLookup=*/false,
- /*followSuper=*/false)) {
- if (isa<ObjCProtocolDecl>(IMD->getDeclContext())) {
- // Do not emit a diagnostic for the Protocol case:
- // diag::err_objc_direct_on_protocol has already been emitted
- // during parsing for these with a nicer diagnostic.
- } else if (ObjCMethod->isDirectMethod() || IMD->isDirectMethod()) {
- Diag(ObjCMethod->getLocation(),
- diag::err_objc_direct_duplicate_decl)
- << ObjCMethod->isDirectMethod() << IMD->isDirectMethod()
- << ObjCMethod->getDeclName();
- Diag(IMD->getLocation(), diag::note_previous_declaration);
- }
- }
+ checkObjCDirectMethodClashes(*this, IDecl, ObjCMethod);
}
cast<DeclContext>(ClassDecl)->addDecl(ObjCMethod);
diff --git a/clang/lib/Sema/SemaExceptionSpec.cpp b/clang/lib/Sema/SemaExceptionSpec.cpp
index 5aedbe7644e4..d7695f9d7d7a 100644
--- a/clang/lib/Sema/SemaExceptionSpec.cpp
+++ b/clang/lib/Sema/SemaExceptionSpec.cpp
@@ -167,6 +167,14 @@ bool Sema::CheckSpecifiedExceptionType(QualType &T, SourceRange Range) {
RequireCompleteType(Range.getBegin(), PointeeT, DiagID, Kind, Range))
return ReturnValueOnError;
+ // The MSVC compatibility mode doesn't extend to sizeless types,
+ // so diagnose them separately.
+ if (PointeeT->isSizelessType() && Kind != 1) {
+ Diag(Range.getBegin(), diag::err_sizeless_in_exception_spec)
+ << (Kind == 2 ? 1 : 0) << PointeeT << Range;
+ return true;
+ }
+
return false;
}
@@ -991,10 +999,8 @@ static CanThrowResult canSubStmtsThrow(Sema &Self, const Stmt *S) {
return R;
}
-/// Determine whether the callee of a particular function call can throw.
-/// E and D are both optional, but at least one of E and Loc must be specified.
-static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
- SourceLocation Loc = SourceLocation()) {
+CanThrowResult Sema::canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
+ SourceLocation Loc) {
// As an extension, we assume that __attribute__((nothrow)) functions don't
// throw.
if (D && isa<FunctionDecl>(D) && D->hasAttr<NoThrowAttr>())
@@ -1040,7 +1046,8 @@ static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
if (!FT)
return CT_Can;
- FT = S.ResolveExceptionSpec(Loc.isInvalid() ? E->getBeginLoc() : Loc, FT);
+ if (Loc.isValid() || (Loc.isInvalid() && E))
+ FT = S.ResolveExceptionSpec(Loc.isInvalid() ? E->getBeginLoc() : Loc, FT);
if (!FT)
return CT_Can;
@@ -1061,7 +1068,7 @@ static CanThrowResult canVarDeclThrow(Sema &Self, const VarDecl *VD) {
VD->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl()) {
if (auto *Dtor = RD->getDestructor()) {
CT = mergeCanThrow(
- CT, canCalleeThrow(Self, nullptr, Dtor, VD->getLocation()));
+ CT, Sema::canCalleeThrow(Self, nullptr, Dtor, VD->getLocation()));
}
}
}
@@ -1281,6 +1288,7 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
case Expr::CompoundLiteralExprClass:
case Expr::CXXConstCastExprClass:
+ case Expr::CXXAddrspaceCastExprClass:
case Expr::CXXReinterpretCastExprClass:
case Expr::BuiltinBitCastExprClass:
// FIXME: Properly determine whether a variably-modified type can throw.
@@ -1290,7 +1298,10 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
// Some might be dependent for other reasons.
case Expr::ArraySubscriptExprClass:
+ case Expr::MatrixSubscriptExprClass:
case Expr::OMPArraySectionExprClass:
+ case Expr::OMPArrayShapingExprClass:
+ case Expr::OMPIteratorExprClass:
case Expr::BinaryOperatorClass:
case Expr::DependentCoawaitExprClass:
case Expr::CompoundAssignOperatorClass:
@@ -1332,6 +1343,7 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
case Expr::CXXUnresolvedConstructExprClass:
case Expr::DependentScopeDeclRefExprClass:
case Expr::CXXFoldExprClass:
+ case Expr::RecoveryExprClass:
return CT_Dependent;
case Expr::AsTypeExprClass:
@@ -1386,6 +1398,7 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
case Expr::StringLiteralClass:
case Expr::SourceLocExprClass:
case Expr::ConceptSpecializationExprClass:
+ case Expr::RequiresExprClass:
// These expressions can never throw.
return CT_Cannot;
@@ -1429,6 +1442,8 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
case Stmt::OMPDistributeParallelForSimdDirectiveClass:
case Stmt::OMPDistributeSimdDirectiveClass:
case Stmt::OMPFlushDirectiveClass:
+ case Stmt::OMPDepobjDirectiveClass:
+ case Stmt::OMPScanDirectiveClass:
case Stmt::OMPForDirectiveClass:
case Stmt::OMPForSimdDirectiveClass:
case Stmt::OMPMasterDirectiveClass:
diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
index 5f4071924d3f..ccae79636f32 100644
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "TreeTransform.h"
+#include "UsedDeclVisitor.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
@@ -46,8 +47,10 @@
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
using namespace sema;
+using llvm::RoundingMode;
/// Determine whether the use of this declaration is valid, without
/// emitting diagnostics.
@@ -245,8 +248,8 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
return true;
}
- // See if this is a deleted function.
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // See if this is a deleted function.
if (FD->isDeleted()) {
auto *Ctor = dyn_cast<CXXConstructorDecl>(FD);
if (Ctor && Ctor->isInheritingConstructor())
@@ -259,6 +262,29 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
return true;
}
+ // [expr.prim.id]p4
+ // A program that refers explicitly or implicitly to a function with a
+ // trailing requires-clause whose constraint-expression is not satisfied,
+ // other than to declare it, is ill-formed. [...]
+ //
+ // See if this is a function with constraints that need to be satisfied.
+ // Check this before deducing the return type, as it might instantiate the
+ // definition.
+ if (FD->getTrailingRequiresClause()) {
+ ConstraintSatisfaction Satisfaction;
+ if (CheckFunctionConstraints(FD, Satisfaction, Loc))
+ // A diagnostic will have already been generated (non-constant
+ // constraint expression, for example)
+ return true;
+ if (!Satisfaction.IsSatisfied) {
+ Diag(Loc,
+ diag::err_reference_to_function_with_unsatisfied_constraints)
+ << D;
+ DiagnoseUnsatisfiedConstraint(Satisfaction);
+ return true;
+ }
+ }
+
// If the function has a deduced return type, and we can't deduce it,
// then we can't use it either.
if (getLangOpts().CPlusPlus14 && FD->getReturnType()->isUndeducedType() &&
@@ -267,6 +293,9 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
if (getLangOpts().CUDA && !CheckCUDACall(Loc, FD))
return true;
+
+ if (getLangOpts().SYCLIsDevice && !checkSYCLDeviceFunction(Loc, FD))
+ return true;
}
if (auto *MD = dyn_cast<CXXMethodDecl>(D)) {
@@ -326,28 +355,25 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
diagnoseUseOfInternalDeclInInlineFunction(*this, D, Loc);
- // [expr.prim.id]p4
- // A program that refers explicitly or implicitly to a function with a
- // trailing requires-clause whose constraint-expression is not satisfied,
- // other than to declare it, is ill-formed. [...]
- //
- // See if this is a function with constraints that need to be satisfied.
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
- if (Expr *RC = FD->getTrailingRequiresClause()) {
- ConstraintSatisfaction Satisfaction;
- bool Failed = CheckConstraintSatisfaction(RC, Satisfaction);
- if (Failed)
- // A diagnostic will have already been generated (non-constant
- // constraint expression, for example)
- return true;
- if (!Satisfaction.IsSatisfied) {
- Diag(Loc,
- diag::err_reference_to_function_with_unsatisfied_constraints)
- << D;
- DiagnoseUnsatisfiedConstraint(Satisfaction);
- return true;
- }
- }
+ if (LangOpts.SYCLIsDevice || (LangOpts.OpenMP && LangOpts.OpenMPIsDevice)) {
+ if (const auto *VD = dyn_cast<ValueDecl>(D))
+ checkDeviceDecl(VD, Loc);
+
+ if (!Context.getTargetInfo().isTLSSupported())
+ if (const auto *VD = dyn_cast<VarDecl>(D))
+ if (VD->getTLSKind() != VarDecl::TLS_None)
+ targetDiag(*Locs.begin(), diag::err_thread_unsupported);
+ }
+
+ if (isa<ParmVarDecl>(D) && isa<RequiresExprBodyDecl>(D->getDeclContext()) &&
+ !isUnevaluatedContext()) {
+ // C++ [expr.prim.req.nested] p3
+ // A local parameter shall only appear as an unevaluated operand
+ // (Clause 8) within the constraint-expression.
+ Diag(Loc, diag::err_requires_expr_parameter_referenced_in_evaluated_context)
+ << D;
+ Diag(D->getLocation(), diag::note_entity_declared_at) << D;
+ return true;
}
return false;
@@ -593,6 +619,10 @@ ExprResult Sema::DefaultLvalueConversion(Expr *E) {
QualType T = E->getType();
assert(!T.isNull() && "r-value conversion on typeless expression?");
+ // lvalue-to-rvalue conversion cannot be applied to function or array types.
+ if (T->isFunctionType() || T->isArrayType())
+ return E;
+
// We don't want to throw lvalue-to-rvalue casts on top of
// expressions of certain types in C++.
if (getLangOpts().CPlusPlus &&
@@ -661,6 +691,9 @@ ExprResult Sema::DefaultLvalueConversion(Expr *E) {
if (E->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
Cleanup.setExprNeedsCleanups(true);
+ if (E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
+ Cleanup.setExprNeedsCleanups(true);
+
// C++ [conv.lval]p3:
// If T is cv std::nullptr_t, the result is a null pointer constant.
CastKind CK = T->isNullPtrType() ? CK_NullToPointer : CK_LValueToRValue;
@@ -697,7 +730,7 @@ ExprResult Sema::CallExprUnaryConversions(Expr *E) {
// to function type.
if (Ty->isFunctionType()) {
Res = ImpCastExprToType(E, Context.getPointerType(Ty),
- CK_FunctionToPointerDecay).get();
+ CK_FunctionToPointerDecay);
if (Res.isInvalid())
return ExprError();
}
@@ -931,6 +964,11 @@ ExprResult Sema::DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
ExprResult ExprRes = DefaultArgumentPromotion(E);
if (ExprRes.isInvalid())
return ExprError();
+
+ // Copy blocks to the heap.
+ if (ExprRes.get()->getType()->isBlockPointerType())
+ maybeExtendBlockObject(ExprRes);
+
E = ExprRes.get();
// Diagnostics regarding non-POD argument types are
@@ -1375,8 +1413,8 @@ static void checkEnumArithmeticConversions(Sema &S, Expr *LHS, Expr *RHS,
bool IsCompAssign = ACK == Sema::ACK_CompAssign;
if ((!IsCompAssign && LEnum && R->isFloatingType()) ||
(REnum && L->isFloatingType())) {
- S.Diag(Loc, S.getLangOpts().CPlusPlus2a
- ? diag::warn_arith_conv_enum_float_cxx2a
+ S.Diag(Loc, S.getLangOpts().CPlusPlus20
+ ? diag::warn_arith_conv_enum_float_cxx20
: diag::warn_arith_conv_enum_float)
<< LHS->getSourceRange() << RHS->getSourceRange()
<< (int)ACK << LEnum << L << R;
@@ -1388,24 +1426,24 @@ static void checkEnumArithmeticConversions(Sema &S, Expr *LHS, Expr *RHS,
// If either enumeration type is unnamed, it's less likely that the
// user cares about this, but this situation is still deprecated in
// C++2a. Use a different warning group.
- DiagID = S.getLangOpts().CPlusPlus2a
- ? diag::warn_arith_conv_mixed_anon_enum_types_cxx2a
+ DiagID = S.getLangOpts().CPlusPlus20
+ ? diag::warn_arith_conv_mixed_anon_enum_types_cxx20
: diag::warn_arith_conv_mixed_anon_enum_types;
} else if (ACK == Sema::ACK_Conditional) {
// Conditional expressions are separated out because they have
// historically had a different warning flag.
- DiagID = S.getLangOpts().CPlusPlus2a
- ? diag::warn_conditional_mixed_enum_types_cxx2a
+ DiagID = S.getLangOpts().CPlusPlus20
+ ? diag::warn_conditional_mixed_enum_types_cxx20
: diag::warn_conditional_mixed_enum_types;
} else if (ACK == Sema::ACK_Comparison) {
// Comparison expressions are separated out because they have
// historically had a different warning flag.
- DiagID = S.getLangOpts().CPlusPlus2a
- ? diag::warn_comparison_mixed_enum_types_cxx2a
+ DiagID = S.getLangOpts().CPlusPlus20
+ ? diag::warn_comparison_mixed_enum_types_cxx20
: diag::warn_comparison_mixed_enum_types;
} else {
- DiagID = S.getLangOpts().CPlusPlus2a
- ? diag::warn_arith_conv_mixed_enum_types_cxx2a
+ DiagID = S.getLangOpts().CPlusPlus20
+ ? diag::warn_arith_conv_mixed_enum_types_cxx20
: diag::warn_arith_conv_mixed_enum_types;
}
S.Diag(Loc, DiagID) << LHS->getSourceRange() << RHS->getSourceRange()
@@ -1466,6 +1504,11 @@ QualType Sema::UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
if (LHSType == RHSType)
return LHSType;
+ // ExtInt types aren't subject to conversions between them or normal integers,
+ // so this fails.
+ if(LHSType->isExtIntType() || RHSType->isExtIntType())
+ return QualType();
+
// At this point, we have two different arithmetic types.
// Diagnose attempts to convert between __float128 and long double where
@@ -1750,15 +1793,15 @@ Sema::ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope) {
// Warn on initializing an array of char from a u8 string literal; this
// becomes ill-formed in C++2a.
- if (getLangOpts().CPlusPlus && !getLangOpts().CPlusPlus2a &&
+ if (getLangOpts().CPlusPlus && !getLangOpts().CPlusPlus20 &&
!getLangOpts().Char8 && Kind == StringLiteral::UTF8) {
- Diag(StringTokLocs.front(), diag::warn_cxx2a_compat_utf8_string);
+ Diag(StringTokLocs.front(), diag::warn_cxx20_compat_utf8_string);
// Create removals for all 'u8' prefixes in the string literal(s). This
// ensures C++2a compatibility (but may change the program behavior when
// built by non-Clang compilers for which the execution character set is
// not always UTF-8).
- auto RemovalDiag = PDiag(diag::note_cxx2a_compat_utf8_string_remove_u8);
+ auto RemovalDiag = PDiag(diag::note_cxx20_compat_utf8_string_remove_u8);
SourceLocation RemovalDiagLoc;
for (const Token &Tok : StringToks) {
if (Tok.getKind() == tok::utf8_string_literal) {
@@ -3113,6 +3156,11 @@ ExprResult Sema::BuildDeclarationNameExpr(
return ExprError();
ExprValueKind valueKind = VK_RValue;
+ // In 'T ...V;', the type of the declaration 'V' is 'T...', but the type of
+ // a reference to 'V' is simply (unexpanded) 'T'. The type, like the value,
+ // is expanded by some outer '...' in the context of the use.
+ type = type.getNonPackExpansionType();
+
switch (D->getKind()) {
// Ignore all the non-ValueDecl kinds.
#define ABSTRACT_DECL(kind)
@@ -3258,6 +3306,9 @@ ExprResult Sema::BuildDeclarationNameExpr(
llvm_unreachable("building reference to deduction guide");
case Decl::MSProperty:
+ case Decl::MSGuid:
+ // FIXME: Should MSGuidDecl be subject to capture in OpenMP,
+ // or duplicated between host and device?
valueKind = VK_LValue;
break;
@@ -3358,6 +3409,70 @@ ExprResult Sema::BuildPredefinedExpr(SourceLocation Loc,
return PredefinedExpr::Create(Context, Loc, ResTy, IK, SL);
}
+static std::pair<QualType, StringLiteral *>
+GetUniqueStableNameInfo(ASTContext &Context, QualType OpType,
+ SourceLocation OpLoc, PredefinedExpr::IdentKind K) {
+ std::pair<QualType, StringLiteral*> Result{{}, nullptr};
+
+ if (OpType->isDependentType()) {
+ Result.first = Context.DependentTy;
+ return Result;
+ }
+
+ std::string Str = PredefinedExpr::ComputeName(Context, K, OpType);
+ llvm::APInt Length(32, Str.length() + 1);
+ Result.first =
+ Context.adjustStringLiteralBaseType(Context.CharTy.withConst());
+ Result.first = Context.getConstantArrayType(
+ Result.first, Length, nullptr, ArrayType::Normal, /*IndexTypeQuals*/ 0);
+ Result.second = StringLiteral::Create(Context, Str, StringLiteral::Ascii,
+ /*Pascal*/ false, Result.first, OpLoc);
+ return Result;
+}
+
+ExprResult Sema::BuildUniqueStableName(SourceLocation OpLoc,
+ TypeSourceInfo *Operand) {
+ QualType ResultTy;
+ StringLiteral *SL;
+ std::tie(ResultTy, SL) = GetUniqueStableNameInfo(
+ Context, Operand->getType(), OpLoc, PredefinedExpr::UniqueStableNameType);
+
+ return PredefinedExpr::Create(Context, OpLoc, ResultTy,
+ PredefinedExpr::UniqueStableNameType, SL,
+ Operand);
+}
+
+ExprResult Sema::BuildUniqueStableName(SourceLocation OpLoc,
+ Expr *E) {
+ QualType ResultTy;
+ StringLiteral *SL;
+ std::tie(ResultTy, SL) = GetUniqueStableNameInfo(
+ Context, E->getType(), OpLoc, PredefinedExpr::UniqueStableNameExpr);
+
+ return PredefinedExpr::Create(Context, OpLoc, ResultTy,
+ PredefinedExpr::UniqueStableNameExpr, SL, E);
+}
+
+ExprResult Sema::ActOnUniqueStableNameExpr(SourceLocation OpLoc,
+ SourceLocation L, SourceLocation R,
+ ParsedType Ty) {
+ TypeSourceInfo *TInfo = nullptr;
+ QualType T = GetTypeFromParser(Ty, &TInfo);
+
+ if (T.isNull())
+ return ExprError();
+ if (!TInfo)
+ TInfo = Context.getTrivialTypeSourceInfo(T, OpLoc);
+
+ return BuildUniqueStableName(OpLoc, TInfo);
+}
+
+ExprResult Sema::ActOnUniqueStableNameExpr(SourceLocation OpLoc,
+ SourceLocation L, SourceLocation R,
+ Expr *E) {
+ return BuildUniqueStableName(OpLoc, E);
+}
+
ExprResult Sema::ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind) {
PredefinedExpr::IdentKind IK;
@@ -3519,7 +3634,9 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
if (Invalid)
return ExprError();
- NumericLiteralParser Literal(TokSpelling, Tok.getLocation(), PP);
+ NumericLiteralParser Literal(TokSpelling, Tok.getLocation(),
+ PP.getSourceManager(), PP.getLangOpts(),
+ PP.getTargetInfo(), PP.getDiagnostics());
if (Literal.hadError)
return ExprError();
@@ -3872,7 +3989,7 @@ static bool CheckExtensionTraitOperandType(Sema &S, QualType T,
TraitKind == UETT_PreferredAlignOf)) {
// sizeof(function)/alignof(function) is allowed as an extension.
S.Diag(Loc, diag::ext_sizeof_alignof_function_type)
- << TraitKind << ArgRange;
+ << getTraitSpelling(TraitKind) << ArgRange;
return false;
}
@@ -3881,7 +3998,7 @@ static bool CheckExtensionTraitOperandType(Sema &S, QualType T,
if (T->isVoidType()) {
unsigned DiagID = S.LangOpts.OpenCL ? diag::err_opencl_sizeof_alignof_type
: diag::ext_sizeof_alignof_void_type;
- S.Diag(Loc, DiagID) << TraitKind << ArgRange;
+ S.Diag(Loc, DiagID) << getTraitSpelling(TraitKind) << ArgRange;
return false;
}
@@ -3948,7 +4065,7 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
return CheckVecStepTraitOperandType(*this, ExprTy, E->getExprLoc(),
E->getSourceRange());
- // Whitelist some types as extensions
+ // Explicitly list some types as extensions.
if (!CheckExtensionTraitOperandType(*this, ExprTy, E->getExprLoc(),
E->getSourceRange(), ExprKind))
return false;
@@ -3958,14 +4075,15 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
// be complete (and will attempt to complete it if it's an array of unknown
// bound).
if (ExprKind == UETT_AlignOf || ExprKind == UETT_PreferredAlignOf) {
- if (RequireCompleteType(E->getExprLoc(),
- Context.getBaseElementType(E->getType()),
- diag::err_sizeof_alignof_incomplete_type, ExprKind,
- E->getSourceRange()))
+ if (RequireCompleteSizedType(
+ E->getExprLoc(), Context.getBaseElementType(E->getType()),
+ diag::err_sizeof_alignof_incomplete_or_sizeless_type,
+ getTraitSpelling(ExprKind), E->getSourceRange()))
return true;
} else {
- if (RequireCompleteExprType(E, diag::err_sizeof_alignof_incomplete_type,
- ExprKind, E->getSourceRange()))
+ if (RequireCompleteSizedExprType(
+ E, diag::err_sizeof_alignof_incomplete_or_sizeless_type,
+ getTraitSpelling(ExprKind), E->getSourceRange()))
return true;
}
@@ -3975,7 +4093,7 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
if (ExprTy->isFunctionType()) {
Diag(E->getExprLoc(), diag::err_sizeof_alignof_function_type)
- << ExprKind << E->getSourceRange();
+ << getTraitSpelling(ExprKind) << E->getSourceRange();
return true;
}
@@ -4057,19 +4175,19 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(QualType ExprType,
if (ExprKind == UETT_VecStep)
return CheckVecStepTraitOperandType(*this, ExprType, OpLoc, ExprRange);
- // Whitelist some types as extensions
+ // Explicitly list some types as extensions.
if (!CheckExtensionTraitOperandType(*this, ExprType, OpLoc, ExprRange,
ExprKind))
return false;
- if (RequireCompleteType(OpLoc, ExprType,
- diag::err_sizeof_alignof_incomplete_type,
- ExprKind, ExprRange))
+ if (RequireCompleteSizedType(
+ OpLoc, ExprType, diag::err_sizeof_alignof_incomplete_or_sizeless_type,
+ getTraitSpelling(ExprKind), ExprRange))
return true;
if (ExprType->isFunctionType()) {
Diag(OpLoc, diag::err_sizeof_alignof_function_type)
- << ExprKind << ExprRange;
+ << getTraitSpelling(ExprKind) << ExprRange;
return true;
}
@@ -4168,6 +4286,7 @@ static void captureVariablyModifiedType(ASTContext &Context, QualType T,
case Type::Complex:
case Type::Vector:
case Type::ExtVector:
+ case Type::ConstantMatrix:
case Type::Record:
case Type::Enum:
case Type::Elaborated:
@@ -4177,6 +4296,7 @@ static void captureVariablyModifiedType(ASTContext &Context, QualType T,
case Type::ObjCObjectPointer:
case Type::ObjCTypeParam:
case Type::Pipe:
+ case Type::ExtInt:
llvm_unreachable("type class is never variably-modified!");
case Type::Adjusted:
T = cast<AdjustedType>(Ty)->getOriginalType();
@@ -4442,7 +4562,8 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
if (base && !base->getType().isNull() &&
base->getType()->isSpecificPlaceholderType(BuiltinType::OMPArraySection))
return ActOnOMPArraySectionExpr(base, lbLoc, idx, SourceLocation(),
- /*Length=*/nullptr, rbLoc);
+ SourceLocation(), /*Length*/ nullptr,
+ /*Stride=*/nullptr, rbLoc);
// Since this might be a postfix expression, get rid of ParenListExprs.
if (isa<ParenListExpr>(base)) {
@@ -4451,8 +4572,55 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
base = result.get();
}
+ // Check if base and idx form a MatrixSubscriptExpr.
+ //
+ // Helper to check for comma expressions, which are not allowed as indices for
+ // matrix subscript expressions.
+ auto CheckAndReportCommaError = [this, base, rbLoc](Expr *E) {
+ if (isa<BinaryOperator>(E) && cast<BinaryOperator>(E)->isCommaOp()) {
+ Diag(E->getExprLoc(), diag::err_matrix_subscript_comma)
+ << SourceRange(base->getBeginLoc(), rbLoc);
+ return true;
+ }
+ return false;
+ };
+ // The matrix subscript operator ([][]) is considered a single operator.
+ // Separating the index expressions by parenthesis is not allowed.
+ if (base->getType()->isSpecificPlaceholderType(
+ BuiltinType::IncompleteMatrixIdx) &&
+ !isa<MatrixSubscriptExpr>(base)) {
+ Diag(base->getExprLoc(), diag::err_matrix_separate_incomplete_index)
+ << SourceRange(base->getBeginLoc(), rbLoc);
+ return ExprError();
+ }
+ // If the base is either a MatrixSubscriptExpr or a matrix type, try to create
+ // a new MatrixSubscriptExpr.
+ auto *matSubscriptE = dyn_cast<MatrixSubscriptExpr>(base);
+ if (matSubscriptE) {
+ if (CheckAndReportCommaError(idx))
+ return ExprError();
+
+ assert(matSubscriptE->isIncomplete() &&
+ "base has to be an incomplete matrix subscript");
+ return CreateBuiltinMatrixSubscriptExpr(
+ matSubscriptE->getBase(), matSubscriptE->getRowIdx(), idx, rbLoc);
+ }
+ Expr *matrixBase = base;
+ bool IsMSPropertySubscript = isMSPropertySubscriptExpr(*this, base);
+ if (!IsMSPropertySubscript) {
+ ExprResult result = CheckPlaceholderExpr(base);
+ if (!result.isInvalid())
+ matrixBase = result.get();
+ }
+ if (matrixBase->getType()->isMatrixType()) {
+ if (CheckAndReportCommaError(idx))
+ return ExprError();
+
+ return CreateBuiltinMatrixSubscriptExpr(matrixBase, idx, nullptr, rbLoc);
+ }
+
// A comma-expression as the index is deprecated in C++2a onwards.
- if (getLangOpts().CPlusPlus2a &&
+ if (getLangOpts().CPlusPlus20 &&
((isa<BinaryOperator>(idx) && cast<BinaryOperator>(idx)->isCommaOp()) ||
(isa<CXXOperatorCallExpr>(idx) &&
cast<CXXOperatorCallExpr>(idx)->getOperator() == OO_Comma))) {
@@ -4465,7 +4633,6 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
// operand might be an overloadable type, in which case the overload
// resolution for the operator overload should get the first crack
// at the overload.
- bool IsMSPropertySubscript = false;
if (base->getType()->isNonOverloadPlaceholderType()) {
IsMSPropertySubscript = isMSPropertySubscriptExpr(*this, base);
if (!IsMSPropertySubscript) {
@@ -4526,6 +4693,79 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
return Res;
}
+ExprResult Sema::tryConvertExprToType(Expr *E, QualType Ty) {
+ InitializedEntity Entity = InitializedEntity::InitializeTemporary(Ty);
+ InitializationKind Kind =
+ InitializationKind::CreateCopy(E->getBeginLoc(), SourceLocation());
+ InitializationSequence InitSeq(*this, Entity, Kind, E);
+ return InitSeq.Perform(*this, Entity, Kind, E);
+}
+
+ExprResult Sema::CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
+ Expr *ColumnIdx,
+ SourceLocation RBLoc) {
+ ExprResult BaseR = CheckPlaceholderExpr(Base);
+ if (BaseR.isInvalid())
+ return BaseR;
+ Base = BaseR.get();
+
+ ExprResult RowR = CheckPlaceholderExpr(RowIdx);
+ if (RowR.isInvalid())
+ return RowR;
+ RowIdx = RowR.get();
+
+ if (!ColumnIdx)
+ return new (Context) MatrixSubscriptExpr(
+ Base, RowIdx, ColumnIdx, Context.IncompleteMatrixIdxTy, RBLoc);
+
+ // Build an unanalyzed expression if any of the operands is type-dependent.
+ if (Base->isTypeDependent() || RowIdx->isTypeDependent() ||
+ ColumnIdx->isTypeDependent())
+ return new (Context) MatrixSubscriptExpr(Base, RowIdx, ColumnIdx,
+ Context.DependentTy, RBLoc);
+
+ ExprResult ColumnR = CheckPlaceholderExpr(ColumnIdx);
+ if (ColumnR.isInvalid())
+ return ColumnR;
+ ColumnIdx = ColumnR.get();
+
+ // Check that IndexExpr is an integer expression. If it is a constant
+ // expression, check that it is less than Dim (= the number of elements in the
+ // corresponding dimension).
+ auto IsIndexValid = [&](Expr *IndexExpr, unsigned Dim,
+ bool IsColumnIdx) -> Expr * {
+ if (!IndexExpr->getType()->isIntegerType() &&
+ !IndexExpr->isTypeDependent()) {
+ Diag(IndexExpr->getBeginLoc(), diag::err_matrix_index_not_integer)
+ << IsColumnIdx;
+ return nullptr;
+ }
+
+ llvm::APSInt Idx;
+ if (IndexExpr->isIntegerConstantExpr(Idx, Context) &&
+ (Idx < 0 || Idx >= Dim)) {
+ Diag(IndexExpr->getBeginLoc(), diag::err_matrix_index_outside_range)
+ << IsColumnIdx << Dim;
+ return nullptr;
+ }
+
+ ExprResult ConvExpr =
+ tryConvertExprToType(IndexExpr, Context.getSizeType());
+ assert(!ConvExpr.isInvalid() &&
+ "should be able to convert any integer type to size type");
+ return ConvExpr.get();
+ };
+
+ auto *MTy = Base->getType()->getAs<ConstantMatrixType>();
+ RowIdx = IsIndexValid(RowIdx, MTy->getNumRows(), false);
+ ColumnIdx = IsIndexValid(ColumnIdx, MTy->getNumColumns(), true);
+ if (!RowIdx || !ColumnIdx)
+ return ExprError();
+
+ return new (Context) MatrixSubscriptExpr(Base, RowIdx, ColumnIdx,
+ MTy->getElementType(), RBLoc);
+}
+
void Sema::CheckAddressOfNoDeref(const Expr *E) {
ExpressionEvaluationContextRecord &LastRecord = ExprEvalContexts.back();
const Expr *StrippedExpr = E->IgnoreParenImpCasts();
@@ -4573,7 +4813,9 @@ void Sema::CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E) {
ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound,
- SourceLocation ColonLoc, Expr *Length,
+ SourceLocation ColonLocFirst,
+ SourceLocation ColonLocSecond,
+ Expr *Length, Expr *Stride,
SourceLocation RBLoc) {
if (Base->getType()->isPlaceholderType() &&
!Base->getType()->isSpecificPlaceholderType(
@@ -4601,15 +4843,25 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
return ExprError();
Length = Result.get();
}
+ if (Stride && Stride->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult Result = CheckPlaceholderExpr(Stride);
+ if (Result.isInvalid())
+ return ExprError();
+ Result = DefaultLvalueConversion(Result.get());
+ if (Result.isInvalid())
+ return ExprError();
+ Stride = Result.get();
+ }
// Build an unanalyzed expression if either operand is type-dependent.
if (Base->isTypeDependent() ||
(LowerBound &&
(LowerBound->isTypeDependent() || LowerBound->isValueDependent())) ||
- (Length && (Length->isTypeDependent() || Length->isValueDependent()))) {
- return new (Context)
- OMPArraySectionExpr(Base, LowerBound, Length, Context.DependentTy,
- VK_LValue, OK_Ordinary, ColonLoc, RBLoc);
+ (Length && (Length->isTypeDependent() || Length->isValueDependent())) ||
+ (Stride && (Stride->isTypeDependent() || Stride->isValueDependent()))) {
+ return new (Context) OMPArraySectionExpr(
+ Base, LowerBound, Length, Stride, Context.DependentTy, VK_LValue,
+ OK_Ordinary, ColonLocFirst, ColonLocSecond, RBLoc);
}
// Perform default conversions.
@@ -4653,6 +4905,20 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Diag(Length->getExprLoc(), diag::warn_omp_section_is_char)
<< 1 << Length->getSourceRange();
}
+ if (Stride) {
+ ExprResult Res =
+ PerformOpenMPImplicitIntegerConversion(Stride->getExprLoc(), Stride);
+ if (Res.isInvalid())
+ return ExprError(Diag(Stride->getExprLoc(),
+ diag::err_omp_typecheck_section_not_integer)
+ << 1 << Stride->getSourceRange());
+ Stride = Res.get();
+
+ if (Stride->getType()->isSpecificBuiltinType(BuiltinType::Char_S) ||
+ Stride->getType()->isSpecificBuiltinType(BuiltinType::Char_U))
+ Diag(Stride->getExprLoc(), diag::warn_omp_section_is_char)
+ << 1 << Stride->getSourceRange();
+ }
// C99 6.5.2.1p1: "shall have type "pointer to *object* type". Similarly,
// C++ [expr.sub]p1: The type "T" shall be a completely-defined object
@@ -4671,7 +4937,7 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
if (LowerBound && !OriginalTy->isAnyPointerType()) {
Expr::EvalResult Result;
if (LowerBound->EvaluateAsInt(Result, Context)) {
- // OpenMP 4.5, [2.4 Array Sections]
+ // OpenMP 5.0, [2.1.5 Array Sections]
// The array section must be a subset of the original array.
llvm::APSInt LowerBoundValue = Result.Val.getInt();
if (LowerBoundValue.isNegative()) {
@@ -4685,7 +4951,7 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
if (Length) {
Expr::EvalResult Result;
if (Length->EvaluateAsInt(Result, Context)) {
- // OpenMP 4.5, [2.4 Array Sections]
+ // OpenMP 5.0, [2.1.5 Array Sections]
// The length must evaluate to non-negative integers.
llvm::APSInt LengthValue = Result.Val.getInt();
if (LengthValue.isNegative()) {
@@ -4695,17 +4961,32 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
return ExprError();
}
}
- } else if (ColonLoc.isValid() &&
+ } else if (ColonLocFirst.isValid() &&
(OriginalTy.isNull() || (!OriginalTy->isConstantArrayType() &&
!OriginalTy->isVariableArrayType()))) {
- // OpenMP 4.5, [2.4 Array Sections]
+ // OpenMP 5.0, [2.1.5 Array Sections]
// When the size of the array dimension is not known, the length must be
// specified explicitly.
- Diag(ColonLoc, diag::err_omp_section_length_undefined)
+ Diag(ColonLocFirst, diag::err_omp_section_length_undefined)
<< (!OriginalTy.isNull() && OriginalTy->isArrayType());
return ExprError();
}
+ if (Stride) {
+ Expr::EvalResult Result;
+ if (Stride->EvaluateAsInt(Result, Context)) {
+ // OpenMP 5.0, [2.1.5 Array Sections]
+ // The stride must evaluate to a positive integer.
+ llvm::APSInt StrideValue = Result.Val.getInt();
+ if (!StrideValue.isStrictlyPositive()) {
+ Diag(Stride->getExprLoc(), diag::err_omp_section_stride_non_positive)
+ << StrideValue.toString(/*Radix=*/10, /*Signed=*/true)
+ << Stride->getSourceRange();
+ return ExprError();
+ }
+ }
+ }
+
if (!Base->getType()->isSpecificPlaceholderType(
BuiltinType::OMPArraySection)) {
ExprResult Result = DefaultFunctionArrayLvalueConversion(Base);
@@ -4713,9 +4994,371 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
return ExprError();
Base = Result.get();
}
- return new (Context)
- OMPArraySectionExpr(Base, LowerBound, Length, Context.OMPArraySectionTy,
- VK_LValue, OK_Ordinary, ColonLoc, RBLoc);
+ return new (Context) OMPArraySectionExpr(
+ Base, LowerBound, Length, Stride, Context.OMPArraySectionTy, VK_LValue,
+ OK_Ordinary, ColonLocFirst, ColonLocSecond, RBLoc);
+}
+
+ExprResult Sema::ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
+ SourceLocation RParenLoc,
+ ArrayRef<Expr *> Dims,
+ ArrayRef<SourceRange> Brackets) {
+ if (Base->getType()->isPlaceholderType()) {
+ ExprResult Result = CheckPlaceholderExpr(Base);
+ if (Result.isInvalid())
+ return ExprError();
+ Result = DefaultLvalueConversion(Result.get());
+ if (Result.isInvalid())
+ return ExprError();
+ Base = Result.get();
+ }
+ QualType BaseTy = Base->getType();
+ // Delay analysis of the types/expressions if instantiation/specialization is
+ // required.
+ if (!BaseTy->isPointerType() && Base->isTypeDependent())
+ return OMPArrayShapingExpr::Create(Context, Context.DependentTy, Base,
+ LParenLoc, RParenLoc, Dims, Brackets);
+ if (!BaseTy->isPointerType() ||
+ (!Base->isTypeDependent() &&
+ BaseTy->getPointeeType()->isIncompleteType()))
+ return ExprError(Diag(Base->getExprLoc(),
+ diag::err_omp_non_pointer_type_array_shaping_base)
+ << Base->getSourceRange());
+
+ SmallVector<Expr *, 4> NewDims;
+ bool ErrorFound = false;
+ for (Expr *Dim : Dims) {
+ if (Dim->getType()->isPlaceholderType()) {
+ ExprResult Result = CheckPlaceholderExpr(Dim);
+ if (Result.isInvalid()) {
+ ErrorFound = true;
+ continue;
+ }
+ Result = DefaultLvalueConversion(Result.get());
+ if (Result.isInvalid()) {
+ ErrorFound = true;
+ continue;
+ }
+ Dim = Result.get();
+ }
+ if (!Dim->isTypeDependent()) {
+ ExprResult Result =
+ PerformOpenMPImplicitIntegerConversion(Dim->getExprLoc(), Dim);
+ if (Result.isInvalid()) {
+ ErrorFound = true;
+ Diag(Dim->getExprLoc(), diag::err_omp_typecheck_shaping_not_integer)
+ << Dim->getSourceRange();
+ continue;
+ }
+ Dim = Result.get();
+ Expr::EvalResult EvResult;
+ if (!Dim->isValueDependent() && Dim->EvaluateAsInt(EvResult, Context)) {
+ // OpenMP 5.0, [2.1.4 Array Shaping]
+ // Each si is an integral type expression that must evaluate to a
+ // positive integer.
+ llvm::APSInt Value = EvResult.Val.getInt();
+ if (!Value.isStrictlyPositive()) {
+ Diag(Dim->getExprLoc(), diag::err_omp_shaping_dimension_not_positive)
+ << Value.toString(/*Radix=*/10, /*Signed=*/true)
+ << Dim->getSourceRange();
+ ErrorFound = true;
+ continue;
+ }
+ }
+ }
+ NewDims.push_back(Dim);
+ }
+ if (ErrorFound)
+ return ExprError();
+ return OMPArrayShapingExpr::Create(Context, Context.OMPArrayShapingTy, Base,
+ LParenLoc, RParenLoc, NewDims, Brackets);
+}
+
+ExprResult Sema::ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
+ SourceLocation LLoc, SourceLocation RLoc,
+ ArrayRef<OMPIteratorData> Data) {
+ SmallVector<OMPIteratorExpr::IteratorDefinition, 4> ID;
+ bool IsCorrect = true;
+ for (const OMPIteratorData &D : Data) {
+ TypeSourceInfo *TInfo = nullptr;
+ SourceLocation StartLoc;
+ QualType DeclTy;
+ if (!D.Type.getAsOpaquePtr()) {
+ // OpenMP 5.0, 2.1.6 Iterators
+ // In an iterator-specifier, if the iterator-type is not specified then
+ // the type of that iterator is of int type.
+ DeclTy = Context.IntTy;
+ StartLoc = D.DeclIdentLoc;
+ } else {
+ DeclTy = GetTypeFromParser(D.Type, &TInfo);
+ StartLoc = TInfo->getTypeLoc().getBeginLoc();
+ }
+
+ bool IsDeclTyDependent = DeclTy->isDependentType() ||
+ DeclTy->containsUnexpandedParameterPack() ||
+ DeclTy->isInstantiationDependentType();
+ if (!IsDeclTyDependent) {
+ if (!DeclTy->isIntegralType(Context) && !DeclTy->isAnyPointerType()) {
+ // OpenMP 5.0, 2.1.6 Iterators, Restrictions, C/C++
+ // The iterator-type must be an integral or pointer type.
+ Diag(StartLoc, diag::err_omp_iterator_not_integral_or_pointer)
+ << DeclTy;
+ IsCorrect = false;
+ continue;
+ }
+ if (DeclTy.isConstant(Context)) {
+ // OpenMP 5.0, 2.1.6 Iterators, Restrictions, C/C++
+ // The iterator-type must not be const qualified.
+ Diag(StartLoc, diag::err_omp_iterator_not_integral_or_pointer)
+ << DeclTy;
+ IsCorrect = false;
+ continue;
+ }
+ }
+
+ // Iterator declaration.
+ assert(D.DeclIdent && "Identifier expected.");
+ // Always try to create iterator declarator to avoid extra error messages
+ // about unknown declarations use.
+ auto *VD = VarDecl::Create(Context, CurContext, StartLoc, D.DeclIdentLoc,
+ D.DeclIdent, DeclTy, TInfo, SC_None);
+ VD->setImplicit();
+ if (S) {
+ // Check for conflicting previous declaration.
+ DeclarationNameInfo NameInfo(VD->getDeclName(), D.DeclIdentLoc);
+ LookupResult Previous(*this, NameInfo, LookupOrdinaryName,
+ ForVisibleRedeclaration);
+ Previous.suppressDiagnostics();
+ LookupName(Previous, S);
+
+ FilterLookupForScope(Previous, CurContext, S, /*ConsiderLinkage=*/false,
+ /*AllowInlineNamespace=*/false);
+ if (!Previous.empty()) {
+ NamedDecl *Old = Previous.getRepresentativeDecl();
+ Diag(D.DeclIdentLoc, diag::err_redefinition) << VD->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ } else {
+ PushOnScopeChains(VD, S);
+ }
+ } else {
+ CurContext->addDecl(VD);
+ }
+ Expr *Begin = D.Range.Begin;
+ if (!IsDeclTyDependent && Begin && !Begin->isTypeDependent()) {
+ ExprResult BeginRes =
+ PerformImplicitConversion(Begin, DeclTy, AA_Converting);
+ Begin = BeginRes.get();
+ }
+ Expr *End = D.Range.End;
+ if (!IsDeclTyDependent && End && !End->isTypeDependent()) {
+ ExprResult EndRes = PerformImplicitConversion(End, DeclTy, AA_Converting);
+ End = EndRes.get();
+ }
+ Expr *Step = D.Range.Step;
+ if (!IsDeclTyDependent && Step && !Step->isTypeDependent()) {
+ if (!Step->getType()->isIntegralType(Context)) {
+ Diag(Step->getExprLoc(), diag::err_omp_iterator_step_not_integral)
+ << Step << Step->getSourceRange();
+ IsCorrect = false;
+ continue;
+ }
+ llvm::APSInt Result;
+ bool IsConstant = Step->isIntegerConstantExpr(Result, Context);
+ // OpenMP 5.0, 2.1.6 Iterators, Restrictions
+ // If the step expression of a range-specification equals zero, the
+ // behavior is unspecified.
+ if (IsConstant && Result.isNullValue()) {
+ Diag(Step->getExprLoc(), diag::err_omp_iterator_step_constant_zero)
+ << Step << Step->getSourceRange();
+ IsCorrect = false;
+ continue;
+ }
+ }
+ if (!Begin || !End || !IsCorrect) {
+ IsCorrect = false;
+ continue;
+ }
+ OMPIteratorExpr::IteratorDefinition &IDElem = ID.emplace_back();
+ IDElem.IteratorDecl = VD;
+ IDElem.AssignmentLoc = D.AssignLoc;
+ IDElem.Range.Begin = Begin;
+ IDElem.Range.End = End;
+ IDElem.Range.Step = Step;
+ IDElem.ColonLoc = D.ColonLoc;
+ IDElem.SecondColonLoc = D.SecColonLoc;
+ }
+ if (!IsCorrect) {
+ // Invalidate all created iterator declarations if error is found.
+ for (const OMPIteratorExpr::IteratorDefinition &D : ID) {
+ if (Decl *ID = D.IteratorDecl)
+ ID->setInvalidDecl();
+ }
+ return ExprError();
+ }
+ SmallVector<OMPIteratorHelperData, 4> Helpers;
+ if (!CurContext->isDependentContext()) {
+    // Build number of iterations for each iteration range.
+ // Ni = ((Stepi > 0) ? ((Endi + Stepi -1 - Begini)/Stepi) :
+ // ((Begini-Stepi-1-Endi) / -Stepi);
+ for (OMPIteratorExpr::IteratorDefinition &D : ID) {
+ // (Endi - Begini)
+ ExprResult Res = CreateBuiltinBinOp(D.AssignmentLoc, BO_Sub, D.Range.End,
+ D.Range.Begin);
+ if(!Res.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ ExprResult St, St1;
+ if (D.Range.Step) {
+ St = D.Range.Step;
+ // (Endi - Begini) + Stepi
+ Res = CreateBuiltinBinOp(D.AssignmentLoc, BO_Add, Res.get(), St.get());
+ if (!Res.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ // (Endi - Begini) + Stepi - 1
+ Res =
+ CreateBuiltinBinOp(D.AssignmentLoc, BO_Sub, Res.get(),
+ ActOnIntegerConstant(D.AssignmentLoc, 1).get());
+ if (!Res.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ // ((Endi - Begini) + Stepi - 1) / Stepi
+ Res = CreateBuiltinBinOp(D.AssignmentLoc, BO_Div, Res.get(), St.get());
+ if (!Res.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ St1 = CreateBuiltinUnaryOp(D.AssignmentLoc, UO_Minus, D.Range.Step);
+ // (Begini - Endi)
+ ExprResult Res1 = CreateBuiltinBinOp(D.AssignmentLoc, BO_Sub,
+ D.Range.Begin, D.Range.End);
+ if (!Res1.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ // (Begini - Endi) - Stepi
+ Res1 =
+ CreateBuiltinBinOp(D.AssignmentLoc, BO_Add, Res1.get(), St1.get());
+ if (!Res1.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ // (Begini - Endi) - Stepi - 1
+ Res1 =
+ CreateBuiltinBinOp(D.AssignmentLoc, BO_Sub, Res1.get(),
+ ActOnIntegerConstant(D.AssignmentLoc, 1).get());
+ if (!Res1.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ // ((Begini - Endi) - Stepi - 1) / (-Stepi)
+ Res1 =
+ CreateBuiltinBinOp(D.AssignmentLoc, BO_Div, Res1.get(), St1.get());
+ if (!Res1.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ // Stepi > 0.
+ ExprResult CmpRes =
+ CreateBuiltinBinOp(D.AssignmentLoc, BO_GT, D.Range.Step,
+ ActOnIntegerConstant(D.AssignmentLoc, 0).get());
+ if (!CmpRes.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ Res = ActOnConditionalOp(D.AssignmentLoc, D.AssignmentLoc, CmpRes.get(),
+ Res.get(), Res1.get());
+ if (!Res.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ }
+ Res = ActOnFinishFullExpr(Res.get(), /*DiscardedValue=*/false);
+ if (!Res.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+
+ // Build counter update.
+ // Build counter.
+ auto *CounterVD =
+ VarDecl::Create(Context, CurContext, D.IteratorDecl->getBeginLoc(),
+ D.IteratorDecl->getBeginLoc(), nullptr,
+ Res.get()->getType(), nullptr, SC_None);
+ CounterVD->setImplicit();
+ ExprResult RefRes =
+ BuildDeclRefExpr(CounterVD, CounterVD->getType(), VK_LValue,
+ D.IteratorDecl->getBeginLoc());
+ // Build counter update.
+ // I = Begini + counter * Stepi;
+ ExprResult UpdateRes;
+ if (D.Range.Step) {
+ UpdateRes = CreateBuiltinBinOp(
+ D.AssignmentLoc, BO_Mul,
+ DefaultLvalueConversion(RefRes.get()).get(), St.get());
+ } else {
+ UpdateRes = DefaultLvalueConversion(RefRes.get());
+ }
+ if (!UpdateRes.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ UpdateRes = CreateBuiltinBinOp(D.AssignmentLoc, BO_Add, D.Range.Begin,
+ UpdateRes.get());
+ if (!UpdateRes.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ ExprResult VDRes =
+ BuildDeclRefExpr(cast<VarDecl>(D.IteratorDecl),
+ cast<VarDecl>(D.IteratorDecl)->getType(), VK_LValue,
+ D.IteratorDecl->getBeginLoc());
+ UpdateRes = CreateBuiltinBinOp(D.AssignmentLoc, BO_Assign, VDRes.get(),
+ UpdateRes.get());
+ if (!UpdateRes.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ UpdateRes =
+ ActOnFinishFullExpr(UpdateRes.get(), /*DiscardedValue=*/true);
+ if (!UpdateRes.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ ExprResult CounterUpdateRes =
+ CreateBuiltinUnaryOp(D.AssignmentLoc, UO_PreInc, RefRes.get());
+ if (!CounterUpdateRes.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ CounterUpdateRes =
+ ActOnFinishFullExpr(CounterUpdateRes.get(), /*DiscardedValue=*/true);
+ if (!CounterUpdateRes.isUsable()) {
+ IsCorrect = false;
+ continue;
+ }
+ OMPIteratorHelperData &HD = Helpers.emplace_back();
+ HD.CounterVD = CounterVD;
+ HD.Upper = Res.get();
+ HD.Update = UpdateRes.get();
+ HD.CounterUpdate = CounterUpdateRes.get();
+ }
+ } else {
+ Helpers.assign(ID.size(), {});
+ }
+ if (!IsCorrect) {
+ // Invalidate all created iterator declarations if error is found.
+ for (const OMPIteratorExpr::IteratorDefinition &D : ID) {
+ if (Decl *ID = D.IteratorDecl)
+ ID->setInvalidDecl();
+ }
+ return ExprError();
+ }
+ return OMPIteratorExpr::Create(Context, Context.OMPIteratorTy, IteratorKwLoc,
+ LLoc, RLoc, ID, Helpers);
}
ExprResult
@@ -4873,8 +5516,9 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
// See IsCForbiddenLValueType.
if (!ResultType.hasQualifiers()) VK = VK_RValue;
} else if (!ResultType->isDependentType() &&
- RequireCompleteType(LLoc, ResultType,
- diag::err_subscript_incomplete_type, BaseExpr))
+ RequireCompleteSizedType(
+ LLoc, ResultType,
+ diag::err_subscript_incomplete_or_sizeless_type, BaseExpr))
return ExprError();
assert(VK == VK_RValue || LangOpts.CPlusPlus ||
@@ -4914,6 +5558,15 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
bool Sema::CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param) {
if (Param->hasUnparsedDefaultArg()) {
+ // If we've already cleared out the location for the default argument,
+ // that means we're parsing it right now.
+ if (!UnparsedDefaultArgLocs.count(Param)) {
+ Diag(Param->getBeginLoc(), diag::err_recursive_default_argument) << FD;
+ Diag(CallLoc, diag::note_recursive_default_argument_used_here);
+ Param->setInvalidDecl();
+ return true;
+ }
+
Diag(CallLoc,
diag::err_use_of_default_argument_to_function_declared_later) <<
FD << cast<CXXRecordDecl>(FD->getDeclContext())->getDeclName();
@@ -4922,90 +5575,11 @@ bool Sema::CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
return true;
}
- if (Param->hasUninstantiatedDefaultArg()) {
- Expr *UninstExpr = Param->getUninstantiatedDefaultArg();
-
- EnterExpressionEvaluationContext EvalContext(
- *this, ExpressionEvaluationContext::PotentiallyEvaluated, Param);
-
- // Instantiate the expression.
- //
- // FIXME: Pass in a correct Pattern argument, otherwise
- // getTemplateInstantiationArgs uses the lexical context of FD, e.g.
- //
- // template<typename T>
- // struct A {
- // static int FooImpl();
- //
- // template<typename Tp>
- // // bug: default argument A<T>::FooImpl() is evaluated with 2-level
- // // template argument list [[T], [Tp]], should be [[Tp]].
- // friend A<Tp> Foo(int a);
- // };
- //
- // template<typename T>
- // A<T> Foo(int a = A<T>::FooImpl());
- MultiLevelTemplateArgumentList MutiLevelArgList
- = getTemplateInstantiationArgs(FD, nullptr, /*RelativeToPrimary=*/true);
-
- InstantiatingTemplate Inst(*this, CallLoc, Param,
- MutiLevelArgList.getInnermost());
- if (Inst.isInvalid())
- return true;
- if (Inst.isAlreadyInstantiating()) {
- Diag(Param->getBeginLoc(), diag::err_recursive_default_argument) << FD;
- Param->setInvalidDecl();
- return true;
- }
-
- ExprResult Result;
- {
- // C++ [dcl.fct.default]p5:
- // The names in the [default argument] expression are bound, and
- // the semantic constraints are checked, at the point where the
- // default argument expression appears.
- ContextRAII SavedContext(*this, FD);
- LocalInstantiationScope Local(*this);
- runWithSufficientStackSpace(CallLoc, [&] {
- Result = SubstInitializer(UninstExpr, MutiLevelArgList,
- /*DirectInit*/false);
- });
- }
- if (Result.isInvalid())
- return true;
-
- // Check the expression as an initializer for the parameter.
- InitializedEntity Entity
- = InitializedEntity::InitializeParameter(Context, Param);
- InitializationKind Kind = InitializationKind::CreateCopy(
- Param->getLocation(),
- /*FIXME:EqualLoc*/ UninstExpr->getBeginLoc());
- Expr *ResultE = Result.getAs<Expr>();
-
- InitializationSequence InitSeq(*this, Entity, Kind, ResultE);
- Result = InitSeq.Perform(*this, Entity, Kind, ResultE);
- if (Result.isInvalid())
- return true;
-
- Result =
- ActOnFinishFullExpr(Result.getAs<Expr>(), Param->getOuterLocStart(),
- /*DiscardedValue*/ false);
- if (Result.isInvalid())
- return true;
-
- // Remember the instantiated default argument.
- Param->setDefaultArg(Result.getAs<Expr>());
- if (ASTMutationListener *L = getASTMutationListener()) {
- L->DefaultArgumentInstantiated(Param);
- }
- }
-
- // If the default argument expression is not set yet, we are building it now.
- if (!Param->hasInit()) {
- Diag(Param->getBeginLoc(), diag::err_recursive_default_argument) << FD;
- Param->setInvalidDecl();
+ if (Param->hasUninstantiatedDefaultArg() &&
+ InstantiateDefaultArgument(CallLoc, FD, Param))
return true;
- }
+
+ assert(Param->hasInit() && "default argument but no initializer?");
// If the default expression creates temporaries, we need to
// push them to the current stack of expression temporaries so they'll
@@ -5038,6 +5612,7 @@ bool Sema::CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD, ParmVarDecl *Param) {
+ assert(Param->hasDefaultArg() && "can't build nonexistent default arg");
if (CheckCXXDefaultArgExpr(CallLoc, FD, Param))
return ExprError();
return CXXDefaultArgExpr::Create(Context, CallLoc, Param, CurContext);
@@ -5183,7 +5758,7 @@ Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
// Emit the location of the prototype.
if (!TC && FDecl && !FDecl->getBuiltinID() && !IsExecConfig)
- Diag(FDecl->getBeginLoc(), diag::note_callee_decl) << FDecl;
+ Diag(FDecl->getLocation(), diag::note_callee_decl) << FDecl;
return true;
}
@@ -5228,7 +5803,7 @@ Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
// Emit the location of the prototype.
if (!TC && FDecl && !FDecl->getBuiltinID() && !IsExecConfig)
- Diag(FDecl->getBeginLoc(), diag::note_callee_decl) << FDecl;
+ Diag(FDecl->getLocation(), diag::note_callee_decl) << FDecl;
// This deletes the extra arguments.
Call->shrinkNumArgs(NumParams);
@@ -5341,9 +5916,6 @@ bool Sema::GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
for (Expr *A : Args.slice(ArgIx)) {
ExprResult Arg = DefaultVariadicArgumentPromotion(A, CallType, FDecl);
Invalid |= Arg.isInvalid();
- // Copy blocks to the heap.
- if (A->getType()->isBlockPointerType())
- maybeExtendBlockObject(Arg);
AllArgs.push_back(Arg.get());
}
}
@@ -5476,7 +6048,10 @@ static bool isPlaceholderToRemoveAsArg(QualType type) {
// These are always invalid as call arguments and should be reported.
case BuiltinType::BoundMember:
case BuiltinType::BuiltinFn:
+ case BuiltinType::IncompleteMatrixIdx:
case BuiltinType::OMPArraySection:
+ case BuiltinType::OMPArrayShaping:
+ case BuiltinType::OMPIterator:
return true;
}
@@ -5599,7 +6174,8 @@ static void checkDirectCallValidity(Sema &S, const Expr *Fn,
if (Callee->getMinRequiredArguments() > ArgExprs.size())
return;
- if (const EnableIfAttr *Attr = S.CheckEnableIf(Callee, ArgExprs, true)) {
+ if (const EnableIfAttr *Attr =
+ S.CheckEnableIf(Callee, Fn->getBeginLoc(), ArgExprs, true)) {
S.Diag(Fn->getBeginLoc(),
isa<CXXMethodDecl>(Callee)
? diag::err_ovl_no_viable_member_function_in_call
@@ -5706,13 +6282,17 @@ ExprResult Sema::ActOnCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
if (auto *ULE = dyn_cast<UnresolvedLookupExpr>(Fn)) {
if (ULE->hasExplicitTemplateArgs() &&
ULE->decls_begin() == ULE->decls_end()) {
- Diag(Fn->getExprLoc(), getLangOpts().CPlusPlus2a
+ Diag(Fn->getExprLoc(), getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_adl_only_template_id
: diag::ext_adl_only_template_id)
<< ULE->getName();
}
}
+ if (LangOpts.OpenMP)
+ Call = ActOnOpenMPCall(Call, Scope, LParenLoc, ArgExprs, RParenLoc,
+ ExecConfig);
+
return Call;
}
@@ -6123,6 +6703,18 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
if (NDecl)
DiagnoseSentinelCalls(NDecl, LParenLoc, Args);
+ // Warn for unions passing across security boundary (CMSE).
+ if (FuncT != nullptr && FuncT->getCmseNSCallAttr()) {
+ for (unsigned i = 0, e = Args.size(); i != e; i++) {
+ if (const auto *RT =
+ dyn_cast<RecordType>(Args[i]->getType().getCanonicalType())) {
+ if (RT->getDecl()->isOrContainsUnion())
+ Diag(Args[i]->getBeginLoc(), diag::warn_cmse_nonsecure_union)
+ << 0 << i;
+ }
+ }
+ }
+
// Do special checking on direct calls to functions.
if (FDecl) {
if (CheckFunctionCall(FDecl, TheCall, Proto))
@@ -6140,7 +6732,7 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
return ExprError();
}
- return MaybeBindToTemporary(TheCall);
+ return CheckForImmediateInvocation(MaybeBindToTemporary(TheCall), FDecl);
}
ExprResult
@@ -6163,10 +6755,10 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
QualType literalType = TInfo->getType();
if (literalType->isArrayType()) {
- if (RequireCompleteType(LParenLoc, Context.getBaseElementType(literalType),
- diag::err_illegal_decl_array_incomplete_type,
- SourceRange(LParenLoc,
- LiteralExpr->getSourceRange().getEnd())))
+ if (RequireCompleteSizedType(
+ LParenLoc, Context.getBaseElementType(literalType),
+ diag::err_array_incomplete_or_sizeless_type,
+ SourceRange(LParenLoc, LiteralExpr->getSourceRange().getEnd())))
return ExprError();
if (literalType->isVariableArrayType())
return ExprError(Diag(LParenLoc, diag::err_variable_object_no_init)
@@ -6240,14 +6832,24 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
return ExprError();
}
- // Compound literals that have automatic storage duration are destroyed at
- // the end of the scope. Emit diagnostics if it is or contains a C union type
- // that is non-trivial to destruct.
- if (!isFileScope)
+ if (!isFileScope && !getLangOpts().CPlusPlus) {
+ // Compound literals that have automatic storage duration are destroyed at
+ // the end of the scope in C; in C++, they're just temporaries.
+
+ // Emit diagnostics if it is or contains a C union type that is non-trivial
+ // to destruct.
if (E->getType().hasNonTrivialToPrimitiveDestructCUnion())
checkNonTrivialCUnion(E->getType(), E->getExprLoc(),
NTCUC_CompoundLiteral, NTCUK_Destruct);
+ // Diagnose jumps that enter or exit the lifetime of the compound literal.
+ if (literalType.isDestructedType()) {
+ Cleanup.setExprNeedsCleanups(true);
+ ExprCleanupObjects.push_back(E);
+ getCurFunction()->setHasBranchProtectedScope();
+ }
+ }
+
if (E->getType().hasNonTrivialToPrimitiveDefaultInitializeCUnion() ||
E->getType().hasNonTrivialToPrimitiveCopyCUnion())
checkNonTrivialCUnionInInitializer(E->getInitializer(),
@@ -6313,7 +6915,7 @@ Sema::ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
// already diagnose use of (non-C++20) C99 designator syntax.
if (getLangOpts().CPlusPlus && !DiagnosedArrayDesignator &&
!DiagnosedNestedDesignator && !DiagnosedMixedDesignator) {
- Diag(FirstDesignator, getLangOpts().CPlusPlus2a
+ Diag(FirstDesignator, getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_designated_init
: diag::ext_cxx_designated_init);
} else if (!getLangOpts().CPlusPlus && !getLangOpts().C99) {
@@ -7459,7 +8061,8 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// The OpenCL operator with a vector condition is sufficiently
// different to merit its own checker.
- if (getLangOpts().OpenCL && Cond.get()->getType()->isVectorType())
+ if ((getLangOpts().OpenCL && Cond.get()->getType()->isVectorType()) ||
+ Cond.get()->getType()->isExtVectorType())
return OpenCLCheckVectorConditional(*this, Cond, LHS, RHS, QuestionLoc);
// First, check the condition.
@@ -7509,6 +8112,11 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
return ResTy;
}
+ // And if they're both bfloat (which isn't arithmetic), that's fine too.
+ if (LHSTy->isBFloat16Type() && RHSTy->isBFloat16Type()) {
+ return LHSTy;
+ }
+
// If both operands are the same structure or union type, the result is that
// type.
if (const RecordType *LHSRT = LHSTy->getAs<RecordType>()) { // C99 6.5.15p3
@@ -7559,6 +8167,11 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
/*IsIntFirstExpr=*/false))
return LHSTy;
+ // Allow ?: operations in which both operands have the same
+ // built-in sizeless type.
+ if (LHSTy->isSizelessBuiltinType() && LHSTy == RHSTy)
+ return LHSTy;
+
// Emit a better diagnostic if one of the expressions is a null pointer
// constant and the other is not a pointer type. In this case, the user most
// likely forgot to take the address of the other expression.
@@ -8002,6 +8615,24 @@ ExprResult Sema::ActOnConditionalOp(SourceLocation QuestionLoc,
ColonLoc, result, VK, OK);
}
+// Check if we have a conversion between incompatible cmse function pointer
+// types, that is, a conversion between a function pointer with the
+// cmse_nonsecure_call attribute and one without.
+static bool IsInvalidCmseNSCallConversion(Sema &S, QualType FromType,
+ QualType ToType) {
+ if (const auto *ToFn =
+ dyn_cast<FunctionType>(S.Context.getCanonicalType(ToType))) {
+ if (const auto *FromFn =
+ dyn_cast<FunctionType>(S.Context.getCanonicalType(FromType))) {
+ FunctionType::ExtInfo ToEInfo = ToFn->getExtInfo();
+ FunctionType::ExtInfo FromEInfo = FromFn->getExtInfo();
+
+ return ToEInfo.getCmseNSCall() != FromEInfo.getCmseNSCall();
+ }
+ }
+ return false;
+}
+
// checkPointerTypesForAssignment - This is a very tricky routine (despite
// being closely modeled after the C99 spec:-). The odd characteristic of this
// routine is it effectively iqnores the qualifiers on the top level pointee.
@@ -8133,11 +8764,15 @@ checkPointerTypesForAssignment(Sema &S, QualType LHSType, QualType RHSType) {
}
// General pointer incompatibility takes priority over qualifiers.
+ if (RHSType->isFunctionPointerType() && LHSType->isFunctionPointerType())
+ return Sema::IncompatibleFunctionPointer;
return Sema::IncompatiblePointer;
}
if (!S.getLangOpts().CPlusPlus &&
S.IsFunctionConversion(ltrans, rtrans, ltrans))
- return Sema::IncompatiblePointer;
+ return Sema::IncompatibleFunctionPointer;
+ if (IsInvalidCmseNSCallConversion(S, ltrans, rtrans))
+ return Sema::IncompatibleFunctionPointer;
return ConvTy;
}
@@ -8248,7 +8883,7 @@ Sema::CheckAssignmentConstraints(SourceLocation Loc,
/// type ElementType.
static bool isVector(QualType QT, QualType ElementType) {
if (const VectorType *VT = QT->getAs<VectorType>())
- return VT->getElementType() == ElementType;
+ return VT->getElementType().getCanonicalType() == ElementType;
return false;
}
@@ -8691,7 +9326,7 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
ImplicitConversionSequence ICS =
TryImplicitConversion(RHS.get(), LHSType.getUnqualifiedType(),
/*SuppressUserConversions=*/false,
- /*AllowExplicit=*/false,
+ AllowedExplicit::None,
/*InOverloadResolution=*/false,
/*CStyle=*/false,
/*AllowObjCWritebackConversion=*/false);
@@ -8786,7 +9421,7 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
if (getLangOpts().ObjC &&
(CheckObjCBridgeRelatedConversions(E->getBeginLoc(), LHSType,
E->getType(), E, Diagnose) ||
- ConversionToObjCStringLiteralCheck(LHSType, E, Diagnose))) {
+ CheckConversionToObjCLiteral(LHSType, E, Diagnose))) {
if (!Diagnose)
return Incompatible;
// Replace the expression with a corrected version and continue so we
@@ -9083,7 +9718,13 @@ static bool tryGCCVectorConvertAndSplat(Sema &S, ExprResult *Scalar,
// Reject cases where the scalar type is not a constant and has a higher
// Order than the vector element type.
llvm::APFloat Result(0.0);
- bool CstScalar = Scalar->get()->EvaluateAsFloat(Result, S.Context);
+
+ // Determine whether this is a constant scalar. In the event that the
+ // value is dependent (and thus cannot be evaluated by the constant
+ // evaluator), skip the evaluation. This will then diagnose once the
+ // expression is instantiated.
+ bool CstScalar = Scalar->get()->isValueDependent() ||
+ Scalar->get()->EvaluateAsFloat(Result, S.Context);
int Order = S.Context.getFloatingTypeOrder(VectorEltTy, ScalarTy);
if (!CstScalar && Order < 0)
return true;
@@ -9106,7 +9747,8 @@ static bool tryGCCVectorConvertAndSplat(Sema &S, ExprResult *Scalar,
ScalarCast = CK_IntegralToFloating;
} else
return true;
- }
+ } else if (ScalarTy->isEnumeralType())
+ return true;
// Adjust scalar if desired.
if (Scalar) {
@@ -9395,6 +10037,9 @@ QualType Sema::CheckMultiplyDivideOperands(ExprResult &LHS, ExprResult &RHS,
return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign,
/*AllowBothBool*/getLangOpts().AltiVec,
/*AllowBoolConversions*/false);
+ if (!IsDiv && (LHS.get()->getType()->isConstantMatrixType() ||
+ RHS.get()->getType()->isConstantMatrixType()))
+ return CheckMatrixMultiplyOperands(LHS, RHS, Loc, IsCompAssign);
QualType compType = UsualArithmeticConversions(
LHS, RHS, Loc, IsCompAssign ? ACK_CompAssign : ACK_Arithmetic);
@@ -9509,9 +10154,10 @@ static bool checkArithmeticIncompletePointerType(Sema &S, SourceLocation Loc,
assert(ResType->isAnyPointerType() && !ResType->isDependentType());
QualType PointeeTy = ResType->getPointeeType();
- return S.RequireCompleteType(Loc, PointeeTy,
- diag::err_typecheck_arithmetic_incomplete_type,
- PointeeTy, Operand->getSourceRange());
+ return S.RequireCompleteSizedType(
+ Loc, PointeeTy,
+ diag::err_typecheck_arithmetic_incomplete_or_sizeless_type,
+ Operand->getSourceRange());
}
/// Check the validity of an arithmetic pointer operand.
@@ -9565,10 +10211,8 @@ static bool checkArithmeticBinOpPointerOperands(Sema &S, SourceLocation Loc,
if (isRHSPointer) RHSPointeeTy = RHSExpr->getType()->getPointeeType();
// if both are pointers check if operation is valid wrt address spaces
- if (S.getLangOpts().OpenCL && isLHSPointer && isRHSPointer) {
- const PointerType *lhsPtr = LHSExpr->getType()->castAs<PointerType>();
- const PointerType *rhsPtr = RHSExpr->getType()->castAs<PointerType>();
- if (!lhsPtr->isAddressSpaceOverlapping(*rhsPtr)) {
+ if (isLHSPointer && isRHSPointer) {
+ if (!LHSPointeeTy.isAddressSpaceOverlapping(RHSPointeeTy)) {
S.Diag(Loc,
diag::err_typecheck_op_on_nonoverlapping_address_space_pointers)
<< LHSExpr->getType() << RHSExpr->getType() << 1 /*arithmetic op*/
@@ -9715,6 +10359,11 @@ QualType Sema::CheckAdditionOperands(ExprResult &LHS, ExprResult &RHS,
return compType;
}
+ if (LHS.get()->getType()->isConstantMatrixType() ||
+ RHS.get()->getType()->isConstantMatrixType()) {
+ return CheckMatrixElementwiseOperands(LHS, RHS, Loc, CompLHSTy);
+ }
+
QualType compType = UsualArithmeticConversions(
LHS, RHS, Loc, CompLHSTy ? ACK_CompAssign : ACK_Arithmetic);
if (LHS.isInvalid() || RHS.isInvalid())
@@ -9810,6 +10459,11 @@ QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS,
return compType;
}
+ if (LHS.get()->getType()->isConstantMatrixType() ||
+ RHS.get()->getType()->isConstantMatrixType()) {
+ return CheckMatrixElementwiseOperands(LHS, RHS, Loc, CompLHSTy);
+ }
+
QualType compType = UsualArithmeticConversions(
LHS, RHS, Loc, CompLHSTy ? ACK_CompAssign : ACK_Arithmetic);
if (LHS.isInvalid() || RHS.isInvalid())
@@ -9933,14 +10587,19 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
<< RHS.get()->getSourceRange());
return;
}
- llvm::APInt LeftBits(Right.getBitWidth(),
- S.Context.getTypeSize(LHS.get()->getType()));
+
+ QualType LHSExprType = LHS.get()->getType();
+ uint64_t LeftSize = LHSExprType->isExtIntType()
+ ? S.Context.getIntWidth(LHSExprType)
+ : S.Context.getTypeSize(LHSExprType);
+ llvm::APInt LeftBits(Right.getBitWidth(), LeftSize);
if (Right.uge(LeftBits)) {
S.DiagRuntimeBehavior(Loc, RHS.get(),
S.PDiag(diag::warn_shift_gt_typewidth)
<< RHS.get()->getSourceRange());
return;
}
+
if (Opc != BO_Shl)
return;
@@ -9960,7 +10619,7 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
// If LHS does not have a signed type and non-negative value
// then, the behavior is undefined before C++2a. Warn about it.
if (Left.isNegative() && !S.getLangOpts().isSignedOverflowDefined() &&
- !S.getLangOpts().CPlusPlus2a) {
+ !S.getLangOpts().CPlusPlus20) {
S.DiagRuntimeBehavior(Loc, LHS.get(),
S.PDiag(diag::warn_shift_lhs_negative)
<< LHS.get()->getSourceRange());
@@ -10440,7 +11099,7 @@ static void diagnoseTautologicalComparison(Sema &S, SourceLocation Loc,
// C++2a [depr.array.comp]:
// Equality and relational comparisons ([expr.eq], [expr.rel]) between two
// operands of array type are deprecated.
- if (S.getLangOpts().CPlusPlus2a && LHSStripped->getType()->isArrayType() &&
+ if (S.getLangOpts().CPlusPlus20 && LHSStripped->getType()->isArrayType() &&
RHSStripped->getType()->isArrayType()) {
S.Diag(Loc, diag::warn_depr_array_comparison)
<< LHS->getSourceRange() << RHS->getSourceRange()
@@ -10897,11 +11556,22 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
// C99 6.5.9p2 and C99 6.5.8p2
if (Context.typesAreCompatible(LCanPointeeTy.getUnqualifiedType(),
RCanPointeeTy.getUnqualifiedType())) {
- // Valid unless a relational comparison of function pointers
- if (IsRelational && LCanPointeeTy->isFunctionType()) {
- Diag(Loc, diag::ext_typecheck_ordered_comparison_of_function_pointers)
- << LHSType << RHSType << LHS.get()->getSourceRange()
- << RHS.get()->getSourceRange();
+ if (IsRelational) {
+ // Pointers both need to point to complete or incomplete types
+ if ((LCanPointeeTy->isIncompleteType() !=
+ RCanPointeeTy->isIncompleteType()) &&
+ !getLangOpts().C11) {
+ Diag(Loc, diag::ext_typecheck_compare_complete_incomplete_pointers)
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange()
+ << LHSType << RHSType << LCanPointeeTy->isIncompleteType()
+ << RCanPointeeTy->isIncompleteType();
+ }
+ if (LCanPointeeTy->isFunctionType()) {
+ // Valid unless a relational comparison of function pointers
+ Diag(Loc, diag::ext_typecheck_ordered_comparison_of_function_pointers)
+ << LHSType << RHSType << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ }
}
} else if (!IsRelational &&
(LCanPointeeTy->isVoidType() || RCanPointeeTy->isVoidType())) {
@@ -10917,8 +11587,7 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
if (LCanPointeeTy != RCanPointeeTy) {
// Treat NULL constant as a special case in OpenCL.
if (getLangOpts().OpenCL && !LHSIsNull && !RHSIsNull) {
- const PointerType *LHSPtr = LHSType->castAs<PointerType>();
- if (!LHSPtr->isAddressSpaceOverlapping(*RHSType->castAs<PointerType>())) {
+ if (!LCanPointeeTy.isAddressSpaceOverlapping(RCanPointeeTy)) {
Diag(Loc,
diag::err_typecheck_op_on_nonoverlapping_address_space_pointers)
<< LHSType << RHSType << 0 /* comparison */
@@ -11316,12 +11985,12 @@ static void diagnoseXorMisusedAsPow(Sema &S, const ExprResult &XorLHS,
if (XorStr == "xor")
return;
- std::string LHSStr = Lexer::getSourceText(
+ std::string LHSStr = std::string(Lexer::getSourceText(
CharSourceRange::getTokenRange(LHSInt->getSourceRange()),
- S.getSourceManager(), S.getLangOpts());
- std::string RHSStr = Lexer::getSourceText(
+ S.getSourceManager(), S.getLangOpts()));
+ std::string RHSStr = std::string(Lexer::getSourceText(
CharSourceRange::getTokenRange(RHSInt->getSourceRange()),
- S.getSourceManager(), S.getLangOpts());
+ S.getSourceManager(), S.getLangOpts()));
if (Negative) {
RightSideValue = -RightSideValue;
@@ -11401,6 +12070,83 @@ QualType Sema::CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
return GetSignedVectorType(LHS.get()->getType());
}
+QualType Sema::CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc,
+ bool IsCompAssign) {
+ if (!IsCompAssign) {
+ LHS = DefaultFunctionArrayLvalueConversion(LHS.get());
+ if (LHS.isInvalid())
+ return QualType();
+ }
+ RHS = DefaultFunctionArrayLvalueConversion(RHS.get());
+ if (RHS.isInvalid())
+ return QualType();
+
+ // For conversion purposes, we ignore any qualifiers.
+ // For example, "const float" and "float" are equivalent.
+ QualType LHSType = LHS.get()->getType().getUnqualifiedType();
+ QualType RHSType = RHS.get()->getType().getUnqualifiedType();
+
+ const MatrixType *LHSMatType = LHSType->getAs<MatrixType>();
+ const MatrixType *RHSMatType = RHSType->getAs<MatrixType>();
+ assert((LHSMatType || RHSMatType) && "At least one operand must be a matrix");
+
+ if (Context.hasSameType(LHSType, RHSType))
+ return LHSType;
+
+ // Type conversion may change LHS/RHS. Keep copies to the original results, in
+ // case we have to return InvalidOperands.
+ ExprResult OriginalLHS = LHS;
+ ExprResult OriginalRHS = RHS;
+ if (LHSMatType && !RHSMatType) {
+ RHS = tryConvertExprToType(RHS.get(), LHSMatType->getElementType());
+ if (!RHS.isInvalid())
+ return LHSType;
+
+ return InvalidOperands(Loc, OriginalLHS, OriginalRHS);
+ }
+
+ if (!LHSMatType && RHSMatType) {
+ LHS = tryConvertExprToType(LHS.get(), RHSMatType->getElementType());
+ if (!LHS.isInvalid())
+ return RHSType;
+ return InvalidOperands(Loc, OriginalLHS, OriginalRHS);
+ }
+
+ return InvalidOperands(Loc, LHS, RHS);
+}
+
+QualType Sema::CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc,
+ bool IsCompAssign) {
+ if (!IsCompAssign) {
+ LHS = DefaultFunctionArrayLvalueConversion(LHS.get());
+ if (LHS.isInvalid())
+ return QualType();
+ }
+ RHS = DefaultFunctionArrayLvalueConversion(RHS.get());
+ if (RHS.isInvalid())
+ return QualType();
+
+ auto *LHSMatType = LHS.get()->getType()->getAs<ConstantMatrixType>();
+ auto *RHSMatType = RHS.get()->getType()->getAs<ConstantMatrixType>();
+ assert((LHSMatType || RHSMatType) && "At least one operand must be a matrix");
+
+ if (LHSMatType && RHSMatType) {
+ if (LHSMatType->getNumColumns() != RHSMatType->getNumRows())
+ return InvalidOperands(Loc, LHS, RHS);
+
+ if (!Context.hasSameType(LHSMatType->getElementType(),
+ RHSMatType->getElementType()))
+ return InvalidOperands(Loc, LHS, RHS);
+
+ return Context.getConstantMatrixType(LHSMatType->getElementType(),
+ LHSMatType->getNumRows(),
+ RHSMatType->getNumColumns());
+ }
+ return CheckMatrixElementwiseOperands(LHS, RHS, Loc, IsCompAssign);
+}
+
inline QualType Sema::CheckBitwiseOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc) {
@@ -12086,7 +12832,7 @@ QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
CheckForNullPointerDereference(*this, LHSExpr);
- if (getLangOpts().CPlusPlus2a && LHSType.isVolatileQualified()) {
+ if (getLangOpts().CPlusPlus20 && LHSType.isVolatileQualified()) {
if (CompoundType.isNull()) {
// C++2a [expr.ass]p5:
// A simple-assignment whose left operand is of a volatile-qualified
@@ -12132,8 +12878,8 @@ static bool IgnoreCommaOperand(const Expr *E) {
}
// Look for instances where it is likely the comma operator is confused with
-// another operator. There is a whitelist of acceptable expressions for the
-// left hand side of the comma operator, otherwise emit a warning.
+// another operator. There is an explicit list of acceptable expressions for
+// the left hand side of the comma operator, otherwise emit a warning.
void Sema::DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc) {
// No warnings in macros
if (Loc.isMacroID())
@@ -12143,10 +12889,10 @@ void Sema::DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc) {
if (inTemplateInstantiation())
return;
- // Scope isn't fine-grained enough to whitelist the specific cases, so
+ // Scope isn't fine-grained enough to explicitly list the specific cases, so
// instead, skip more than needed, then call back into here with the
// CommaVisitor in SemaStmt.cpp.
- // The whitelisted locations are the initialization and increment portions
+ // The listed locations are the initialization and increment portions
// of a for loop. The additional checks are on the condition of
// if statements, do/while loops, and for loops.
// Differences in scope flags for C89 mode requires the extra logic.
@@ -12289,7 +13035,7 @@ static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op,
// Now make sure the operand is a modifiable lvalue.
if (CheckForModifiableLvalue(Op, OpLoc, S))
return QualType();
- if (S.getLangOpts().CPlusPlus2a && ResType.isVolatileQualified()) {
+ if (S.getLangOpts().CPlusPlus20 && ResType.isVolatileQualified()) {
// C++2a [expr.pre.inc]p1, [expr.post.inc]p1:
// An operand with volatile-qualified type is deprecated
S.Diag(OpLoc, diag::warn_deprecated_increment_decrement_volatile)
@@ -12321,6 +13067,9 @@ static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op,
/// - *(x + 1) -> x, if x is an array
/// - &"123"[2] -> 0
/// - & __real__ x -> x
+///
+/// FIXME: We don't recurse to the RHS of a comma, nor handle pointers to
+/// members.
static ValueDecl *getPrimaryDecl(Expr *E) {
switch (E->getStmtClass()) {
case Stmt::DeclRefExprClass:
@@ -12361,19 +13110,22 @@ static ValueDecl *getPrimaryDecl(Expr *E) {
// If the result of an implicit cast is an l-value, we care about
// the sub-expression; otherwise, the result here doesn't matter.
return getPrimaryDecl(cast<ImplicitCastExpr>(E)->getSubExpr());
+ case Stmt::CXXUuidofExprClass:
+ return cast<CXXUuidofExpr>(E)->getGuidDecl();
default:
return nullptr;
}
}
namespace {
- enum {
- AO_Bit_Field = 0,
- AO_Vector_Element = 1,
- AO_Property_Expansion = 2,
- AO_Register_Variable = 3,
- AO_No_Error = 4
- };
+enum {
+ AO_Bit_Field = 0,
+ AO_Vector_Element = 1,
+ AO_Property_Expansion = 2,
+ AO_Register_Variable = 3,
+ AO_Matrix_Element = 4,
+ AO_No_Error = 5
+};
}
/// Diagnose invalid operand for address of operations.
///
@@ -12540,6 +13292,9 @@ QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) {
} else if (op->getObjectKind() == OK_VectorComponent) {
// The operand cannot be an element of a vector
AddressOfError = AO_Vector_Element;
+ } else if (op->getObjectKind() == OK_MatrixComponent) {
+ // The operand cannot be an element of a matrix.
+ AddressOfError = AO_Matrix_Element;
} else if (dcl) { // C99 6.5.3.2p1
// We have an lvalue with a decl. Make sure the decl is not declared
// with the register storage-class specifier.
@@ -12581,7 +13336,7 @@ QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) {
}
}
} else if (!isa<FunctionDecl>(dcl) && !isa<NonTypeTemplateParmDecl>(dcl) &&
- !isa<BindingDecl>(dcl))
+ !isa<BindingDecl>(dcl) && !isa<MSGuidDecl>(dcl))
llvm_unreachable("Unknown/unexpected decl type");
}
@@ -12845,7 +13600,7 @@ static ExprResult convertHalfVecBinOp(Sema &S, ExprResult LHS, ExprResult RHS,
BinaryOperatorKind Opc, QualType ResultTy,
ExprValueKind VK, ExprObjectKind OK,
bool IsCompAssign, SourceLocation OpLoc,
- FPOptions FPFeatures) {
+ FPOptionsOverride FPFeatures) {
auto &Context = S.getASTContext();
assert((isVector(ResultTy, Context.HalfTy) ||
isVector(ResultTy, Context.ShortTy)) &&
@@ -12863,13 +13618,13 @@ static ExprResult convertHalfVecBinOp(Sema &S, ExprResult LHS, ExprResult RHS,
BinOpResTy = S.GetSignedVectorType(BinOpResTy);
if (IsCompAssign)
- return new (Context) CompoundAssignOperator(
- LHS.get(), RHS.get(), Opc, ResultTy, VK, OK, BinOpResTy, BinOpResTy,
- OpLoc, FPFeatures);
+ return CompoundAssignOperator::Create(Context, LHS.get(), RHS.get(), Opc,
+ ResultTy, VK, OK, OpLoc, FPFeatures,
+ BinOpResTy, BinOpResTy);
LHS = convertVector(LHS.get(), Context.FloatTy, S);
- auto *BO = new (Context) BinaryOperator(LHS.get(), RHS.get(), Opc, BinOpResTy,
- VK, OK, OpLoc, FPFeatures);
+ auto *BO = BinaryOperator::Create(Context, LHS.get(), RHS.get(), Opc,
+ BinOpResTy, VK, OK, OpLoc, FPFeatures);
return convertVector(BO, ResultTy->castAs<VectorType>()->getElementType(), S);
}
@@ -12882,13 +13637,15 @@ CorrectDelayedTyposInBinOp(Sema &S, BinaryOperatorKind Opc, Expr *LHSExpr,
// doesn't handle dependent types properly, so make sure any TypoExprs have
// been dealt with before checking the operands.
LHS = S.CorrectDelayedTyposInExpr(LHS);
- RHS = S.CorrectDelayedTyposInExpr(RHS, [Opc, LHS](Expr *E) {
- if (Opc != BO_Assign)
- return ExprResult(E);
- // Avoid correcting the RHS to the same Expr as the LHS.
- Decl *D = getDeclFromExpr(E);
- return (D && D == getDeclFromExpr(LHS.get())) ? ExprError() : E;
- });
+ RHS = S.CorrectDelayedTyposInExpr(
+ RHS, /*InitDecl=*/nullptr, /*RecoverUncorrectedTypos=*/false,
+ [Opc, LHS](Expr *E) {
+ if (Opc != BO_Assign)
+ return ExprResult(E);
+ // Avoid correcting the RHS to the same Expr as the LHS.
+ Decl *D = getDeclFromExpr(E);
+ return (D && D == getDeclFromExpr(LHS.get())) ? ExprError() : E;
+ });
}
return std::make_pair(LHS, RHS);
}
@@ -12896,10 +13653,27 @@ CorrectDelayedTyposInBinOp(Sema &S, BinaryOperatorKind Opc, Expr *LHSExpr,
/// Returns true if conversion between vectors of halfs and vectors of floats
/// is needed.
static bool needsConversionOfHalfVec(bool OpRequiresConversion, ASTContext &Ctx,
- QualType SrcType) {
- return OpRequiresConversion && !Ctx.getLangOpts().NativeHalfType &&
- !Ctx.getTargetInfo().useFP16ConversionIntrinsics() &&
- isVector(SrcType, Ctx.HalfTy);
+ Expr *E0, Expr *E1 = nullptr) {
+ if (!OpRequiresConversion || Ctx.getLangOpts().NativeHalfType ||
+ Ctx.getTargetInfo().useFP16ConversionIntrinsics())
+ return false;
+
+ auto HasVectorOfHalfType = [&Ctx](Expr *E) {
+ QualType Ty = E->IgnoreImplicit()->getType();
+
+ // Don't promote half precision neon vectors like float16x4_t in arm_neon.h
+ // to vectors of floats. Although the element type of the vectors is __fp16,
+ // the vectors shouldn't be treated as storage-only types. See the
+ // discussion here: https://reviews.llvm.org/rG825235c140e7
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ if (VT->getVectorKind() == VectorType::NeonVector)
+ return false;
+ return VT->getElementType().getCanonicalType() == Ctx.HalfTy;
+ }
+ return false;
+ };
+
+ return HasVectorOfHalfType(E0) && (!E1 || HasVectorOfHalfType(E1));
}
/// CreateBuiltinBinOp - Creates a new built-in binary operation with
@@ -12964,14 +13738,6 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
}
}
- // Diagnose operations on the unsupported types for OpenMP device compilation.
- if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice) {
- if (Opc != BO_Assign && Opc != BO_Comma) {
- checkOpenMPDeviceExpr(LHSExpr);
- checkOpenMPDeviceExpr(RHSExpr);
- }
- }
-
switch (Opc) {
case BO_Assign:
ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, QualType());
@@ -13119,14 +13885,6 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
if (ResultTy.isNull() || LHS.isInvalid() || RHS.isInvalid())
return ExprError();
- if (ResultTy->isRealFloatingType() &&
- (getLangOpts().getFPRoundingMode() != LangOptions::FPR_ToNearest ||
- getLangOpts().getFPExceptionMode() != LangOptions::FPE_Ignore))
- // Mark the current function as usng floating point constrained intrinsics
- if (FunctionDecl *F = dyn_cast<FunctionDecl>(CurContext)) {
- F->setUsesFPIntrin(true);
- }
-
// Some of the binary operations require promoting operands of half vector to
// float vectors and truncating the result back to half vector. For now, we do
// this only when HalfArgsAndReturn is set (that is, when the target is arm or
@@ -13134,8 +13892,8 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
assert(isVector(RHS.get()->getType(), Context.HalfTy) ==
isVector(LHS.get()->getType(), Context.HalfTy) &&
"both sides are half vectors or neither sides are");
- ConvertHalfVec = needsConversionOfHalfVec(ConvertHalfVec, Context,
- LHS.get()->getType());
+ ConvertHalfVec =
+ needsConversionOfHalfVec(ConvertHalfVec, Context, LHS.get(), RHS.get());
// Check for array bounds violations for both sides of the BinaryOperator
CheckArrayAccess(LHS.get());
@@ -13165,9 +13923,9 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
if (CompResultTy.isNull()) {
if (ConvertHalfVec)
return convertHalfVecBinOp(*this, LHS, RHS, Opc, ResultTy, VK, OK, false,
- OpLoc, FPFeatures);
- return new (Context) BinaryOperator(LHS.get(), RHS.get(), Opc, ResultTy, VK,
- OK, OpLoc, FPFeatures);
+ OpLoc, CurFPFeatureOverrides());
+ return BinaryOperator::Create(Context, LHS.get(), RHS.get(), Opc, ResultTy,
+ VK, OK, OpLoc, CurFPFeatureOverrides());
}
// Handle compound assignments.
@@ -13177,13 +13935,19 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
OK = LHS.get()->getObjectKind();
}
+ // The LHS is not converted to the result type for fixed-point compound
+ // assignment as the common type is computed on demand. Reset the CompLHSTy
+ // to the LHS type we would have gotten after unary conversions.
+ if (CompResultTy->isFixedPointType())
+ CompLHSTy = UsualUnaryConversions(LHS.get()).get()->getType();
+
if (ConvertHalfVec)
return convertHalfVecBinOp(*this, LHS, RHS, Opc, ResultTy, VK, OK, true,
- OpLoc, FPFeatures);
+ OpLoc, CurFPFeatureOverrides());
- return new (Context) CompoundAssignOperator(
- LHS.get(), RHS.get(), Opc, ResultTy, VK, OK, CompLHSTy, CompResultTy,
- OpLoc, FPFeatures);
+ return CompoundAssignOperator::Create(
+ Context, LHS.get(), RHS.get(), Opc, ResultTy, VK, OK, OpLoc,
+ CurFPFeatureOverrides(), CompLHSTy, CompResultTy);
}
/// DiagnoseBitwisePrecedence - Emit a warning when bitwise and comparison
@@ -13436,7 +14200,7 @@ static ExprResult BuildOverloadedBinOp(Sema &S, Scope *Sc, SourceLocation OpLoc,
RHS->getType(), Functions);
// In C++20 onwards, we may have a second operator to look up.
- if (S.getLangOpts().CPlusPlus2a) {
+ if (S.getLangOpts().CPlusPlus20) {
if (OverloadedOperatorKind ExtraOp = getRewrittenOverloadedOperator(OverOp))
S.LookupOverloadedOperatorName(ExtraOp, Sc, LHS->getType(),
RHS->getType(), Functions);
@@ -13586,12 +14350,6 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
<< Input.get()->getSourceRange());
}
}
- // Diagnose operations on the unsupported types for OpenMP device compilation.
- if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice) {
- if (UnaryOperator::isIncrementDecrementOp(Opc) ||
- UnaryOperator::isArithmeticOp(Opc))
- checkOpenMPDeviceExpr(InputExpr);
- }
switch (Opc) {
case UO_PreInc:
@@ -13627,8 +14385,7 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
// float vector and truncating the result back to a half vector. For now, we
// do this only when HalfArgsAndReturns is set (that is, when the target is
// arm or arm64).
- ConvertHalfVec =
- needsConversionOfHalfVec(true, Context, Input.get()->getType());
+ ConvertHalfVec = needsConversionOfHalfVec(true, Context, Input.get());
// If the operand is a half vector, promote it to a float vector.
if (ConvertHalfVec)
@@ -13722,9 +14479,16 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
// Vector logical not returns the signed variant of the operand type.
resultType = GetSignedVectorType(resultType);
break;
+ } else if (Context.getLangOpts().CPlusPlus && resultType->isVectorType()) {
+ const VectorType *VTy = resultType->castAs<VectorType>();
+ if (VTy->getVectorKind() != VectorType::GenericVector)
+ return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
+ << resultType << Input.get()->getSourceRange());
+
+ // Vector logical not returns the signed variant of the operand type.
+ resultType = GetSignedVectorType(resultType);
+ break;
} else {
- // FIXME: GCC's vector extension permits the usage of '!' with a vector
- // type in C++. We should allow that here too.
return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
<< resultType << Input.get()->getSourceRange());
}
@@ -13771,8 +14535,9 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
if (Opc != UO_AddrOf && Opc != UO_Deref)
CheckArrayAccess(Input.get());
- auto *UO = new (Context)
- UnaryOperator(Input.get(), Opc, resultType, VK, OK, OpLoc, CanOverflow);
+ auto *UO =
+ UnaryOperator::Create(Context, Input.get(), Opc, resultType, VK, OK,
+ OpLoc, CanOverflow, CurFPFeatureOverrides());
if (Opc == UO_Deref && UO->getType()->hasAttr(attr::NoDeref) &&
!isa<ArrayType>(UO->getType().getDesugaredType(Context)))
@@ -13898,9 +14663,13 @@ void Sema::ActOnStmtExprError() {
PopExpressionEvaluationContext();
}
-ExprResult
-Sema::ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
- SourceLocation RPLoc) { // "({..})"
+ExprResult Sema::ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
+ SourceLocation RPLoc) {
+ return BuildStmtExpr(LPLoc, SubStmt, RPLoc, getTemplateDepth(S));
+}
+
+ExprResult Sema::BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
+ SourceLocation RPLoc, unsigned TemplateDepth) {
assert(SubStmt && isa<CompoundStmt>(SubStmt) && "Invalid action invocation!");
CompoundStmt *Compound = cast<CompoundStmt>(SubStmt);
@@ -13931,7 +14700,8 @@ Sema::ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
// FIXME: Check that expression type is complete/non-abstract; statement
// expressions are not lvalues.
- Expr *ResStmtExpr = new (Context) StmtExpr(Compound, Ty, LPLoc, RPLoc);
+ Expr *ResStmtExpr =
+ new (Context) StmtExpr(Compound, Ty, LPLoc, RPLoc, TemplateDepth);
if (StmtExprMayBindToTemp)
return MaybeBindToTemporary(ResStmtExpr);
return ResStmtExpr;
@@ -14159,11 +14929,9 @@ ExprResult Sema::ActOnChooseExpr(SourceLocation BuiltinLoc,
ExprValueKind VK = VK_RValue;
ExprObjectKind OK = OK_Ordinary;
QualType resType;
- bool ValueDependent = false;
bool CondIsTrue = false;
if (CondExpr->isTypeDependent() || CondExpr->isValueDependent()) {
resType = Context.DependentTy;
- ValueDependent = true;
} else {
// The conditional expression is required to be a constant expression.
llvm::APSInt condEval(32);
@@ -14179,14 +14947,12 @@ ExprResult Sema::ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *ActiveExpr = CondIsTrue ? LHSExpr : RHSExpr;
resType = ActiveExpr->getType();
- ValueDependent = ActiveExpr->isValueDependent();
VK = ActiveExpr->getValueKind();
OK = ActiveExpr->getObjectKind();
}
- return new (Context)
- ChooseExpr(BuiltinLoc, CondExpr, LHSExpr, RHSExpr, resType, VK, OK, RPLoc,
- CondIsTrue, resType->isDependentType(), ValueDependent);
+ return new (Context) ChooseExpr(BuiltinLoc, CondExpr, LHSExpr, RHSExpr,
+ resType, VK, OK, RPLoc, CondIsTrue);
}
//===----------------------------------------------------------------------===//
@@ -14297,11 +15063,12 @@ void Sema::ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
if (ExplicitSignature) {
for (unsigned I = 0, E = ExplicitSignature.getNumParams(); I != E; ++I) {
ParmVarDecl *Param = ExplicitSignature.getParam(I);
- if (Param->getIdentifier() == nullptr &&
- !Param->isImplicit() &&
- !Param->isInvalidDecl() &&
- !getLangOpts().CPlusPlus)
- Diag(Param->getLocation(), diag::err_parameter_name_omitted);
+ if (Param->getIdentifier() == nullptr && !Param->isImplicit() &&
+ !Param->isInvalidDecl() && !getLangOpts().CPlusPlus) {
+ // Diagnose this as an extension in C17 and earlier.
+ if (!getLangOpts().C2x)
+ Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x);
+ }
Params.push_back(Param);
}
@@ -14690,21 +15457,15 @@ ExprResult Sema::BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocExpr(Context, Kind, BuiltinLoc, RPLoc, ParentContext);
}
-bool Sema::ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&Exp,
- bool Diagnose) {
+bool Sema::CheckConversionToObjCLiteral(QualType DstType, Expr *&Exp,
+ bool Diagnose) {
if (!getLangOpts().ObjC)
return false;
const ObjCObjectPointerType *PT = DstType->getAs<ObjCObjectPointerType>();
if (!PT)
return false;
-
- if (!PT->isObjCIdType()) {
- // Check if the destination is the 'NSString' interface.
- const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
- if (!ID || !ID->getIdentifier()->isStr("NSString"))
- return false;
- }
+ const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
// Ignore any parens, implicit casts (should only be
// array-to-pointer decays), and not-so-opaque values. The last is
@@ -14714,15 +15475,41 @@ bool Sema::ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&Exp,
if (OV->getSourceExpr())
SrcExpr = OV->getSourceExpr()->IgnoreParenImpCasts();
- StringLiteral *SL = dyn_cast<StringLiteral>(SrcExpr);
- if (!SL || !SL->isAscii())
- return false;
- if (Diagnose) {
- Diag(SL->getBeginLoc(), diag::err_missing_atsign_prefix)
- << FixItHint::CreateInsertion(SL->getBeginLoc(), "@");
- Exp = BuildObjCStringLiteral(SL->getBeginLoc(), SL).get();
+ if (auto *SL = dyn_cast<StringLiteral>(SrcExpr)) {
+ if (!PT->isObjCIdType() &&
+ !(ID && ID->getIdentifier()->isStr("NSString")))
+ return false;
+ if (!SL->isAscii())
+ return false;
+
+ if (Diagnose) {
+ Diag(SL->getBeginLoc(), diag::err_missing_atsign_prefix)
+ << /*string*/0 << FixItHint::CreateInsertion(SL->getBeginLoc(), "@");
+ Exp = BuildObjCStringLiteral(SL->getBeginLoc(), SL).get();
+ }
+ return true;
}
- return true;
+
+ if ((isa<IntegerLiteral>(SrcExpr) || isa<CharacterLiteral>(SrcExpr) ||
+ isa<FloatingLiteral>(SrcExpr) || isa<ObjCBoolLiteralExpr>(SrcExpr) ||
+ isa<CXXBoolLiteralExpr>(SrcExpr)) &&
+ !SrcExpr->isNullPointerConstant(
+ getASTContext(), Expr::NPC_NeverValueDependent)) {
+ if (!ID || !ID->getIdentifier()->isStr("NSNumber"))
+ return false;
+ if (Diagnose) {
+ Diag(SrcExpr->getBeginLoc(), diag::err_missing_atsign_prefix)
+ << /*number*/1
+ << FixItHint::CreateInsertion(SrcExpr->getBeginLoc(), "@");
+ Expr *NumLit =
+ BuildObjCNumericLiteral(SrcExpr->getBeginLoc(), SrcExpr).get();
+ if (NumLit)
+ Exp = NumLit;
+ }
+ return true;
+ }
+
+ return false;
}
static bool maybeDiagnoseAssignmentToFunction(Sema &S, QualType DstType,
@@ -14769,24 +15556,44 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
return false;
case PointerToInt:
- DiagKind = diag::ext_typecheck_convert_pointer_int;
+ if (getLangOpts().CPlusPlus) {
+ DiagKind = diag::err_typecheck_convert_pointer_int;
+ isInvalid = true;
+ } else {
+ DiagKind = diag::ext_typecheck_convert_pointer_int;
+ }
ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this);
MayHaveConvFixit = true;
break;
case IntToPointer:
- DiagKind = diag::ext_typecheck_convert_int_pointer;
+ if (getLangOpts().CPlusPlus) {
+ DiagKind = diag::err_typecheck_convert_int_pointer;
+ isInvalid = true;
+ } else {
+ DiagKind = diag::ext_typecheck_convert_int_pointer;
+ }
+ ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this);
+ MayHaveConvFixit = true;
+ break;
+ case IncompatibleFunctionPointer:
+ if (getLangOpts().CPlusPlus) {
+ DiagKind = diag::err_typecheck_convert_incompatible_function_pointer;
+ isInvalid = true;
+ } else {
+ DiagKind = diag::ext_typecheck_convert_incompatible_function_pointer;
+ }
ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this);
MayHaveConvFixit = true;
break;
case IncompatiblePointer:
- if (Action == AA_Passing_CFAudited)
+ if (Action == AA_Passing_CFAudited) {
DiagKind = diag::err_arc_typecheck_convert_incompatible_pointer;
- else if (SrcType->isFunctionPointerType() &&
- DstType->isFunctionPointerType())
- DiagKind = diag::ext_typecheck_convert_incompatible_function_pointer;
- else
+ } else if (getLangOpts().CPlusPlus) {
+ DiagKind = diag::err_typecheck_convert_incompatible_pointer;
+ isInvalid = true;
+ } else {
DiagKind = diag::ext_typecheck_convert_incompatible_pointer;
-
+ }
CheckInferredResultType = DstType->isObjCObjectPointerType() &&
SrcType->isObjCObjectPointerType();
if (Hint.isNull() && !CheckInferredResultType) {
@@ -14799,15 +15606,27 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
MayHaveConvFixit = true;
break;
case IncompatiblePointerSign:
- DiagKind = diag::ext_typecheck_convert_incompatible_pointer_sign;
+ if (getLangOpts().CPlusPlus) {
+ DiagKind = diag::err_typecheck_convert_incompatible_pointer_sign;
+ isInvalid = true;
+ } else {
+ DiagKind = diag::ext_typecheck_convert_incompatible_pointer_sign;
+ }
break;
case FunctionVoidPointer:
- DiagKind = diag::ext_typecheck_convert_pointer_void_func;
+ if (getLangOpts().CPlusPlus) {
+ DiagKind = diag::err_typecheck_convert_pointer_void_func;
+ isInvalid = true;
+ } else {
+ DiagKind = diag::ext_typecheck_convert_pointer_void_func;
+ }
break;
case IncompatiblePointerDiscardsQualifiers: {
// Perform array-to-pointer decay if necessary.
if (SrcType->isArrayType()) SrcType = Context.getArrayDecayedType(SrcType);
+ isInvalid = true;
+
Qualifiers lhq = SrcType->getPointeeType().getQualifiers();
Qualifiers rhq = DstType->getPointeeType().getQualifiers();
if (lhq.getAddressSpace() != rhq.getAddressSpace()) {
@@ -14835,19 +15654,33 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
if (getLangOpts().CPlusPlus &&
IsStringLiteralToNonConstPointerConversion(SrcExpr, DstType))
return false;
- DiagKind = diag::ext_typecheck_convert_discards_qualifiers;
+ if (getLangOpts().CPlusPlus) {
+ DiagKind = diag::err_typecheck_convert_discards_qualifiers;
+ isInvalid = true;
+ } else {
+ DiagKind = diag::ext_typecheck_convert_discards_qualifiers;
+ }
+
break;
case IncompatibleNestedPointerQualifiers:
- DiagKind = diag::ext_nested_pointer_qualifier_mismatch;
+ if (getLangOpts().CPlusPlus) {
+ isInvalid = true;
+ DiagKind = diag::err_nested_pointer_qualifier_mismatch;
+ } else {
+ DiagKind = diag::ext_nested_pointer_qualifier_mismatch;
+ }
break;
case IncompatibleNestedPointerAddressSpaceMismatch:
DiagKind = diag::err_typecheck_incompatible_nested_address_space;
+ isInvalid = true;
break;
case IntToBlockPointer:
DiagKind = diag::err_int_to_block_pointer;
+ isInvalid = true;
break;
case IncompatibleBlockPointer:
DiagKind = diag::err_typecheck_convert_incompatible_block_pointer;
+ isInvalid = true;
break;
case IncompatibleObjCQualifiedId: {
if (SrcType->isObjCQualifiedIdType()) {
@@ -14872,14 +15705,25 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
SrcType->castAs<ObjCObjectPointerType>()->getInterfaceType())
IFace = IFaceT->getDecl();
}
- DiagKind = diag::warn_incompatible_qualified_id;
+ if (getLangOpts().CPlusPlus) {
+ DiagKind = diag::err_incompatible_qualified_id;
+ isInvalid = true;
+ } else {
+ DiagKind = diag::warn_incompatible_qualified_id;
+ }
break;
}
case IncompatibleVectors:
- DiagKind = diag::warn_incompatible_vectors;
+ if (getLangOpts().CPlusPlus) {
+ DiagKind = diag::err_incompatible_vectors;
+ isInvalid = true;
+ } else {
+ DiagKind = diag::warn_incompatible_vectors;
+ }
break;
case IncompatibleObjCWeakRef:
DiagKind = diag::err_arc_weak_unavailable_assign;
+ isInvalid = true;
break;
case Incompatible:
if (maybeDiagnoseAssignmentToFunction(*this, DstType, SrcExpr)) {
@@ -14937,9 +15781,10 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
HandleFunctionTypeMismatch(FDiag, SecondType, FirstType);
Diag(Loc, FDiag);
- if (DiagKind == diag::warn_incompatible_qualified_id &&
+ if ((DiagKind == diag::warn_incompatible_qualified_id ||
+ DiagKind == diag::err_incompatible_qualified_id) &&
PDecl && IFace && !IFace->hasDefinition())
- Diag(IFace->getLocation(), diag::note_incomplete_class_and_qualified_id)
+ Diag(IFace->getLocation(), diag::note_incomplete_class_and_qualified_id)
<< IFace << PDecl;
if (SecondType == Context.OverloadTy)
@@ -15064,6 +15909,12 @@ Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
return ExprError();
}
+ ExprResult RValueExpr = DefaultLvalueConversion(E);
+ if (RValueExpr.isInvalid())
+ return ExprError();
+
+ E = RValueExpr.get();
+
// Circumvent ICE checking in C++11 to avoid evaluating the expression twice
// in the non-ICE case.
if (!getLangOpts().CPlusPlus11 && E->isIntegerConstantExpr(Context)) {
@@ -15251,7 +16102,7 @@ void Sema::WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec) {
/// and if so, remove it from the list of volatile-qualified assignments that
/// we are going to warn are deprecated.
void Sema::CheckUnusedVolatileAssignment(Expr *E) {
- if (!E->getType().isVolatileQualified() || !getLangOpts().CPlusPlus2a)
+ if (!E->getType().isVolatileQualified() || !getLangOpts().CPlusPlus20)
return;
// Note: ignoring parens here is not justified by the standard rules, but
@@ -15266,6 +16117,186 @@ void Sema::CheckUnusedVolatileAssignment(Expr *E) {
}
}
+ExprResult Sema::CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl) {
+ if (!E.isUsable() || !Decl || !Decl->isConsteval() || isConstantEvaluated() ||
+ RebuildingImmediateInvocation)
+ return E;
+
+ /// Opportunistically remove the callee from ReferencesToConsteval if we can.
+ /// It's OK if this fails; we'll also remove this in
+ /// HandleImmediateInvocations, but catching it here allows us to avoid
+ /// walking the AST looking for it in simple cases.
+ if (auto *Call = dyn_cast<CallExpr>(E.get()->IgnoreImplicit()))
+ if (auto *DeclRef =
+ dyn_cast<DeclRefExpr>(Call->getCallee()->IgnoreImplicit()))
+ ExprEvalContexts.back().ReferenceToConsteval.erase(DeclRef);
+
+ E = MaybeCreateExprWithCleanups(E);
+
+ ConstantExpr *Res = ConstantExpr::Create(
+ getASTContext(), E.get(),
+ ConstantExpr::getStorageKind(Decl->getReturnType().getTypePtr(),
+ getASTContext()),
+ /*IsImmediateInvocation*/ true);
+ ExprEvalContexts.back().ImmediateInvocationCandidates.emplace_back(Res, 0);
+ return Res;
+}
+
+static void EvaluateAndDiagnoseImmediateInvocation(
+ Sema &SemaRef, Sema::ImmediateInvocationCandidate Candidate) {
+ llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
+ Expr::EvalResult Eval;
+ Eval.Diag = &Notes;
+ ConstantExpr *CE = Candidate.getPointer();
+ bool Result = CE->EvaluateAsConstantExpr(Eval, Expr::EvaluateForCodeGen,
+ SemaRef.getASTContext(), true);
+ if (!Result || !Notes.empty()) {
+ Expr *InnerExpr = CE->getSubExpr()->IgnoreImplicit();
+ if (auto *FunctionalCast = dyn_cast<CXXFunctionalCastExpr>(InnerExpr))
+ InnerExpr = FunctionalCast->getSubExpr();
+ FunctionDecl *FD = nullptr;
+ if (auto *Call = dyn_cast<CallExpr>(InnerExpr))
+ FD = cast<FunctionDecl>(Call->getCalleeDecl());
+ else if (auto *Call = dyn_cast<CXXConstructExpr>(InnerExpr))
+ FD = Call->getConstructor();
+ else
+ llvm_unreachable("unhandled decl kind");
+ assert(FD->isConsteval());
+ SemaRef.Diag(CE->getBeginLoc(), diag::err_invalid_consteval_call) << FD;
+ for (auto &Note : Notes)
+ SemaRef.Diag(Note.first, Note.second);
+ return;
+ }
+ CE->MoveIntoResult(Eval.Val, SemaRef.getASTContext());
+}
+
+static void RemoveNestedImmediateInvocation(
+ Sema &SemaRef, Sema::ExpressionEvaluationContextRecord &Rec,
+ SmallVector<Sema::ImmediateInvocationCandidate, 4>::reverse_iterator It) {
+ struct ComplexRemove : TreeTransform<ComplexRemove> {
+ using Base = TreeTransform<ComplexRemove>;
+ llvm::SmallPtrSetImpl<DeclRefExpr *> &DRSet;
+ SmallVector<Sema::ImmediateInvocationCandidate, 4> &IISet;
+ SmallVector<Sema::ImmediateInvocationCandidate, 4>::reverse_iterator
+ CurrentII;
+ ComplexRemove(Sema &SemaRef, llvm::SmallPtrSetImpl<DeclRefExpr *> &DR,
+ SmallVector<Sema::ImmediateInvocationCandidate, 4> &II,
+ SmallVector<Sema::ImmediateInvocationCandidate,
+ 4>::reverse_iterator Current)
+ : Base(SemaRef), DRSet(DR), IISet(II), CurrentII(Current) {}
+ void RemoveImmediateInvocation(ConstantExpr* E) {
+ auto It = std::find_if(CurrentII, IISet.rend(),
+ [E](Sema::ImmediateInvocationCandidate Elem) {
+ return Elem.getPointer() == E;
+ });
+ assert(It != IISet.rend() &&
+ "ConstantExpr marked IsImmediateInvocation should "
+ "be present");
+ It->setInt(1); // Mark as deleted
+ }
+ ExprResult TransformConstantExpr(ConstantExpr *E) {
+ if (!E->isImmediateInvocation())
+ return Base::TransformConstantExpr(E);
+ RemoveImmediateInvocation(E);
+ return Base::TransformExpr(E->getSubExpr());
+ }
+    /// Base::TransformCXXOperatorCallExpr doesn't traverse the callee, so
+    /// we need to remove its DeclRefExpr from the DRSet ourselves.
+ ExprResult TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
+ DRSet.erase(cast<DeclRefExpr>(E->getCallee()->IgnoreImplicit()));
+ return Base::TransformCXXOperatorCallExpr(E);
+ }
+    /// Base::TransformInitializer skips ConstantExprs, so we need to visit
+    /// them here.
+ ExprResult TransformInitializer(Expr *Init, bool NotCopyInit) {
+ if (!Init)
+ return Init;
+      /// ConstantExprs are the first layer of implicit nodes to be removed,
+      /// so if Init isn't a ConstantExpr, no ConstantExpr will be skipped.
+ if (auto *CE = dyn_cast<ConstantExpr>(Init))
+ if (CE->isImmediateInvocation())
+ RemoveImmediateInvocation(CE);
+ return Base::TransformInitializer(Init, NotCopyInit);
+ }
+ ExprResult TransformDeclRefExpr(DeclRefExpr *E) {
+ DRSet.erase(E);
+ return E;
+ }
+ bool AlwaysRebuild() { return false; }
+ bool ReplacingOriginal() { return true; }
+ bool AllowSkippingCXXConstructExpr() {
+ bool Res = AllowSkippingFirstCXXConstructExpr;
+ AllowSkippingFirstCXXConstructExpr = true;
+ return Res;
+ }
+ bool AllowSkippingFirstCXXConstructExpr = true;
+ } Transformer(SemaRef, Rec.ReferenceToConsteval,
+ Rec.ImmediateInvocationCandidates, It);
+
+  /// CXXConstructExprs with a single argument can get skipped by
+  /// TreeTransform in some situations because they could be implicit. This
+  /// can only occur for the top-level CXXConstructExpr because it is used
+  /// nowhere in the expression being transformed and therefore will not be
+  /// rebuilt. Setting AllowSkippingFirstCXXConstructExpr to false prevents
+  /// the first CXXConstructExpr from being skipped.
+ if (isa<CXXConstructExpr>(It->getPointer()->IgnoreImplicit()))
+ Transformer.AllowSkippingFirstCXXConstructExpr = false;
+
+ ExprResult Res = Transformer.TransformExpr(It->getPointer()->getSubExpr());
+ assert(Res.isUsable());
+ Res = SemaRef.MaybeCreateExprWithCleanups(Res);
+ It->getPointer()->setSubExpr(Res.get());
+}
+
+static void
+HandleImmediateInvocations(Sema &SemaRef,
+ Sema::ExpressionEvaluationContextRecord &Rec) {
+ if ((Rec.ImmediateInvocationCandidates.size() == 0 &&
+ Rec.ReferenceToConsteval.size() == 0) ||
+ SemaRef.RebuildingImmediateInvocation)
+ return;
+
+  /// When we have more than one ImmediateInvocationCandidate, we need to
+  /// check for nested ImmediateInvocationCandidates. When we have only one,
+  /// we only need to remove ReferenceToConsteval in the immediate invocation.
+ if (Rec.ImmediateInvocationCandidates.size() > 1) {
+
+ /// Prevent sema calls during the tree transform from adding pointers that
+ /// are already in the sets.
+ llvm::SaveAndRestore<bool> DisableIITracking(
+ SemaRef.RebuildingImmediateInvocation, true);
+
+    /// Prevent diagnostics during the tree transform as they are duplicates.
+ Sema::TentativeAnalysisScope DisableDiag(SemaRef);
+
+ for (auto It = Rec.ImmediateInvocationCandidates.rbegin();
+ It != Rec.ImmediateInvocationCandidates.rend(); It++)
+ if (!It->getInt())
+ RemoveNestedImmediateInvocation(SemaRef, Rec, It);
+ } else if (Rec.ImmediateInvocationCandidates.size() == 1 &&
+ Rec.ReferenceToConsteval.size()) {
+ struct SimpleRemove : RecursiveASTVisitor<SimpleRemove> {
+ llvm::SmallPtrSetImpl<DeclRefExpr *> &DRSet;
+ SimpleRemove(llvm::SmallPtrSetImpl<DeclRefExpr *> &S) : DRSet(S) {}
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
+ DRSet.erase(E);
+ return DRSet.size();
+ }
+ } Visitor(Rec.ReferenceToConsteval);
+ Visitor.TraverseStmt(
+ Rec.ImmediateInvocationCandidates.front().getPointer()->getSubExpr());
+ }
+ for (auto CE : Rec.ImmediateInvocationCandidates)
+ if (!CE.getInt())
+ EvaluateAndDiagnoseImmediateInvocation(SemaRef, CE);
+ for (auto DR : Rec.ReferenceToConsteval) {
+ auto *FD = cast<FunctionDecl>(DR->getDecl());
+ SemaRef.Diag(DR->getBeginLoc(), diag::err_invalid_consteval_take_address)
+ << FD;
+ SemaRef.Diag(FD->getLocation(), diag::note_declared_at);
+ }
+}
+
void Sema::PopExpressionEvaluationContext() {
ExpressionEvaluationContextRecord& Rec = ExprEvalContexts.back();
unsigned NumTypos = Rec.NumTypos;
@@ -15299,6 +16330,7 @@ void Sema::PopExpressionEvaluationContext() {
}
WarnOnPendingNoDerefs(Rec);
+ HandleImmediateInvocations(*this, Rec);
// Warn on any volatile-qualified simple-assignments that are not discarded-
// value expressions nor unevaluated operands (those cases get removed from
@@ -15584,6 +16616,9 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
if (getLangOpts().CUDA)
CheckCUDACall(Loc, Func);
+ if (getLangOpts().SYCLIsDevice)
+ checkSYCLDeviceFunction(Loc, Func);
+
// If we need a definition, try to create one.
if (NeedDefinition && !Func->getBody()) {
runWithSufficientStackSpace(Loc, [&] {
@@ -15722,15 +16757,21 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
if (funcHasParameterSizeMangling(*this, Func))
CheckCompleteParameterTypesForMangler(*this, Func, Loc);
- Func->markUsed(Context);
- }
+ // In the MS C++ ABI, the compiler emits destructor variants where they are
+ // used. If the destructor is used here but defined elsewhere, mark the
+ // virtual base destructors referenced. If those virtual base destructors
+ // are inline, this will ensure they are defined when emitting the complete
+ // destructor variant. This checking may be redundant if the destructor is
+ // provided later in this TU.
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ if (auto *Dtor = dyn_cast<CXXDestructorDecl>(Func)) {
+ CXXRecordDecl *Parent = Dtor->getParent();
+ if (Parent->getNumVBases() > 0 && !Dtor->getBody())
+ CheckCompleteDestructorVariant(Loc, Dtor);
+ }
+ }
- if (LangOpts.OpenMP) {
- markOpenMPDeclareVariantFuncsReferenced(Loc, Func, MightBeOdrUse);
- if (LangOpts.OpenMPIsDevice)
- checkOpenMPDeviceFunction(Loc, Func);
- else
- checkOpenMPHostFunction(Loc, Func);
+ Func->markUsed(Context);
}
}
@@ -16022,6 +17063,10 @@ static bool captureInCapturedRegion(CapturedRegionScopeInfo *RSI,
if (HasConst)
DeclRefType.addConst();
}
+ // Do not capture firstprivates in tasks.
+ if (S.isOpenMPPrivateDecl(Var, RSI->OpenMPLevel, RSI->OpenMPCaptureLevel) !=
+ OMPC_unknown)
+ return true;
ByRef = S.isOpenMPCapturedByRef(Var, RSI->OpenMPLevel,
RSI->OpenMPCaptureLevel);
}
@@ -16106,9 +17151,10 @@ static bool captureInLambda(LambdaScopeInfo *LSI,
// Make sure that by-copy captures are of a complete and non-abstract type.
if (!Invalid && BuildAndDiagnose) {
if (!CaptureType->isDependentType() &&
- S.RequireCompleteType(Loc, CaptureType,
- diag::err_capture_of_incomplete_type,
- Var->getDeclName()))
+ S.RequireCompleteSizedType(
+ Loc, CaptureType,
+ diag::err_capture_of_incomplete_or_sizeless_type,
+ Var->getDeclName()))
Invalid = true;
else if (S.RequireNonAbstractType(Loc, CaptureType,
diag::err_capture_of_abstract_type))
@@ -16250,12 +17296,14 @@ bool Sema::tryCaptureVariable(
// just break here. Similarly, global variables that are captured in a
// target region should not be captured outside the scope of the region.
if (RSI->CapRegionKind == CR_OpenMP) {
- bool IsOpenMPPrivateDecl = isOpenMPPrivateDecl(Var, RSI->OpenMPLevel);
+ OpenMPClauseKind IsOpenMPPrivateDecl = isOpenMPPrivateDecl(
+ Var, RSI->OpenMPLevel, RSI->OpenMPCaptureLevel);
// If the variable is private (i.e. not captured) and has variably
// modified type, we still need to capture the type for correct
// codegen in all regions, associated with the construct. Currently,
// it is captured in the innermost captured region only.
- if (IsOpenMPPrivateDecl && Var->getType()->isVariablyModifiedType()) {
+ if (IsOpenMPPrivateDecl != OMPC_unknown &&
+ Var->getType()->isVariablyModifiedType()) {
QualType QTy = Var->getType();
if (ParmVarDecl *PVD = dyn_cast_or_null<ParmVarDecl>(Var))
QTy = PVD->getOriginalType();
@@ -16269,15 +17317,23 @@ bool Sema::tryCaptureVariable(
captureVariablyModifiedType(Context, QTy, OuterRSI);
}
}
- bool IsTargetCap = !IsOpenMPPrivateDecl &&
- isOpenMPTargetCapturedDecl(Var, RSI->OpenMPLevel);
+ bool IsTargetCap =
+ IsOpenMPPrivateDecl != OMPC_private &&
+ isOpenMPTargetCapturedDecl(Var, RSI->OpenMPLevel,
+ RSI->OpenMPCaptureLevel);
+ // Do not capture global if it is not privatized in outer regions.
+ bool IsGlobalCap =
+ IsGlobal && isOpenMPGlobalCapturedDecl(Var, RSI->OpenMPLevel,
+ RSI->OpenMPCaptureLevel);
+
// When we detect target captures we are looking from inside the
// target region, therefore we need to propagate the capture from the
// enclosing region. Therefore, the capture is not initially nested.
if (IsTargetCap)
adjustOpenMPTargetScopeIndex(FunctionScopesIndex, RSI->OpenMPLevel);
- if (IsTargetCap || IsOpenMPPrivateDecl) {
+ if (IsTargetCap || IsOpenMPPrivateDecl == OMPC_private ||
+ (IsGlobal && !IsGlobalCap)) {
Nested = !IsTargetCap;
DeclRefType = DeclRefType.getUnqualifiedType();
CaptureType = Context.getLValueReferenceType(DeclRefType);
@@ -16493,7 +17549,7 @@ static ExprResult rebuildPotentialResultsAsNonOdrUsed(Sema &S, Expr *E,
// Mark that this expression does not constitute an odr-use.
auto MarkNotOdrUsed = [&] {
- S.MaybeODRUseExprs.erase(E);
+ S.MaybeODRUseExprs.remove(E);
if (LambdaScopeInfo *LSI = S.getCurLambda())
LSI->markVariableExprAsNonODRUsed(E);
};
@@ -17025,6 +18081,11 @@ void Sema::MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base) {
if (Method->isVirtual() &&
!Method->getDevirtualizedMethod(Base, getLangOpts().AppleKext))
OdrUse = false;
+
+ if (auto *FD = dyn_cast<FunctionDecl>(E->getDecl()))
+ if (!isConstantEvaluated() && FD->isConsteval() &&
+ !RebuildingImmediateInvocation)
+ ExprEvalContexts.back().ReferenceToConsteval.insert(E);
MarkExprReferenced(*this, E->getLocation(), E->getDecl(), E, OdrUse);
}
@@ -17116,71 +18177,36 @@ void Sema::MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T) {
}
namespace {
- /// Helper class that marks all of the declarations referenced by
- /// potentially-evaluated subexpressions as "referenced".
- class EvaluatedExprMarker : public EvaluatedExprVisitor<EvaluatedExprMarker> {
- Sema &S;
- bool SkipLocalVariables;
-
- public:
- typedef EvaluatedExprVisitor<EvaluatedExprMarker> Inherited;
-
- EvaluatedExprMarker(Sema &S, bool SkipLocalVariables)
- : Inherited(S.Context), S(S), SkipLocalVariables(SkipLocalVariables) { }
-
- void VisitDeclRefExpr(DeclRefExpr *E) {
- // If we were asked not to visit local variables, don't.
- if (SkipLocalVariables) {
- if (VarDecl *VD = dyn_cast<VarDecl>(E->getDecl()))
- if (VD->hasLocalStorage())
- return;
- }
-
- S.MarkDeclRefReferenced(E);
- }
-
- void VisitMemberExpr(MemberExpr *E) {
- S.MarkMemberReferenced(E);
- Inherited::VisitMemberExpr(E);
- }
-
- void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
- S.MarkFunctionReferenced(
- E->getBeginLoc(),
- const_cast<CXXDestructorDecl *>(E->getTemporary()->getDestructor()));
- Visit(E->getSubExpr());
- }
-
- void VisitCXXNewExpr(CXXNewExpr *E) {
- if (E->getOperatorNew())
- S.MarkFunctionReferenced(E->getBeginLoc(), E->getOperatorNew());
- if (E->getOperatorDelete())
- S.MarkFunctionReferenced(E->getBeginLoc(), E->getOperatorDelete());
- Inherited::VisitCXXNewExpr(E);
- }
+/// Helper class that marks all of the declarations referenced by
+/// potentially-evaluated subexpressions as "referenced".
+class EvaluatedExprMarker : public UsedDeclVisitor<EvaluatedExprMarker> {
+public:
+ typedef UsedDeclVisitor<EvaluatedExprMarker> Inherited;
+ bool SkipLocalVariables;
- void VisitCXXDeleteExpr(CXXDeleteExpr *E) {
- if (E->getOperatorDelete())
- S.MarkFunctionReferenced(E->getBeginLoc(), E->getOperatorDelete());
- QualType Destroyed = S.Context.getBaseElementType(E->getDestroyedType());
- if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) {
- CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl());
- S.MarkFunctionReferenced(E->getBeginLoc(), S.LookupDestructor(Record));
- }
+ EvaluatedExprMarker(Sema &S, bool SkipLocalVariables)
+ : Inherited(S), SkipLocalVariables(SkipLocalVariables) {}
- Inherited::VisitCXXDeleteExpr(E);
- }
+ void visitUsedDecl(SourceLocation Loc, Decl *D) {
+ S.MarkFunctionReferenced(Loc, cast<FunctionDecl>(D));
+ }
- void VisitCXXConstructExpr(CXXConstructExpr *E) {
- S.MarkFunctionReferenced(E->getBeginLoc(), E->getConstructor());
- Inherited::VisitCXXConstructExpr(E);
+ void VisitDeclRefExpr(DeclRefExpr *E) {
+ // If we were asked not to visit local variables, don't.
+ if (SkipLocalVariables) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(E->getDecl()))
+ if (VD->hasLocalStorage())
+ return;
}
+ S.MarkDeclRefReferenced(E);
+ }
- void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
- Visit(E->getExpr());
- }
- };
-}
+ void VisitMemberExpr(MemberExpr *E) {
+ S.MarkMemberReferenced(E);
+ Visit(E->getBase());
+ }
+};
+} // namespace
/// Mark any declarations that appear within this expression or any
/// potentially-evaluated subexpressions as "referenced".
@@ -18060,11 +19086,25 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
return ExprError();
}
+ case BuiltinType::IncompleteMatrixIdx:
+ Diag(cast<MatrixSubscriptExpr>(E->IgnoreParens())
+ ->getRowIdx()
+ ->getBeginLoc(),
+ diag::err_matrix_incomplete_index);
+ return ExprError();
+
// Expressions of unknown type.
case BuiltinType::OMPArraySection:
Diag(E->getBeginLoc(), diag::err_omp_array_section_use);
return ExprError();
+ // Expressions of unknown type.
+ case BuiltinType::OMPArrayShaping:
+ return ExprError(Diag(E->getBeginLoc(), diag::err_omp_array_shaping_use));
+
+ case BuiltinType::OMPIterator:
+ return ExprError(Diag(E->getBeginLoc(), diag::err_omp_iterator_use));
+
// Everything else should be impossible.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
@@ -18138,7 +19178,16 @@ ExprResult Sema::ActOnObjCAvailabilityCheckExpr(
ObjCAvailabilityCheckExpr(Version, AtLoc, RParen, Context.BoolTy);
}
-bool Sema::IsDependentFunctionNameExpr(Expr *E) {
- assert(E->isTypeDependent());
- return isa<UnresolvedLookupExpr>(E);
+ExprResult Sema::CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
+ ArrayRef<Expr *> SubExprs, QualType T) {
+ if (!Context.getLangOpts().RecoveryAST)
+ return ExprError();
+
+ if (isSFINAEContext())
+ return ExprError();
+
+ if (T.isNull() || !Context.getLangOpts().RecoveryASTType)
+ // We don't know the concrete type, fallback to dependent type.
+ T = Context.DependentTy;
+ return RecoveryExpr::Create(Context, T, Begin, End, SubExprs);
}
diff --git a/clang/lib/Sema/SemaExprCXX.cpp b/clang/lib/Sema/SemaExprCXX.cpp
index a73e6906fceb..d885920b6c14 100644
--- a/clang/lib/Sema/SemaExprCXX.cpp
+++ b/clang/lib/Sema/SemaExprCXX.cpp
@@ -11,6 +11,7 @@
///
//===----------------------------------------------------------------------===//
+#include "clang/Sema/Template.h"
#include "clang/Sema/SemaInternal.h"
#include "TreeTransform.h"
#include "TypeLocBuilder.h"
@@ -155,196 +156,203 @@ ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
// }
//
// See also PR6358 and PR6359.
- // For this reason, we're currently only doing the C++03 version of this
- // code; the C++0x version has to wait until we get a proper spec.
- QualType SearchType;
- DeclContext *LookupCtx = nullptr;
- bool isDependent = false;
- bool LookInScope = false;
+ //
+ // For now, we accept all the cases in which the name given could plausibly
+ // be interpreted as a correct destructor name, issuing off-by-default
+ // extension diagnostics on the cases that don't strictly conform to the
+ // C++20 rules. This basically means we always consider looking in the
+ // nested-name-specifier prefix, the complete nested-name-specifier, and
+ // the scope, and accept if we find the expected type in any of the three
+ // places.
if (SS.isInvalid())
return nullptr;
+ // Whether we've failed with a diagnostic already.
+ bool Failed = false;
+
+ llvm::SmallVector<NamedDecl*, 8> FoundDecls;
+ llvm::SmallSet<CanonicalDeclPtr<Decl>, 8> FoundDeclSet;
+
// If we have an object type, it's because we are in a
// pseudo-destructor-expression or a member access expression, and
// we know what type we're looking for.
- if (ObjectTypePtr)
- SearchType = GetTypeFromParser(ObjectTypePtr);
+ QualType SearchType =
+ ObjectTypePtr ? GetTypeFromParser(ObjectTypePtr) : QualType();
- if (SS.isSet()) {
- NestedNameSpecifier *NNS = SS.getScopeRep();
-
- bool AlreadySearched = false;
- bool LookAtPrefix = true;
- // C++11 [basic.lookup.qual]p6:
- // If a pseudo-destructor-name (5.2.4) contains a nested-name-specifier,
- // the type-names are looked up as types in the scope designated by the
- // nested-name-specifier. Similarly, in a qualified-id of the form:
- //
- // nested-name-specifier[opt] class-name :: ~ class-name
- //
- // the second class-name is looked up in the same scope as the first.
- //
- // Here, we determine whether the code below is permitted to look at the
- // prefix of the nested-name-specifier.
- DeclContext *DC = computeDeclContext(SS, EnteringContext);
- if (DC && DC->isFileContext()) {
- AlreadySearched = true;
- LookupCtx = DC;
- isDependent = false;
- } else if (DC && isa<CXXRecordDecl>(DC)) {
- LookAtPrefix = false;
- LookInScope = true;
- }
-
- // The second case from the C++03 rules quoted further above.
- NestedNameSpecifier *Prefix = nullptr;
- if (AlreadySearched) {
- // Nothing left to do.
- } else if (LookAtPrefix && (Prefix = NNS->getPrefix())) {
- CXXScopeSpec PrefixSS;
- PrefixSS.Adopt(NestedNameSpecifierLoc(Prefix, SS.location_data()));
- LookupCtx = computeDeclContext(PrefixSS, EnteringContext);
- isDependent = isDependentScopeSpecifier(PrefixSS);
- } else if (ObjectTypePtr) {
- LookupCtx = computeDeclContext(SearchType);
- isDependent = SearchType->isDependentType();
- } else {
- LookupCtx = computeDeclContext(SS, EnteringContext);
- isDependent = LookupCtx && LookupCtx->isDependentContext();
- }
- } else if (ObjectTypePtr) {
- // C++ [basic.lookup.classref]p3:
- // If the unqualified-id is ~type-name, the type-name is looked up
- // in the context of the entire postfix-expression. If the type T
- // of the object expression is of a class type C, the type-name is
- // also looked up in the scope of class C. At least one of the
- // lookups shall find a name that refers to (possibly
- // cv-qualified) T.
- LookupCtx = computeDeclContext(SearchType);
- isDependent = SearchType->isDependentType();
- assert((isDependent || !SearchType->isIncompleteType()) &&
- "Caller should have completed object type");
-
- LookInScope = true;
- } else {
- // Perform lookup into the current scope (only).
- LookInScope = true;
- }
-
- TypeDecl *NonMatchingTypeDecl = nullptr;
- LookupResult Found(*this, &II, NameLoc, LookupOrdinaryName);
- for (unsigned Step = 0; Step != 2; ++Step) {
- // Look for the name first in the computed lookup context (if we
- // have one) and, if that fails to find a match, in the scope (if
- // we're allowed to look there).
- Found.clear();
- if (Step == 0 && LookupCtx) {
- if (RequireCompleteDeclContext(SS, LookupCtx))
- return nullptr;
- LookupQualifiedName(Found, LookupCtx);
- } else if (Step == 1 && LookInScope && S) {
- LookupName(Found, S);
- } else {
- continue;
- }
+ auto CheckLookupResult = [&](LookupResult &Found) -> ParsedType {
+ auto IsAcceptableResult = [&](NamedDecl *D) -> bool {
+ auto *Type = dyn_cast<TypeDecl>(D->getUnderlyingDecl());
+ if (!Type)
+ return false;
- // FIXME: Should we be suppressing ambiguities here?
- if (Found.isAmbiguous())
- return nullptr;
+ if (SearchType.isNull() || SearchType->isDependentType())
+ return true;
- if (TypeDecl *Type = Found.getAsSingle<TypeDecl>()) {
QualType T = Context.getTypeDeclType(Type);
- MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false);
+ return Context.hasSameUnqualifiedType(T, SearchType);
+ };
- if (SearchType.isNull() || SearchType->isDependentType() ||
- Context.hasSameUnqualifiedType(T, SearchType)) {
- // We found our type!
+ unsigned NumAcceptableResults = 0;
+ for (NamedDecl *D : Found) {
+ if (IsAcceptableResult(D))
+ ++NumAcceptableResults;
+
+ // Don't list a class twice in the lookup failure diagnostic if it's
+ // found by both its injected-class-name and by the name in the enclosing
+ // scope.
+ if (auto *RD = dyn_cast<CXXRecordDecl>(D))
+ if (RD->isInjectedClassName())
+ D = cast<NamedDecl>(RD->getParent());
+
+ if (FoundDeclSet.insert(D).second)
+ FoundDecls.push_back(D);
+ }
+
+ // As an extension, attempt to "fix" an ambiguity by erasing all non-type
+ // results, and all non-matching results if we have a search type. It's not
+ // clear what the right behavior is if destructor lookup hits an ambiguity,
+ // but other compilers do generally accept at least some kinds of
+ // ambiguity.
+ if (Found.isAmbiguous() && NumAcceptableResults == 1) {
+ Diag(NameLoc, diag::ext_dtor_name_ambiguous);
+ LookupResult::Filter F = Found.makeFilter();
+ while (F.hasNext()) {
+ NamedDecl *D = F.next();
+ if (auto *TD = dyn_cast<TypeDecl>(D->getUnderlyingDecl()))
+ Diag(D->getLocation(), diag::note_destructor_type_here)
+ << Context.getTypeDeclType(TD);
+ else
+ Diag(D->getLocation(), diag::note_destructor_nontype_here);
+
+ if (!IsAcceptableResult(D))
+ F.erase();
+ }
+ F.done();
+ }
+
+ if (Found.isAmbiguous())
+ Failed = true;
+ if (TypeDecl *Type = Found.getAsSingle<TypeDecl>()) {
+ if (IsAcceptableResult(Type)) {
+ QualType T = Context.getTypeDeclType(Type);
+ MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false);
return CreateParsedType(T,
Context.getTrivialTypeSourceInfo(T, NameLoc));
}
+ }
- if (!SearchType.isNull())
- NonMatchingTypeDecl = Type;
- }
-
- // If the name that we found is a class template name, and it is
- // the same name as the template name in the last part of the
- // nested-name-specifier (if present) or the object type, then
- // this is the destructor for that class.
- // FIXME: This is a workaround until we get real drafting for core
- // issue 399, for which there isn't even an obvious direction.
- if (ClassTemplateDecl *Template = Found.getAsSingle<ClassTemplateDecl>()) {
- QualType MemberOfType;
- if (SS.isSet()) {
- if (DeclContext *Ctx = computeDeclContext(SS, EnteringContext)) {
- // Figure out the type of the context, if it has one.
- if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Ctx))
- MemberOfType = Context.getTypeDeclType(Record);
- }
- }
- if (MemberOfType.isNull())
- MemberOfType = SearchType;
+ return nullptr;
+ };
- if (MemberOfType.isNull())
- continue;
+ bool IsDependent = false;
- // We're referring into a class template specialization. If the
- // class template we found is the same as the template being
- // specialized, we found what we are looking for.
- if (const RecordType *Record = MemberOfType->getAs<RecordType>()) {
- if (ClassTemplateSpecializationDecl *Spec
- = dyn_cast<ClassTemplateSpecializationDecl>(Record->getDecl())) {
- if (Spec->getSpecializedTemplate()->getCanonicalDecl() ==
- Template->getCanonicalDecl())
- return CreateParsedType(
- MemberOfType,
- Context.getTrivialTypeSourceInfo(MemberOfType, NameLoc));
- }
+ auto LookupInObjectType = [&]() -> ParsedType {
+ if (Failed || SearchType.isNull())
+ return nullptr;
- continue;
- }
+ IsDependent |= SearchType->isDependentType();
- // We're referring to an unresolved class template
- // specialization. Determine whether we class template we found
- // is the same as the template being specialized or, if we don't
- // know which template is being specialized, that it at least
- // has the same name.
- if (const TemplateSpecializationType *SpecType
- = MemberOfType->getAs<TemplateSpecializationType>()) {
- TemplateName SpecName = SpecType->getTemplateName();
-
- // The class template we found is the same template being
- // specialized.
- if (TemplateDecl *SpecTemplate = SpecName.getAsTemplateDecl()) {
- if (SpecTemplate->getCanonicalDecl() == Template->getCanonicalDecl())
- return CreateParsedType(
- MemberOfType,
- Context.getTrivialTypeSourceInfo(MemberOfType, NameLoc));
+ LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
+ DeclContext *LookupCtx = computeDeclContext(SearchType);
+ if (!LookupCtx)
+ return nullptr;
+ LookupQualifiedName(Found, LookupCtx);
+ return CheckLookupResult(Found);
+ };
- continue;
- }
+ auto LookupInNestedNameSpec = [&](CXXScopeSpec &LookupSS) -> ParsedType {
+ if (Failed)
+ return nullptr;
- // The class template we found has the same name as the
- // (dependent) template name being specialized.
- if (DependentTemplateName *DepTemplate
- = SpecName.getAsDependentTemplateName()) {
- if (DepTemplate->isIdentifier() &&
- DepTemplate->getIdentifier() == Template->getIdentifier())
- return CreateParsedType(
- MemberOfType,
- Context.getTrivialTypeSourceInfo(MemberOfType, NameLoc));
+ IsDependent |= isDependentScopeSpecifier(LookupSS);
+ DeclContext *LookupCtx = computeDeclContext(LookupSS, EnteringContext);
+ if (!LookupCtx)
+ return nullptr;
- continue;
- }
- }
+ LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
+ if (RequireCompleteDeclContext(LookupSS, LookupCtx)) {
+ Failed = true;
+ return nullptr;
}
+ LookupQualifiedName(Found, LookupCtx);
+ return CheckLookupResult(Found);
+ };
+
+ auto LookupInScope = [&]() -> ParsedType {
+ if (Failed || !S)
+ return nullptr;
+
+ LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
+ LookupName(Found, S);
+ return CheckLookupResult(Found);
+ };
+
+ // C++2a [basic.lookup.qual]p6:
+ // In a qualified-id of the form
+ //
+ // nested-name-specifier[opt] type-name :: ~ type-name
+ //
+ // the second type-name is looked up in the same scope as the first.
+ //
+ // We interpret this as meaning that if you do a dual-scope lookup for the
+ // first name, you also do a dual-scope lookup for the second name, per
+ // C++ [basic.lookup.classref]p4:
+ //
+ // If the id-expression in a class member access is a qualified-id of the
+ // form
+ //
+ // class-name-or-namespace-name :: ...
+ //
+ // the class-name-or-namespace-name following the . or -> is first looked
+ // up in the class of the object expression and the name, if found, is used.
+ // Otherwise, it is looked up in the context of the entire
+ // postfix-expression.
+ //
+ // This looks in the same scopes as for an unqualified destructor name:
+ //
+ // C++ [basic.lookup.classref]p3:
+ // If the unqualified-id is ~ type-name, the type-name is looked up
+ // in the context of the entire postfix-expression. If the type T
+ // of the object expression is of a class type C, the type-name is
+ // also looked up in the scope of class C. At least one of the
+ // lookups shall find a name that refers to cv T.
+ //
+ // FIXME: The intent is unclear here. Should type-name::~type-name look in
+ // the scope anyway if it finds a non-matching name declared in the class?
+ // If both lookups succeed and find a dependent result, which result should
+ // we retain? (Same question for p->~type-name().)
+
+ if (NestedNameSpecifier *Prefix =
+ SS.isSet() ? SS.getScopeRep()->getPrefix() : nullptr) {
+ // This is
+ //
+ // nested-name-specifier type-name :: ~ type-name
+ //
+ // Look for the second type-name in the nested-name-specifier.
+ CXXScopeSpec PrefixSS;
+ PrefixSS.Adopt(NestedNameSpecifierLoc(Prefix, SS.location_data()));
+ if (ParsedType T = LookupInNestedNameSpec(PrefixSS))
+ return T;
+ } else {
+ // This is one of
+ //
+ // type-name :: ~ type-name
+ // ~ type-name
+ //
+ // Look in the scope and (if any) the object type.
+ if (ParsedType T = LookupInScope())
+ return T;
+ if (ParsedType T = LookupInObjectType())
+ return T;
}
- if (isDependent) {
- // We didn't find our type, but that's okay: it's dependent
- // anyway.
+ if (Failed)
+ return nullptr;
+
+ if (IsDependent) {
+ // We didn't find our type, but that's OK: it's dependent anyway.
// FIXME: What if we have no nested-name-specifier?
QualType T = CheckTypenameType(ETK_None, SourceLocation(),
@@ -353,26 +361,98 @@ ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
return ParsedType::make(T);
}
- if (NonMatchingTypeDecl) {
- QualType T = Context.getTypeDeclType(NonMatchingTypeDecl);
- Diag(NameLoc, diag::err_destructor_expr_type_mismatch)
- << T << SearchType;
- Diag(NonMatchingTypeDecl->getLocation(), diag::note_destructor_type_here)
- << T;
- } else if (ObjectTypePtr)
- Diag(NameLoc, diag::err_ident_in_dtor_not_a_type)
- << &II;
- else {
- SemaDiagnosticBuilder DtorDiag = Diag(NameLoc,
- diag::err_destructor_class_name);
- if (S) {
- const DeclContext *Ctx = S->getEntity();
- if (const CXXRecordDecl *Class = dyn_cast_or_null<CXXRecordDecl>(Ctx))
- DtorDiag << FixItHint::CreateReplacement(SourceRange(NameLoc),
- Class->getNameAsString());
+ // The remaining cases are all non-standard extensions imitating the behavior
+ // of various other compilers.
+ unsigned NumNonExtensionDecls = FoundDecls.size();
+
+ if (SS.isSet()) {
+ // For compatibility with older broken C++ rules and existing code,
+ //
+ // nested-name-specifier :: ~ type-name
+ //
+ // also looks for type-name within the nested-name-specifier.
+ if (ParsedType T = LookupInNestedNameSpec(SS)) {
+ Diag(SS.getEndLoc(), diag::ext_dtor_named_in_wrong_scope)
+ << SS.getRange()
+ << FixItHint::CreateInsertion(SS.getEndLoc(),
+ ("::" + II.getName()).str());
+ return T;
+ }
+
+ // For compatibility with other compilers and older versions of Clang,
+ //
+ // nested-name-specifier type-name :: ~ type-name
+ //
+ // also looks for type-name in the scope. Unfortunately, we can't
+ // reasonably apply this fallback for dependent nested-name-specifiers.
+ if (SS.getScopeRep()->getPrefix()) {
+ if (ParsedType T = LookupInScope()) {
+ Diag(SS.getEndLoc(), diag::ext_qualified_dtor_named_in_lexical_scope)
+ << FixItHint::CreateRemoval(SS.getRange());
+ Diag(FoundDecls.back()->getLocation(), diag::note_destructor_type_here)
+ << GetTypeFromParser(T);
+ return T;
+ }
}
}
+ // We didn't find anything matching; tell the user what we did find (if
+ // anything).
+
+ // Don't tell the user about declarations we shouldn't have found.
+ FoundDecls.resize(NumNonExtensionDecls);
+
+ // List types before non-types.
+ std::stable_sort(FoundDecls.begin(), FoundDecls.end(),
+ [](NamedDecl *A, NamedDecl *B) {
+ return isa<TypeDecl>(A->getUnderlyingDecl()) >
+ isa<TypeDecl>(B->getUnderlyingDecl());
+ });
+
+ // Suggest a fixit to properly name the destroyed type.
+ auto MakeFixItHint = [&]{
+ const CXXRecordDecl *Destroyed = nullptr;
+ // FIXME: If we have a scope specifier, suggest its last component?
+ if (!SearchType.isNull())
+ Destroyed = SearchType->getAsCXXRecordDecl();
+ else if (S)
+ Destroyed = dyn_cast_or_null<CXXRecordDecl>(S->getEntity());
+ if (Destroyed)
+ return FixItHint::CreateReplacement(SourceRange(NameLoc),
+ Destroyed->getNameAsString());
+ return FixItHint();
+ };
+
+ if (FoundDecls.empty()) {
+ // FIXME: Attempt typo-correction?
+ Diag(NameLoc, diag::err_undeclared_destructor_name)
+ << &II << MakeFixItHint();
+ } else if (!SearchType.isNull() && FoundDecls.size() == 1) {
+ if (auto *TD = dyn_cast<TypeDecl>(FoundDecls[0]->getUnderlyingDecl())) {
+ assert(!SearchType.isNull() &&
+ "should only reject a type result if we have a search type");
+ QualType T = Context.getTypeDeclType(TD);
+ Diag(NameLoc, diag::err_destructor_expr_type_mismatch)
+ << T << SearchType << MakeFixItHint();
+ } else {
+ Diag(NameLoc, diag::err_destructor_expr_nontype)
+ << &II << MakeFixItHint();
+ }
+ } else {
+ Diag(NameLoc, SearchType.isNull() ? diag::err_destructor_name_nontype
+ : diag::err_destructor_expr_mismatch)
+ << &II << SearchType << MakeFixItHint();
+ }
+
+ for (NamedDecl *FoundD : FoundDecls) {
+ if (auto *TD = dyn_cast<TypeDecl>(FoundD->getUnderlyingDecl()))
+ Diag(FoundD->getLocation(), diag::note_destructor_type_here)
+ << Context.getTypeDeclType(TD);
+ else
+ Diag(FoundD->getLocation(), diag::note_destructor_nontype_here)
+ << FoundD;
+ }
+
return nullptr;
}
@@ -624,11 +704,11 @@ getUuidAttrOfType(Sema &SemaRef, QualType QT,
}
/// Build a Microsoft __uuidof expression with a type operand.
-ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType,
+ExprResult Sema::BuildCXXUuidof(QualType Type,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc) {
- StringRef UuidStr;
+ MSGuidDecl *Guid = nullptr;
if (!Operand->getType()->isDependentType()) {
llvm::SmallSetVector<const UuidAttr *, 1> UuidAttrs;
getUuidAttrOfType(*this, Operand->getType(), UuidAttrs);
@@ -636,22 +716,21 @@ ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType,
return ExprError(Diag(TypeidLoc, diag::err_uuidof_without_guid));
if (UuidAttrs.size() > 1)
return ExprError(Diag(TypeidLoc, diag::err_uuidof_with_multiple_guids));
- UuidStr = UuidAttrs.back()->getGuid();
+ Guid = UuidAttrs.back()->getGuidDecl();
}
- return new (Context) CXXUuidofExpr(TypeInfoType.withConst(), Operand, UuidStr,
- SourceRange(TypeidLoc, RParenLoc));
+ return new (Context)
+ CXXUuidofExpr(Type, Operand, Guid, SourceRange(TypeidLoc, RParenLoc));
}
/// Build a Microsoft __uuidof expression with an expression operand.
-ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType,
- SourceLocation TypeidLoc,
- Expr *E,
- SourceLocation RParenLoc) {
- StringRef UuidStr;
+ExprResult Sema::BuildCXXUuidof(QualType Type, SourceLocation TypeidLoc,
+ Expr *E, SourceLocation RParenLoc) {
+ MSGuidDecl *Guid = nullptr;
if (!E->getType()->isDependentType()) {
if (E->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
- UuidStr = "00000000-0000-0000-0000-000000000000";
+ // A null pointer results in {00000000-0000-0000-0000-000000000000}.
+ Guid = Context.getMSGuidDecl(MSGuidDecl::Parts{});
} else {
llvm::SmallSetVector<const UuidAttr *, 1> UuidAttrs;
getUuidAttrOfType(*this, E->getType(), UuidAttrs);
@@ -659,29 +738,20 @@ ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType,
return ExprError(Diag(TypeidLoc, diag::err_uuidof_without_guid));
if (UuidAttrs.size() > 1)
return ExprError(Diag(TypeidLoc, diag::err_uuidof_with_multiple_guids));
- UuidStr = UuidAttrs.back()->getGuid();
+ Guid = UuidAttrs.back()->getGuidDecl();
}
}
- return new (Context) CXXUuidofExpr(TypeInfoType.withConst(), E, UuidStr,
- SourceRange(TypeidLoc, RParenLoc));
+ return new (Context)
+ CXXUuidofExpr(Type, E, Guid, SourceRange(TypeidLoc, RParenLoc));
}
/// ActOnCXXUuidof - Parse __uuidof( type-id ) or __uuidof (expression);
ExprResult
Sema::ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc,
bool isType, void *TyOrExpr, SourceLocation RParenLoc) {
- // If MSVCGuidDecl has not been cached, do the lookup.
- if (!MSVCGuidDecl) {
- IdentifierInfo *GuidII = &PP.getIdentifierTable().get("_GUID");
- LookupResult R(*this, GuidII, SourceLocation(), LookupTagName);
- LookupQualifiedName(R, Context.getTranslationUnitDecl());
- MSVCGuidDecl = R.getAsSingle<RecordDecl>();
- if (!MSVCGuidDecl)
- return ExprError(Diag(OpLoc, diag::err_need_header_before_ms_uuidof));
- }
-
- QualType GuidType = Context.getTypeDeclType(MSVCGuidDecl);
+ QualType GuidType = Context.getMSGuidType();
+ GuidType.addConst();
if (isType) {
// The operand is a type; handle it as such.
@@ -876,6 +946,11 @@ bool Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc,
E->getSourceRange()))
return true;
+ if (!isPointer && Ty->isSizelessType()) {
+ Diag(ThrowLoc, diag::err_throw_sizeless) << Ty << E->getSourceRange();
+ return true;
+ }
+
if (RequireNonAbstractType(ThrowLoc, ExceptionObjectTy,
diag::err_throw_abstract_type, E))
return true;
@@ -1742,8 +1817,9 @@ Sema::isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const {
return false;
if (FD.isDefined())
return false;
- bool IsAligned = false;
- if (FD.isReplaceableGlobalAllocationFunction(&IsAligned) && IsAligned)
+ Optional<unsigned> AlignmentParam;
+ if (FD.isReplaceableGlobalAllocationFunction(&AlignmentParam) &&
+ AlignmentParam.hasValue())
return true;
return false;
}
@@ -2061,8 +2137,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
SmallVector<Expr *, 8> AllPlaceArgs;
if (OperatorNew) {
- const FunctionProtoType *Proto =
- OperatorNew->getType()->getAs<FunctionProtoType>();
+ auto *Proto = OperatorNew->getType()->castAs<FunctionProtoType>();
VariadicCallType CallType = Proto->isVariadic() ? VariadicFunction
: VariadicDoesNotApply;
@@ -2070,18 +2145,80 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// arguments. Skip the first parameter because we don't have a corresponding
// argument. Skip the second parameter too if we're passing in the
// alignment; we've already filled it in.
+ unsigned NumImplicitArgs = PassAlignment ? 2 : 1;
if (GatherArgumentsForCall(PlacementLParen, OperatorNew, Proto,
- PassAlignment ? 2 : 1, PlacementArgs,
- AllPlaceArgs, CallType))
+ NumImplicitArgs, PlacementArgs, AllPlaceArgs,
+ CallType))
return ExprError();
if (!AllPlaceArgs.empty())
PlacementArgs = AllPlaceArgs;
- // FIXME: This is wrong: PlacementArgs misses out the first (size) argument.
- DiagnoseSentinelCalls(OperatorNew, PlacementLParen, PlacementArgs);
-
- // FIXME: Missing call to CheckFunctionCall or equivalent
+ // We would like to perform some checking on the given `operator new` call,
+ // but the PlacementArgs does not contain the implicit arguments,
+ // namely allocation size and maybe allocation alignment,
+ // so we need to conjure them.
+
+ QualType SizeTy = Context.getSizeType();
+ unsigned SizeTyWidth = Context.getTypeSize(SizeTy);
+
+ llvm::APInt SingleEltSize(
+ SizeTyWidth, Context.getTypeSizeInChars(AllocType).getQuantity());
+
+ // How many bytes do we want to allocate here?
+ llvm::Optional<llvm::APInt> AllocationSize;
+ if (!ArraySize.hasValue() && !AllocType->isDependentType()) {
+ // For non-array operator new, we only want to allocate one element.
+ AllocationSize = SingleEltSize;
+ } else if (KnownArraySize.hasValue() && !AllocType->isDependentType()) {
+ // For array operator new, only deal with static array size case.
+ bool Overflow;
+ AllocationSize = llvm::APInt(SizeTyWidth, *KnownArraySize)
+ .umul_ov(SingleEltSize, Overflow);
+ (void)Overflow;
+ assert(
+ !Overflow &&
+ "Expected that all the overflows would have been handled already.");
+ }
+
+ IntegerLiteral AllocationSizeLiteral(
+ Context,
+ AllocationSize.getValueOr(llvm::APInt::getNullValue(SizeTyWidth)),
+ SizeTy, SourceLocation());
+ // Otherwise, if we failed to constant-fold the allocation size, we'll
+ // just give up and pass-in something opaque, that isn't a null pointer.
+ OpaqueValueExpr OpaqueAllocationSize(SourceLocation(), SizeTy, VK_RValue,
+ OK_Ordinary, /*SourceExpr=*/nullptr);
+
+ // Let's synthesize the alignment argument in case we will need it.
+ // Since we *really* want to allocate these on stack, this is slightly ugly
+ // because there might not be a `std::align_val_t` type.
+ EnumDecl *StdAlignValT = getStdAlignValT();
+ QualType AlignValT =
+ StdAlignValT ? Context.getTypeDeclType(StdAlignValT) : SizeTy;
+ IntegerLiteral AlignmentLiteral(
+ Context,
+ llvm::APInt(Context.getTypeSize(SizeTy),
+ Alignment / Context.getCharWidth()),
+ SizeTy, SourceLocation());
+ ImplicitCastExpr DesiredAlignment(ImplicitCastExpr::OnStack, AlignValT,
+ CK_IntegralCast, &AlignmentLiteral,
+ VK_RValue);
+
+ // Adjust placement args by prepending conjured size and alignment exprs.
+ llvm::SmallVector<Expr *, 8> CallArgs;
+ CallArgs.reserve(NumImplicitArgs + PlacementArgs.size());
+ CallArgs.emplace_back(AllocationSize.hasValue()
+ ? static_cast<Expr *>(&AllocationSizeLiteral)
+ : &OpaqueAllocationSize);
+ if (PassAlignment)
+ CallArgs.emplace_back(&DesiredAlignment);
+ CallArgs.insert(CallArgs.end(), PlacementArgs.begin(), PlacementArgs.end());
+
+ DiagnoseSentinelCalls(OperatorNew, PlacementLParen, CallArgs);
+
+ checkCall(OperatorNew, Proto, /*ThisArg=*/nullptr, CallArgs,
+ /*IsMemberFunction=*/false, StartLoc, Range, CallType);
// Warn if the type is over-aligned and is being allocated by (unaligned)
// global operator new.
@@ -2193,7 +2330,8 @@ bool Sema::CheckAllocatedType(QualType AllocType, SourceLocation Loc,
return Diag(Loc, diag::err_bad_new_type)
<< AllocType << 1 << R;
else if (!AllocType->isDependentType() &&
- RequireCompleteType(Loc, AllocType, diag::err_new_incomplete_type,R))
+ RequireCompleteSizedType(
+ Loc, AllocType, diag::err_new_incomplete_or_sizeless_type, R))
return true;
else if (RequireNonAbstractType(Loc, AllocType,
diag::err_allocation_of_abstract_type))
@@ -2515,8 +2653,7 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
// for template argument deduction and for comparison purposes.
QualType ExpectedFunctionType;
{
- const FunctionProtoType *Proto
- = OperatorNew->getType()->getAs<FunctionProtoType>();
+ auto *Proto = OperatorNew->getType()->castAs<FunctionProtoType>();
SmallVector<QualType, 4> ArgTypes;
ArgTypes.push_back(Context.VoidPtrTy);
@@ -2835,6 +2972,7 @@ void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
Alloc->setParams(ParamDecls);
if (ExtraAttr)
Alloc->addAttr(ExtraAttr);
+ AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(Alloc);
Context.getTranslationUnitDecl()->addDecl(Alloc);
IdResolver.tryAddTopLevelDecl(Alloc, Name);
};
@@ -3319,7 +3457,8 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
// this, so we treat it as a warning unless we're in a SFINAE context.
Diag(StartLoc, diag::ext_delete_void_ptr_operand)
<< Type << Ex.get()->getSourceRange();
- } else if (Pointee->isFunctionType() || Pointee->isVoidType()) {
+ } else if (Pointee->isFunctionType() || Pointee->isVoidType() ||
+ Pointee->isSizelessType()) {
return ExprError(Diag(StartLoc, diag::err_delete_operand)
<< Type << Ex.get()->getSourceRange());
} else if (!Pointee->isDependentType()) {
@@ -3865,15 +4004,17 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
ICS.DiagnoseAmbiguousConversion(*this, From->getExprLoc(),
PDiag(diag::err_typecheck_ambiguous_condition)
<< From->getSourceRange());
- return ExprError();
+ return ExprError();
case ImplicitConversionSequence::EllipsisConversion:
llvm_unreachable("Cannot perform an ellipsis conversion");
case ImplicitConversionSequence::BadConversion:
- bool Diagnosed =
- DiagnoseAssignmentResult(Incompatible, From->getExprLoc(), ToType,
- From->getType(), From, Action);
+ Sema::AssignConvertType ConvTy =
+ CheckAssignmentConstraints(From->getExprLoc(), ToType, From->getType());
+ bool Diagnosed = DiagnoseAssignmentResult(
+ ConvTy == Compatible ? Incompatible : ConvTy, From->getExprLoc(),
+ ToType, From->getType(), From, Action);
assert(Diagnosed && "failed to diagnose bad conversion"); (void)Diagnosed;
return ExprError();
}
@@ -4062,8 +4203,8 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
break;
case ICK_Compatible_Conversion:
- From = ImpCastExprToType(From, ToType, CK_NoOp,
- VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ From = ImpCastExprToType(From, ToType, CK_NoOp, From->getValueKind(),
+ /*BasePath=*/nullptr, CCK).get();
break;
case ICK_Writeback_Conversion:
@@ -4213,9 +4354,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
// Case 2. _Complex x -> y
} else {
- const ComplexType *FromComplex = From->getType()->getAs<ComplexType>();
- assert(FromComplex);
-
+ auto *FromComplex = From->getType()->castAs<ComplexType>();
QualType ElType = FromComplex->getElementType();
bool isFloatingComplex = ElType->isRealFloatingType();
@@ -4302,11 +4441,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
break;
case ICK_Qualification: {
- // The qualification keeps the category of the inner expression, unless the
- // target type isn't a reference.
- ExprValueKind VK =
- ToType->isReferenceType() ? From->getValueKind() : VK_RValue;
-
+ ExprValueKind VK = From->getValueKind();
CastKind CK = CK_NoOp;
if (ToType->isReferenceType() &&
@@ -4348,6 +4483,16 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
VK_RValue, nullptr, CCK).get();
}
+ // Materialize a temporary if we're implicitly converting to a reference
+ // type. This is not required by the C++ rules but is necessary to maintain
+ // AST invariants.
+ if (ToType->isReferenceType() && From->isRValue()) {
+ ExprResult Res = TemporaryMaterializationConversion(From);
+ if (Res.isInvalid())
+ return ExprError();
+ From = Res.get();
+ }
+
// If this conversion sequence succeeded and involved implicitly converting a
// _Nullable type to a _Nonnull one, complain.
if (!isCast(CCK))
@@ -4504,8 +4649,7 @@ static bool HasNoThrowOperator(const RecordType *RT, OverloadedOperatorKind Op,
CXXMethodDecl *Operator = cast<CXXMethodDecl>(*Op);
if((Operator->*IsDesiredOp)()) {
FoundOperator = true;
- const FunctionProtoType *CPT =
- Operator->getType()->getAs<FunctionProtoType>();
+ auto *CPT = Operator->getType()->castAs<FunctionProtoType>();
CPT = Self.ResolveExceptionSpec(KeyLoc, CPT);
if (!CPT || !CPT->isNothrow())
return false;
@@ -4534,7 +4678,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
case UTT_IsArray:
return T->isArrayType();
case UTT_IsPointer:
- return T->isPointerType();
+ return T->isAnyPointerType();
case UTT_IsLvalueReference:
return T->isLValueReferenceType();
case UTT_IsRvalueReference:
@@ -4754,8 +4898,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
if (C.getLangOpts().AccessControl && Destructor->getAccess() != AS_public)
return false;
if (UTT == UTT_IsNothrowDestructible) {
- const FunctionProtoType *CPT =
- Destructor->getType()->getAs<FunctionProtoType>();
+ auto *CPT = Destructor->getType()->castAs<FunctionProtoType>();
CPT = Self.ResolveExceptionSpec(KeyLoc, CPT);
if (!CPT || !CPT->isNothrow())
return false;
@@ -4843,8 +4986,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
auto *Constructor = cast<CXXConstructorDecl>(ND->getUnderlyingDecl());
if (Constructor->isCopyConstructor(FoundTQs)) {
FoundConstructor = true;
- const FunctionProtoType *CPT
- = Constructor->getType()->getAs<FunctionProtoType>();
+ auto *CPT = Constructor->getType()->castAs<FunctionProtoType>();
CPT = Self.ResolveExceptionSpec(KeyLoc, CPT);
if (!CPT)
return false;
@@ -4882,8 +5024,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
auto *Constructor = cast<CXXConstructorDecl>(ND->getUnderlyingDecl());
if (Constructor->isDefaultConstructor()) {
FoundConstructor = true;
- const FunctionProtoType *CPT
- = Constructor->getType()->getAs<FunctionProtoType>();
+ auto *CPT = Constructor->getType()->castAs<FunctionProtoType>();
CPT = Self.ResolveExceptionSpec(KeyLoc, CPT);
if (!CPT)
return false;
@@ -4976,20 +5117,19 @@ static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
if (RD && RD->isAbstract())
return false;
- SmallVector<OpaqueValueExpr, 2> OpaqueArgExprs;
+ llvm::BumpPtrAllocator OpaqueExprAllocator;
SmallVector<Expr *, 2> ArgExprs;
ArgExprs.reserve(Args.size() - 1);
for (unsigned I = 1, N = Args.size(); I != N; ++I) {
QualType ArgTy = Args[I]->getType();
if (ArgTy->isObjectType() || ArgTy->isFunctionType())
ArgTy = S.Context.getRValueReferenceType(ArgTy);
- OpaqueArgExprs.push_back(
- OpaqueValueExpr(Args[I]->getTypeLoc().getBeginLoc(),
- ArgTy.getNonLValueExprType(S.Context),
- Expr::getValueKindForType(ArgTy)));
+ ArgExprs.push_back(
+ new (OpaqueExprAllocator.Allocate<OpaqueValueExpr>())
+ OpaqueValueExpr(Args[I]->getTypeLoc().getBeginLoc(),
+ ArgTy.getNonLValueExprType(S.Context),
+ Expr::getValueKindForType(ArgTy)));
}
- for (Expr &E : OpaqueArgExprs)
- ArgExprs.push_back(&E);
// Perform the initialization in an unevaluated context within a SFINAE
// trap at translation unit scope.
@@ -5539,7 +5679,7 @@ QualType Sema::CheckPointerToMemberOperands(ExprResult &LHS, ExprResult &RHS,
// C++2a allows functions with ref-qualifier & if their cv-qualifier-seq
// is (exactly) 'const'.
if (Proto->isConst() && !Proto->isVolatile())
- Diag(Loc, getLangOpts().CPlusPlus2a
+ Diag(Loc, getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_pointer_to_const_ref_member_on_rvalue
: diag::ext_pointer_to_const_ref_member_on_rvalue);
else
@@ -5768,7 +5908,7 @@ QualType Sema::CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
RHS = DefaultFunctionArrayLvalueConversion(RHS.get());
QualType CondType = Cond.get()->getType();
- const auto *CondVT = CondType->getAs<VectorType>();
+ const auto *CondVT = CondType->castAs<VectorType>();
QualType CondElementTy = CondVT->getElementType();
unsigned CondElementCount = CondVT->getNumElements();
QualType LHSType = LHS.get()->getType();
@@ -5824,7 +5964,7 @@ QualType Sema::CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
return {};
}
ResultType = Context.getVectorType(
- ResultElementTy, CondType->getAs<VectorType>()->getNumElements(),
+ ResultElementTy, CondType->castAs<VectorType>()->getNumElements(),
VectorType::GenericVector);
LHS = ImpCastExprToType(LHS.get(), ResultType, CK_VectorSplat);
@@ -5833,9 +5973,9 @@ QualType Sema::CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
assert(!ResultType.isNull() && ResultType->isVectorType() &&
"Result should have been a vector type");
- QualType ResultElementTy = ResultType->getAs<VectorType>()->getElementType();
- unsigned ResultElementCount =
- ResultType->getAs<VectorType>()->getNumElements();
+ auto *ResultVectorTy = ResultType->castAs<VectorType>();
+ QualType ResultElementTy = ResultVectorTy->getElementType();
+ unsigned ResultElementCount = ResultVectorTy->getNumElements();
if (ResultElementCount != CondElementCount) {
Diag(QuestionLoc, diag::err_conditional_vector_size) << CondType
@@ -6632,8 +6772,7 @@ ExprResult Sema::MaybeBindToTemporary(Expr *E) {
else if (const MemberPointerType *MemPtr = T->getAs<MemberPointerType>())
T = MemPtr->getPointeeType();
- const FunctionType *FTy = T->getAs<FunctionType>();
- assert(FTy && "call to value not of function type?");
+ auto *FTy = T->castAs<FunctionType>();
ReturnsRetained = FTy->getExtInfo().getProducesResult();
// ActOnStmtExpr arranges things so that StmtExprs of retainable
@@ -6697,6 +6836,9 @@ ExprResult Sema::MaybeBindToTemporary(Expr *E) {
VK_RValue);
}
+ if (E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
+ Cleanup.setExprNeedsCleanups(true);
+
if (!getLangOpts().CPlusPlus)
return E;
@@ -6799,8 +6941,9 @@ Stmt *Sema::MaybeCreateStmtWithCleanups(Stmt *SubStmt) {
// a new AsmStmtWithTemporaries.
CompoundStmt *CompStmt = CompoundStmt::Create(
Context, SubStmt, SourceLocation(), SourceLocation());
- Expr *E = new (Context) StmtExpr(CompStmt, Context.VoidTy, SourceLocation(),
- SourceLocation());
+ Expr *E = new (Context)
+ StmtExpr(CompStmt, Context.VoidTy, SourceLocation(), SourceLocation(),
+ /*FIXME TemplateDepth=*/0);
return MaybeCreateExprWithCleanups(E);
}
@@ -6843,9 +6986,10 @@ ExprResult Sema::ActOnDecltypeExpression(Expr *E) {
return ExprError();
if (RHS.get() == BO->getRHS())
return E;
- return new (Context) BinaryOperator(
- BO->getLHS(), RHS.get(), BO_Comma, BO->getType(), BO->getValueKind(),
- BO->getObjectKind(), BO->getOperatorLoc(), BO->getFPFeatures());
+ return BinaryOperator::Create(Context, BO->getLHS(), RHS.get(), BO_Comma,
+ BO->getType(), BO->getValueKind(),
+ BO->getObjectKind(), BO->getOperatorLoc(),
+ BO->getFPFeatures(getLangOpts()));
}
}
@@ -7317,7 +7461,7 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
TypeResult T = ActOnTemplateIdType(S,
- TemplateId->SS,
+ SS,
TemplateId->TemplateKWLoc,
TemplateId->Template,
TemplateId->Name,
@@ -7370,7 +7514,7 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
TypeResult T = ActOnTemplateIdType(S,
- TemplateId->SS,
+ SS,
TemplateId->TemplateKWLoc,
TemplateId->Template,
TemplateId->Name,
@@ -7447,13 +7591,13 @@ ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
// a difference in ARC, but outside of ARC the resulting block literal
// follows the normal lifetime rules for block literals instead of being
// autoreleased.
- DiagnosticErrorTrap Trap(Diags);
PushExpressionEvaluationContext(
ExpressionEvaluationContext::PotentiallyEvaluated);
ExprResult BlockExp = BuildBlockForLambdaConversion(
Exp.get()->getExprLoc(), Exp.get()->getExprLoc(), Method, Exp.get());
PopExpressionEvaluationContext();
+ // FIXME: This note should be produced by a CodeSynthesisContext.
if (BlockExp.isInvalid())
Diag(Exp.get()->getExprLoc(), diag::note_lambda_to_block_conv);
return BlockExp;
@@ -7512,61 +7656,6 @@ ExprResult Sema::ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation,
return BuildCXXNoexceptExpr(KeyLoc, Operand, RParen);
}
-static bool IsSpecialDiscardedValue(Expr *E) {
- // In C++11, discarded-value expressions of a certain form are special,
- // according to [expr]p10:
- // The lvalue-to-rvalue conversion (4.1) is applied only if the
- // expression is an lvalue of volatile-qualified type and it has
- // one of the following forms:
- E = E->IgnoreParens();
-
- // - id-expression (5.1.1),
- if (isa<DeclRefExpr>(E))
- return true;
-
- // - subscripting (5.2.1),
- if (isa<ArraySubscriptExpr>(E))
- return true;
-
- // - class member access (5.2.5),
- if (isa<MemberExpr>(E))
- return true;
-
- // - indirection (5.3.1),
- if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E))
- if (UO->getOpcode() == UO_Deref)
- return true;
-
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
- // - pointer-to-member operation (5.5),
- if (BO->isPtrMemOp())
- return true;
-
- // - comma expression (5.18) where the right operand is one of the above.
- if (BO->getOpcode() == BO_Comma)
- return IsSpecialDiscardedValue(BO->getRHS());
- }
-
- // - conditional expression (5.16) where both the second and the third
- // operands are one of the above, or
- if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E))
- return IsSpecialDiscardedValue(CO->getTrueExpr()) &&
- IsSpecialDiscardedValue(CO->getFalseExpr());
- // The related edge case of "*x ?: *x".
- if (BinaryConditionalOperator *BCO =
- dyn_cast<BinaryConditionalOperator>(E)) {
- if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(BCO->getTrueExpr()))
- return IsSpecialDiscardedValue(OVE->getSourceExpr()) &&
- IsSpecialDiscardedValue(BCO->getFalseExpr());
- }
-
- // Objective-C++ extensions to the rule.
- if (isa<PseudoObjectExpr>(E) || isa<ObjCIvarRefExpr>(E))
- return true;
-
- return false;
-}
-
/// Perform the conversions required for an expression used in a
/// context that ignores the result.
ExprResult Sema::IgnoredValueConversions(Expr *E) {
@@ -7591,23 +7680,20 @@ ExprResult Sema::IgnoredValueConversions(Expr *E) {
return E;
}
- if (getLangOpts().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
// The C++11 standard defines the notion of a discarded-value expression;
// normally, we don't need to do anything to handle it, but if it is a
// volatile lvalue with a special form, we perform an lvalue-to-rvalue
// conversion.
- if (getLangOpts().CPlusPlus11 && E->isGLValue() &&
- E->getType().isVolatileQualified()) {
- if (IsSpecialDiscardedValue(E)) {
- ExprResult Res = DefaultLvalueConversion(E);
- if (Res.isInvalid())
- return E;
- E = Res.get();
- } else {
- // Per C++2a [expr.ass]p5, a volatile assignment is not deprecated if
- // it occurs as a discarded-value expression.
- CheckUnusedVolatileAssignment(E);
- }
+ if (getLangOpts().CPlusPlus11 && E->isReadIfDiscardedInCPlusPlus11()) {
+ ExprResult Res = DefaultLvalueConversion(E);
+ if (Res.isInvalid())
+ return E;
+ E = Res.get();
+ } else {
+ // Per C++2a [expr.ass]p5, a volatile assignment is not deprecated if
+ // it occurs as a discarded-value expression.
+ CheckUnusedVolatileAssignment(E);
}
// C++1z:
@@ -8163,6 +8249,7 @@ public:
ExprResult
Sema::CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl,
+ bool RecoverUncorrectedTypos,
llvm::function_ref<ExprResult(Expr *)> Filter) {
// If the current evaluation context indicates there are uncorrected typos
// and the current expression isn't guaranteed to not have typos, try to
@@ -8175,6 +8262,16 @@ Sema::CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl,
TyposResolved -= DelayedTypos.size();
if (Result.isInvalid() || Result.get() != E) {
ExprEvalContexts.back().NumTypos -= TyposResolved;
+ if (Result.isInvalid() && RecoverUncorrectedTypos) {
+ struct TyposReplace : TreeTransform<TyposReplace> {
+ TyposReplace(Sema &SemaRef) : TreeTransform(SemaRef) {}
+ ExprResult TransformTypoExpr(clang::TypoExpr *E) {
+ return this->SemaRef.CreateRecoveryExpr(E->getBeginLoc(),
+ E->getEndLoc(), {});
+ }
+ } TT(*this);
+ return TT.TransformExpr(E);
+ }
return Result;
}
assert(TyposResolved == 0 && "Corrected typo but got same Expr back?");
@@ -8213,7 +8310,8 @@ ExprResult Sema::ActOnFinishFullExpr(Expr *FE, SourceLocation CC,
DiagnoseUnusedExprResult(FullExpr.get());
}
- FullExpr = CorrectDelayedTyposInExpr(FullExpr.get());
+ FullExpr = CorrectDelayedTyposInExpr(FullExpr.get(), /*InitDecl=*/nullptr,
+ /*RecoverUncorrectedTypos=*/true);
if (FullExpr.isInvalid())
return ExprError();
@@ -8331,3 +8429,216 @@ Sema::CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
return CheckMicrosoftIfExistsSymbol(S, SS, TargetNameInfo);
}
+
+concepts::Requirement *Sema::ActOnSimpleRequirement(Expr *E) {
+ return BuildExprRequirement(E, /*IsSimple=*/true,
+ /*NoexceptLoc=*/SourceLocation(),
+ /*ReturnTypeRequirement=*/{});
+}
+
+concepts::Requirement *
+Sema::ActOnTypeRequirement(SourceLocation TypenameKWLoc, CXXScopeSpec &SS,
+ SourceLocation NameLoc, IdentifierInfo *TypeName,
+ TemplateIdAnnotation *TemplateId) {
+ assert(((!TypeName && TemplateId) || (TypeName && !TemplateId)) &&
+ "Exactly one of TypeName and TemplateId must be specified.");
+ TypeSourceInfo *TSI = nullptr;
+ if (TypeName) {
+ QualType T = CheckTypenameType(ETK_Typename, TypenameKWLoc,
+ SS.getWithLocInContext(Context), *TypeName,
+ NameLoc, &TSI, /*DeducedTypeContext=*/false);
+ if (T.isNull())
+ return nullptr;
+ } else {
+ ASTTemplateArgsPtr ArgsPtr(TemplateId->getTemplateArgs(),
+ TemplateId->NumArgs);
+ TypeResult T = ActOnTypenameType(CurScope, TypenameKWLoc, SS,
+ TemplateId->TemplateKWLoc,
+ TemplateId->Template, TemplateId->Name,
+ TemplateId->TemplateNameLoc,
+ TemplateId->LAngleLoc, ArgsPtr,
+ TemplateId->RAngleLoc);
+ if (T.isInvalid())
+ return nullptr;
+ if (GetTypeFromParser(T.get(), &TSI).isNull())
+ return nullptr;
+ }
+ return BuildTypeRequirement(TSI);
+}
+
+concepts::Requirement *
+Sema::ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc) {
+ return BuildExprRequirement(E, /*IsSimple=*/false, NoexceptLoc,
+ /*ReturnTypeRequirement=*/{});
+}
+
+concepts::Requirement *
+Sema::ActOnCompoundRequirement(
+ Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
+ TemplateIdAnnotation *TypeConstraint, unsigned Depth) {
+ // C++2a [expr.prim.req.compound] p1.3.3
+ // [..] the expression is deduced against an invented function template
+ // F [...] F is a void function template with a single type template
+ // parameter T declared with the constrained-parameter. Form a new
+ // cv-qualifier-seq cv by taking the union of const and volatile specifiers
+ // around the constrained-parameter. F has a single parameter whose
+ // type-specifier is cv T followed by the abstract-declarator. [...]
+ //
+ // The cv part is done in the calling function - we get the concept with
+ // arguments and the abstract declarator with the correct CV qualification and
+ // have to synthesize T and the single parameter of F.
+ auto &II = Context.Idents.get("expr-type");
+ auto *TParam = TemplateTypeParmDecl::Create(Context, CurContext,
+ SourceLocation(),
+ SourceLocation(), Depth,
+ /*Index=*/0, &II,
+ /*Typename=*/true,
+ /*ParameterPack=*/false,
+ /*HasTypeConstraint=*/true);
+
+ if (ActOnTypeConstraint(SS, TypeConstraint, TParam,
+ /*EllpsisLoc=*/SourceLocation()))
+ // Just produce a requirement with no type requirements.
+ return BuildExprRequirement(E, /*IsSimple=*/false, NoexceptLoc, {});
+
+ auto *TPL = TemplateParameterList::Create(Context, SourceLocation(),
+ SourceLocation(),
+ ArrayRef<NamedDecl *>(TParam),
+ SourceLocation(),
+ /*RequiresClause=*/nullptr);
+ return BuildExprRequirement(
+ E, /*IsSimple=*/false, NoexceptLoc,
+ concepts::ExprRequirement::ReturnTypeRequirement(TPL));
+}
+
+concepts::ExprRequirement *
+Sema::BuildExprRequirement(
+ Expr *E, bool IsSimple, SourceLocation NoexceptLoc,
+ concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement) {
+ auto Status = concepts::ExprRequirement::SS_Satisfied;
+ ConceptSpecializationExpr *SubstitutedConstraintExpr = nullptr;
+ if (E->isInstantiationDependent() || ReturnTypeRequirement.isDependent())
+ Status = concepts::ExprRequirement::SS_Dependent;
+ else if (NoexceptLoc.isValid() && canThrow(E) == CanThrowResult::CT_Can)
+ Status = concepts::ExprRequirement::SS_NoexceptNotMet;
+ else if (ReturnTypeRequirement.isSubstitutionFailure())
+ Status = concepts::ExprRequirement::SS_TypeRequirementSubstitutionFailure;
+ else if (ReturnTypeRequirement.isTypeConstraint()) {
+ // C++2a [expr.prim.req]p1.3.3
+ // The immediately-declared constraint ([temp]) of decltype((E)) shall
+ // be satisfied.
+ TemplateParameterList *TPL =
+ ReturnTypeRequirement.getTypeConstraintTemplateParameterList();
+ QualType MatchedType =
+ BuildDecltypeType(E, E->getBeginLoc()).getCanonicalType();
+ llvm::SmallVector<TemplateArgument, 1> Args;
+ Args.push_back(TemplateArgument(MatchedType));
+ TemplateArgumentList TAL(TemplateArgumentList::OnStack, Args);
+ MultiLevelTemplateArgumentList MLTAL(TAL);
+ for (unsigned I = 0; I < TPL->getDepth(); ++I)
+ MLTAL.addOuterRetainedLevel();
+ Expr *IDC =
+ cast<TemplateTypeParmDecl>(TPL->getParam(0))->getTypeConstraint()
+ ->getImmediatelyDeclaredConstraint();
+ ExprResult Constraint = SubstExpr(IDC, MLTAL);
+ assert(!Constraint.isInvalid() &&
+ "Substitution cannot fail as it is simply putting a type template "
+ "argument into a concept specialization expression's parameter.");
+
+ SubstitutedConstraintExpr =
+ cast<ConceptSpecializationExpr>(Constraint.get());
+ if (!SubstitutedConstraintExpr->isSatisfied())
+ Status = concepts::ExprRequirement::SS_ConstraintsNotSatisfied;
+ }
+ return new (Context) concepts::ExprRequirement(E, IsSimple, NoexceptLoc,
+ ReturnTypeRequirement, Status,
+ SubstitutedConstraintExpr);
+}
+
+concepts::ExprRequirement *
+Sema::BuildExprRequirement(
+ concepts::Requirement::SubstitutionDiagnostic *ExprSubstitutionDiagnostic,
+ bool IsSimple, SourceLocation NoexceptLoc,
+ concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement) {
+ return new (Context) concepts::ExprRequirement(ExprSubstitutionDiagnostic,
+ IsSimple, NoexceptLoc,
+ ReturnTypeRequirement);
+}
+
+concepts::TypeRequirement *
+Sema::BuildTypeRequirement(TypeSourceInfo *Type) {
+ return new (Context) concepts::TypeRequirement(Type);
+}
+
+concepts::TypeRequirement *
+Sema::BuildTypeRequirement(
+ concepts::Requirement::SubstitutionDiagnostic *SubstDiag) {
+ return new (Context) concepts::TypeRequirement(SubstDiag);
+}
+
+concepts::Requirement *Sema::ActOnNestedRequirement(Expr *Constraint) {
+ return BuildNestedRequirement(Constraint);
+}
+
+concepts::NestedRequirement *
+Sema::BuildNestedRequirement(Expr *Constraint) {
+ ConstraintSatisfaction Satisfaction;
+ if (!Constraint->isInstantiationDependent() &&
+ CheckConstraintSatisfaction(nullptr, {Constraint}, /*TemplateArgs=*/{},
+ Constraint->getSourceRange(), Satisfaction))
+ return nullptr;
+ return new (Context) concepts::NestedRequirement(Context, Constraint,
+ Satisfaction);
+}
+
+concepts::NestedRequirement *
+Sema::BuildNestedRequirement(
+ concepts::Requirement::SubstitutionDiagnostic *SubstDiag) {
+ return new (Context) concepts::NestedRequirement(SubstDiag);
+}
+
+RequiresExprBodyDecl *
+Sema::ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
+ ArrayRef<ParmVarDecl *> LocalParameters,
+ Scope *BodyScope) {
+ assert(BodyScope);
+
+ RequiresExprBodyDecl *Body = RequiresExprBodyDecl::Create(Context, CurContext,
+ RequiresKWLoc);
+
+ PushDeclContext(BodyScope, Body);
+
+ for (ParmVarDecl *Param : LocalParameters) {
+ if (Param->hasDefaultArg())
+ // C++2a [expr.prim.req] p4
+ // [...] A local parameter of a requires-expression shall not have a
+ // default argument. [...]
+ Diag(Param->getDefaultArgRange().getBegin(),
+ diag::err_requires_expr_local_parameter_default_argument);
+ // Ignore default argument and move on
+
+ Param->setDeclContext(Body);
+ // If this has an identifier, add it to the scope stack.
+ if (Param->getIdentifier()) {
+ CheckShadow(BodyScope, Param);
+ PushOnScopeChains(Param, BodyScope);
+ }
+ }
+ return Body;
+}
+
+void Sema::ActOnFinishRequiresExpr() {
+ assert(CurContext && "DeclContext imbalance!");
+ CurContext = CurContext->getLexicalParent();
+ assert(CurContext && "Popped translation unit!");
+}
+
+ExprResult
+Sema::ActOnRequiresExpr(SourceLocation RequiresKWLoc,
+ RequiresExprBodyDecl *Body,
+ ArrayRef<ParmVarDecl *> LocalParameters,
+ ArrayRef<concepts::Requirement *> Requirements,
+ SourceLocation ClosingBraceLoc) {
+ return RequiresExpr::Create(Context, RequiresKWLoc, Body, LocalParameters,
+ Requirements, ClosingBraceLoc);
+}
diff --git a/clang/lib/Sema/SemaExprObjC.cpp b/clang/lib/Sema/SemaExprObjC.cpp
index c61b13cf5980..228a1ec3ba1f 100644
--- a/clang/lib/Sema/SemaExprObjC.cpp
+++ b/clang/lib/Sema/SemaExprObjC.cpp
@@ -894,6 +894,62 @@ ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) {
ArrayWithObjectsMethod, SR));
}
+/// Check for duplicate keys in an ObjC dictionary literal. For instance:
+/// NSDictionary *nd = @{ @"foo" : @"bar", @"foo" : @"baz" };
+static void
+CheckObjCDictionaryLiteralDuplicateKeys(Sema &S,
+ ObjCDictionaryLiteral *Literal) {
+ if (Literal->isValueDependent() || Literal->isTypeDependent())
+ return;
+
+ // NSNumber has quite relaxed equality semantics (for instance, @YES is
+ // considered equal to @1.0). For now, ignore floating points and just do a
+ // bit-width and sign agnostic integer compare.
+ struct APSIntCompare {
+ bool operator()(const llvm::APSInt &LHS, const llvm::APSInt &RHS) const {
+ return llvm::APSInt::compareValues(LHS, RHS) < 0;
+ }
+ };
+
+ llvm::DenseMap<StringRef, SourceLocation> StringKeys;
+ std::map<llvm::APSInt, SourceLocation, APSIntCompare> IntegralKeys;
+
+ auto checkOneKey = [&](auto &Map, const auto &Key, SourceLocation Loc) {
+ auto Pair = Map.insert({Key, Loc});
+ if (!Pair.second) {
+ S.Diag(Loc, diag::warn_nsdictionary_duplicate_key);
+ S.Diag(Pair.first->second, diag::note_nsdictionary_duplicate_key_here);
+ }
+ };
+
+ for (unsigned Idx = 0, End = Literal->getNumElements(); Idx != End; ++Idx) {
+ Expr *Key = Literal->getKeyValueElement(Idx).Key->IgnoreParenImpCasts();
+
+ if (auto *StrLit = dyn_cast<ObjCStringLiteral>(Key)) {
+ StringRef Bytes = StrLit->getString()->getBytes();
+ SourceLocation Loc = StrLit->getExprLoc();
+ checkOneKey(StringKeys, Bytes, Loc);
+ }
+
+ if (auto *BE = dyn_cast<ObjCBoxedExpr>(Key)) {
+ Expr *Boxed = BE->getSubExpr();
+ SourceLocation Loc = BE->getExprLoc();
+
+ // Check for @("foo").
+ if (auto *Str = dyn_cast<StringLiteral>(Boxed->IgnoreParenImpCasts())) {
+ checkOneKey(StringKeys, Str->getBytes(), Loc);
+ continue;
+ }
+
+ Expr::EvalResult Result;
+ if (Boxed->EvaluateAsInt(Result, S.getASTContext(),
+ Expr::SE_AllowSideEffects)) {
+ checkOneKey(IntegralKeys, Result.Val.getInt(), Loc);
+ }
+ }
+ }
+}
+
ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements) {
SourceLocation Loc = SR.getBegin();
@@ -1061,12 +1117,14 @@ ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR,
HasPackExpansions = true;
}
- QualType Ty
- = Context.getObjCObjectPointerType(
- Context.getObjCInterfaceType(NSDictionaryDecl));
- return MaybeBindToTemporary(ObjCDictionaryLiteral::Create(
- Context, Elements, HasPackExpansions, Ty,
- DictionaryWithObjectsMethod, SR));
+ QualType Ty = Context.getObjCObjectPointerType(
+ Context.getObjCInterfaceType(NSDictionaryDecl));
+
+ auto *Literal =
+ ObjCDictionaryLiteral::Create(Context, Elements, HasPackExpansions, Ty,
+ DictionaryWithObjectsMethod, SR);
+ CheckObjCDictionaryLiteralDuplicateKeys(*this, Literal);
+ return MaybeBindToTemporary(Literal);
}
ExprResult Sema::BuildObjCEncodeExpression(SourceLocation AtLoc,
@@ -1170,33 +1228,66 @@ static void DiagnoseMismatchedSelectors(Sema &S, SourceLocation AtLoc,
}
}
-static void HelperToDiagnoseDirectSelectorsExpr(Sema &S, SourceLocation AtLoc,
- Selector Sel,
- ObjCMethodList &MethList,
- bool &onlyDirect) {
+static ObjCMethodDecl *LookupDirectMethodInMethodList(Sema &S, Selector Sel,
+ ObjCMethodList &MethList,
+ bool &onlyDirect,
+ bool &anyDirect) {
+ (void)Sel;
ObjCMethodList *M = &MethList;
- for (M = M->getNext(); M; M = M->getNext()) {
+ ObjCMethodDecl *DirectMethod = nullptr;
+ for (; M; M = M->getNext()) {
ObjCMethodDecl *Method = M->getMethod();
- if (Method->getSelector() != Sel)
+ if (!Method)
continue;
- if (!Method->isDirectMethod())
+ assert(Method->getSelector() == Sel && "Method with wrong selector in method list");
+ if (Method->isDirectMethod()) {
+ anyDirect = true;
+ DirectMethod = Method;
+ } else
onlyDirect = false;
}
+
+ return DirectMethod;
}
-static void DiagnoseDirectSelectorsExpr(Sema &S, SourceLocation AtLoc,
- Selector Sel, bool &onlyDirect) {
- for (Sema::GlobalMethodPool::iterator b = S.MethodPool.begin(),
- e = S.MethodPool.end(); b != e; b++) {
- // first, instance methods
- ObjCMethodList &InstMethList = b->second.first;
- HelperToDiagnoseDirectSelectorsExpr(S, AtLoc, Sel, InstMethList,
- onlyDirect);
+// Search the global pool for (potentially) direct methods matching the given
+// selector. If a non-direct method is found, set \param onlyDirect to false. If
+// a direct method is found, set \param anyDirect to true. Returns a direct
+// method, if any.
+static ObjCMethodDecl *LookupDirectMethodInGlobalPool(Sema &S, Selector Sel,
+ bool &onlyDirect,
+ bool &anyDirect) {
+ auto Iter = S.MethodPool.find(Sel);
+ if (Iter == S.MethodPool.end())
+ return nullptr;
- // second, class methods
- ObjCMethodList &ClsMethList = b->second.second;
- HelperToDiagnoseDirectSelectorsExpr(S, AtLoc, Sel, ClsMethList, onlyDirect);
- }
+ ObjCMethodDecl *DirectInstance = LookupDirectMethodInMethodList(
+ S, Sel, Iter->second.first, onlyDirect, anyDirect);
+ ObjCMethodDecl *DirectClass = LookupDirectMethodInMethodList(
+ S, Sel, Iter->second.second, onlyDirect, anyDirect);
+
+ return DirectInstance ? DirectInstance : DirectClass;
+}
+
+static ObjCMethodDecl *findMethodInCurrentClass(Sema &S, Selector Sel) {
+ auto *CurMD = S.getCurMethodDecl();
+ if (!CurMD)
+ return nullptr;
+ ObjCInterfaceDecl *IFace = CurMD->getClassInterface();
+
+ // The language enforce that only one direct method is present in a given
+ // class, so we just need to find one method in the current class to know
+ // whether Sel is potentially direct in this context.
+ if (ObjCMethodDecl *MD = IFace->lookupMethod(Sel, /*isInstance=*/true))
+ return MD;
+ if (ObjCMethodDecl *MD = IFace->lookupPrivateMethod(Sel, /*isInstance=*/true))
+ return MD;
+ if (ObjCMethodDecl *MD = IFace->lookupMethod(Sel, /*isInstance=*/false))
+ return MD;
+ if (ObjCMethodDecl *MD = IFace->lookupPrivateMethod(Sel, /*isInstance=*/false))
+ return MD;
+
+ return nullptr;
}
ExprResult Sema::ParseObjCSelectorExpression(Selector Sel,
@@ -1222,15 +1313,38 @@ ExprResult Sema::ParseObjCSelectorExpression(Selector Sel,
} else
Diag(SelLoc, diag::warn_undeclared_selector) << Sel;
} else {
- bool onlyDirect = Method->isDirectMethod();
- DiagnoseDirectSelectorsExpr(*this, AtLoc, Sel, onlyDirect);
DiagnoseMismatchedSelectors(*this, AtLoc, Method, LParenLoc, RParenLoc,
WarnMultipleSelectors);
+
+ bool onlyDirect = true;
+ bool anyDirect = false;
+ ObjCMethodDecl *GlobalDirectMethod =
+ LookupDirectMethodInGlobalPool(*this, Sel, onlyDirect, anyDirect);
+
if (onlyDirect) {
Diag(AtLoc, diag::err_direct_selector_expression)
<< Method->getSelector();
Diag(Method->getLocation(), diag::note_direct_method_declared_at)
<< Method->getDeclName();
+ } else if (anyDirect) {
+ // If we saw any direct methods, see if we see a direct member of the
+ // current class. If so, the @selector will likely be used to refer to
+ // this direct method.
+ ObjCMethodDecl *LikelyTargetMethod = findMethodInCurrentClass(*this, Sel);
+ if (LikelyTargetMethod && LikelyTargetMethod->isDirectMethod()) {
+ Diag(AtLoc, diag::warn_potentially_direct_selector_expression) << Sel;
+ Diag(LikelyTargetMethod->getLocation(),
+ diag::note_direct_method_declared_at)
+ << LikelyTargetMethod->getDeclName();
+ } else if (!LikelyTargetMethod) {
+ // Otherwise, emit the "strict" variant of this diagnostic, unless
+ // LikelyTargetMethod is non-direct.
+ Diag(AtLoc, diag::warn_strict_potentially_direct_selector_expression)
+ << Sel;
+ Diag(GlobalDirectMethod->getLocation(),
+ diag::note_direct_method_declared_at)
+ << GlobalDirectMethod->getDeclName();
+ }
}
}
@@ -1953,7 +2067,8 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
if (const ObjCPropertyDecl *PDecl = Setter->findPropertyDecl()) {
// Do not warn if user is using property-dot syntax to make call to
// user named setter.
- if (!(PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter))
+ if (!(PDecl->getPropertyAttributes() &
+ ObjCPropertyAttribute::kind_setter))
Diag(MemberLoc,
diag::warn_property_access_suggest)
<< MemberName << QualType(OPT, 0) << PDecl->getName()
@@ -2570,6 +2685,16 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
diag::err_illegal_message_expr_incomplete_type))
return ExprError();
+ if (Method && Method->isDirectMethod() && SuperLoc.isValid()) {
+ Diag(SuperLoc, diag::err_messaging_super_with_direct_method)
+ << FixItHint::CreateReplacement(
+ SuperLoc, getLangOpts().ObjCAutoRefCount
+ ? "self"
+ : Method->getClassInterface()->getName());
+ Diag(Method->getLocation(), diag::note_direct_method_declared_at)
+ << Method->getDeclName();
+ }
+
// Warn about explicit call of +initialize on its own class. But not on 'super'.
if (Method && Method->getMethodFamily() == OMF_initialize) {
if (!SuperLoc.isValid()) {
@@ -2774,9 +2899,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
ReceiverType->isIntegerType())) {
// Implicitly convert integers and pointers to 'id' but emit a warning.
// But not in ARC.
- Diag(Loc, diag::warn_bad_receiver_type)
- << ReceiverType
- << Receiver->getSourceRange();
+ Diag(Loc, diag::warn_bad_receiver_type) << ReceiverType << RecRange;
if (ReceiverType->isPointerType()) {
Receiver = ImpCastExprToType(Receiver, Context.getObjCIdType(),
CK_CPointerToObjCPointerCast).get();
@@ -2927,11 +3050,10 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
// definition is found in a module that's not visible.
const ObjCInterfaceDecl *forwardClass = nullptr;
if (RequireCompleteType(Loc, OCIType->getPointeeType(),
- getLangOpts().ObjCAutoRefCount
- ? diag::err_arc_receiver_forward_instance
- : diag::warn_receiver_forward_instance,
- Receiver? Receiver->getSourceRange()
- : SourceRange(SuperLoc))) {
+ getLangOpts().ObjCAutoRefCount
+ ? diag::err_arc_receiver_forward_instance
+ : diag::warn_receiver_forward_instance,
+ RecRange)) {
if (getLangOpts().ObjCAutoRefCount)
return ExprError();
@@ -2993,8 +3115,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
return ExprError();
} else {
// Reject other random receiver types (e.g. structs).
- Diag(Loc, diag::err_bad_receiver_type)
- << ReceiverType << Receiver->getSourceRange();
+ Diag(Loc, diag::err_bad_receiver_type) << ReceiverType << RecRange;
return ExprError();
}
}
@@ -3012,15 +3133,35 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
<< Method->getDeclName();
}
- if (ReceiverType->isObjCClassType() && !isImplicit) {
- Diag(Receiver->getExprLoc(),
- diag::err_messaging_class_with_direct_method);
+ // Under ARC, self can't be assigned, and doing a direct call to `self`
+ // when it's a Class is hence safe. For other cases, we can't trust `self`
+ // is what we think it is, so we reject it.
+ if (ReceiverType->isObjCClassType() && !isImplicit &&
+ !(Receiver->isObjCSelfExpr() && getLangOpts().ObjCAutoRefCount)) {
+ {
+ DiagnosticBuilder Builder =
+ Diag(Receiver->getExprLoc(),
+ diag::err_messaging_class_with_direct_method);
+ if (Receiver->isObjCSelfExpr()) {
+ Builder.AddFixItHint(FixItHint::CreateReplacement(
+ RecRange, Method->getClassInterface()->getName()));
+ }
+ }
Diag(Method->getLocation(), diag::note_direct_method_declared_at)
<< Method->getDeclName();
}
if (SuperLoc.isValid()) {
- Diag(SuperLoc, diag::err_messaging_super_with_direct_method);
+ {
+ DiagnosticBuilder Builder =
+ Diag(SuperLoc, diag::err_messaging_super_with_direct_method);
+ if (ReceiverType->isObjCClassType()) {
+ Builder.AddFixItHint(FixItHint::CreateReplacement(
+ SuperLoc, Method->getClassInterface()->getName()));
+ } else {
+ Builder.AddFixItHint(FixItHint::CreateReplacement(SuperLoc, "self"));
+ }
+ }
Diag(Method->getLocation(), diag::note_direct_method_declared_at)
<< Method->getDeclName();
}
@@ -3232,7 +3373,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
if (!isImplicit && Method) {
if (const ObjCPropertyDecl *Prop = Method->findPropertyDecl()) {
bool IsWeak =
- Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak;
+ Prop->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak;
if (!IsWeak && Sel.isUnarySelector())
IsWeak = ReturnType.getObjCLifetime() & Qualifiers::OCL_Weak;
if (IsWeak && !isUnevaluatedContext() &&
@@ -4337,7 +4478,7 @@ Sema::CheckObjCConversion(SourceRange castRange, QualType castType,
// to 'NSString *', instead of falling through to report a "bridge cast"
// diagnostic.
if (castACTC == ACTC_retainable && exprACTC == ACTC_none &&
- ConversionToObjCStringLiteralCheck(castType, castExpr, Diagnose))
+ CheckConversionToObjCLiteral(castType, castExpr, Diagnose))
return ACR_error;
// Do not issue "bridge cast" diagnostic when implicit casting
@@ -4400,9 +4541,10 @@ Expr *Sema::stripARCUnbridgedCast(Expr *e) {
} else if (UnaryOperator *uo = dyn_cast<UnaryOperator>(e)) {
assert(uo->getOpcode() == UO_Extension);
Expr *sub = stripARCUnbridgedCast(uo->getSubExpr());
- return new (Context)
- UnaryOperator(sub, UO_Extension, sub->getType(), sub->getValueKind(),
- sub->getObjectKind(), uo->getOperatorLoc(), false);
+ return UnaryOperator::Create(Context, sub, UO_Extension, sub->getType(),
+ sub->getValueKind(), sub->getObjectKind(),
+ uo->getOperatorLoc(), false,
+ CurFPFeatureOverrides());
} else if (GenericSelectionExpr *gse = dyn_cast<GenericSelectionExpr>(e)) {
assert(!gse->isResultDependent());
diff --git a/clang/lib/Sema/SemaInit.cpp b/clang/lib/Sema/SemaInit.cpp
index 785637761e71..eb07de65d266 100644
--- a/clang/lib/Sema/SemaInit.cpp
+++ b/clang/lib/Sema/SemaInit.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/Designator.h"
#include "clang/Sema/Initialization.h"
@@ -1092,7 +1093,7 @@ void InitListChecker::CheckImplicitInitList(const InitializedEntity &Entity,
auto *CXXRD = T->getAsCXXRecordDecl();
if (!VerifyOnly && CXXRD && CXXRD->hasUserDeclaredConstructor()) {
SemaRef.Diag(StructuredSubobjectInitList->getBeginLoc(),
- diag::warn_cxx2a_compat_aggregate_init_with_ctors)
+ diag::warn_cxx20_compat_aggregate_init_with_ctors)
<< StructuredSubobjectInitList->getSourceRange() << T;
}
}
@@ -1118,14 +1119,14 @@ static void warnBracedScalarInit(Sema &S, const InitializedEntity &Entity,
case InitializedEntity::EK_Parameter_CF_Audited:
case InitializedEntity::EK_Result:
// Extra braces here are suspicious.
- DiagID = diag::warn_braces_around_scalar_init;
+ DiagID = diag::warn_braces_around_init;
break;
case InitializedEntity::EK_Member:
// Warn on aggregate initialization but not on ctor init list or
// default member initializer.
if (Entity.getParent())
- DiagID = diag::warn_braces_around_scalar_init;
+ DiagID = diag::warn_braces_around_init;
break;
case InitializedEntity::EK_Variable:
@@ -1156,9 +1157,9 @@ static void warnBracedScalarInit(Sema &S, const InitializedEntity &Entity,
if (DiagID) {
S.Diag(Braces.getBegin(), DiagID)
- << Braces
- << FixItHint::CreateRemoval(Braces.getBegin())
- << FixItHint::CreateRemoval(Braces.getEnd());
+ << Entity.getType()->isSizelessBuiltinType() << Braces
+ << FixItHint::CreateRemoval(Braces.getBegin())
+ << FixItHint::CreateRemoval(Braces.getEnd());
}
}
@@ -1202,6 +1203,12 @@ void InitListChecker::CheckExplicitInitList(const InitializedEntity &Entity,
: diag::ext_excess_initializers_in_char_array_initializer;
SemaRef.Diag(IList->getInit(Index)->getBeginLoc(), DK)
<< IList->getInit(Index)->getSourceRange();
+ } else if (T->isSizelessBuiltinType()) {
+ unsigned DK = ExtraInitsIsError
+ ? diag::err_excess_initializers_for_sizeless_type
+ : diag::ext_excess_initializers_for_sizeless_type;
+ SemaRef.Diag(IList->getInit(Index)->getBeginLoc(), DK)
+ << T << IList->getInit(Index)->getSourceRange();
} else {
int initKind = T->isArrayType() ? 0 :
T->isVectorType() ? 1 :
@@ -1235,7 +1242,7 @@ void InitListChecker::CheckExplicitInitList(const InitializedEntity &Entity,
if (!HasEquivCtor) {
SemaRef.Diag(IList->getBeginLoc(),
- diag::warn_cxx2a_compat_aggregate_init_with_ctors)
+ diag::warn_cxx20_compat_aggregate_init_with_ctors)
<< IList->getSourceRange() << T;
}
}
@@ -1294,7 +1301,8 @@ void InitListChecker::CheckListElementTypes(const InitializedEntity &Entity,
if (!VerifyOnly)
SemaRef.Diag(IList->getBeginLoc(), diag::err_init_objc_class) << DeclType;
hadError = true;
- } else if (DeclType->isOCLIntelSubgroupAVCType()) {
+ } else if (DeclType->isOCLIntelSubgroupAVCType() ||
+ DeclType->isSizelessBuiltinType()) {
// Checks for scalar type are sufficient for these types too.
CheckScalarType(Entity, IList, DeclType, Index, StructuredList,
StructuredIndex);
@@ -1507,12 +1515,20 @@ void InitListChecker::CheckScalarType(const InitializedEntity &Entity,
InitListExpr *StructuredList,
unsigned &StructuredIndex) {
if (Index >= IList->getNumInits()) {
- if (!VerifyOnly)
- SemaRef.Diag(IList->getBeginLoc(),
- SemaRef.getLangOpts().CPlusPlus11
- ? diag::warn_cxx98_compat_empty_scalar_initializer
- : diag::err_empty_scalar_initializer)
- << IList->getSourceRange();
+ if (!VerifyOnly) {
+ if (DeclType->isSizelessBuiltinType())
+ SemaRef.Diag(IList->getBeginLoc(),
+ SemaRef.getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_empty_sizeless_initializer
+ : diag::err_empty_sizeless_initializer)
+ << DeclType << IList->getSourceRange();
+ else
+ SemaRef.Diag(IList->getBeginLoc(),
+ SemaRef.getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_empty_scalar_initializer
+ : diag::err_empty_scalar_initializer)
+ << IList->getSourceRange();
+ }
hadError = !SemaRef.getLangOpts().CPlusPlus11;
++Index;
++StructuredIndex;
@@ -1524,17 +1540,18 @@ void InitListChecker::CheckScalarType(const InitializedEntity &Entity,
// FIXME: This is invalid, and accepting it causes overload resolution
// to pick the wrong overload in some corner cases.
if (!VerifyOnly)
- SemaRef.Diag(SubIList->getBeginLoc(),
- diag::ext_many_braces_around_scalar_init)
- << SubIList->getSourceRange();
+ SemaRef.Diag(SubIList->getBeginLoc(), diag::ext_many_braces_around_init)
+ << DeclType->isSizelessBuiltinType() << SubIList->getSourceRange();
CheckScalarType(Entity, SubIList, DeclType, Index, StructuredList,
StructuredIndex);
return;
} else if (isa<DesignatedInitExpr>(expr)) {
if (!VerifyOnly)
- SemaRef.Diag(expr->getBeginLoc(), diag::err_designator_for_scalar_init)
- << DeclType << expr->getSourceRange();
+ SemaRef.Diag(expr->getBeginLoc(),
+ diag::err_designator_for_scalar_or_sizeless_init)
+ << DeclType->isSizelessBuiltinType() << DeclType
+ << expr->getSourceRange();
hadError = true;
++Index;
++StructuredIndex;
@@ -1621,7 +1638,7 @@ void InitListChecker::CheckReferenceType(const InitializedEntity &Entity,
expr = Result.getAs<Expr>();
// FIXME: Why are we updating the syntactic init list?
- if (!VerifyOnly)
+ if (!VerifyOnly && expr)
IList->setInit(Index, expr);
if (hadError)
@@ -3477,6 +3494,7 @@ bool InitializationSequence::isAmbiguous() const {
case FK_NonConstLValueReferenceBindingToTemporary:
case FK_NonConstLValueReferenceBindingToBitfield:
case FK_NonConstLValueReferenceBindingToVectorElement:
+ case FK_NonConstLValueReferenceBindingToMatrixElement:
case FK_NonConstLValueReferenceBindingToUnrelated:
case FK_RValueReferenceBindingToLValue:
case FK_ReferenceAddrspaceMismatchTemporary:
@@ -4420,16 +4438,20 @@ static void TryListInitialization(Sema &S,
// direct-list-initialization and copy-initialization otherwise.
// We can't use InitListChecker for this, because it always performs
// copy-initialization. This only matters if we might use an 'explicit'
- // conversion operator, so we only need to handle the cases where the source
- // is of record type.
- if (InitList->getInit(0)->getType()->isRecordType()) {
+ // conversion operator, or for the special case conversion of nullptr_t to
+ // bool, so we only need to handle those cases.
+ //
+ // FIXME: Why not do this in all cases?
+ Expr *Init = InitList->getInit(0);
+ if (Init->getType()->isRecordType() ||
+ (Init->getType()->isNullPtrType() && DestType->isBooleanType())) {
InitializationKind SubKind =
Kind.getKind() == InitializationKind::IK_DirectList
? InitializationKind::CreateDirect(Kind.getLocation(),
InitList->getLBraceLoc(),
InitList->getRBraceLoc())
: Kind;
- Expr *SubInit[1] = { InitList->getInit(0) };
+ Expr *SubInit[1] = { Init };
Sequence.InitializeFrom(S, Entity, SubKind, SubInit,
/*TopLevelOfInitList*/true,
TreatUnavailableAsInvalid);
@@ -4666,10 +4688,14 @@ static void TryReferenceInitialization(Sema &S,
/// which a reference can never bind). Attempting to bind a reference to
/// such a glvalue will always create a temporary.
static bool isNonReferenceableGLValue(Expr *E) {
- return E->refersToBitField() || E->refersToVectorElement();
+ return E->refersToBitField() || E->refersToVectorElement() ||
+ E->refersToMatrixElement();
}
/// Reference initialization without resolving overloaded functions.
+///
+/// We also can get here in C if we call a builtin which is declared as
+/// a function with a parameter of reference type (such as __builtin_va_end()).
static void TryReferenceInitializationCore(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
@@ -4746,15 +4772,20 @@ static void TryReferenceInitializationCore(Sema &S,
// an rvalue. DR1287 removed the "implicitly" here.
if (RefRelationship == Sema::Ref_Incompatible && T2->isRecordType() &&
(isLValueRef || InitCategory.isRValue())) {
- ConvOvlResult = TryRefInitWithConversionFunction(
- S, Entity, Kind, Initializer, /*AllowRValues*/ isRValueRef,
- /*IsLValueRef*/ isLValueRef, Sequence);
- if (ConvOvlResult == OR_Success)
- return;
- if (ConvOvlResult != OR_No_Viable_Function)
- Sequence.SetOverloadFailure(
- InitializationSequence::FK_ReferenceInitOverloadFailed,
- ConvOvlResult);
+ if (S.getLangOpts().CPlusPlus) {
+ // Try conversion functions only for C++.
+ ConvOvlResult = TryRefInitWithConversionFunction(
+ S, Entity, Kind, Initializer, /*AllowRValues*/ isRValueRef,
+ /*IsLValueRef*/ isLValueRef, Sequence);
+ if (ConvOvlResult == OR_Success)
+ return;
+ if (ConvOvlResult != OR_No_Viable_Function)
+ Sequence.SetOverloadFailure(
+ InitializationSequence::FK_ReferenceInitOverloadFailed,
+ ConvOvlResult);
+ } else {
+ ConvOvlResult = OR_No_Viable_Function;
+ }
}
}
@@ -4787,6 +4818,9 @@ static void TryReferenceInitializationCore(Sema &S,
else if (Initializer->refersToVectorElement())
FK = InitializationSequence::
FK_NonConstLValueReferenceBindingToVectorElement;
+ else if (Initializer->refersToMatrixElement())
+ FK = InitializationSequence::
+ FK_NonConstLValueReferenceBindingToMatrixElement;
else
llvm_unreachable("unexpected kind of compatible initializer");
break;
@@ -4924,7 +4958,7 @@ static void TryReferenceInitializationCore(Sema &S,
ImplicitConversionSequence ICS
= S.TryImplicitConversion(Initializer, TempEntity.getType(),
/*SuppressUserConversions=*/false,
- /*AllowExplicit=*/false,
+ Sema::AllowedExplicit::None,
/*FIXME:InOverloadResolution=*/false,
/*CStyle=*/Kind.isCStyleOrFunctionalCast(),
/*AllowObjCWritebackConversion=*/false);
@@ -5620,7 +5654,7 @@ void InitializationSequence::InitializeFrom(Sema &S,
if (S.CheckObjCBridgeRelatedConversions(Initializer->getBeginLoc(),
DestType, Initializer->getType(),
Initializer) ||
- S.ConversionToObjCStringLiteralCheck(DestType, Initializer))
+ S.CheckConversionToObjCLiteral(DestType, Initializer))
Args[0] = Initializer;
}
if (!isa<InitListExpr>(Initializer))
@@ -5854,6 +5888,19 @@ void InitializationSequence::InitializeFrom(Sema &S,
return;
}
+ // - Otherwise, if the initialization is direct-initialization, the source
+ // type is std::nullptr_t, and the destination type is bool, the initial
+ // value of the object being initialized is false.
+ if (!SourceType.isNull() && SourceType->isNullPtrType() &&
+ DestType->isBooleanType() &&
+ Kind.getKind() == InitializationKind::IK_Direct) {
+ AddConversionSequenceStep(
+ ImplicitConversionSequence::getNullptrToBool(SourceType, DestType,
+ Initializer->isGLValue()),
+ DestType);
+ return;
+ }
+
// - Otherwise, the initial value of the object being initialized is the
// (possibly converted) value of the initializer expression. Standard
// conversions (Clause 4) will be used, if necessary, to convert the
@@ -5863,7 +5910,7 @@ void InitializationSequence::InitializeFrom(Sema &S,
ImplicitConversionSequence ICS
= S.TryImplicitConversion(Initializer, DestType,
/*SuppressUserConversions*/true,
- /*AllowExplicitConversions*/ false,
+ Sema::AllowedExplicit::None,
/*InOverloadResolution*/ false,
/*CStyle=*/Kind.isCStyleOrFunctionalCast(),
allowObjCWritebackConversion);
@@ -6416,12 +6463,14 @@ PerformConstructorInitialization(Sema &S,
}
S.MarkFunctionReferenced(Loc, Constructor);
- CurInit = CXXTemporaryObjectExpr::Create(
- S.Context, Constructor,
- Entity.getType().getNonLValueExprType(S.Context), TSInfo,
- ConstructorArgs, ParenOrBraceRange, HadMultipleCandidates,
- IsListInitialization, IsStdInitListInitialization,
- ConstructorInitRequiresZeroInit);
+ CurInit = S.CheckForImmediateInvocation(
+ CXXTemporaryObjectExpr::Create(
+ S.Context, Constructor,
+ Entity.getType().getNonLValueExprType(S.Context), TSInfo,
+ ConstructorArgs, ParenOrBraceRange, HadMultipleCandidates,
+ IsListInitialization, IsStdInitListInitialization,
+ ConstructorInitRequiresZeroInit),
+ Constructor);
} else {
CXXConstructExpr::ConstructionKind ConstructKind =
CXXConstructExpr::CK_Complete;
@@ -8159,9 +8208,13 @@ ExprResult InitializationSequence::Perform(Sema &S,
if (const auto *ToPtrType = Step->Type->getAs<PointerType>()) {
if (FromPtrType->getPointeeType()->hasAttr(attr::NoDeref) &&
!ToPtrType->getPointeeType()->hasAttr(attr::NoDeref)) {
- S.Diag(CurInit.get()->getExprLoc(),
- diag::warn_noderef_to_dereferenceable_pointer)
- << CurInit.get()->getSourceRange();
+ // Do not check static casts here because they are checked earlier
+ // in Sema::ActOnCXXNamedCast()
+ if (!Kind.isStaticCast()) {
+ S.Diag(CurInit.get()->getExprLoc(),
+ diag::warn_noderef_to_dereferenceable_pointer)
+ << CurInit.get()->getSourceRange();
+ }
}
}
}
@@ -8762,7 +8815,7 @@ bool InitializationSequence::Diagnose(Sema &S,
case FK_UTF8StringIntoPlainChar:
S.Diag(Kind.getLocation(),
diag::err_array_init_utf8_string_into_char)
- << S.getLangOpts().CPlusPlus2a;
+ << S.getLangOpts().CPlusPlus20;
break;
case FK_ArrayTypeMismatch:
case FK_NonConstantArrayInit:
@@ -8889,6 +8942,11 @@ bool InitializationSequence::Diagnose(Sema &S,
<< Args[0]->getSourceRange();
break;
+ case FK_NonConstLValueReferenceBindingToMatrixElement:
+ S.Diag(Kind.getLocation(), diag::err_reference_bind_to_matrix_element)
+ << DestType.isVolatileQualified() << Args[0]->getSourceRange();
+ break;
+
case FK_RValueReferenceBindingToLValue:
S.Diag(Kind.getLocation(), diag::err_lvalue_to_rvalue_ref)
<< DestType.getNonReferenceType() << OnlyArg->getType()
@@ -9234,6 +9292,10 @@ void InitializationSequence::dump(raw_ostream &OS) const {
OS << "non-const lvalue reference bound to vector element";
break;
+ case FK_NonConstLValueReferenceBindingToMatrixElement:
+ OS << "non-const lvalue reference bound to matrix element";
+ break;
+
case FK_NonConstLValueReferenceBindingToUnrelated:
OS << "non-const lvalue reference bound to unrelated type";
break;
diff --git a/clang/lib/Sema/SemaLambda.cpp b/clang/lib/Sema/SemaLambda.cpp
index c2d14a44f53d..657ed13f207a 100644
--- a/clang/lib/Sema/SemaLambda.cpp
+++ b/clang/lib/Sema/SemaLambda.cpp
@@ -791,7 +791,8 @@ QualType Sema::buildLambdaInitCaptureInitialization(
// deduce against.
QualType DeductType = Context.getAutoDeductType();
TypeLocBuilder TLB;
- TLB.pushTypeSpec(DeductType).setNameLoc(Loc);
+ AutoTypeLoc TL = TLB.push<AutoTypeLoc>(DeductType);
+ TL.setNameLoc(Loc);
if (ByRef) {
DeductType = BuildReferenceType(DeductType, true, Loc, Id);
assert(!DeductType.isNull() && "can't build reference to auto");
@@ -799,7 +800,7 @@ QualType Sema::buildLambdaInitCaptureInitialization(
}
if (EllipsisLoc.isValid()) {
if (Init->containsUnexpandedParameterPack()) {
- Diag(EllipsisLoc, getLangOpts().CPlusPlus2a
+ Diag(EllipsisLoc, getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_init_capture_pack
: diag::ext_init_capture_pack);
DeductType = Context.getPackExpansionType(DeductType, NumExpansions);
@@ -989,8 +990,7 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
// Attributes on the lambda apply to the method.
ProcessDeclAttributes(CurScope, Method, ParamInfo);
- // CUDA lambdas get implicit attributes based on the scope in which they're
- // declared.
+ // CUDA lambdas get implicit host and device attributes.
if (getLangOpts().CUDA)
CUDASetLambdaAttrs(Method);
@@ -1052,8 +1052,8 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
// "&identifier", "this", or "* this". [ Note: The form [&,this] is
// redundant but accepted for compatibility with ISO C++14. --end note ]
if (Intro.Default == LCD_ByCopy && C->Kind != LCK_StarThis)
- Diag(C->Loc, !getLangOpts().CPlusPlus2a
- ? diag::ext_equals_this_lambda_capture_cxx2a
+ Diag(C->Loc, !getLangOpts().CPlusPlus20
+ ? diag::ext_equals_this_lambda_capture_cxx20
: diag::warn_cxx17_compat_equals_this_lambda_capture);
// C++11 [expr.prim.lambda]p12:
@@ -1233,7 +1233,9 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
// Enter a new evaluation context to insulate the lambda from any
// cleanups from the enclosing full-expression.
PushExpressionEvaluationContext(
- ExpressionEvaluationContext::PotentiallyEvaluated);
+ LSI->CallOperator->isConsteval()
+ ? ExpressionEvaluationContext::ConstantEvaluated
+ : ExpressionEvaluationContext::PotentiallyEvaluated);
}
void Sema::ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
@@ -1626,7 +1628,8 @@ FieldDecl *Sema::BuildCaptureField(RecordDecl *RD,
// If the variable being captured has an invalid type, mark the class as
// invalid as well.
if (!FieldType->isDependentType()) {
- if (RequireCompleteType(Loc, FieldType, diag::err_field_incomplete)) {
+ if (RequireCompleteSizedType(Loc, FieldType,
+ diag::err_field_incomplete_or_sizeless)) {
RD->setInvalidDecl();
Field->setInvalidDecl();
} else {
@@ -1744,7 +1747,7 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
// Capturing 'this' implicitly with a default of '[=]' is deprecated,
// because it results in a reference capture. Don't warn prior to
// C++2a; there's nothing that can be done about it before then.
- if (getLangOpts().CPlusPlus2a && IsImplicit &&
+ if (getLangOpts().CPlusPlus20 && IsImplicit &&
CaptureDefault == LCD_ByCopy) {
Diag(From.getLocation(), diag::warn_deprecated_this_capture);
Diag(CaptureDefaultLoc, diag::note_deprecated_this_capture)
@@ -1776,8 +1779,13 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
BuildCaptureField(Class, From);
Captures.push_back(Capture);
CaptureInits.push_back(Init.get());
+
+ if (LangOpts.CUDA)
+ CUDACheckLambdaCapture(CallOperator, From);
}
+ Class->setCaptures(Captures);
+
// C++11 [expr.prim.lambda]p6:
// The closure type for a lambda-expression with no lambda-capture
// has a public non-virtual non-explicit const conversion function
@@ -1807,7 +1815,6 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
LambdaExpr *Lambda = LambdaExpr::Create(Context, Class, IntroducerRange,
CaptureDefault, CaptureDefaultLoc,
- Captures,
ExplicitParams, ExplicitResultType,
CaptureInits, EndLoc,
ContainsUnexpandedParameterPack);
diff --git a/clang/lib/Sema/SemaLookup.cpp b/clang/lib/Sema/SemaLookup.cpp
index 0ed51de0cc13..5757eaf3fac0 100644
--- a/clang/lib/Sema/SemaLookup.cpp
+++ b/clang/lib/Sema/SemaLookup.cpp
@@ -215,6 +215,7 @@ static inline unsigned getIDNS(Sema::LookupNameKind NameKind,
case Sema::LookupOrdinaryName:
case Sema::LookupRedeclarationWithLinkage:
case Sema::LookupLocalFriendName:
+ case Sema::LookupDestructorName:
IDNS = Decl::IDNS_Ordinary;
if (CPlusPlus) {
IDNS |= Decl::IDNS_Tag | Decl::IDNS_Member | Decl::IDNS_Namespace;
@@ -378,11 +379,14 @@ static bool isPreferredLookupResult(Sema &S, Sema::LookupNameKind Kind,
// type), per a generous reading of C++ [dcl.typedef]p3 and p4. The typedef
// might carry additional semantic information, such as an alignment override.
// However, per C++ [dcl.typedef]p5, when looking up a tag name, prefer a tag
- // declaration over a typedef.
+ // declaration over a typedef. Also prefer a tag over a typedef for
+ // destructor name lookup because in some contexts we only accept a
+ // class-name in a destructor declaration.
if (DUnderlying->getCanonicalDecl() != EUnderlying->getCanonicalDecl()) {
assert(isa<TypeDecl>(DUnderlying) && isa<TypeDecl>(EUnderlying));
bool HaveTag = isa<TagDecl>(EUnderlying);
- bool WantTag = Kind == Sema::LookupTagName;
+ bool WantTag =
+ Kind == Sema::LookupTagName || Kind == Sema::LookupDestructorName;
return HaveTag != WantTag;
}
@@ -1149,73 +1153,14 @@ static bool isNamespaceOrTranslationUnitScope(Scope *S) {
return false;
}
-// Find the next outer declaration context from this scope. This
-// routine actually returns the semantic outer context, which may
-// differ from the lexical context (encoded directly in the Scope
-// stack) when we are parsing a member of a class template. In this
-// case, the second element of the pair will be true, to indicate that
-// name lookup should continue searching in this semantic context when
-// it leaves the current template parameter scope.
-static std::pair<DeclContext *, bool> findOuterContext(Scope *S) {
- DeclContext *DC = S->getEntity();
- DeclContext *Lexical = nullptr;
- for (Scope *OuterS = S->getParent(); OuterS;
- OuterS = OuterS->getParent()) {
- if (OuterS->getEntity()) {
- Lexical = OuterS->getEntity();
- break;
- }
- }
-
- // C++ [temp.local]p8:
- // In the definition of a member of a class template that appears
- // outside of the namespace containing the class template
- // definition, the name of a template-parameter hides the name of
- // a member of this namespace.
- //
- // Example:
- //
- // namespace N {
- // class C { };
- //
- // template<class T> class B {
- // void f(T);
- // };
- // }
- //
- // template<class C> void N::B<C>::f(C) {
- // C b; // C is the template parameter, not N::C
- // }
- //
- // In this example, the lexical context we return is the
- // TranslationUnit, while the semantic context is the namespace N.
- if (!Lexical || !DC || !S->getParent() ||
- !S->getParent()->isTemplateParamScope())
- return std::make_pair(Lexical, false);
-
- // Find the outermost template parameter scope.
- // For the example, this is the scope for the template parameters of
- // template<class C>.
- Scope *OutermostTemplateScope = S->getParent();
- while (OutermostTemplateScope->getParent() &&
- OutermostTemplateScope->getParent()->isTemplateParamScope())
- OutermostTemplateScope = OutermostTemplateScope->getParent();
-
- // Find the namespace context in which the original scope occurs. In
- // the example, this is namespace N.
- DeclContext *Semantic = DC;
- while (!Semantic->isFileContext())
- Semantic = Semantic->getParent();
-
- // Find the declaration context just outside of the template
- // parameter scope. This is the context in which the template is
- // being lexically declaration (a namespace context). In the
- // example, this is the global scope.
- if (Lexical->isFileContext() && !Lexical->Equals(Semantic) &&
- Lexical->Encloses(Semantic))
- return std::make_pair(Semantic, true);
-
- return std::make_pair(Lexical, false);
+/// Find the outer declaration context from this scope. This indicates the
+/// context that we should search up to (exclusive) before considering the
+/// parent of the specified scope.
+static DeclContext *findOuterContext(Scope *S) {
+ for (Scope *OuterS = S->getParent(); OuterS; OuterS = OuterS->getParent())
+ if (DeclContext *DC = OuterS->getLookupEntity())
+ return DC;
+ return nullptr;
}
namespace {
@@ -1282,13 +1227,11 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
UnqualUsingDirectiveSet UDirs(*this);
bool VisitedUsingDirectives = false;
bool LeftStartingScope = false;
- DeclContext *OutsideOfTemplateParamDC = nullptr;
// When performing a scope lookup, we want to find local extern decls.
FindLocalExternScope FindLocals(R);
for (; S && !isNamespaceOrTranslationUnitScope(S); S = S->getParent()) {
- DeclContext *Ctx = S->getEntity();
bool SearchNamespaceScope = true;
// Check whether the IdResolver has anything in this scope.
for (; I != IEnd && S->isDeclScope(*I); ++I) {
@@ -1320,7 +1263,8 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
if (!SearchNamespaceScope) {
R.resolveKind();
if (S->isClassScope())
- if (CXXRecordDecl *Record = dyn_cast_or_null<CXXRecordDecl>(Ctx))
+ if (CXXRecordDecl *Record =
+ dyn_cast_or_null<CXXRecordDecl>(S->getEntity()))
R.setNamingClass(Record);
return true;
}
@@ -1334,24 +1278,8 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
return false;
}
- if (!Ctx && S->isTemplateParamScope() && OutsideOfTemplateParamDC &&
- S->getParent() && !S->getParent()->isTemplateParamScope()) {
- // We've just searched the last template parameter scope and
- // found nothing, so look into the contexts between the
- // lexical and semantic declaration contexts returned by
- // findOuterContext(). This implements the name lookup behavior
- // of C++ [temp.local]p8.
- Ctx = OutsideOfTemplateParamDC;
- OutsideOfTemplateParamDC = nullptr;
- }
-
- if (Ctx) {
- DeclContext *OuterCtx;
- bool SearchAfterTemplateScope;
- std::tie(OuterCtx, SearchAfterTemplateScope) = findOuterContext(S);
- if (SearchAfterTemplateScope)
- OutsideOfTemplateParamDC = OuterCtx;
-
+ if (DeclContext *Ctx = S->getLookupEntity()) {
+ DeclContext *OuterCtx = findOuterContext(S);
for (; Ctx && !Ctx->Equals(OuterCtx); Ctx = Ctx->getLookupParent()) {
// We do not directly look into transparent contexts, since
// those entities will be found in the nearest enclosing
@@ -1476,25 +1404,9 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
return true;
}
- DeclContext *Ctx = S->getEntity();
- if (!Ctx && S->isTemplateParamScope() && OutsideOfTemplateParamDC &&
- S->getParent() && !S->getParent()->isTemplateParamScope()) {
- // We've just searched the last template parameter scope and
- // found nothing, so look into the contexts between the
- // lexical and semantic declaration contexts returned by
- // findOuterContext(). This implements the name lookup behavior
- // of C++ [temp.local]p8.
- Ctx = OutsideOfTemplateParamDC;
- OutsideOfTemplateParamDC = nullptr;
- }
-
+ DeclContext *Ctx = S->getLookupEntity();
if (Ctx) {
- DeclContext *OuterCtx;
- bool SearchAfterTemplateScope;
- std::tie(OuterCtx, SearchAfterTemplateScope) = findOuterContext(S);
- if (SearchAfterTemplateScope)
- OutsideOfTemplateParamDC = OuterCtx;
-
+ DeclContext *OuterCtx = findOuterContext(S);
for (; Ctx && !Ctx->Equals(OuterCtx); Ctx = Ctx->getLookupParent()) {
// We do not directly look into transparent contexts, since
// those entities will be found in the nearest enclosing
@@ -1575,7 +1487,9 @@ llvm::DenseSet<Module*> &Sema::getLookupModules() {
unsigned N = CodeSynthesisContexts.size();
for (unsigned I = CodeSynthesisContextLookupModules.size();
I != N; ++I) {
- Module *M = getDefiningModule(*this, CodeSynthesisContexts[I].Entity);
+ Module *M = CodeSynthesisContexts[I].Entity ?
+ getDefiningModule(*this, CodeSynthesisContexts[I].Entity) :
+ nullptr;
if (M && !LookupModulesCache.insert(M).second)
M = nullptr;
CodeSynthesisContextLookupModules.push_back(M);
@@ -1704,7 +1618,8 @@ bool Sema::hasVisibleMemberSpecialization(
/// path (by instantiating a template, you allow it to see the declarations that
/// your module can see, including those later on in your module).
bool LookupResult::isVisibleSlow(Sema &SemaRef, NamedDecl *D) {
- assert(D->isHidden() && "should not call this: not in slow case");
+ assert(!D->isUnconditionallyVisible() &&
+ "should not call this: not in slow case");
Module *DeclModule = SemaRef.getOwningModule(D);
assert(DeclModule && "hidden decl has no owning module");
@@ -2295,6 +2210,7 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
case LookupMemberName:
case LookupRedeclarationWithLinkage:
case LookupLocalFriendName:
+ case LookupDestructorName:
BaseCallback = &CXXRecordDecl::FindOrdinaryMember;
break;
@@ -2959,7 +2875,9 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) {
// These are fundamental types.
case Type::Vector:
case Type::ExtVector:
+ case Type::ConstantMatrix:
case Type::Complex:
+ case Type::ExtInt:
break;
// Non-deduced auto types only get here for error cases.
@@ -3985,14 +3903,12 @@ private:
}
}
- // FIXME: C++ [temp.local]p8
- DeclContext *Entity = nullptr;
- if (S->getEntity()) {
+ DeclContext *Entity = S->getLookupEntity();
+ if (Entity) {
// Look into this scope's declaration context, along with any of its
// parent lookup contexts (e.g., enclosing classes), up to the point
// where we hit the context stored in the next outer scope.
- Entity = S->getEntity();
- DeclContext *OuterCtx = findOuterContext(S).first; // FIXME
+ DeclContext *OuterCtx = findOuterContext(S);
for (DeclContext *Ctx = Entity; Ctx && !Ctx->Equals(OuterCtx);
Ctx = Ctx->getLookupParent()) {
@@ -5158,9 +5074,9 @@ TypoExpr *Sema::CorrectTypoDelayed(
IdentifierInfo *Typo = TypoName.getName().getAsIdentifierInfo();
if (!ExternalTypo && ED > 0 && Typo->getName().size() / ED < 3)
return nullptr;
-
ExprEvalContexts.back().NumTypos++;
- return createDelayedTypo(std::move(Consumer), std::move(TDG), std::move(TRC));
+ return createDelayedTypo(std::move(Consumer), std::move(TDG), std::move(TRC),
+ TypoName.getLoc());
}
void TypoCorrection::addCorrectionDecl(NamedDecl *CDecl) {
@@ -5342,9 +5258,8 @@ void Sema::diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
/// Get a "quoted.h" or <angled.h> include path to use in a diagnostic
/// suggesting the addition of a #include of the specified file.
-static std::string getIncludeStringForHeader(Preprocessor &PP,
- const FileEntry *E,
- llvm::StringRef IncludingFile) {
+static std::string getHeaderNameForHeader(Preprocessor &PP, const FileEntry *E,
+ llvm::StringRef IncludingFile) {
bool IsSystem = false;
auto Path = PP.getHeaderSearchInfo().suggestPathToFileForDiagnostics(
E, IncludingFile, &IsSystem);
@@ -5358,25 +5273,10 @@ void Sema::diagnoseMissingImport(SourceLocation UseLoc, NamedDecl *Decl,
assert(!Modules.empty());
auto NotePrevious = [&] {
- unsigned DiagID;
- switch (MIK) {
- case MissingImportKind::Declaration:
- DiagID = diag::note_previous_declaration;
- break;
- case MissingImportKind::Definition:
- DiagID = diag::note_previous_definition;
- break;
- case MissingImportKind::DefaultArgument:
- DiagID = diag::note_default_argument_declared_here;
- break;
- case MissingImportKind::ExplicitSpecialization:
- DiagID = diag::note_explicit_specialization_declared_here;
- break;
- case MissingImportKind::PartialSpecialization:
- DiagID = diag::note_partial_specialization_declared_here;
- break;
- }
- Diag(DeclLoc, DiagID);
+ // FIXME: Suppress the note backtrace even under
+ // -fdiagnostics-show-note-include-stack. We don't care how this
+ // declaration was previously reached.
+ Diag(DeclLoc, diag::note_unreachable_entity) << (int)MIK;
};
// Weed out duplicates from module list.
@@ -5389,26 +5289,24 @@ void Sema::diagnoseMissingImport(SourceLocation UseLoc, NamedDecl *Decl,
UniqueModules.push_back(M);
}
- llvm::StringRef IncludingFile;
- if (const FileEntry *FE =
- SourceMgr.getFileEntryForID(SourceMgr.getFileID(UseLoc)))
- IncludingFile = FE->tryGetRealPathName();
+ // Try to find a suitable header-name to #include.
+ std::string HeaderName;
+ if (const FileEntry *Header =
+ PP.getHeaderToIncludeForDiagnostics(UseLoc, DeclLoc)) {
+ if (const FileEntry *FE =
+ SourceMgr.getFileEntryForID(SourceMgr.getFileID(UseLoc)))
+ HeaderName = getHeaderNameForHeader(PP, Header, FE->tryGetRealPathName());
+ }
- if (UniqueModules.empty()) {
- // All candidates were global module fragments. Try to suggest a #include.
- const FileEntry *E =
- PP.getModuleHeaderToIncludeForDiagnostics(UseLoc, Modules[0], DeclLoc);
+ // If we have a #include we should suggest, or if all definition locations
+ // were in global module fragments, don't suggest an import.
+ if (!HeaderName.empty() || UniqueModules.empty()) {
// FIXME: Find a smart place to suggest inserting a #include, and add
// a FixItHint there.
- Diag(UseLoc, diag::err_module_unimported_use_global_module_fragment)
- << (int)MIK << Decl << !!E
- << (E ? getIncludeStringForHeader(PP, E, IncludingFile) : "");
- // Produce a "previous" note if it will point to a header rather than some
- // random global module fragment.
- // FIXME: Suppress the note backtrace even under
- // -fdiagnostics-show-note-include-stack.
- if (E)
- NotePrevious();
+ Diag(UseLoc, diag::err_module_unimported_use_header)
+ << (int)MIK << Decl << !HeaderName.empty() << HeaderName;
+ // Produce a note showing where the entity was declared.
+ NotePrevious();
if (Recover)
createImplicitModuleImportForErrorRecovery(UseLoc, Modules[0]);
return;
@@ -5430,16 +5328,6 @@ void Sema::diagnoseMissingImport(SourceLocation UseLoc, NamedDecl *Decl,
Diag(UseLoc, diag::err_module_unimported_use_multiple)
<< (int)MIK << Decl << ModuleList;
- } else if (const FileEntry *E = PP.getModuleHeaderToIncludeForDiagnostics(
- UseLoc, Modules[0], DeclLoc)) {
- // The right way to make the declaration visible is to include a header;
- // suggest doing so.
- //
- // FIXME: Find a smart place to suggest inserting a #include, and add
- // a FixItHint there.
- Diag(UseLoc, diag::err_module_unimported_use_header)
- << (int)MIK << Decl << Modules[0]->getFullModuleName()
- << getIncludeStringForHeader(PP, E, IncludingFile);
} else {
// FIXME: Add a FixItHint that imports the corresponding module.
Diag(UseLoc, diag::err_module_unimported_use)
@@ -5500,9 +5388,10 @@ void Sema::diagnoseTypo(const TypoCorrection &Correction,
TypoExpr *Sema::createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
- TypoRecoveryCallback TRC) {
+ TypoRecoveryCallback TRC,
+ SourceLocation TypoLoc) {
assert(TCC && "createDelayedTypo requires a valid TypoCorrectionConsumer");
- auto TE = new (Context) TypoExpr(Context.DependentTy);
+ auto TE = new (Context) TypoExpr(Context.DependentTy, TypoLoc);
auto &State = DelayedTypos[TE];
State.Consumer = std::move(TCC);
State.DiagHandler = std::move(TDG);
diff --git a/clang/lib/Sema/SemaObjCProperty.cpp b/clang/lib/Sema/SemaObjCProperty.cpp
index f6717f4cbe5e..e301c62dd2c0 100644
--- a/clang/lib/Sema/SemaObjCProperty.cpp
+++ b/clang/lib/Sema/SemaObjCProperty.cpp
@@ -35,24 +35,23 @@ using namespace clang;
///
/// Returns OCL_None if the attributes as stated do not imply an ownership.
/// Never returns OCL_Autoreleasing.
-static Qualifiers::ObjCLifetime getImpliedARCOwnership(
- ObjCPropertyDecl::PropertyAttributeKind attrs,
- QualType type) {
+static Qualifiers::ObjCLifetime
+getImpliedARCOwnership(ObjCPropertyAttribute::Kind attrs, QualType type) {
// retain, strong, copy, weak, and unsafe_unretained are only legal
// on properties of retainable pointer type.
- if (attrs & (ObjCPropertyDecl::OBJC_PR_retain |
- ObjCPropertyDecl::OBJC_PR_strong |
- ObjCPropertyDecl::OBJC_PR_copy)) {
+ if (attrs &
+ (ObjCPropertyAttribute::kind_retain | ObjCPropertyAttribute::kind_strong |
+ ObjCPropertyAttribute::kind_copy)) {
return Qualifiers::OCL_Strong;
- } else if (attrs & ObjCPropertyDecl::OBJC_PR_weak) {
+ } else if (attrs & ObjCPropertyAttribute::kind_weak) {
return Qualifiers::OCL_Weak;
- } else if (attrs & ObjCPropertyDecl::OBJC_PR_unsafe_unretained) {
+ } else if (attrs & ObjCPropertyAttribute::kind_unsafe_unretained) {
return Qualifiers::OCL_ExplicitNone;
}
// assign can appear on other types, so we have to check the
// property type.
- if (attrs & ObjCPropertyDecl::OBJC_PR_assign &&
+ if (attrs & ObjCPropertyAttribute::kind_assign &&
type->isObjCRetainableType()) {
return Qualifiers::OCL_ExplicitNone;
}
@@ -66,8 +65,7 @@ static void checkPropertyDeclWithOwnership(Sema &S,
ObjCPropertyDecl *property) {
if (property->isInvalidDecl()) return;
- ObjCPropertyDecl::PropertyAttributeKind propertyKind
- = property->getPropertyAttributes();
+ ObjCPropertyAttribute::Kind propertyKind = property->getPropertyAttributes();
Qualifiers::ObjCLifetime propertyLifetime
= property->getType().getObjCLifetime();
@@ -80,14 +78,14 @@ static void checkPropertyDeclWithOwnership(Sema &S,
// attribute. That's okay, but restore reasonable invariants by
// setting the property attribute according to the lifetime
// qualifier.
- ObjCPropertyDecl::PropertyAttributeKind attr;
+ ObjCPropertyAttribute::Kind attr;
if (propertyLifetime == Qualifiers::OCL_Strong) {
- attr = ObjCPropertyDecl::OBJC_PR_strong;
+ attr = ObjCPropertyAttribute::kind_strong;
} else if (propertyLifetime == Qualifiers::OCL_Weak) {
- attr = ObjCPropertyDecl::OBJC_PR_weak;
+ attr = ObjCPropertyAttribute::kind_weak;
} else {
assert(propertyLifetime == Qualifiers::OCL_ExplicitNone);
- attr = ObjCPropertyDecl::OBJC_PR_unsafe_unretained;
+ attr = ObjCPropertyAttribute::kind_unsafe_unretained;
}
property->setPropertyAttributes(attr);
return;
@@ -130,18 +128,19 @@ CheckPropertyAgainstProtocol(Sema &S, ObjCPropertyDecl *Prop,
static unsigned deducePropertyOwnershipFromType(Sema &S, QualType T) {
// In GC mode, just look for the __weak qualifier.
if (S.getLangOpts().getGC() != LangOptions::NonGC) {
- if (T.isObjCGCWeak()) return ObjCDeclSpec::DQ_PR_weak;
+ if (T.isObjCGCWeak())
+ return ObjCPropertyAttribute::kind_weak;
- // In ARC/MRC, look for an explicit ownership qualifier.
- // For some reason, this only applies to __weak.
+ // In ARC/MRC, look for an explicit ownership qualifier.
+ // For some reason, this only applies to __weak.
} else if (auto ownership = T.getObjCLifetime()) {
switch (ownership) {
case Qualifiers::OCL_Weak:
- return ObjCDeclSpec::DQ_PR_weak;
+ return ObjCPropertyAttribute::kind_weak;
case Qualifiers::OCL_Strong:
- return ObjCDeclSpec::DQ_PR_strong;
+ return ObjCPropertyAttribute::kind_strong;
case Qualifiers::OCL_ExplicitNone:
- return ObjCDeclSpec::DQ_PR_unsafe_unretained;
+ return ObjCPropertyAttribute::kind_unsafe_unretained;
case Qualifiers::OCL_Autoreleasing:
case Qualifiers::OCL_None:
return 0;
@@ -153,22 +152,20 @@ static unsigned deducePropertyOwnershipFromType(Sema &S, QualType T) {
}
static const unsigned OwnershipMask =
- (ObjCPropertyDecl::OBJC_PR_assign |
- ObjCPropertyDecl::OBJC_PR_retain |
- ObjCPropertyDecl::OBJC_PR_copy |
- ObjCPropertyDecl::OBJC_PR_weak |
- ObjCPropertyDecl::OBJC_PR_strong |
- ObjCPropertyDecl::OBJC_PR_unsafe_unretained);
+ (ObjCPropertyAttribute::kind_assign | ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_copy | ObjCPropertyAttribute::kind_weak |
+ ObjCPropertyAttribute::kind_strong |
+ ObjCPropertyAttribute::kind_unsafe_unretained);
static unsigned getOwnershipRule(unsigned attr) {
unsigned result = attr & OwnershipMask;
// From an ownership perspective, assign and unsafe_unretained are
// identical; make sure one also implies the other.
- if (result & (ObjCPropertyDecl::OBJC_PR_assign |
- ObjCPropertyDecl::OBJC_PR_unsafe_unretained)) {
- result |= ObjCPropertyDecl::OBJC_PR_assign |
- ObjCPropertyDecl::OBJC_PR_unsafe_unretained;
+ if (result & (ObjCPropertyAttribute::kind_assign |
+ ObjCPropertyAttribute::kind_unsafe_unretained)) {
+ result |= ObjCPropertyAttribute::kind_assign |
+ ObjCPropertyAttribute::kind_unsafe_unretained;
}
return result;
@@ -183,15 +180,16 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC) {
unsigned Attributes = ODS.getPropertyAttributes();
- FD.D.setObjCWeakProperty((Attributes & ObjCDeclSpec::DQ_PR_weak) != 0);
+ FD.D.setObjCWeakProperty((Attributes & ObjCPropertyAttribute::kind_weak) !=
+ 0);
TypeSourceInfo *TSI = GetTypeForDeclarator(FD.D, S);
QualType T = TSI->getType();
if (!getOwnershipRule(Attributes)) {
Attributes |= deducePropertyOwnershipFromType(*this, T);
}
- bool isReadWrite = ((Attributes & ObjCDeclSpec::DQ_PR_readwrite) ||
+ bool isReadWrite = ((Attributes & ObjCPropertyAttribute::kind_readwrite) ||
// default is readwrite!
- !(Attributes & ObjCDeclSpec::DQ_PR_readonly));
+ !(Attributes & ObjCPropertyAttribute::kind_readonly));
// Proceed with constructing the ObjCPropertyDecls.
ObjCContainerDecl *ClassDecl = cast<ObjCContainerDecl>(CurContext);
@@ -277,39 +275,39 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
return Res;
}
-static ObjCPropertyDecl::PropertyAttributeKind
+static ObjCPropertyAttribute::Kind
makePropertyAttributesAsWritten(unsigned Attributes) {
unsigned attributesAsWritten = 0;
- if (Attributes & ObjCDeclSpec::DQ_PR_readonly)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_readonly;
- if (Attributes & ObjCDeclSpec::DQ_PR_readwrite)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_readwrite;
- if (Attributes & ObjCDeclSpec::DQ_PR_getter)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_getter;
- if (Attributes & ObjCDeclSpec::DQ_PR_setter)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_setter;
- if (Attributes & ObjCDeclSpec::DQ_PR_assign)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_assign;
- if (Attributes & ObjCDeclSpec::DQ_PR_retain)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_retain;
- if (Attributes & ObjCDeclSpec::DQ_PR_strong)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_strong;
- if (Attributes & ObjCDeclSpec::DQ_PR_weak)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_weak;
- if (Attributes & ObjCDeclSpec::DQ_PR_copy)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_copy;
- if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_unsafe_unretained;
- if (Attributes & ObjCDeclSpec::DQ_PR_nonatomic)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_nonatomic;
- if (Attributes & ObjCDeclSpec::DQ_PR_atomic)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_atomic;
- if (Attributes & ObjCDeclSpec::DQ_PR_class)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_class;
- if (Attributes & ObjCDeclSpec::DQ_PR_direct)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_direct;
-
- return (ObjCPropertyDecl::PropertyAttributeKind)attributesAsWritten;
+ if (Attributes & ObjCPropertyAttribute::kind_readonly)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_readonly;
+ if (Attributes & ObjCPropertyAttribute::kind_readwrite)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_readwrite;
+ if (Attributes & ObjCPropertyAttribute::kind_getter)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_getter;
+ if (Attributes & ObjCPropertyAttribute::kind_setter)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_setter;
+ if (Attributes & ObjCPropertyAttribute::kind_assign)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_assign;
+ if (Attributes & ObjCPropertyAttribute::kind_retain)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_retain;
+ if (Attributes & ObjCPropertyAttribute::kind_strong)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_strong;
+ if (Attributes & ObjCPropertyAttribute::kind_weak)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_weak;
+ if (Attributes & ObjCPropertyAttribute::kind_copy)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_copy;
+ if (Attributes & ObjCPropertyAttribute::kind_unsafe_unretained)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_unsafe_unretained;
+ if (Attributes & ObjCPropertyAttribute::kind_nonatomic)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_nonatomic;
+ if (Attributes & ObjCPropertyAttribute::kind_atomic)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_atomic;
+ if (Attributes & ObjCPropertyAttribute::kind_class)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_class;
+ if (Attributes & ObjCPropertyAttribute::kind_direct)
+ attributesAsWritten |= ObjCPropertyAttribute::kind_direct;
+
+ return (ObjCPropertyAttribute::Kind)attributesAsWritten;
}
static bool LocPropertyAttribute( ASTContext &Context, const char *attrName,
@@ -347,12 +345,10 @@ static void checkAtomicPropertyMismatch(Sema &S,
ObjCPropertyDecl *NewProperty,
bool PropagateAtomicity) {
// If the atomicity of both matches, we're done.
- bool OldIsAtomic =
- (OldProperty->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic)
- == 0;
- bool NewIsAtomic =
- (NewProperty->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic)
- == 0;
+ bool OldIsAtomic = (OldProperty->getPropertyAttributes() &
+ ObjCPropertyAttribute::kind_nonatomic) == 0;
+ bool NewIsAtomic = (NewProperty->getPropertyAttributes() &
+ ObjCPropertyAttribute::kind_nonatomic) == 0;
if (OldIsAtomic == NewIsAtomic) return;
// Determine whether the given property is readonly and implicitly
@@ -360,14 +356,16 @@ static void checkAtomicPropertyMismatch(Sema &S,
auto isImplicitlyReadonlyAtomic = [](ObjCPropertyDecl *Property) -> bool {
// Is it readonly?
auto Attrs = Property->getPropertyAttributes();
- if ((Attrs & ObjCPropertyDecl::OBJC_PR_readonly) == 0) return false;
+ if ((Attrs & ObjCPropertyAttribute::kind_readonly) == 0)
+ return false;
// Is it nonatomic?
- if (Attrs & ObjCPropertyDecl::OBJC_PR_nonatomic) return false;
+ if (Attrs & ObjCPropertyAttribute::kind_nonatomic)
+ return false;
// Was 'atomic' specified directly?
if (Property->getPropertyAttributesAsWritten() &
- ObjCPropertyDecl::OBJC_PR_atomic)
+ ObjCPropertyAttribute::kind_atomic)
return false;
return true;
@@ -375,16 +373,16 @@ static void checkAtomicPropertyMismatch(Sema &S,
// If we're allowed to propagate atomicity, and the new property did
// not specify atomicity at all, propagate.
- const unsigned AtomicityMask =
- (ObjCPropertyDecl::OBJC_PR_atomic | ObjCPropertyDecl::OBJC_PR_nonatomic);
+ const unsigned AtomicityMask = (ObjCPropertyAttribute::kind_atomic |
+ ObjCPropertyAttribute::kind_nonatomic);
if (PropagateAtomicity &&
((NewProperty->getPropertyAttributesAsWritten() & AtomicityMask) == 0)) {
unsigned Attrs = NewProperty->getPropertyAttributes();
Attrs = Attrs & ~AtomicityMask;
if (OldIsAtomic)
- Attrs |= ObjCPropertyDecl::OBJC_PR_atomic;
+ Attrs |= ObjCPropertyAttribute::kind_atomic;
else
- Attrs |= ObjCPropertyDecl::OBJC_PR_nonatomic;
+ Attrs |= ObjCPropertyAttribute::kind_nonatomic;
NewProperty->overwritePropertyAttributes(Attrs);
return;
@@ -438,8 +436,9 @@ Sema::HandlePropertyInClassExtension(Scope *S,
return nullptr;
}
- bool isClassProperty = (AttributesAsWritten & ObjCDeclSpec::DQ_PR_class) ||
- (Attributes & ObjCDeclSpec::DQ_PR_class);
+ bool isClassProperty =
+ (AttributesAsWritten & ObjCPropertyAttribute::kind_class) ||
+ (Attributes & ObjCPropertyAttribute::kind_class);
// Find the property in the extended class's primary class or
// extensions.
@@ -464,11 +463,11 @@ Sema::HandlePropertyInClassExtension(Scope *S,
// This is a common error where the user often intended the original
// declaration to be readonly.
unsigned diag =
- (Attributes & ObjCDeclSpec::DQ_PR_readwrite) &&
- (PIDecl->getPropertyAttributesAsWritten() &
- ObjCPropertyDecl::OBJC_PR_readwrite)
- ? diag::err_use_continuation_class_redeclaration_readwrite
- : diag::err_use_continuation_class;
+ (Attributes & ObjCPropertyAttribute::kind_readwrite) &&
+ (PIDecl->getPropertyAttributesAsWritten() &
+ ObjCPropertyAttribute::kind_readwrite)
+ ? diag::err_use_continuation_class_redeclaration_readwrite
+ : diag::err_use_continuation_class;
Diag(AtLoc, diag)
<< CCPrimary->getDeclName();
Diag(PIDecl->getLocation(), diag::note_property_declare);
@@ -478,15 +477,15 @@ Sema::HandlePropertyInClassExtension(Scope *S,
// Check for consistency of getters.
if (PIDecl->getGetterName() != GetterSel) {
// If the getter was written explicitly, complain.
- if (AttributesAsWritten & ObjCDeclSpec::DQ_PR_getter) {
- Diag(AtLoc, diag::warn_property_redecl_getter_mismatch)
- << PIDecl->getGetterName() << GetterSel;
- Diag(PIDecl->getLocation(), diag::note_property_declare);
- }
+ if (AttributesAsWritten & ObjCPropertyAttribute::kind_getter) {
+ Diag(AtLoc, diag::warn_property_redecl_getter_mismatch)
+ << PIDecl->getGetterName() << GetterSel;
+ Diag(PIDecl->getLocation(), diag::note_property_declare);
+ }
// Always adopt the getter from the original declaration.
GetterSel = PIDecl->getGetterName();
- Attributes |= ObjCDeclSpec::DQ_PR_getter;
+ Attributes |= ObjCPropertyAttribute::kind_getter;
}
// Check consistency of ownership.
@@ -505,9 +504,9 @@ Sema::HandlePropertyInClassExtension(Scope *S,
}
// If the redeclaration is 'weak' but the original property is not,
- if ((Attributes & ObjCPropertyDecl::OBJC_PR_weak) &&
- !(PIDecl->getPropertyAttributesAsWritten()
- & ObjCPropertyDecl::OBJC_PR_weak) &&
+ if ((Attributes & ObjCPropertyAttribute::kind_weak) &&
+ !(PIDecl->getPropertyAttributesAsWritten() &
+ ObjCPropertyAttribute::kind_weak) &&
PIDecl->getType()->getAs<ObjCObjectPointerType>() &&
PIDecl->getType().getObjCLifetime() == Qualifiers::OCL_None) {
Diag(AtLoc, diag::warn_property_implicitly_mismatched);
@@ -584,8 +583,8 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
// Property defaults to 'assign' if it is readwrite, unless this is ARC
// and the type is retainable.
bool isAssign;
- if (Attributes & (ObjCDeclSpec::DQ_PR_assign |
- ObjCDeclSpec::DQ_PR_unsafe_unretained)) {
+ if (Attributes & (ObjCPropertyAttribute::kind_assign |
+ ObjCPropertyAttribute::kind_unsafe_unretained)) {
isAssign = true;
} else if (getOwnershipRule(Attributes) || !isReadWrite) {
isAssign = false;
@@ -596,8 +595,8 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
// Issue a warning if property is 'assign' as default and its
// object, which is gc'able conforms to NSCopying protocol
- if (getLangOpts().getGC() != LangOptions::NonGC &&
- isAssign && !(Attributes & ObjCDeclSpec::DQ_PR_assign)) {
+ if (getLangOpts().getGC() != LangOptions::NonGC && isAssign &&
+ !(Attributes & ObjCPropertyAttribute::kind_assign)) {
if (const ObjCObjectPointerType *ObjPtrTy =
T->getAs<ObjCObjectPointerType>()) {
ObjCInterfaceDecl *IDecl = ObjPtrTy->getObjectType()->getInterface();
@@ -625,8 +624,9 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
PropertyId, AtLoc,
LParenLoc, T, TInfo);
- bool isClassProperty = (AttributesAsWritten & ObjCDeclSpec::DQ_PR_class) ||
- (Attributes & ObjCDeclSpec::DQ_PR_class);
+ bool isClassProperty =
+ (AttributesAsWritten & ObjCPropertyAttribute::kind_class) ||
+ (Attributes & ObjCPropertyAttribute::kind_class);
// Class property and instance property can have the same name.
if (ObjCPropertyDecl *prevDecl = ObjCPropertyDecl::findPropertyDecl(
DC, PropertyId, ObjCPropertyDecl::getQueryKind(isClassProperty))) {
@@ -654,68 +654,68 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
PDecl->setPropertyAttributesAsWritten(
makePropertyAttributesAsWritten(AttributesAsWritten));
- if (Attributes & ObjCDeclSpec::DQ_PR_readonly)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_readonly);
+ if (Attributes & ObjCPropertyAttribute::kind_readonly)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_readonly);
- if (Attributes & ObjCDeclSpec::DQ_PR_getter)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_getter);
+ if (Attributes & ObjCPropertyAttribute::kind_getter)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_getter);
- if (Attributes & ObjCDeclSpec::DQ_PR_setter)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_setter);
+ if (Attributes & ObjCPropertyAttribute::kind_setter)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_setter);
if (isReadWrite)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_readwrite);
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_readwrite);
- if (Attributes & ObjCDeclSpec::DQ_PR_retain)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_retain);
+ if (Attributes & ObjCPropertyAttribute::kind_retain)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_retain);
- if (Attributes & ObjCDeclSpec::DQ_PR_strong)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
+ if (Attributes & ObjCPropertyAttribute::kind_strong)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_strong);
- if (Attributes & ObjCDeclSpec::DQ_PR_weak)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_weak);
+ if (Attributes & ObjCPropertyAttribute::kind_weak)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_weak);
- if (Attributes & ObjCDeclSpec::DQ_PR_copy)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_copy);
+ if (Attributes & ObjCPropertyAttribute::kind_copy)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_copy);
- if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_unsafe_unretained);
+ if (Attributes & ObjCPropertyAttribute::kind_unsafe_unretained)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_unsafe_unretained);
if (isAssign)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_assign);
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_assign);
// In the semantic attributes, one of nonatomic or atomic is always set.
- if (Attributes & ObjCDeclSpec::DQ_PR_nonatomic)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_nonatomic);
+ if (Attributes & ObjCPropertyAttribute::kind_nonatomic)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_nonatomic);
else
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_atomic);
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_atomic);
// 'unsafe_unretained' is alias for 'assign'.
- if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_assign);
+ if (Attributes & ObjCPropertyAttribute::kind_unsafe_unretained)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_assign);
if (isAssign)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_unsafe_unretained);
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_unsafe_unretained);
if (MethodImplKind == tok::objc_required)
PDecl->setPropertyImplementation(ObjCPropertyDecl::Required);
else if (MethodImplKind == tok::objc_optional)
PDecl->setPropertyImplementation(ObjCPropertyDecl::Optional);
- if (Attributes & ObjCDeclSpec::DQ_PR_nullability)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_nullability);
+ if (Attributes & ObjCPropertyAttribute::kind_nullability)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_nullability);
- if (Attributes & ObjCDeclSpec::DQ_PR_null_resettable)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_null_resettable);
+ if (Attributes & ObjCPropertyAttribute::kind_null_resettable)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_null_resettable);
- if (Attributes & ObjCDeclSpec::DQ_PR_class)
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_class);
+ if (Attributes & ObjCPropertyAttribute::kind_class)
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_class);
- if ((Attributes & ObjCDeclSpec::DQ_PR_direct) ||
+ if ((Attributes & ObjCPropertyAttribute::kind_direct) ||
CDecl->hasAttr<ObjCDirectMembersAttr>()) {
if (isa<ObjCProtocolDecl>(CDecl)) {
Diag(PDecl->getLocation(), diag::err_objc_direct_on_protocol) << true;
} else if (getLangOpts().ObjCRuntime.allowsDirectDispatch()) {
- PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_direct);
+ PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_direct);
} else {
Diag(PDecl->getLocation(), diag::warn_objc_direct_property_ignored)
<< PDecl->getDeclName();
@@ -781,10 +781,9 @@ static void checkARCPropertyImpl(Sema &S, SourceLocation propertyImplLoc,
case Qualifiers::OCL_ExplicitNone:
S.Diag(ivar->getLocation(), diag::err_arc_assign_property_ownership)
- << property->getDeclName()
- << ivar->getDeclName()
- << ((property->getPropertyAttributesAsWritten()
- & ObjCPropertyDecl::OBJC_PR_assign) != 0);
+ << property->getDeclName() << ivar->getDeclName()
+ << ((property->getPropertyAttributesAsWritten() &
+ ObjCPropertyAttribute::kind_assign) != 0);
break;
case Qualifiers::OCL_Autoreleasing:
@@ -815,21 +814,20 @@ static void setImpliedPropertyAttributeForReadOnlyProperty(
if (!ivar) {
// if no backing ivar, make property 'strong'.
- property->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
+ property->setPropertyAttributes(ObjCPropertyAttribute::kind_strong);
return;
}
// property assumes owenership of backing ivar.
QualType ivarType = ivar->getType();
Qualifiers::ObjCLifetime ivarLifetime = ivarType.getObjCLifetime();
if (ivarLifetime == Qualifiers::OCL_Strong)
- property->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
+ property->setPropertyAttributes(ObjCPropertyAttribute::kind_strong);
else if (ivarLifetime == Qualifiers::OCL_Weak)
- property->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_weak);
+ property->setPropertyAttributes(ObjCPropertyAttribute::kind_weak);
}
-static bool
-isIncompatiblePropertyAttribute(unsigned Attr1, unsigned Attr2,
- ObjCPropertyDecl::PropertyAttributeKind Kind) {
+static bool isIncompatiblePropertyAttribute(unsigned Attr1, unsigned Attr2,
+ ObjCPropertyAttribute::Kind Kind) {
return (Attr1 & Kind) != (Attr2 & Kind);
}
@@ -912,30 +910,31 @@ SelectPropertyForSynthesisFromProtocols(Sema &S, SourceLocation AtLoc,
};
// The ownership might be incompatible unless the property has no explicit
// ownership.
- bool HasOwnership = (Attr & (ObjCPropertyDecl::OBJC_PR_retain |
- ObjCPropertyDecl::OBJC_PR_strong |
- ObjCPropertyDecl::OBJC_PR_copy |
- ObjCPropertyDecl::OBJC_PR_assign |
- ObjCPropertyDecl::OBJC_PR_unsafe_unretained |
- ObjCPropertyDecl::OBJC_PR_weak)) != 0;
+ bool HasOwnership =
+ (Attr & (ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_strong |
+ ObjCPropertyAttribute::kind_copy |
+ ObjCPropertyAttribute::kind_assign |
+ ObjCPropertyAttribute::kind_unsafe_unretained |
+ ObjCPropertyAttribute::kind_weak)) != 0;
if (HasOwnership &&
isIncompatiblePropertyAttribute(OriginalAttributes, Attr,
- ObjCPropertyDecl::OBJC_PR_copy)) {
- Diag(OriginalAttributes & ObjCPropertyDecl::OBJC_PR_copy, "copy");
+ ObjCPropertyAttribute::kind_copy)) {
+ Diag(OriginalAttributes & ObjCPropertyAttribute::kind_copy, "copy");
continue;
}
if (HasOwnership && areIncompatiblePropertyAttributes(
OriginalAttributes, Attr,
- ObjCPropertyDecl::OBJC_PR_retain |
- ObjCPropertyDecl::OBJC_PR_strong)) {
- Diag(OriginalAttributes & (ObjCPropertyDecl::OBJC_PR_retain |
- ObjCPropertyDecl::OBJC_PR_strong),
+ ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_strong)) {
+ Diag(OriginalAttributes & (ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_strong),
"retain (or strong)");
continue;
}
if (isIncompatiblePropertyAttribute(OriginalAttributes, Attr,
- ObjCPropertyDecl::OBJC_PR_atomic)) {
- Diag(OriginalAttributes & ObjCPropertyDecl::OBJC_PR_atomic, "atomic");
+ ObjCPropertyAttribute::kind_atomic)) {
+ Diag(OriginalAttributes & ObjCPropertyAttribute::kind_atomic, "atomic");
continue;
}
}
@@ -1126,8 +1125,8 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
return nullptr;
}
unsigned PIkind = property->getPropertyAttributesAsWritten();
- if ((PIkind & (ObjCPropertyDecl::OBJC_PR_atomic |
- ObjCPropertyDecl::OBJC_PR_nonatomic) ) == 0) {
+ if ((PIkind & (ObjCPropertyAttribute::kind_atomic |
+ ObjCPropertyAttribute::kind_nonatomic)) == 0) {
if (AtLoc.isValid())
Diag(AtLoc, diag::warn_implicit_atomic_property);
else
@@ -1143,10 +1142,8 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
return nullptr;
}
}
- if (Synthesize&&
- (PIkind & ObjCPropertyDecl::OBJC_PR_readonly) &&
- property->hasAttr<IBOutletAttr>() &&
- !AtLoc.isValid()) {
+ if (Synthesize && (PIkind & ObjCPropertyAttribute::kind_readonly) &&
+ property->hasAttr<IBOutletAttr>() && !AtLoc.isValid()) {
bool ReadWriteProperty = false;
// Search into the class extensions and see if 'readonly property is
// redeclared 'readwrite', then no warning is to be issued.
@@ -1155,7 +1152,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if (!R.empty())
if (ObjCPropertyDecl *ExtProp = dyn_cast<ObjCPropertyDecl>(R[0])) {
PIkind = ExtProp->getPropertyAttributesAsWritten();
- if (PIkind & ObjCPropertyDecl::OBJC_PR_readwrite) {
+ if (PIkind & ObjCPropertyAttribute::kind_readwrite) {
ReadWriteProperty = true;
break;
}
@@ -1232,16 +1229,15 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if (getLangOpts().ObjCAutoRefCount &&
(property->getPropertyAttributesAsWritten() &
- ObjCPropertyDecl::OBJC_PR_readonly) &&
+ ObjCPropertyAttribute::kind_readonly) &&
PropertyIvarType->isObjCRetainableType()) {
setImpliedPropertyAttributeForReadOnlyProperty(property, Ivar);
}
- ObjCPropertyDecl::PropertyAttributeKind kind
- = property->getPropertyAttributes();
+ ObjCPropertyAttribute::Kind kind = property->getPropertyAttributes();
bool isARCWeak = false;
- if (kind & ObjCPropertyDecl::OBJC_PR_weak) {
+ if (kind & ObjCPropertyAttribute::kind_weak) {
// Add GC __weak to the ivar type if the property is weak.
if (getLangOpts().getGC() != LangOptions::NonGC) {
assert(!getLangOpts().ObjCAutoRefCount);
@@ -1312,7 +1308,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
// It's an error if we have to do this and the user didn't
// explicitly write an ownership attribute on the property.
if (!hasWrittenStorageAttribute(property, QueryKind) &&
- !(kind & ObjCPropertyDecl::OBJC_PR_strong)) {
+ !(kind & ObjCPropertyAttribute::kind_strong)) {
Diag(PropertyDiagLoc,
diag::err_arc_objc_property_default_assign_on_object);
Diag(property->getLocation(), diag::note_property_declare);
@@ -1456,7 +1452,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
PropertyLoc);
PIDecl->setGetterMethodDecl(OMD);
}
-
+
if (getLangOpts().CPlusPlus && Synthesize && !CompleteTypeErr &&
Ivar->getType()->isRecordType()) {
// For Objective-C++, need to synthesize the AST for the IVAR object to be
@@ -1551,7 +1547,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
ExprResult Res = BuildBinOp(S, PropertyDiagLoc,
BO_Assign, lhs, rhs);
if (property->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_atomic) {
+ ObjCPropertyAttribute::kind_atomic) {
Expr *callExpr = Res.getAs<Expr>();
if (const CXXOperatorCallExpr *CXXCE =
dyn_cast_or_null<CXXOperatorCallExpr>(callExpr))
@@ -1627,6 +1623,15 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
CatImplClass->addPropertyImplementation(PIDecl);
}
+ if (PIDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic &&
+ PIDecl->getPropertyDecl() &&
+ PIDecl->getPropertyDecl()->isDirectProperty()) {
+ Diag(PropertyLoc, diag::err_objc_direct_dynamic_property);
+ Diag(PIDecl->getPropertyDecl()->getLocation(),
+ diag::note_previous_declaration);
+ return nullptr;
+ }
+
return PIDecl;
}
@@ -1642,10 +1647,8 @@ Sema::DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *inheritedName,
bool OverridingProtocolProperty) {
- ObjCPropertyDecl::PropertyAttributeKind CAttr =
- Property->getPropertyAttributes();
- ObjCPropertyDecl::PropertyAttributeKind SAttr =
- SuperProperty->getPropertyAttributes();
+ ObjCPropertyAttribute::Kind CAttr = Property->getPropertyAttributes();
+ ObjCPropertyAttribute::Kind SAttr = SuperProperty->getPropertyAttributes();
// We allow readonly properties without an explicit ownership
// (assign/unsafe_unretained/weak/retain/strong/copy) in super class
@@ -1654,21 +1657,19 @@ Sema::DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
!getOwnershipRule(SAttr) && getOwnershipRule(CAttr))
;
else {
- if ((CAttr & ObjCPropertyDecl::OBJC_PR_readonly)
- && (SAttr & ObjCPropertyDecl::OBJC_PR_readwrite))
+ if ((CAttr & ObjCPropertyAttribute::kind_readonly) &&
+ (SAttr & ObjCPropertyAttribute::kind_readwrite))
Diag(Property->getLocation(), diag::warn_readonly_property)
<< Property->getDeclName() << inheritedName;
- if ((CAttr & ObjCPropertyDecl::OBJC_PR_copy)
- != (SAttr & ObjCPropertyDecl::OBJC_PR_copy))
+ if ((CAttr & ObjCPropertyAttribute::kind_copy) !=
+ (SAttr & ObjCPropertyAttribute::kind_copy))
Diag(Property->getLocation(), diag::warn_property_attribute)
<< Property->getDeclName() << "copy" << inheritedName;
- else if (!(SAttr & ObjCPropertyDecl::OBJC_PR_readonly)){
- unsigned CAttrRetain =
- (CAttr &
- (ObjCPropertyDecl::OBJC_PR_retain | ObjCPropertyDecl::OBJC_PR_strong));
- unsigned SAttrRetain =
- (SAttr &
- (ObjCPropertyDecl::OBJC_PR_retain | ObjCPropertyDecl::OBJC_PR_strong));
+ else if (!(SAttr & ObjCPropertyAttribute::kind_readonly)) {
+ unsigned CAttrRetain = (CAttr & (ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_strong));
+ unsigned SAttrRetain = (SAttr & (ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_strong));
bool CStrong = (CAttrRetain != 0);
bool SStrong = (SAttrRetain != 0);
if (CStrong != SStrong)
@@ -1876,7 +1877,7 @@ static bool SuperClassImplementsProperty(ObjCInterfaceDecl *IDecl,
ObjCPropertyDecl *Prop) {
bool SuperClassImplementsGetter = false;
bool SuperClassImplementsSetter = false;
- if (Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readonly)
+ if (Prop->getPropertyAttributes() & ObjCPropertyAttribute::kind_readonly)
SuperClassImplementsSetter = true;
while (IDecl->getSuperClass()) {
@@ -1919,7 +1920,7 @@ void Sema::DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
continue;
ObjCMethodDecl *ImpMethod = IMPDecl->getInstanceMethod(Prop->getGetterName());
if (ImpMethod && !ImpMethod->getBody()) {
- if (Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readonly)
+ if (Prop->getPropertyAttributes() & ObjCPropertyAttribute::kind_readonly)
continue;
ImpMethod = IMPDecl->getInstanceMethod(Prop->getSetterName());
if (ImpMethod && !ImpMethod->getBody())
@@ -1956,16 +1957,16 @@ void Sema::DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
}
// If property to be implemented in the super class, ignore.
if (PropInSuperClass) {
- if ((Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readwrite) &&
+ if ((Prop->getPropertyAttributes() &
+ ObjCPropertyAttribute::kind_readwrite) &&
(PropInSuperClass->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_readonly) &&
+ ObjCPropertyAttribute::kind_readonly) &&
!IMPDecl->getInstanceMethod(Prop->getSetterName()) &&
!IDecl->HasUserDeclaredSetterMethod(Prop)) {
Diag(Prop->getLocation(), diag::warn_no_autosynthesis_property)
<< Prop->getIdentifier();
Diag(PropInSuperClass->getLocation(), diag::note_property_declare);
- }
- else {
+ } else {
Diag(Prop->getLocation(), diag::warn_autosynthesis_property_in_superclass)
<< Prop->getIdentifier();
Diag(PropInSuperClass->getLocation(), diag::note_property_declare);
@@ -2152,12 +2153,11 @@ void Sema::diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl)
const auto *property = propertyImpl->getPropertyDecl();
// Warn about null_resettable properties with synthesized setters,
// because the setter won't properly handle nil.
- if (propertyImpl->getPropertyImplementation()
- == ObjCPropertyImplDecl::Synthesize &&
+ if (propertyImpl->getPropertyImplementation() ==
+ ObjCPropertyImplDecl::Synthesize &&
(property->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_null_resettable) &&
- property->getGetterMethodDecl() &&
- property->getSetterMethodDecl()) {
+ ObjCPropertyAttribute::kind_null_resettable) &&
+ property->getGetterMethodDecl() && property->getSetterMethodDecl()) {
auto *getterImpl = propertyImpl->getGetterMethodDecl();
auto *setterImpl = propertyImpl->getSetterMethodDecl();
if ((!getterImpl || getterImpl->isSynthesizedAccessorStub()) &&
@@ -2195,8 +2195,8 @@ Sema::AtomicPropertySetterGetterRules (ObjCImplDecl* IMPDecl,
unsigned Attributes = Property->getPropertyAttributes();
unsigned AttributesAsWritten = Property->getPropertyAttributesAsWritten();
- if (!(AttributesAsWritten & ObjCPropertyDecl::OBJC_PR_atomic) &&
- !(AttributesAsWritten & ObjCPropertyDecl::OBJC_PR_nonatomic)) {
+ if (!(AttributesAsWritten & ObjCPropertyAttribute::kind_atomic) &&
+ !(AttributesAsWritten & ObjCPropertyAttribute::kind_nonatomic)) {
GetterMethod = Property->isClassProperty() ?
IMPDecl->getClassMethod(Property->getGetterName()) :
IMPDecl->getInstanceMethod(Property->getGetterName());
@@ -2222,8 +2222,8 @@ Sema::AtomicPropertySetterGetterRules (ObjCImplDecl* IMPDecl,
}
// We only care about readwrite atomic property.
- if ((Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) ||
- !(Attributes & ObjCPropertyDecl::OBJC_PR_readwrite))
+ if ((Attributes & ObjCPropertyAttribute::kind_nonatomic) ||
+ !(Attributes & ObjCPropertyAttribute::kind_readwrite))
continue;
if (const ObjCPropertyImplDecl *PIDecl = IMPDecl->FindPropertyImplDecl(
Property->getIdentifier(), Property->getQueryKind())) {
@@ -2244,7 +2244,7 @@ Sema::AtomicPropertySetterGetterRules (ObjCImplDecl* IMPDecl,
<< (SetterMethod != nullptr);
// fixit stuff.
if (Property->getLParenLoc().isValid() &&
- !(AttributesAsWritten & ObjCPropertyDecl::OBJC_PR_atomic)) {
+ !(AttributesAsWritten & ObjCPropertyAttribute::kind_atomic)) {
// @property () ... case.
SourceLocation AfterLParen =
getLocForEndOfToken(Property->getLParenLoc());
@@ -2260,8 +2260,7 @@ Sema::AtomicPropertySetterGetterRules (ObjCImplDecl* IMPDecl,
Diag(Property->getLocation(),
diag::note_atomic_property_fixup_suggest)
<< FixItHint::CreateInsertion(startLoc, "(nonatomic) ");
- }
- else
+ } else
Diag(MethodLoc, diag::note_atomic_property_fixup_suggest);
Diag(Property->getLocation(), diag::note_property_declare);
}
@@ -2421,6 +2420,40 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property) {
DiagnosePropertyAccessorMismatch(property, GetterMethod,
property->getLocation());
+ // synthesizing accessors must not result in a direct method that is not
+ // monomorphic
+ if (!GetterMethod) {
+ if (const ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(CD)) {
+ auto *ExistingGetter = CatDecl->getClassInterface()->lookupMethod(
+ property->getGetterName(), !IsClassProperty, true, false, CatDecl);
+ if (ExistingGetter) {
+ if (ExistingGetter->isDirectMethod() || property->isDirectProperty()) {
+ Diag(property->getLocation(), diag::err_objc_direct_duplicate_decl)
+ << property->isDirectProperty() << 1 /* property */
+ << ExistingGetter->isDirectMethod()
+ << ExistingGetter->getDeclName();
+ Diag(ExistingGetter->getLocation(), diag::note_previous_declaration);
+ }
+ }
+ }
+ }
+
+ if (!property->isReadOnly() && !SetterMethod) {
+ if (const ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(CD)) {
+ auto *ExistingSetter = CatDecl->getClassInterface()->lookupMethod(
+ property->getSetterName(), !IsClassProperty, true, false, CatDecl);
+ if (ExistingSetter) {
+ if (ExistingSetter->isDirectMethod() || property->isDirectProperty()) {
+ Diag(property->getLocation(), diag::err_objc_direct_duplicate_decl)
+ << property->isDirectProperty() << 1 /* property */
+ << ExistingSetter->isDirectMethod()
+ << ExistingSetter->getDeclName();
+ Diag(ExistingSetter->getLocation(), diag::note_previous_declaration);
+ }
+ }
+ }
+ }
+
if (!property->isReadOnly() && SetterMethod) {
if (Context.getCanonicalType(SetterMethod->getReturnType()) !=
Context.VoidTy)
@@ -2455,7 +2488,7 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property) {
// If the property is null_resettable, the getter returns nonnull.
if (property->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_null_resettable) {
+ ObjCPropertyAttribute::kind_null_resettable) {
QualType modifiedTy = resultTy;
if (auto nullability = AttributedType::stripOuterNullability(modifiedTy)) {
if (*nullability == NullabilityKind::Unspecified)
@@ -2534,7 +2567,7 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property) {
// If the property is null_resettable, the setter accepts a
// nullable value.
if (property->getPropertyAttributes() &
- ObjCPropertyDecl::OBJC_PR_null_resettable) {
+ ObjCPropertyAttribute::kind_null_resettable) {
QualType modifiedTy = paramTy;
if (auto nullability = AttributedType::stripOuterNullability(modifiedTy)){
if (*nullability == NullabilityKind::Unspecified)
@@ -2622,8 +2655,8 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
if (!PDecl || PDecl->isInvalidDecl())
return;
- if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
- (Attributes & ObjCDeclSpec::DQ_PR_readwrite))
+ if ((Attributes & ObjCPropertyAttribute::kind_readonly) &&
+ (Attributes & ObjCPropertyAttribute::kind_readwrite))
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "readonly" << "readwrite";
@@ -2631,104 +2664,109 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
QualType PropertyTy = PropertyDecl->getType();
// Check for copy or retain on non-object types.
- if ((Attributes & (ObjCDeclSpec::DQ_PR_weak | ObjCDeclSpec::DQ_PR_copy |
- ObjCDeclSpec::DQ_PR_retain | ObjCDeclSpec::DQ_PR_strong)) &&
+ if ((Attributes &
+ (ObjCPropertyAttribute::kind_weak | ObjCPropertyAttribute::kind_copy |
+ ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_strong)) &&
!PropertyTy->isObjCRetainableType() &&
!PropertyDecl->hasAttr<ObjCNSObjectAttr>()) {
Diag(Loc, diag::err_objc_property_requires_object)
- << (Attributes & ObjCDeclSpec::DQ_PR_weak ? "weak" :
- Attributes & ObjCDeclSpec::DQ_PR_copy ? "copy" : "retain (or strong)");
- Attributes &= ~(ObjCDeclSpec::DQ_PR_weak | ObjCDeclSpec::DQ_PR_copy |
- ObjCDeclSpec::DQ_PR_retain | ObjCDeclSpec::DQ_PR_strong);
+ << (Attributes & ObjCPropertyAttribute::kind_weak
+ ? "weak"
+ : Attributes & ObjCPropertyAttribute::kind_copy
+ ? "copy"
+ : "retain (or strong)");
+ Attributes &=
+ ~(ObjCPropertyAttribute::kind_weak | ObjCPropertyAttribute::kind_copy |
+ ObjCPropertyAttribute::kind_retain |
+ ObjCPropertyAttribute::kind_strong);
PropertyDecl->setInvalidDecl();
}
// Check for assign on object types.
- if ((Attributes & ObjCDeclSpec::DQ_PR_assign) &&
- !(Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained) &&
+ if ((Attributes & ObjCPropertyAttribute::kind_assign) &&
+ !(Attributes & ObjCPropertyAttribute::kind_unsafe_unretained) &&
PropertyTy->isObjCRetainableType() &&
!PropertyTy->isObjCARCImplicitlyUnretainedType()) {
Diag(Loc, diag::warn_objc_property_assign_on_object);
}
// Check for more than one of { assign, copy, retain }.
- if (Attributes & ObjCDeclSpec::DQ_PR_assign) {
- if (Attributes & ObjCDeclSpec::DQ_PR_copy) {
+ if (Attributes & ObjCPropertyAttribute::kind_assign) {
+ if (Attributes & ObjCPropertyAttribute::kind_copy) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "assign" << "copy";
- Attributes &= ~ObjCDeclSpec::DQ_PR_copy;
+ Attributes &= ~ObjCPropertyAttribute::kind_copy;
}
- if (Attributes & ObjCDeclSpec::DQ_PR_retain) {
+ if (Attributes & ObjCPropertyAttribute::kind_retain) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "assign" << "retain";
- Attributes &= ~ObjCDeclSpec::DQ_PR_retain;
+ Attributes &= ~ObjCPropertyAttribute::kind_retain;
}
- if (Attributes & ObjCDeclSpec::DQ_PR_strong) {
+ if (Attributes & ObjCPropertyAttribute::kind_strong) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "assign" << "strong";
- Attributes &= ~ObjCDeclSpec::DQ_PR_strong;
+ Attributes &= ~ObjCPropertyAttribute::kind_strong;
}
- if (getLangOpts().ObjCAutoRefCount &&
- (Attributes & ObjCDeclSpec::DQ_PR_weak)) {
+ if (getLangOpts().ObjCAutoRefCount &&
+ (Attributes & ObjCPropertyAttribute::kind_weak)) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "assign" << "weak";
- Attributes &= ~ObjCDeclSpec::DQ_PR_weak;
+ Attributes &= ~ObjCPropertyAttribute::kind_weak;
}
if (PropertyDecl->hasAttr<IBOutletCollectionAttr>())
Diag(Loc, diag::warn_iboutletcollection_property_assign);
- } else if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained) {
- if (Attributes & ObjCDeclSpec::DQ_PR_copy) {
+ } else if (Attributes & ObjCPropertyAttribute::kind_unsafe_unretained) {
+ if (Attributes & ObjCPropertyAttribute::kind_copy) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "unsafe_unretained" << "copy";
- Attributes &= ~ObjCDeclSpec::DQ_PR_copy;
+ Attributes &= ~ObjCPropertyAttribute::kind_copy;
}
- if (Attributes & ObjCDeclSpec::DQ_PR_retain) {
+ if (Attributes & ObjCPropertyAttribute::kind_retain) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "unsafe_unretained" << "retain";
- Attributes &= ~ObjCDeclSpec::DQ_PR_retain;
+ Attributes &= ~ObjCPropertyAttribute::kind_retain;
}
- if (Attributes & ObjCDeclSpec::DQ_PR_strong) {
+ if (Attributes & ObjCPropertyAttribute::kind_strong) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "unsafe_unretained" << "strong";
- Attributes &= ~ObjCDeclSpec::DQ_PR_strong;
+ Attributes &= ~ObjCPropertyAttribute::kind_strong;
}
- if (getLangOpts().ObjCAutoRefCount &&
- (Attributes & ObjCDeclSpec::DQ_PR_weak)) {
+ if (getLangOpts().ObjCAutoRefCount &&
+ (Attributes & ObjCPropertyAttribute::kind_weak)) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "unsafe_unretained" << "weak";
- Attributes &= ~ObjCDeclSpec::DQ_PR_weak;
+ Attributes &= ~ObjCPropertyAttribute::kind_weak;
}
- } else if (Attributes & ObjCDeclSpec::DQ_PR_copy) {
- if (Attributes & ObjCDeclSpec::DQ_PR_retain) {
+ } else if (Attributes & ObjCPropertyAttribute::kind_copy) {
+ if (Attributes & ObjCPropertyAttribute::kind_retain) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "copy" << "retain";
- Attributes &= ~ObjCDeclSpec::DQ_PR_retain;
+ Attributes &= ~ObjCPropertyAttribute::kind_retain;
}
- if (Attributes & ObjCDeclSpec::DQ_PR_strong) {
+ if (Attributes & ObjCPropertyAttribute::kind_strong) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "copy" << "strong";
- Attributes &= ~ObjCDeclSpec::DQ_PR_strong;
+ Attributes &= ~ObjCPropertyAttribute::kind_strong;
}
- if (Attributes & ObjCDeclSpec::DQ_PR_weak) {
+ if (Attributes & ObjCPropertyAttribute::kind_weak) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "copy" << "weak";
- Attributes &= ~ObjCDeclSpec::DQ_PR_weak;
+ Attributes &= ~ObjCPropertyAttribute::kind_weak;
}
- }
- else if ((Attributes & ObjCDeclSpec::DQ_PR_retain) &&
- (Attributes & ObjCDeclSpec::DQ_PR_weak)) {
- Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
- << "retain" << "weak";
- Attributes &= ~ObjCDeclSpec::DQ_PR_retain;
- }
- else if ((Attributes & ObjCDeclSpec::DQ_PR_strong) &&
- (Attributes & ObjCDeclSpec::DQ_PR_weak)) {
- Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
- << "strong" << "weak";
- Attributes &= ~ObjCDeclSpec::DQ_PR_weak;
+ } else if ((Attributes & ObjCPropertyAttribute::kind_retain) &&
+ (Attributes & ObjCPropertyAttribute::kind_weak)) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive) << "retain"
+ << "weak";
+ Attributes &= ~ObjCPropertyAttribute::kind_retain;
+ } else if ((Attributes & ObjCPropertyAttribute::kind_strong) &&
+ (Attributes & ObjCPropertyAttribute::kind_weak)) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive) << "strong"
+ << "weak";
+ Attributes &= ~ObjCPropertyAttribute::kind_weak;
}
- if (Attributes & ObjCDeclSpec::DQ_PR_weak) {
+ if (Attributes & ObjCPropertyAttribute::kind_weak) {
// 'weak' and 'nonnull' are mutually exclusive.
if (auto nullability = PropertyTy->getNullability(Context)) {
if (*nullability == NullabilityKind::NonNull)
@@ -2737,41 +2775,40 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
}
}
- if ((Attributes & ObjCDeclSpec::DQ_PR_atomic) &&
- (Attributes & ObjCDeclSpec::DQ_PR_nonatomic)) {
- Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
- << "atomic" << "nonatomic";
- Attributes &= ~ObjCDeclSpec::DQ_PR_atomic;
+ if ((Attributes & ObjCPropertyAttribute::kind_atomic) &&
+ (Attributes & ObjCPropertyAttribute::kind_nonatomic)) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive) << "atomic"
+ << "nonatomic";
+ Attributes &= ~ObjCPropertyAttribute::kind_atomic;
}
// Warn if user supplied no assignment attribute, property is
// readwrite, and this is an object type.
if (!getOwnershipRule(Attributes) && PropertyTy->isObjCRetainableType()) {
- if (Attributes & ObjCDeclSpec::DQ_PR_readonly) {
+ if (Attributes & ObjCPropertyAttribute::kind_readonly) {
// do nothing
} else if (getLangOpts().ObjCAutoRefCount) {
// With arc, @property definitions should default to strong when
// not specified.
- PropertyDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
+ PropertyDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_strong);
} else if (PropertyTy->isObjCObjectPointerType()) {
- bool isAnyClassTy =
- (PropertyTy->isObjCClassType() ||
- PropertyTy->isObjCQualifiedClassType());
- // In non-gc, non-arc mode, 'Class' is treated as a 'void *' no need to
- // issue any warning.
- if (isAnyClassTy && getLangOpts().getGC() == LangOptions::NonGC)
- ;
- else if (propertyInPrimaryClass) {
- // Don't issue warning on property with no life time in class
- // extension as it is inherited from property in primary class.
- // Skip this warning in gc-only mode.
- if (getLangOpts().getGC() != LangOptions::GCOnly)
- Diag(Loc, diag::warn_objc_property_no_assignment_attribute);
-
- // If non-gc code warn that this is likely inappropriate.
- if (getLangOpts().getGC() == LangOptions::NonGC)
- Diag(Loc, diag::warn_objc_property_default_assign_on_object);
- }
+ bool isAnyClassTy = (PropertyTy->isObjCClassType() ||
+ PropertyTy->isObjCQualifiedClassType());
+ // In non-gc, non-arc mode, 'Class' is treated as a 'void *' no need to
+ // issue any warning.
+ if (isAnyClassTy && getLangOpts().getGC() == LangOptions::NonGC)
+ ;
+ else if (propertyInPrimaryClass) {
+ // Don't issue warning on property with no life time in class
+ // extension as it is inherited from property in primary class.
+ // Skip this warning in gc-only mode.
+ if (getLangOpts().getGC() != LangOptions::GCOnly)
+ Diag(Loc, diag::warn_objc_property_no_assignment_attribute);
+
+ // If non-gc code warn that this is likely inappropriate.
+ if (getLangOpts().getGC() == LangOptions::NonGC)
+ Diag(Loc, diag::warn_objc_property_default_assign_on_object);
+ }
}
// FIXME: Implement warning dependent on NSCopying being
@@ -2780,18 +2817,18 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
// (please trim this list while you are at it).
}
- if (!(Attributes & ObjCDeclSpec::DQ_PR_copy)
- &&!(Attributes & ObjCDeclSpec::DQ_PR_readonly)
- && getLangOpts().getGC() == LangOptions::GCOnly
- && PropertyTy->isBlockPointerType())
+ if (!(Attributes & ObjCPropertyAttribute::kind_copy) &&
+ !(Attributes & ObjCPropertyAttribute::kind_readonly) &&
+ getLangOpts().getGC() == LangOptions::GCOnly &&
+ PropertyTy->isBlockPointerType())
Diag(Loc, diag::warn_objc_property_copy_missing_on_block);
- else if ((Attributes & ObjCDeclSpec::DQ_PR_retain) &&
- !(Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
- !(Attributes & ObjCDeclSpec::DQ_PR_strong) &&
+ else if ((Attributes & ObjCPropertyAttribute::kind_retain) &&
+ !(Attributes & ObjCPropertyAttribute::kind_readonly) &&
+ !(Attributes & ObjCPropertyAttribute::kind_strong) &&
PropertyTy->isBlockPointerType())
- Diag(Loc, diag::warn_objc_property_retain_of_block);
+ Diag(Loc, diag::warn_objc_property_retain_of_block);
- if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
- (Attributes & ObjCDeclSpec::DQ_PR_setter))
+ if ((Attributes & ObjCPropertyAttribute::kind_readonly) &&
+ (Attributes & ObjCPropertyAttribute::kind_setter))
Diag(Loc, diag::warn_objc_readonly_property_has_setter);
}
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index 3fce0e27e9b3..920463da4027 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -18,12 +18,15 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclOpenMP.h"
+#include "clang/AST/OpenMPClause.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/TypeOrdering.h"
+#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
@@ -31,7 +34,10 @@
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/PointerEmbeddedInt.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
+#include <set>
+
using namespace clang;
using namespace llvm::omp;
@@ -47,9 +53,10 @@ static const Expr *checkMapClauseExpressionBase(
namespace {
/// Default data sharing attributes, which can be applied to directive.
enum DefaultDataSharingAttributes {
- DSA_unspecified = 0, /// Data sharing attribute not specified.
- DSA_none = 1 << 0, /// Default data sharing attribute 'none'.
- DSA_shared = 1 << 1, /// Default data sharing attribute 'shared'.
+ DSA_unspecified = 0, /// Data sharing attribute not specified.
+ DSA_none = 1 << 0, /// Default data sharing attribute 'none'.
+ DSA_shared = 1 << 1, /// Default data sharing attribute 'shared'.
+ DSA_firstprivate = 1 << 2, /// Default data sharing attribute 'firstprivate'.
};
/// Stack for tracking declarations used in OpenMP directives and
@@ -59,24 +66,35 @@ public:
struct DSAVarData {
OpenMPDirectiveKind DKind = OMPD_unknown;
OpenMPClauseKind CKind = OMPC_unknown;
+ unsigned Modifier = 0;
const Expr *RefExpr = nullptr;
DeclRefExpr *PrivateCopy = nullptr;
SourceLocation ImplicitDSALoc;
DSAVarData() = default;
DSAVarData(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind,
const Expr *RefExpr, DeclRefExpr *PrivateCopy,
- SourceLocation ImplicitDSALoc)
- : DKind(DKind), CKind(CKind), RefExpr(RefExpr),
+ SourceLocation ImplicitDSALoc, unsigned Modifier)
+ : DKind(DKind), CKind(CKind), Modifier(Modifier), RefExpr(RefExpr),
PrivateCopy(PrivateCopy), ImplicitDSALoc(ImplicitDSALoc) {}
};
using OperatorOffsetTy =
llvm::SmallVector<std::pair<Expr *, OverloadedOperatorKind>, 4>;
using DoacrossDependMapTy =
llvm::DenseMap<OMPDependClause *, OperatorOffsetTy>;
+ /// Kind of the declaration used in the uses_allocators clauses.
+ enum class UsesAllocatorsDeclKind {
+ /// Predefined allocator
+ PredefinedAllocator,
+ /// User-defined allocator
+ UserDefinedAllocator,
+ /// The declaration that represent allocator trait
+ AllocatorTrait,
+ };
private:
struct DSAInfo {
OpenMPClauseKind Attributes = OMPC_unknown;
+ unsigned Modifier = 0;
/// Pointer to a reference expression and a flag which shows that the
/// variable is marked as lastprivate(true) or not (false).
llvm::PointerIntPair<const Expr *, 1, bool> RefExpr;
@@ -151,13 +169,21 @@ private:
bool CancelRegion = false;
bool LoopStart = false;
bool BodyComplete = false;
+ SourceLocation PrevScanLocation;
+ SourceLocation PrevOrderedLocation;
SourceLocation InnerTeamsRegionLoc;
/// Reference to the taskgroup task_reduction reference expression.
Expr *TaskgroupReductionRef = nullptr;
llvm::DenseSet<QualType> MappedClassesQualTypes;
+ SmallVector<Expr *, 4> InnerUsedAllocators;
+ llvm::DenseSet<CanonicalDeclPtr<Decl>> ImplicitTaskFirstprivates;
/// List of globals marked as declare target link in this target region
/// (isOpenMPTargetExecutionDirective(Directive) == true).
llvm::SmallVector<DeclRefExpr *, 4> DeclareTargetLinkVarDecls;
+ /// List of decls used in inclusive/exclusive clauses of the scan directive.
+ llvm::DenseSet<CanonicalDeclPtr<Decl>> UsedInScanDirective;
+ llvm::DenseMap<CanonicalDeclPtr<const Decl>, UsesAllocatorsDeclKind>
+ UsesAllocatorsDecls;
SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
Scope *CurScope, SourceLocation Loc)
: Directive(DKind), DirectiveName(Name), CurScope(CurScope),
@@ -263,11 +289,18 @@ private:
SmallVector<const OMPRequiresDecl *, 2> RequiresDecls;
/// omp_allocator_handle_t type.
QualType OMPAllocatorHandleT;
+ /// omp_depend_t type.
+ QualType OMPDependT;
+ /// omp_event_handle_t type.
+ QualType OMPEventHandleT;
+ /// omp_alloctrait_t type.
+ QualType OMPAlloctraitT;
/// Expression for the predefined allocators.
Expr *OMPPredefinedAllocators[OMPAllocateDeclAttr::OMPUserDefinedMemAlloc] = {
nullptr};
/// Vector of previously encountered target directives
SmallVector<SourceLocation, 2> TargetLocations;
+ SourceLocation AtomicLocation;
public:
explicit DSAStackTy(Sema &S) : SemaRef(S) {}
@@ -276,6 +309,10 @@ public:
void setOMPAllocatorHandleT(QualType Ty) { OMPAllocatorHandleT = Ty; }
/// Gets omp_allocator_handle_t type.
QualType getOMPAllocatorHandleT() const { return OMPAllocatorHandleT; }
+ /// Sets omp_alloctrait_t type.
+ void setOMPAlloctraitT(QualType Ty) { OMPAlloctraitT = Ty; }
+ /// Gets omp_alloctrait_t type.
+ QualType getOMPAlloctraitT() const { return OMPAlloctraitT; }
/// Sets the given default allocator.
void setAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
Expr *Allocator) {
@@ -285,6 +322,15 @@ public:
Expr *getAllocator(OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind) const {
return OMPPredefinedAllocators[AllocatorKind];
}
+ /// Sets omp_depend_t type.
+ void setOMPDependT(QualType Ty) { OMPDependT = Ty; }
+ /// Gets omp_depend_t type.
+ QualType getOMPDependT() const { return OMPDependT; }
+
+ /// Sets omp_event_handle_t type.
+ void setOMPEventHandleT(QualType Ty) { OMPEventHandleT = Ty; }
+ /// Gets omp_event_handle_t type.
+ QualType getOMPEventHandleT() const { return OMPEventHandleT; }
bool isClauseParsingMode() const { return ClauseKindMode != OMPC_unknown; }
OpenMPClauseKind getClauseParsingMode() const {
@@ -439,13 +485,32 @@ public:
/// \return The index of the loop control variable in the list of associated
/// for-loops (from outer to inner).
const LCDeclInfo isParentLoopControlVariable(const ValueDecl *D) const;
+ /// Check if the specified variable is a loop control variable for
+ /// current region.
+ /// \return The index of the loop control variable in the list of associated
+ /// for-loops (from outer to inner).
+ const LCDeclInfo isLoopControlVariable(const ValueDecl *D,
+ unsigned Level) const;
/// Get the loop control variable for the I-th loop (or nullptr) in
/// parent directive.
const ValueDecl *getParentLoopControlVariable(unsigned I) const;
+ /// Marks the specified decl \p D as used in scan directive.
+ void markDeclAsUsedInScanDirective(ValueDecl *D) {
+ if (SharingMapTy *Stack = getSecondOnStackOrNull())
+ Stack->UsedInScanDirective.insert(D);
+ }
+
+ /// Checks if the specified declaration was used in the inner scan directive.
+ bool isUsedInScanDirective(ValueDecl *D) const {
+ if (const SharingMapTy *Stack = getTopOfStackOrNull())
+ return Stack->UsedInScanDirective.count(D) > 0;
+ return false;
+ }
+
/// Adds explicit data sharing attribute to the specified declaration.
void addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
- DeclRefExpr *PrivateCopy = nullptr);
+ DeclRefExpr *PrivateCopy = nullptr, unsigned Modifier = 0);
/// Adds additional information for the reduction items with the reduction id
/// represented as an operator.
@@ -467,11 +532,15 @@ public:
getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
const Expr *&ReductionRef,
Expr *&TaskgroupDescriptor) const;
- /// Return reduction reference expression for the current taskgroup.
+ /// Return reduction reference expression for the current taskgroup or
+ /// parallel/worksharing directives with task reductions.
Expr *getTaskgroupReductionRef() const {
- assert(getTopOfStack().Directive == OMPD_taskgroup &&
- "taskgroup reference expression requested for non taskgroup "
- "directive.");
+ assert((getTopOfStack().Directive == OMPD_taskgroup ||
+ ((isOpenMPParallelDirective(getTopOfStack().Directive) ||
+ isOpenMPWorksharingDirective(getTopOfStack().Directive)) &&
+ !isOpenMPSimdDirective(getTopOfStack().Directive))) &&
+ "taskgroup reference expression requested for non taskgroup or "
+ "parallel/worksharing directive.");
return getTopOfStack().TaskgroupReductionRef;
}
/// Checks if the given \p VD declaration is actually a taskgroup reduction
@@ -487,6 +556,8 @@ public:
const DSAVarData getTopDSA(ValueDecl *D, bool FromParent);
/// Returns data-sharing attributes for the specified declaration.
const DSAVarData getImplicitDSA(ValueDecl *D, bool FromParent) const;
+ /// Returns data-sharing attributes for the specified declaration.
+ const DSAVarData getImplicitDSA(ValueDecl *D, unsigned Level) const;
/// Checks if the specified variables has data-sharing attributes which
/// match specified \a CPred predicate in any directive which matches \a DPred
/// predicate.
@@ -552,7 +623,7 @@ public:
/// Checks if the defined 'requires' directive has specified type of clause.
template <typename ClauseType>
- bool hasRequiresDeclWithClause() {
+ bool hasRequiresDeclWithClause() const {
return llvm::any_of(RequiresDecls, [](const OMPRequiresDecl *D) {
return llvm::any_of(D->clauselists(), [](const OMPClause *C) {
return isa<ClauseType>(C);
@@ -587,6 +658,18 @@ public:
TargetLocations.push_back(LocStart);
}
+  /// Add location for the first encountered atomic directive.
+ void addAtomicDirectiveLoc(SourceLocation Loc) {
+ if (AtomicLocation.isInvalid())
+ AtomicLocation = Loc;
+ }
+
+ /// Returns the location of the first encountered atomic directive in the
+ /// module.
+ SourceLocation getAtomicDirectiveLoc() const {
+ return AtomicLocation;
+ }
+
// Return previously encountered target region locations.
ArrayRef<SourceLocation> getEncounteredTargetLocs() const {
return TargetLocations;
@@ -602,6 +685,11 @@ public:
getTopOfStack().DefaultAttr = DSA_shared;
getTopOfStack().DefaultAttrLoc = Loc;
}
+ /// Set default data sharing attribute to firstprivate.
+ void setDefaultDSAFirstPrivate(SourceLocation Loc) {
+ getTopOfStack().DefaultAttr = DSA_firstprivate;
+ getTopOfStack().DefaultAttrLoc = Loc;
+ }
/// Set default data mapping attribute to Modifier:Kind
void setDefaultDMAAttr(OpenMPDefaultmapClauseModifier M,
OpenMPDefaultmapClauseKind Kind,
@@ -612,10 +700,24 @@ public:
}
/// Check whether the implicit-behavior has been set in defaultmap
bool checkDefaultmapCategory(OpenMPDefaultmapClauseKind VariableCategory) {
+ if (VariableCategory == OMPC_DEFAULTMAP_unknown)
+ return getTopOfStack()
+ .DefaultmapMap[OMPC_DEFAULTMAP_aggregate]
+ .ImplicitBehavior != OMPC_DEFAULTMAP_MODIFIER_unknown ||
+ getTopOfStack()
+ .DefaultmapMap[OMPC_DEFAULTMAP_scalar]
+ .ImplicitBehavior != OMPC_DEFAULTMAP_MODIFIER_unknown ||
+ getTopOfStack()
+ .DefaultmapMap[OMPC_DEFAULTMAP_pointer]
+ .ImplicitBehavior != OMPC_DEFAULTMAP_MODIFIER_unknown;
return getTopOfStack().DefaultmapMap[VariableCategory].ImplicitBehavior !=
OMPC_DEFAULTMAP_MODIFIER_unknown;
}
+ DefaultDataSharingAttributes getDefaultDSA(unsigned Level) const {
+ return getStackSize() <= Level ? DSA_unspecified
+ : getStackElemAtLevel(Level).DefaultAttr;
+ }
DefaultDataSharingAttributes getDefaultDSA() const {
return isStackEmpty() ? DSA_unspecified
: getTopOfStack().DefaultAttr;
@@ -738,6 +840,37 @@ public:
return Top ? Top->CancelRegion : false;
}
+ /// Mark that parent region already has scan directive.
+ void setParentHasScanDirective(SourceLocation Loc) {
+ if (SharingMapTy *Parent = getSecondOnStackOrNull())
+ Parent->PrevScanLocation = Loc;
+ }
+  /// Return true if the parent region already has a scan directive.
+ bool doesParentHasScanDirective() const {
+ const SharingMapTy *Top = getSecondOnStackOrNull();
+ return Top ? Top->PrevScanLocation.isValid() : false;
+ }
+  /// Returns the location of the previously specified scan directive.
+ SourceLocation getParentScanDirectiveLoc() const {
+ const SharingMapTy *Top = getSecondOnStackOrNull();
+ return Top ? Top->PrevScanLocation : SourceLocation();
+ }
+ /// Mark that parent region already has ordered directive.
+ void setParentHasOrderedDirective(SourceLocation Loc) {
+ if (SharingMapTy *Parent = getSecondOnStackOrNull())
+ Parent->PrevOrderedLocation = Loc;
+ }
+  /// Return true if the parent region already has an ordered directive.
+ bool doesParentHasOrderedDirective() const {
+ const SharingMapTy *Top = getSecondOnStackOrNull();
+ return Top ? Top->PrevOrderedLocation.isValid() : false;
+ }
+ /// Returns the location of the previously specified ordered directive.
+ SourceLocation getParentOrderedDirectiveLoc() const {
+ const SharingMapTy *Top = getSecondOnStackOrNull();
+ return Top ? Top->PrevOrderedLocation : SourceLocation();
+ }
+
/// Set collapse value for the region.
void setAssociatedLoops(unsigned Val) {
getTopOfStack().AssociatedLoops = Val;
@@ -899,6 +1032,46 @@ public:
"Expected target executable directive.");
return getTopOfStack().DeclareTargetLinkVarDecls;
}
+
+ /// Adds list of allocators expressions.
+ void addInnerAllocatorExpr(Expr *E) {
+ getTopOfStack().InnerUsedAllocators.push_back(E);
+ }
+ /// Return list of used allocators.
+ ArrayRef<Expr *> getInnerAllocators() const {
+ return getTopOfStack().InnerUsedAllocators;
+ }
+  /// Marks the declaration as implicitly firstprivate in the task-based
+ /// regions.
+ void addImplicitTaskFirstprivate(unsigned Level, Decl *D) {
+ getStackElemAtLevel(Level).ImplicitTaskFirstprivates.insert(D);
+ }
+ /// Checks if the decl is implicitly firstprivate in the task-based region.
+ bool isImplicitTaskFirstprivate(Decl *D) const {
+ return getTopOfStack().ImplicitTaskFirstprivates.count(D) > 0;
+ }
+
+ /// Marks decl as used in uses_allocators clause as the allocator.
+ void addUsesAllocatorsDecl(const Decl *D, UsesAllocatorsDeclKind Kind) {
+ getTopOfStack().UsesAllocatorsDecls.try_emplace(D, Kind);
+ }
+ /// Checks if specified decl is used in uses allocator clause as the
+ /// allocator.
+ Optional<UsesAllocatorsDeclKind> isUsesAllocatorsDecl(unsigned Level,
+ const Decl *D) const {
+ const SharingMapTy &StackElem = getTopOfStack();
+ auto I = StackElem.UsesAllocatorsDecls.find(D);
+ if (I == StackElem.UsesAllocatorsDecls.end())
+ return None;
+ return I->getSecond();
+ }
+ Optional<UsesAllocatorsDeclKind> isUsesAllocatorsDecl(const Decl *D) const {
+ const SharingMapTy &StackElem = getTopOfStack();
+ auto I = StackElem.UsesAllocatorsDecls.find(D);
+ if (I == StackElem.UsesAllocatorsDecls.end())
+ return None;
+ return I->getSecond();
+ }
};
bool isImplicitTaskingRegion(OpenMPDirectiveKind DKind) {
@@ -1001,6 +1174,7 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(const_iterator &Iter,
DVar.PrivateCopy = Data.PrivateCopy;
DVar.CKind = Data.Attributes;
DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
+ DVar.Modifier = Data.Modifier;
return DVar;
}
@@ -1015,6 +1189,15 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(const_iterator &Iter,
return DVar;
case DSA_none:
return DVar;
+ case DSA_firstprivate:
+ if (VD->getStorageDuration() == SD_Static &&
+ VD->getDeclContext()->isFileContext()) {
+ DVar.CKind = OMPC_unknown;
+ } else {
+ DVar.CKind = OMPC_firstprivate;
+ }
+ DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
+ return DVar;
case DSA_unspecified:
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, implicitly determined, p.2]
@@ -1113,6 +1296,19 @@ DSAStackTy::isLoopControlVariable(const ValueDecl *D) const {
}
const DSAStackTy::LCDeclInfo
+DSAStackTy::isLoopControlVariable(const ValueDecl *D, unsigned Level) const {
+ assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
+ D = getCanonicalDecl(D);
+ for (unsigned I = Level + 1; I > 0; --I) {
+ const SharingMapTy &StackElem = getStackElemAtLevel(I - 1);
+ auto It = StackElem.LCVMap.find(D);
+ if (It != StackElem.LCVMap.end())
+ return It->second;
+ }
+ return {0, nullptr};
+}
+
+const DSAStackTy::LCDeclInfo
DSAStackTy::isParentLoopControlVariable(const ValueDecl *D) const {
const SharingMapTy *Parent = getSecondOnStackOrNull();
assert(Parent && "Data-sharing attributes stack is empty");
@@ -1135,19 +1331,21 @@ const ValueDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) const {
}
void DSAStackTy::addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
- DeclRefExpr *PrivateCopy) {
+ DeclRefExpr *PrivateCopy, unsigned Modifier) {
D = getCanonicalDecl(D);
if (A == OMPC_threadprivate) {
DSAInfo &Data = Threadprivates[D];
Data.Attributes = A;
Data.RefExpr.setPointer(E);
Data.PrivateCopy = nullptr;
+ Data.Modifier = Modifier;
} else {
DSAInfo &Data = getTopOfStack().SharingMap[D];
assert(Data.Attributes == OMPC_unknown || (A == Data.Attributes) ||
(A == OMPC_firstprivate && Data.Attributes == OMPC_lastprivate) ||
(A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) ||
(isLoopControlVariable(D).first && A == OMPC_private));
+ Data.Modifier = Modifier;
if (A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) {
Data.RefExpr.setInt(/*IntVal=*/true);
return;
@@ -1159,6 +1357,7 @@ void DSAStackTy::addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
Data.PrivateCopy = PrivateCopy;
if (PrivateCopy) {
DSAInfo &Data = getTopOfStack().SharingMap[PrivateCopy->getDecl()];
+ Data.Modifier = Modifier;
Data.Attributes = A;
Data.RefExpr.setPointerAndInt(PrivateCopy, IsLastprivate);
Data.PrivateCopy = nullptr;
@@ -1207,7 +1406,10 @@ void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
"Additional reduction info may be specified only for reduction items.");
ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
assert(ReductionData.ReductionRange.isInvalid() &&
- getTopOfStack().Directive == OMPD_taskgroup &&
+ (getTopOfStack().Directive == OMPD_taskgroup ||
+ ((isOpenMPParallelDirective(getTopOfStack().Directive) ||
+ isOpenMPWorksharingDirective(getTopOfStack().Directive)) &&
+ !isOpenMPSimdDirective(getTopOfStack().Directive))) &&
"Additional reduction info may be specified only once for reduction "
"items.");
ReductionData.set(BOK, SR);
@@ -1230,7 +1432,10 @@ void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
"Additional reduction info may be specified only for reduction items.");
ReductionData &ReductionData = getTopOfStack().ReductionMap[D];
assert(ReductionData.ReductionRange.isInvalid() &&
- getTopOfStack().Directive == OMPD_taskgroup &&
+ (getTopOfStack().Directive == OMPD_taskgroup ||
+ ((isOpenMPParallelDirective(getTopOfStack().Directive) ||
+ isOpenMPWorksharingDirective(getTopOfStack().Directive)) &&
+ !isOpenMPSimdDirective(getTopOfStack().Directive))) &&
"Additional reduction info may be specified only once for reduction "
"items.");
ReductionData.set(ReductionRef, SR);
@@ -1251,7 +1456,8 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
const DSAInfo &Data = I->SharingMap.lookup(D);
- if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
+ if (Data.Attributes != OMPC_reduction ||
+ Data.Modifier != OMPC_REDUCTION_task)
continue;
const ReductionData &ReductionData = I->ReductionMap.lookup(D);
if (!ReductionData.ReductionOp ||
@@ -1263,8 +1469,8 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
"expression for the descriptor is not "
"set.");
TaskgroupDescriptor = I->TaskgroupReductionRef;
- return DSAVarData(OMPD_taskgroup, OMPC_reduction, Data.RefExpr.getPointer(),
- Data.PrivateCopy, I->DefaultAttrLoc);
+ return DSAVarData(I->Directive, OMPC_reduction, Data.RefExpr.getPointer(),
+ Data.PrivateCopy, I->DefaultAttrLoc, OMPC_REDUCTION_task);
}
return DSAVarData();
}
@@ -1276,7 +1482,8 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
for (const_iterator I = begin() + 1, E = end(); I != E; ++I) {
const DSAInfo &Data = I->SharingMap.lookup(D);
- if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
+ if (Data.Attributes != OMPC_reduction ||
+ Data.Modifier != OMPC_REDUCTION_task)
continue;
const ReductionData &ReductionData = I->ReductionMap.lookup(D);
if (!ReductionData.ReductionOp ||
@@ -1288,8 +1495,8 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
"expression for the descriptor is not "
"set.");
TaskgroupDescriptor = I->TaskgroupReductionRef;
- return DSAVarData(OMPD_taskgroup, OMPC_reduction, Data.RefExpr.getPointer(),
- Data.PrivateCopy, I->DefaultAttrLoc);
+ return DSAVarData(I->Directive, OMPC_reduction, Data.RefExpr.getPointer(),
+ Data.PrivateCopy, I->DefaultAttrLoc, OMPC_REDUCTION_task);
}
return DSAVarData();
}
@@ -1364,6 +1571,7 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
if (TI != Threadprivates.end()) {
DVar.RefExpr = TI->getSecond().RefExpr.getPointer();
DVar.CKind = OMPC_threadprivate;
+ DVar.Modifier = TI->getSecond().Modifier;
return DVar;
}
if (VD && VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
@@ -1447,15 +1655,18 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
const_iterator EndI = end();
if (FromParent && I != EndI)
++I;
- auto It = I->SharingMap.find(D);
- if (It != I->SharingMap.end()) {
- const DSAInfo &Data = It->getSecond();
- DVar.RefExpr = Data.RefExpr.getPointer();
- DVar.PrivateCopy = Data.PrivateCopy;
- DVar.CKind = Data.Attributes;
- DVar.ImplicitDSALoc = I->DefaultAttrLoc;
- DVar.DKind = I->Directive;
- return DVar;
+ if (I != EndI) {
+ auto It = I->SharingMap.find(D);
+ if (It != I->SharingMap.end()) {
+ const DSAInfo &Data = It->getSecond();
+ DVar.RefExpr = Data.RefExpr.getPointer();
+ DVar.PrivateCopy = Data.PrivateCopy;
+ DVar.CKind = Data.Attributes;
+ DVar.ImplicitDSALoc = I->DefaultAttrLoc;
+ DVar.DKind = I->Directive;
+ DVar.Modifier = Data.Modifier;
+ return DVar;
+ }
}
DVar.CKind = OMPC_shared;
@@ -1493,6 +1704,8 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
const_iterator EndI = end();
if (FromParent && I != EndI)
++I;
+ if (I == EndI)
+ return DVar;
auto It = I->SharingMap.find(D);
if (It != I->SharingMap.end()) {
const DSAInfo &Data = It->getSecond();
@@ -1501,6 +1714,7 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
DVar.CKind = Data.Attributes;
DVar.ImplicitDSALoc = I->DefaultAttrLoc;
DVar.DKind = I->Directive;
+ DVar.Modifier = Data.Modifier;
}
return DVar;
@@ -1520,6 +1734,15 @@ const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
return getDSA(StartI, D);
}
+const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
+ unsigned Level) const {
+ if (getStackSize() <= Level)
+ return DSAVarData();
+ D = getCanonicalDecl(D);
+ const_iterator StartI = std::next(begin(), getStackSize() - 1 - Level);
+ return getDSA(StartI, D);
+}
+
const DSAStackTy::DSAVarData
DSAStackTy::hasDSA(ValueDecl *D,
const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
@@ -1640,23 +1863,28 @@ Sema::DeviceDiagBuilder Sema::diagIfOpenMPDeviceCode(SourceLocation Loc,
unsigned DiagID) {
assert(LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
"Expected OpenMP device compilation.");
- FunctionEmissionStatus FES = getEmissionStatus(getCurFunctionDecl());
+
+ FunctionDecl *FD = getCurFunctionDecl();
DeviceDiagBuilder::Kind Kind = DeviceDiagBuilder::K_Nop;
- switch (FES) {
- case FunctionEmissionStatus::Emitted:
- Kind = DeviceDiagBuilder::K_Immediate;
- break;
- case FunctionEmissionStatus::Unknown:
- Kind = isOpenMPDeviceDelayedContext(*this) ? DeviceDiagBuilder::K_Deferred
- : DeviceDiagBuilder::K_Immediate;
- break;
- case FunctionEmissionStatus::TemplateDiscarded:
- case FunctionEmissionStatus::OMPDiscarded:
- Kind = DeviceDiagBuilder::K_Nop;
- break;
- case FunctionEmissionStatus::CUDADiscarded:
- llvm_unreachable("CUDADiscarded unexpected in OpenMP device compilation");
- break;
+ if (FD) {
+ FunctionEmissionStatus FES = getEmissionStatus(FD);
+ switch (FES) {
+ case FunctionEmissionStatus::Emitted:
+ Kind = DeviceDiagBuilder::K_Immediate;
+ break;
+ case FunctionEmissionStatus::Unknown:
+ Kind = isOpenMPDeviceDelayedContext(*this)
+ ? DeviceDiagBuilder::K_Deferred
+ : DeviceDiagBuilder::K_Immediate;
+ break;
+ case FunctionEmissionStatus::TemplateDiscarded:
+ case FunctionEmissionStatus::OMPDiscarded:
+ Kind = DeviceDiagBuilder::K_Nop;
+ break;
+ case FunctionEmissionStatus::CUDADiscarded:
+ llvm_unreachable("CUDADiscarded unexpected in OpenMP device compilation");
+ break;
+ }
}
return DeviceDiagBuilder(Kind, Loc, DiagID, getCurFunctionDecl(), *this);
@@ -1685,107 +1913,6 @@ Sema::DeviceDiagBuilder Sema::diagIfOpenMPHostCode(SourceLocation Loc,
return DeviceDiagBuilder(Kind, Loc, DiagID, getCurFunctionDecl(), *this);
}
-void Sema::checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee,
- bool CheckForDelayedContext) {
- assert(LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
- "Expected OpenMP device compilation.");
- assert(Callee && "Callee may not be null.");
- Callee = Callee->getMostRecentDecl();
- FunctionDecl *Caller = getCurFunctionDecl();
-
- // host only function are not available on the device.
- if (Caller) {
- FunctionEmissionStatus CallerS = getEmissionStatus(Caller);
- FunctionEmissionStatus CalleeS = getEmissionStatus(Callee);
- assert(CallerS != FunctionEmissionStatus::CUDADiscarded &&
- CalleeS != FunctionEmissionStatus::CUDADiscarded &&
- "CUDADiscarded unexpected in OpenMP device function check");
- if ((CallerS == FunctionEmissionStatus::Emitted ||
- (!isOpenMPDeviceDelayedContext(*this) &&
- CallerS == FunctionEmissionStatus::Unknown)) &&
- CalleeS == FunctionEmissionStatus::OMPDiscarded) {
- StringRef HostDevTy = getOpenMPSimpleClauseTypeName(
- OMPC_device_type, OMPC_DEVICE_TYPE_host);
- Diag(Loc, diag::err_omp_wrong_device_function_call) << HostDevTy << 0;
- Diag(Callee->getAttr<OMPDeclareTargetDeclAttr>()->getLocation(),
- diag::note_omp_marked_device_type_here)
- << HostDevTy;
- return;
- }
- }
- // If the caller is known-emitted, mark the callee as known-emitted.
- // Otherwise, mark the call in our call graph so we can traverse it later.
- if ((CheckForDelayedContext && !isOpenMPDeviceDelayedContext(*this)) ||
- (!Caller && !CheckForDelayedContext) ||
- (Caller && getEmissionStatus(Caller) == FunctionEmissionStatus::Emitted))
- markKnownEmitted(*this, Caller, Callee, Loc,
- [CheckForDelayedContext](Sema &S, FunctionDecl *FD) {
- return CheckForDelayedContext &&
- S.getEmissionStatus(FD) ==
- FunctionEmissionStatus::Emitted;
- });
- else if (Caller)
- DeviceCallGraph[Caller].insert({Callee, Loc});
-}
-
-void Sema::checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee,
- bool CheckCaller) {
- assert(LangOpts.OpenMP && !LangOpts.OpenMPIsDevice &&
- "Expected OpenMP host compilation.");
- assert(Callee && "Callee may not be null.");
- Callee = Callee->getMostRecentDecl();
- FunctionDecl *Caller = getCurFunctionDecl();
-
- // device only function are not available on the host.
- if (Caller) {
- FunctionEmissionStatus CallerS = getEmissionStatus(Caller);
- FunctionEmissionStatus CalleeS = getEmissionStatus(Callee);
- assert(
- (LangOpts.CUDA || (CallerS != FunctionEmissionStatus::CUDADiscarded &&
- CalleeS != FunctionEmissionStatus::CUDADiscarded)) &&
- "CUDADiscarded unexpected in OpenMP host function check");
- if (CallerS == FunctionEmissionStatus::Emitted &&
- CalleeS == FunctionEmissionStatus::OMPDiscarded) {
- StringRef NoHostDevTy = getOpenMPSimpleClauseTypeName(
- OMPC_device_type, OMPC_DEVICE_TYPE_nohost);
- Diag(Loc, diag::err_omp_wrong_device_function_call) << NoHostDevTy << 1;
- Diag(Callee->getAttr<OMPDeclareTargetDeclAttr>()->getLocation(),
- diag::note_omp_marked_device_type_here)
- << NoHostDevTy;
- return;
- }
- }
- // If the caller is known-emitted, mark the callee as known-emitted.
- // Otherwise, mark the call in our call graph so we can traverse it later.
- if (!shouldIgnoreInHostDeviceCheck(Callee)) {
- if ((!CheckCaller && !Caller) ||
- (Caller &&
- getEmissionStatus(Caller) == FunctionEmissionStatus::Emitted))
- markKnownEmitted(
- *this, Caller, Callee, Loc, [CheckCaller](Sema &S, FunctionDecl *FD) {
- return CheckCaller &&
- S.getEmissionStatus(FD) == FunctionEmissionStatus::Emitted;
- });
- else if (Caller)
- DeviceCallGraph[Caller].insert({Callee, Loc});
- }
-}
-
-void Sema::checkOpenMPDeviceExpr(const Expr *E) {
- assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
- "OpenMP device compilation mode is expected.");
- QualType Ty = E->getType();
- if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
- ((Ty->isFloat128Type() ||
- (Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128)) &&
- !Context.getTargetInfo().hasFloat128Type()) ||
- (Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 &&
- !Context.getTargetInfo().hasInt128Type()))
- targetDiag(E->getExprLoc(), diag::err_omp_unsupported_type)
- << static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty
- << Context.getTargetInfo().getTriple().str() << E->getSourceRange();
-}
-
static OpenMPDefaultmapClauseKind
getVariableCategoryFromDecl(const LangOptions &LO, const ValueDecl *VD) {
if (LO.OpenMP <= 45) {
@@ -1901,7 +2028,8 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
if (isa<ArraySubscriptExpr>(EI->getAssociatedExpression()) ||
isa<OMPArraySectionExpr>(EI->getAssociatedExpression()) ||
- isa<MemberExpr>(EI->getAssociatedExpression())) {
+ isa<MemberExpr>(EI->getAssociatedExpression()) ||
+ isa<OMPArrayShapingExpr>(EI->getAssociatedExpression())) {
IsVariableAssociatedWithSection = true;
// There is nothing more we need to know about this variable.
return true;
@@ -1935,14 +2063,23 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
((IsVariableUsedInMapClause &&
DSAStack->getCaptureRegion(Level, OpenMPCaptureLevel) ==
OMPD_target) ||
- !DSAStack->hasExplicitDSA(
- D,
- [](OpenMPClauseKind K) -> bool { return K == OMPC_firstprivate; },
- Level, /*NotLastprivate=*/true)) &&
+ !(DSAStack->hasExplicitDSA(
+ D,
+ [](OpenMPClauseKind K) -> bool {
+ return K == OMPC_firstprivate;
+ },
+ Level, /*NotLastprivate=*/true) ||
+ DSAStack->isUsesAllocatorsDecl(Level, D))) &&
// If the variable is artificial and must be captured by value - try to
// capture by value.
!(isa<OMPCapturedExprDecl>(D) && !D->hasAttr<OMPCaptureNoInitAttr>() &&
- !cast<OMPCapturedExprDecl>(D)->getInit()->isGLValue());
+ !cast<OMPCapturedExprDecl>(D)->getInit()->isGLValue()) &&
+ // If the variable is implicitly firstprivate and scalar - capture by
+ // copy
+ !(DSAStack->getDefaultDSA() == DSA_firstprivate &&
+ !DSAStack->hasExplicitDSA(
+ D, [](OpenMPClauseKind K) { return K != OMPC_unknown; }, Level) &&
+ !DSAStack->isLoopControlVariable(D, Level).first);
}
// When passing data by copy, we need to make sure it fits the uintptr size
@@ -2010,7 +2147,23 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
//
if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
return nullptr;
- return VD;
+ CapturedRegionScopeInfo *CSI = nullptr;
+ for (FunctionScopeInfo *FSI : llvm::drop_begin(
+ llvm::reverse(FunctionScopes),
+ CheckScopeInfo ? (FunctionScopes.size() - (StopAt + 1)) : 0)) {
+ if (!isa<CapturingScopeInfo>(FSI))
+ return nullptr;
+ if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(FSI))
+ if (RSI->CapRegionKind == CR_OpenMP) {
+ CSI = RSI;
+ break;
+ }
+ }
+ SmallVector<OpenMPDirectiveKind, 4> Regions;
+ getOpenMPCaptureRegions(Regions,
+ DSAStack->getDirective(CSI->OpenMPLevel));
+ if (Regions[CSI->OpenMPCaptureLevel] != OMPD_task)
+ return VD;
}
}
@@ -2039,20 +2192,27 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
isImplicitOrExplicitTaskingRegion(DSAStack->getCurrentDirective())) ||
(VD && DSAStack->isForceVarCapturing()))
return VD ? VD : Info.second;
- DSAStackTy::DSAVarData DVarPrivate =
+ DSAStackTy::DSAVarData DVarTop =
DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
- if (DVarPrivate.CKind != OMPC_unknown && isOpenMPPrivate(DVarPrivate.CKind))
- return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
+ if (DVarTop.CKind != OMPC_unknown && isOpenMPPrivate(DVarTop.CKind))
+ return VD ? VD : cast<VarDecl>(DVarTop.PrivateCopy->getDecl());
// Threadprivate variables must not be captured.
- if (isOpenMPThreadPrivate(DVarPrivate.CKind))
+ if (isOpenMPThreadPrivate(DVarTop.CKind))
return nullptr;
// The variable is not private or it is the variable in the directive with
// default(none) clause and not used in any clause.
- DVarPrivate = DSAStack->hasDSA(D, isOpenMPPrivate,
- [](OpenMPDirectiveKind) { return true; },
- DSAStack->isClauseParsingMode());
+ DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
+ D, isOpenMPPrivate, [](OpenMPDirectiveKind) { return true; },
+ DSAStack->isClauseParsingMode());
+ // Global shared must not be captured.
+ if (VD && !VD->hasLocalStorage() && DVarPrivate.CKind == OMPC_unknown &&
+ ((DSAStack->getDefaultDSA() != DSA_none &&
+ DSAStack->getDefaultDSA() != DSA_firstprivate) ||
+ DVarTop.CKind == OMPC_shared))
+ return nullptr;
if (DVarPrivate.CKind != OMPC_unknown ||
- (VD && DSAStack->getDefaultDSA() == DSA_none))
+ (VD && (DSAStack->getDefaultDSA() == DSA_none ||
+ DSAStack->getDefaultDSA() == DSA_firstprivate)))
return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
}
return nullptr;
@@ -2060,9 +2220,7 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
void Sema::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const {
- SmallVector<OpenMPDirectiveKind, 4> Regions;
- getOpenMPCaptureRegions(Regions, DSAStack->getDirective(Level));
- FunctionScopesIndex -= Regions.size();
+ FunctionScopesIndex -= getOpenMPCaptureLevels(DSAStack->getDirective(Level));
}
void Sema::startOpenMPLoop() {
@@ -2079,39 +2237,77 @@ void Sema::startOpenMPCXXRangeFor() {
}
}
-bool Sema::isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const {
+OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
+ unsigned CapLevel) const {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
+ if (DSAStack->hasExplicitDirective(
+ [](OpenMPDirectiveKind K) { return isOpenMPTaskingDirective(K); },
+ Level)) {
+ bool IsTriviallyCopyable =
+ D->getType().getNonReferenceType().isTriviallyCopyableType(Context);
+ OpenMPDirectiveKind DKind = DSAStack->getDirective(Level);
+ SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
+ getOpenMPCaptureRegions(CaptureRegions, DKind);
+ if (isOpenMPTaskingDirective(CaptureRegions[CapLevel]) &&
+ (IsTriviallyCopyable ||
+ !isOpenMPTaskLoopDirective(CaptureRegions[CapLevel]))) {
+ if (DSAStack->hasExplicitDSA(
+ D, [](OpenMPClauseKind K) { return K == OMPC_firstprivate; },
+ Level, /*NotLastprivate=*/true))
+ return OMPC_firstprivate;
+ DSAStackTy::DSAVarData DVar = DSAStack->getImplicitDSA(D, Level);
+ if (DVar.CKind != OMPC_shared &&
+ !DSAStack->isLoopControlVariable(D, Level).first && !DVar.RefExpr) {
+ DSAStack->addImplicitTaskFirstprivate(Level, D);
+ return OMPC_firstprivate;
+ }
+ }
+ }
if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
if (DSAStack->getAssociatedLoops() > 0 &&
!DSAStack->isLoopStarted()) {
DSAStack->resetPossibleLoopCounter(D);
DSAStack->loopStart();
- return true;
+ return OMPC_private;
}
if ((DSAStack->getPossiblyLoopCunter() == D->getCanonicalDecl() ||
DSAStack->isLoopControlVariable(D).first) &&
!DSAStack->hasExplicitDSA(
D, [](OpenMPClauseKind K) { return K != OMPC_private; }, Level) &&
!isOpenMPSimdDirective(DSAStack->getCurrentDirective()))
- return true;
+ return OMPC_private;
}
if (const auto *VD = dyn_cast<VarDecl>(D)) {
if (DSAStack->isThreadPrivate(const_cast<VarDecl *>(VD)) &&
DSAStack->isForceVarCapturing() &&
!DSAStack->hasExplicitDSA(
D, [](OpenMPClauseKind K) { return K == OMPC_copyin; }, Level))
- return true;
- }
- return DSAStack->hasExplicitDSA(
- D, [](OpenMPClauseKind K) { return K == OMPC_private; }, Level) ||
- (DSAStack->isClauseParsingMode() &&
- DSAStack->getClauseParsingMode() == OMPC_private) ||
- // Consider taskgroup reduction descriptor variable a private to avoid
- // possible capture in the region.
- (DSAStack->hasExplicitDirective(
- [](OpenMPDirectiveKind K) { return K == OMPD_taskgroup; },
- Level) &&
- DSAStack->isTaskgroupReductionRef(D, Level));
+ return OMPC_private;
+ }
+ // User-defined allocators are private since they must be defined in the
+ // context of target region.
+ if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level) &&
+ DSAStack->isUsesAllocatorsDecl(Level, D).getValueOr(
+ DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait) ==
+ DSAStackTy::UsesAllocatorsDeclKind::UserDefinedAllocator)
+ return OMPC_private;
+ return (DSAStack->hasExplicitDSA(
+ D, [](OpenMPClauseKind K) { return K == OMPC_private; }, Level) ||
+ (DSAStack->isClauseParsingMode() &&
+ DSAStack->getClauseParsingMode() == OMPC_private) ||
+ // Consider taskgroup reduction descriptor variable a private
+ // to avoid possible capture in the region.
+ (DSAStack->hasExplicitDirective(
+ [](OpenMPDirectiveKind K) {
+ return K == OMPD_taskgroup ||
+ ((isOpenMPParallelDirective(K) ||
+ isOpenMPWorksharingDirective(K)) &&
+ !isOpenMPSimdDirective(K));
+ },
+ Level) &&
+ DSAStack->isTaskgroupReductionRef(D, Level)))
+ ? OMPC_private
+ : OMPC_unknown;
}
void Sema::setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D,
@@ -2148,68 +2344,101 @@ void Sema::setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D,
}
}
if (OMPC != OMPC_unknown)
- FD->addAttr(OMPCaptureKindAttr::CreateImplicit(Context, OMPC));
+ FD->addAttr(OMPCaptureKindAttr::CreateImplicit(Context, unsigned(OMPC)));
}
-bool Sema::isOpenMPTargetCapturedDecl(const ValueDecl *D,
- unsigned Level) const {
+bool Sema::isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
+ unsigned CaptureLevel) const {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
// Return true if the current level is no longer enclosed in a target region.
+ SmallVector<OpenMPDirectiveKind, 4> Regions;
+ getOpenMPCaptureRegions(Regions, DSAStack->getDirective(Level));
const auto *VD = dyn_cast<VarDecl>(D);
return VD && !VD->hasLocalStorage() &&
DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
- Level);
+ Level) &&
+ Regions[CaptureLevel] != OMPD_task;
+}
+
+bool Sema::isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
+ unsigned CaptureLevel) const {
+ assert(LangOpts.OpenMP && "OpenMP is not allowed");
+ // Return true if the current level is no longer enclosed in a target region.
+
+ if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ if (!VD->hasLocalStorage()) {
+ DSAStackTy::DSAVarData TopDVar =
+ DSAStack->getTopDSA(D, /*FromParent=*/false);
+ unsigned NumLevels =
+ getOpenMPCaptureLevels(DSAStack->getDirective(Level));
+ if (Level == 0)
+ return (NumLevels == CaptureLevel + 1) && TopDVar.CKind != OMPC_shared;
+ DSAStackTy::DSAVarData DVar = DSAStack->getImplicitDSA(D, Level - 1);
+ return DVar.CKind != OMPC_shared ||
+ isOpenMPGlobalCapturedDecl(
+ D, Level - 1,
+ getOpenMPCaptureLevels(DSAStack->getDirective(Level - 1)) - 1);
+ }
+ }
+ return true;
}
void Sema::DestroyDataSharingAttributesStack() { delete DSAStack; }
-void Sema::finalizeOpenMPDelayedAnalysis() {
+void Sema::ActOnOpenMPBeginDeclareVariant(SourceLocation Loc,
+ OMPTraitInfo &TI) {
+ if (!OMPDeclareVariantScopes.empty()) {
+ Diag(Loc, diag::warn_nested_declare_variant);
+ return;
+ }
+ OMPDeclareVariantScopes.push_back(OMPDeclareVariantScope(TI));
+}
+
+void Sema::ActOnOpenMPEndDeclareVariant() {
+ assert(isInOpenMPDeclareVariantScope() &&
+ "Not in OpenMP declare variant scope!");
+
+ OMPDeclareVariantScopes.pop_back();
+}
+
+void Sema::finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
+ const FunctionDecl *Callee,
+ SourceLocation Loc) {
assert(LangOpts.OpenMP && "Expected OpenMP compilation mode.");
- // Diagnose implicit declare target functions and their callees.
- for (const auto &CallerCallees : DeviceCallGraph) {
- Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
- OMPDeclareTargetDeclAttr::getDeviceType(
- CallerCallees.getFirst()->getMostRecentDecl());
- // Ignore host functions during device analyzis.
- if (LangOpts.OpenMPIsDevice && DevTy &&
- *DevTy == OMPDeclareTargetDeclAttr::DT_Host)
- continue;
- // Ignore nohost functions during host analyzis.
- if (!LangOpts.OpenMPIsDevice && DevTy &&
- *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
- continue;
- for (const std::pair<CanonicalDeclPtr<FunctionDecl>, SourceLocation>
- &Callee : CallerCallees.getSecond()) {
- const FunctionDecl *FD = Callee.first->getMostRecentDecl();
- Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
- OMPDeclareTargetDeclAttr::getDeviceType(FD);
- if (LangOpts.OpenMPIsDevice && DevTy &&
- *DevTy == OMPDeclareTargetDeclAttr::DT_Host) {
- // Diagnose host function called during device codegen.
- StringRef HostDevTy = getOpenMPSimpleClauseTypeName(
- OMPC_device_type, OMPC_DEVICE_TYPE_host);
- Diag(Callee.second, diag::err_omp_wrong_device_function_call)
- << HostDevTy << 0;
- Diag(FD->getAttr<OMPDeclareTargetDeclAttr>()->getLocation(),
- diag::note_omp_marked_device_type_here)
- << HostDevTy;
- continue;
- }
+ Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
+ OMPDeclareTargetDeclAttr::getDeviceType(Caller->getMostRecentDecl());
+ // Ignore host functions during device analyzis.
+ if (LangOpts.OpenMPIsDevice && DevTy &&
+ *DevTy == OMPDeclareTargetDeclAttr::DT_Host)
+ return;
+ // Ignore nohost functions during host analyzis.
+ if (!LangOpts.OpenMPIsDevice && DevTy &&
+ *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
+ return;
+ const FunctionDecl *FD = Callee->getMostRecentDecl();
+ DevTy = OMPDeclareTargetDeclAttr::getDeviceType(FD);
+ if (LangOpts.OpenMPIsDevice && DevTy &&
+ *DevTy == OMPDeclareTargetDeclAttr::DT_Host) {
+ // Diagnose host function called during device codegen.
+ StringRef HostDevTy =
+ getOpenMPSimpleClauseTypeName(OMPC_device_type, OMPC_DEVICE_TYPE_host);
+ Diag(Loc, diag::err_omp_wrong_device_function_call) << HostDevTy << 0;
+ Diag(FD->getAttr<OMPDeclareTargetDeclAttr>()->getLocation(),
+ diag::note_omp_marked_device_type_here)
+ << HostDevTy;
+ return;
+ }
if (!LangOpts.OpenMPIsDevice && DevTy &&
*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) {
// Diagnose nohost function called during host codegen.
StringRef NoHostDevTy = getOpenMPSimpleClauseTypeName(
OMPC_device_type, OMPC_DEVICE_TYPE_nohost);
- Diag(Callee.second, diag::err_omp_wrong_device_function_call)
- << NoHostDevTy << 1;
+ Diag(Loc, diag::err_omp_wrong_device_function_call) << NoHostDevTy << 1;
Diag(FD->getAttr<OMPDeclareTargetDeclAttr>()->getLocation(),
diag::note_omp_marked_device_type_here)
<< NoHostDevTy;
- continue;
}
- }
- }
}
void Sema::StartOpenMPDSABlock(OpenMPDirectiveKind DKind,
@@ -2228,14 +2457,86 @@ void Sema::EndOpenMPClause() {
DSAStack->setClauseParsingMode(/*K=*/OMPC_unknown);
}
-static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
- ArrayRef<OMPClause *> Clauses);
static std::pair<ValueDecl *, bool>
getPrivateItem(Sema &S, Expr *&RefExpr, SourceLocation &ELoc,
SourceRange &ERange, bool AllowArraySection = false);
+
+/// Check consistency of the reduction clauses.
+static void checkReductionClauses(Sema &S, DSAStackTy *Stack,
+ ArrayRef<OMPClause *> Clauses) {
+ bool InscanFound = false;
+ SourceLocation InscanLoc;
+ // OpenMP 5.0, 2.19.5.4 reduction Clause, Restrictions.
+ // A reduction clause without the inscan reduction-modifier may not appear on
+ // a construct on which a reduction clause with the inscan reduction-modifier
+ // appears.
+ for (OMPClause *C : Clauses) {
+ if (C->getClauseKind() != OMPC_reduction)
+ continue;
+ auto *RC = cast<OMPReductionClause>(C);
+ if (RC->getModifier() == OMPC_REDUCTION_inscan) {
+ InscanFound = true;
+ InscanLoc = RC->getModifierLoc();
+ continue;
+ }
+ if (RC->getModifier() == OMPC_REDUCTION_task) {
+ // OpenMP 5.0, 2.19.5.4 reduction Clause.
+ // A reduction clause with the task reduction-modifier may only appear on
+ // a parallel construct, a worksharing construct or a combined or
+ // composite construct for which any of the aforementioned constructs is a
+ // constituent construct and simd or loop are not constituent constructs.
+ OpenMPDirectiveKind CurDir = Stack->getCurrentDirective();
+ if (!(isOpenMPParallelDirective(CurDir) ||
+ isOpenMPWorksharingDirective(CurDir)) ||
+ isOpenMPSimdDirective(CurDir))
+ S.Diag(RC->getModifierLoc(),
+ diag::err_omp_reduction_task_not_parallel_or_worksharing);
+ continue;
+ }
+ }
+ if (InscanFound) {
+ for (OMPClause *C : Clauses) {
+ if (C->getClauseKind() != OMPC_reduction)
+ continue;
+ auto *RC = cast<OMPReductionClause>(C);
+ if (RC->getModifier() != OMPC_REDUCTION_inscan) {
+ S.Diag(RC->getModifier() == OMPC_REDUCTION_unknown
+ ? RC->getBeginLoc()
+ : RC->getModifierLoc(),
+ diag::err_omp_inscan_reduction_expected);
+ S.Diag(InscanLoc, diag::note_omp_previous_inscan_reduction);
+ continue;
+ }
+ for (Expr *Ref : RC->varlists()) {
+ assert(Ref && "NULL expr in OpenMP nontemporal clause.");
+ SourceLocation ELoc;
+ SourceRange ERange;
+ Expr *SimpleRefExpr = Ref;
+ auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange,
+ /*AllowArraySection=*/true);
+ ValueDecl *D = Res.first;
+ if (!D)
+ continue;
+ if (!Stack->isUsedInScanDirective(getCanonicalDecl(D))) {
+ S.Diag(Ref->getExprLoc(),
+ diag::err_omp_reduction_not_inclusive_exclusive)
+ << Ref->getSourceRange();
+ }
+ }
+ }
+ }
+}
+
+static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
+ ArrayRef<OMPClause *> Clauses);
static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
bool WithInit);
+static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
+ const ValueDecl *D,
+ const DSAStackTy::DSAVarData &DVar,
+ bool IsLoopIterVar = false);
+
void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
// OpenMP [2.14.3.5, Restrictions, C/C++, p.1]
// A variable of class type (or array thereof) that appears in a lastprivate
@@ -2305,10 +2606,56 @@ void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
Clause->setPrivateRefs(PrivateRefs);
continue;
}
+ if (auto *Clause = dyn_cast<OMPUsesAllocatorsClause>(C)) {
+ for (unsigned I = 0, E = Clause->getNumberOfAllocators(); I < E; ++I) {
+ OMPUsesAllocatorsClause::Data D = Clause->getAllocatorData(I);
+ auto *DRE = dyn_cast<DeclRefExpr>(D.Allocator->IgnoreParenImpCasts());
+ if (!DRE)
+ continue;
+ ValueDecl *VD = DRE->getDecl();
+ if (!VD || !isa<VarDecl>(VD))
+ continue;
+ DSAStackTy::DSAVarData DVar =
+ DSAStack->getTopDSA(VD, /*FromParent=*/false);
+ // OpenMP [2.12.5, target Construct]
+ // Memory allocators that appear in a uses_allocators clause cannot
+ // appear in other data-sharing attribute clauses or data-mapping
+ // attribute clauses in the same construct.
+ Expr *MapExpr = nullptr;
+ if (DVar.RefExpr ||
+ DSAStack->checkMappableExprComponentListsForDecl(
+ VD, /*CurrentRegionOnly=*/true,
+ [VD, &MapExpr](
+ OMPClauseMappableExprCommon::MappableExprComponentListRef
+ MapExprComponents,
+ OpenMPClauseKind C) {
+ auto MI = MapExprComponents.rbegin();
+ auto ME = MapExprComponents.rend();
+ if (MI != ME &&
+ MI->getAssociatedDeclaration()->getCanonicalDecl() ==
+ VD->getCanonicalDecl()) {
+ MapExpr = MI->getAssociatedExpression();
+ return true;
+ }
+ return false;
+ })) {
+ Diag(D.Allocator->getExprLoc(),
+ diag::err_omp_allocator_used_in_clauses)
+ << D.Allocator->getSourceRange();
+ if (DVar.RefExpr)
+ reportOriginalDsa(*this, DSAStack, VD, DVar);
+ else
+ Diag(MapExpr->getExprLoc(), diag::note_used_here)
+ << MapExpr->getSourceRange();
+ }
+ }
+ continue;
+ }
}
// Check allocate clauses.
if (!CurContext->isDependentContext())
checkAllocateClauses(*this, DSAStack, D->clauses());
+ checkReductionClauses(*this, DSAStack, D->clauses());
}
DSAStack->pop();
@@ -2618,15 +2965,14 @@ Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
static OMPAllocateDeclAttr::AllocatorTypeTy
getAllocatorKind(Sema &S, DSAStackTy *Stack, Expr *Allocator) {
if (!Allocator)
- return OMPAllocateDeclAttr::OMPDefaultMemAlloc;
+ return OMPAllocateDeclAttr::OMPNullMemAlloc;
if (Allocator->isTypeDependent() || Allocator->isValueDependent() ||
Allocator->isInstantiationDependent() ||
Allocator->containsUnexpandedParameterPack())
return OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
auto AllocatorKindRes = OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
const Expr *AE = Allocator->IgnoreParenImpCasts();
- for (int I = OMPAllocateDeclAttr::OMPDefaultMemAlloc;
- I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
+ for (int I = 0; I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
auto AllocatorKind = static_cast<OMPAllocateDeclAttr::AllocatorTypeTy>(I);
const Expr *DefAllocator = Stack->getAllocator(AllocatorKind);
llvm::FoldingSetNodeID AEId, DAEId;
@@ -2799,18 +3145,26 @@ OMPRequiresDecl *Sema::CheckOMPRequiresDecl(SourceLocation Loc,
/// current compilation unit.
ArrayRef<SourceLocation> TargetLocations =
DSAStack->getEncounteredTargetLocs();
- if (!TargetLocations.empty()) {
+ SourceLocation AtomicLoc = DSAStack->getAtomicDirectiveLoc();
+ if (!TargetLocations.empty() || !AtomicLoc.isInvalid()) {
for (const OMPClause *CNew : ClauseList) {
// Check if any of the requires clauses affect target regions.
if (isa<OMPUnifiedSharedMemoryClause>(CNew) ||
isa<OMPUnifiedAddressClause>(CNew) ||
isa<OMPReverseOffloadClause>(CNew) ||
isa<OMPDynamicAllocatorsClause>(CNew)) {
- Diag(Loc, diag::err_omp_target_before_requires)
- << getOpenMPClauseName(CNew->getClauseKind());
+ Diag(Loc, diag::err_omp_directive_before_requires)
+ << "target" << getOpenMPClauseName(CNew->getClauseKind());
for (SourceLocation TargetLoc : TargetLocations) {
- Diag(TargetLoc, diag::note_omp_requires_encountered_target);
+ Diag(TargetLoc, diag::note_omp_requires_encountered_directive)
+ << "target";
}
+ } else if (!AtomicLoc.isInvalid() &&
+ isa<OMPAtomicDefaultMemOrderClause>(CNew)) {
+ Diag(Loc, diag::err_omp_directive_before_requires)
+ << "atomic" << getOpenMPClauseName(CNew->getClauseKind());
+ Diag(AtomicLoc, diag::note_omp_requires_encountered_directive)
+ << "atomic";
}
}
}
@@ -2824,7 +3178,7 @@ OMPRequiresDecl *Sema::CheckOMPRequiresDecl(SourceLocation Loc,
static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
const ValueDecl *D,
const DSAStackTy::DSAVarData &DVar,
- bool IsLoopIterVar = false) {
+ bool IsLoopIterVar) {
if (DVar.RefExpr) {
SemaRef.Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_explicit_dsa)
<< getOpenMPClauseName(DVar.CKind);
@@ -2944,6 +3298,16 @@ class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
Visit(S->getInnermostCapturedStmt()->getCapturedStmt());
TryCaptureCXXThisMembers = SavedTryCaptureCXXThisMembers;
}
+ // In tasks firstprivates are not captured anymore, need to analyze them
+ // explicitly.
+ if (isOpenMPTaskingDirective(S->getDirectiveKind()) &&
+ !isOpenMPTaskLoopDirective(S->getDirectiveKind())) {
+ for (OMPClause *C : S->clauses())
+ if (auto *FC = dyn_cast<OMPFirstprivateClause>(C)) {
+ for (Expr *Ref : FC->varlists())
+ Visit(Ref);
+ }
+ }
}
public:
@@ -2966,7 +3330,11 @@ public:
return;
VD = VD->getCanonicalDecl();
// Skip internally declared variables.
- if (VD->hasLocalStorage() && CS && !CS->capturesVariable(VD))
+ if (VD->hasLocalStorage() && CS && !CS->capturesVariable(VD) &&
+ !Stack->isImplicitTaskFirstprivate(VD))
+ return;
+ // Skip allocators in uses_allocators clauses.
+ if (Stack->isUsesAllocatorsDecl(VD).hasValue())
return;
DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, /*FromParent=*/false);
@@ -2979,7 +3347,8 @@ public:
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
if (VD->hasGlobalStorage() && CS && !CS->capturesVariable(VD) &&
(Stack->hasRequiresDeclWithClause<OMPUnifiedSharedMemoryClause>() ||
- !Res || *Res != OMPDeclareTargetDeclAttr::MT_Link))
+ !Res || *Res != OMPDeclareTargetDeclAttr::MT_Link) &&
+ !Stack->isImplicitTaskFirstprivate(VD))
return;
SourceLocation ELoc = E->getExprLoc();
@@ -2988,10 +3357,19 @@ public:
// in the construct, and does not have a predetermined data-sharing
// attribute, must have its data-sharing attribute explicitly determined
// by being listed in a data-sharing attribute clause.
- if (DVar.CKind == OMPC_unknown && Stack->getDefaultDSA() == DSA_none &&
+ if (DVar.CKind == OMPC_unknown &&
+ (Stack->getDefaultDSA() == DSA_none ||
+ Stack->getDefaultDSA() == DSA_firstprivate) &&
isImplicitOrExplicitTaskingRegion(DKind) &&
VarsWithInheritedDSA.count(VD) == 0) {
- VarsWithInheritedDSA[VD] = E;
+ bool InheritedDSA = Stack->getDefaultDSA() == DSA_none;
+ if (!InheritedDSA && Stack->getDefaultDSA() == DSA_firstprivate) {
+ DSAStackTy::DSAVarData DVar =
+ Stack->getImplicitDSA(VD, /*FromParent=*/false);
+ InheritedDSA = DVar.CKind == OMPC_unknown;
+ }
+ if (InheritedDSA)
+ VarsWithInheritedDSA[VD] = E;
return;
}
@@ -3036,7 +3414,7 @@ public:
StackComponents,
OpenMPClauseKind) {
// Variable is used if it has been marked as an array, array
- // section or the variable iself.
+ // section, array shaping or the variable iself.
return StackComponents.size() == 1 ||
std::all_of(
std::next(StackComponents.rbegin()),
@@ -3047,6 +3425,8 @@ public:
nullptr &&
(isa<OMPArraySectionExpr>(
MC.getAssociatedExpression()) ||
+ isa<OMPArrayShapingExpr>(
+ MC.getAssociatedExpression()) ||
isa<ArraySubscriptExpr>(
MC.getAssociatedExpression()));
});
@@ -3091,7 +3471,9 @@ public:
// Define implicit data-sharing attributes for task.
DVar = Stack->getImplicitDSA(VD, /*FromParent=*/false);
- if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
+ if (((isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared) ||
+ (Stack->getDefaultDSA() == DSA_firstprivate &&
+ DVar.CKind == OMPC_firstprivate && !DVar.RefExpr)) &&
!Stack->isLoopControlVariable(VD).first) {
ImplicitFirstprivate.push_back(E);
return;
@@ -3112,7 +3494,7 @@ public:
return;
auto *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
- if (auto *TE = dyn_cast<CXXThisExpr>(E->getBase()->IgnoreParens())) {
+ if (auto *TE = dyn_cast<CXXThisExpr>(E->getBase()->IgnoreParenCasts())) {
if (!FD)
return;
DSAStackTy::DSAVarData DVar = Stack->getTopDSA(FD, /*FromParent=*/false);
@@ -3204,8 +3586,10 @@ public:
// Do both expressions have the same kind?
if (CCI->getAssociatedExpression()->getStmtClass() !=
SC.getAssociatedExpression()->getStmtClass())
- if (!(isa<OMPArraySectionExpr>(
- SC.getAssociatedExpression()) &&
+ if (!((isa<OMPArraySectionExpr>(
+ SC.getAssociatedExpression()) ||
+ isa<OMPArrayShapingExpr>(
+ SC.getAssociatedExpression())) &&
isa<ArraySubscriptExpr>(
CCI->getAssociatedExpression())))
return false;
@@ -3516,7 +3900,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
};
// Start a captured region for 'parallel'.
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- ParamsParallel, /*OpenMPCaptureLevel=*/1);
+ ParamsParallel, /*OpenMPCaptureLevel=*/0);
QualType Args[] = {VoidPtrTy};
FunctionProtoType::ExtProtoInfo EPI;
EPI.Variadic = true;
@@ -3537,7 +3921,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- Params, /*OpenMPCaptureLevel=*/2);
+ Params, /*OpenMPCaptureLevel=*/1);
// Mark this captured region as inlined, because we don't use outlined
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
@@ -3688,6 +4072,8 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_cancellation_point:
case OMPD_cancel:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_declare_simd:
@@ -3695,8 +4081,11 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_end_declare_target:
case OMPD_requires:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
llvm_unreachable("OpenMP Directive is not allowed");
case OMPD_unknown:
+ default:
llvm_unreachable("Unknown OpenMP directive");
}
}
@@ -3841,6 +4230,36 @@ void Sema::tryCaptureOpenMPLambdas(ValueDecl *V) {
}
}
+static bool checkOrderedOrderSpecified(Sema &S,
+ const ArrayRef<OMPClause *> Clauses) {
+ const OMPOrderedClause *Ordered = nullptr;
+ const OMPOrderClause *Order = nullptr;
+
+ for (const OMPClause *Clause : Clauses) {
+ if (Clause->getClauseKind() == OMPC_ordered)
+ Ordered = cast<OMPOrderedClause>(Clause);
+ else if (Clause->getClauseKind() == OMPC_order) {
+ Order = cast<OMPOrderClause>(Clause);
+ if (Order->getKind() != OMPC_ORDER_concurrent)
+ Order = nullptr;
+ }
+ if (Ordered && Order)
+ break;
+ }
+
+ if (Ordered && Order) {
+ S.Diag(Order->getKindKwLoc(),
+ diag::err_omp_simple_clause_incompatible_with_ordered)
+ << getOpenMPClauseName(OMPC_order)
+ << getOpenMPSimpleClauseTypeName(OMPC_order, OMPC_ORDER_concurrent)
+ << SourceRange(Order->getBeginLoc(), Order->getEndLoc());
+ S.Diag(Ordered->getBeginLoc(), diag::note_omp_ordered_param)
+ << 0 << SourceRange(Ordered->getBeginLoc(), Ordered->getEndLoc());
+ return true;
+ }
+ return false;
+}
+
StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
ArrayRef<OMPClause *> Clauses) {
bool ErrorFound = false;
@@ -3859,7 +4278,8 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
SmallVector<const OMPClauseWithPreInit *, 4> PICs;
// This is required for proper codegen.
for (OMPClause *Clause : Clauses) {
- if (isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
+ if (!LangOpts.OpenMPSimd &&
+ isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
Clause->getClauseKind() == OMPC_in_reduction) {
// Capture taskgroup task_reduction descriptors inside the tasking regions
// with the corresponding in_reduction items.
@@ -3897,6 +4317,9 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
else if (Clause->getClauseKind() == OMPC_linear)
LCs.push_back(cast<OMPLinearClause>(Clause));
}
+ // Capture allocator expressions if used.
+ for (Expr *E : DSAStack->getInnerAllocators())
+ MarkDeclarationsReferencedInExpr(E);
// OpenMP, 2.7.1 Loop Construct, Restrictions
// The nonmonotonic modifier cannot be specified if an ordered clause is
// specified.
@@ -3908,10 +4331,18 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
Diag(SC->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_nonmonotonic
? SC->getFirstScheduleModifierLoc()
: SC->getSecondScheduleModifierLoc(),
- diag::err_omp_schedule_nonmonotonic_ordered)
+ diag::err_omp_simple_clause_incompatible_with_ordered)
+ << getOpenMPClauseName(OMPC_schedule)
+ << getOpenMPSimpleClauseTypeName(OMPC_schedule,
+ OMPC_SCHEDULE_MODIFIER_nonmonotonic)
<< SourceRange(OC->getBeginLoc(), OC->getEndLoc());
ErrorFound = true;
}
+ // OpenMP 5.0, 2.9.2 Worksharing-Loop Construct, Restrictions.
+ // If an order(concurrent) clause is present, an ordered clause may not appear
+ // on the same directive.
+ if (checkOrderedOrderSpecified(*this, Clauses))
+ ErrorFound = true;
if (!LCs.empty() && OC && OC->getNumForLoops()) {
for (const OMPLinearClause *C : LCs) {
Diag(C->getBeginLoc(), diag::err_omp_linear_ordered)
@@ -3952,6 +4383,21 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
}
}
}
+ if (ThisCaptureRegion == OMPD_target) {
+ // Capture allocator traits in the target region. They are used implicitly
+ // and, thus, are not captured by default.
+ for (OMPClause *C : Clauses) {
+ if (const auto *UAC = dyn_cast<OMPUsesAllocatorsClause>(C)) {
+ for (unsigned I = 0, End = UAC->getNumberOfAllocators(); I < End;
+ ++I) {
+ OMPUsesAllocatorsClause::Data D = UAC->getAllocatorData(I);
+ if (Expr *E = D.AllocatorTraits)
+ MarkDeclarationsReferencedInExpr(E);
+ }
+ continue;
+ }
+ }
+ }
if (++CompletedRegions == CaptureRegions.size())
DSAStack->setBodyComplete();
SR = ActOnCapturedRegionEnd(SR.get());
@@ -3991,12 +4437,14 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
ShouldBeInParallelRegion,
ShouldBeInOrderedRegion,
ShouldBeInTargetRegion,
- ShouldBeInTeamsRegion
+ ShouldBeInTeamsRegion,
+ ShouldBeInLoopSimdRegion,
} Recommend = NoRecommend;
if (isOpenMPSimdDirective(ParentRegion) &&
((SemaRef.LangOpts.OpenMP <= 45 && CurrentRegion != OMPD_ordered) ||
(SemaRef.LangOpts.OpenMP >= 50 && CurrentRegion != OMPD_ordered &&
- CurrentRegion != OMPD_simd && CurrentRegion != OMPD_atomic))) {
+ CurrentRegion != OMPD_simd && CurrentRegion != OMPD_atomic &&
+ CurrentRegion != OMPD_scan))) {
// OpenMP [2.16, Nesting of Regions]
// OpenMP constructs may not be nested inside a simd region.
// OpenMP [2.8.1,simd Construct, Restrictions]
@@ -4041,7 +4489,7 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
if (ParentRegion == OMPD_unknown &&
!isOpenMPNestingTeamsDirective(CurrentRegion) &&
CurrentRegion != OMPD_cancellation_point &&
- CurrentRegion != OMPD_cancel)
+ CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_scan)
return false;
if (CurrentRegion == OMPD_cancellation_point ||
CurrentRegion == OMPD_cancel) {
@@ -4066,7 +4514,12 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
ParentRegion == OMPD_distribute_parallel_for ||
ParentRegion == OMPD_teams_distribute_parallel_for ||
ParentRegion == OMPD_target_teams_distribute_parallel_for)) ||
- (CancelRegion == OMPD_taskgroup && ParentRegion == OMPD_task) ||
+ (CancelRegion == OMPD_taskgroup &&
+ (ParentRegion == OMPD_task ||
+ (SemaRef.getLangOpts().OpenMP >= 50 &&
+ (ParentRegion == OMPD_taskloop ||
+ ParentRegion == OMPD_master_taskloop ||
+ ParentRegion == OMPD_parallel_master_taskloop)))) ||
(CancelRegion == OMPD_sections &&
(ParentRegion == OMPD_section || ParentRegion == OMPD_sections ||
ParentRegion == OMPD_parallel_sections)));
@@ -4150,6 +4603,17 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
ParentRegion != OMPD_target);
OrphanSeen = ParentRegion == OMPD_unknown;
Recommend = ShouldBeInTargetRegion;
+ } else if (CurrentRegion == OMPD_scan) {
+ // OpenMP [2.9.6, scan Construct, Restrictions]
+ // A scan construct must be closely nested inside a simd, for, for simd,
+ // parallel for, or parallel for simd region.
+ NestingProhibited =
+ SemaRef.LangOpts.OpenMP < 50 ||
+ (ParentRegion != OMPD_simd && ParentRegion != OMPD_for &&
+ ParentRegion != OMPD_for_simd && ParentRegion != OMPD_parallel_for &&
+ ParentRegion != OMPD_parallel_for_simd);
+ OrphanSeen = ParentRegion == OMPD_unknown;
+ Recommend = ShouldBeInLoopSimdRegion;
}
if (!NestingProhibited &&
!isOpenMPTargetExecutionDirective(CurrentRegion) &&
@@ -4216,7 +4680,7 @@ static bool checkIfClauses(Sema &S, OpenMPDirectiveKind Kind,
bool ErrorFound = false;
unsigned NamedModifiersNumber = 0;
llvm::IndexedMap<const OMPIfClause *, Kind2Unsigned> FoundNameModifiers;
- FoundNameModifiers.resize(unsigned(OMPD_unknown) + 1);
+ FoundNameModifiers.resize(llvm::omp::Directive_enumSize + 1);
SmallVector<SourceLocation, 4> NameModifierLoc;
for (const OMPClause *C : Clauses) {
if (const auto *IC = dyn_cast_or_null<OMPIfClause>(C)) {
@@ -4353,6 +4817,30 @@ static std::pair<ValueDecl *, bool> getPrivateItem(Sema &S, Expr *&RefExpr,
getCanonicalDecl(DE ? DE->getDecl() : ME->getMemberDecl()), false);
}
+namespace {
+/// Checks if the allocator is used in uses_allocators clause to be allowed in
+/// target regions.
+class AllocatorChecker final : public ConstStmtVisitor<AllocatorChecker, bool> {
+ DSAStackTy *S = nullptr;
+
+public:
+ bool VisitDeclRefExpr(const DeclRefExpr *E) {
+ return S->isUsesAllocatorsDecl(E->getDecl())
+ .getValueOr(
+ DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait) ==
+ DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait;
+ }
+ bool VisitStmt(const Stmt *S) {
+ for (const Stmt *Child : S->children()) {
+ if (Child && Visit(Child))
+ return true;
+ }
+ return false;
+ }
+ explicit AllocatorChecker(DSAStackTy *S) : S(S) {}
+};
+} // namespace
+
static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
ArrayRef<OMPClause *> Clauses) {
assert(!S.CurContext->isDependentContext() &&
@@ -4421,6 +4909,22 @@ static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
}
for (OMPClause *C : AllocateRange) {
auto *AC = cast<OMPAllocateClause>(C);
+ if (S.getLangOpts().OpenMP >= 50 &&
+ !Stack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>() &&
+ isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()) &&
+ AC->getAllocator()) {
+ Expr *Allocator = AC->getAllocator();
+ // OpenMP, 2.12.5 target Construct
+ // Memory allocators that do not appear in a uses_allocators clause cannot
+ // appear as an allocator in an allocate clause or be used in the target
+ // region unless a requires directive with the dynamic_allocators clause
+ // is present in the same compilation unit.
+ AllocatorChecker Checker(Stack);
+ if (Checker.Visit(Allocator))
+ S.Diag(Allocator->getExprLoc(),
+ diag::err_omp_allocator_not_in_uses_allocators)
+ << Allocator->getSourceRange();
+ }
OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind =
getAllocatorKind(S, Stack, AC->getAllocator());
// OpenMP, 2.11.4 allocate Clause, Restrictions.
@@ -4513,6 +5017,11 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
if (E)
ImplicitFirstprivates.emplace_back(E);
}
+ // OpenMP 5.0, 2.10.1 task Construct
+ // [detach clause]... The event-handle will be considered as if it was
+ // specified on a firstprivate clause.
+ if (auto *DC = dyn_cast<OMPDetachClause>(C))
+ ImplicitFirstprivates.push_back(DC->getEventHandler());
}
if (!ImplicitFirstprivates.empty()) {
if (OMPClause *Implicit = ActOnOpenMPFirstprivateClause(
@@ -4648,6 +5157,16 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
"No associated statement allowed for 'omp flush' directive");
Res = ActOnOpenMPFlushDirective(ClausesWithImplicit, StartLoc, EndLoc);
break;
+ case OMPD_depobj:
+ assert(AStmt == nullptr &&
+ "No associated statement allowed for 'omp depobj' directive");
+ Res = ActOnOpenMPDepobjDirective(ClausesWithImplicit, StartLoc, EndLoc);
+ break;
+ case OMPD_scan:
+ assert(AStmt == nullptr &&
+ "No associated statement allowed for 'omp scan' directive");
+ Res = ActOnOpenMPScanDirective(ClausesWithImplicit, StartLoc, EndLoc);
+ break;
case OMPD_ordered:
Res = ActOnOpenMPOrderedDirective(ClausesWithImplicit, AStmt, StartLoc,
EndLoc);
@@ -4848,15 +5367,20 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPD_declare_simd:
case OMPD_requires:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
llvm_unreachable("OpenMP Directive is not allowed");
case OMPD_unknown:
+ default:
llvm_unreachable("Unknown OpenMP directive");
}
ErrorFound = Res.isInvalid() || ErrorFound;
- // Check variables in the clauses if default(none) was specified.
- if (DSAStack->getDefaultDSA() == DSA_none) {
+ // Check variables in the clauses if default(none) or
+ // default(firstprivate) was specified.
+ if (DSAStack->getDefaultDSA() == DSA_none ||
+ DSAStack->getDefaultDSA() == DSA_firstprivate) {
DSAAttrChecker DSAChecker(DSAStack, *this, nullptr);
for (OMPClause *C : Clauses) {
switch (C->getClauseKind()) {
@@ -4876,6 +5400,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
break;
continue;
case OMPC_schedule:
+ case OMPC_detach:
break;
case OMPC_grainsize:
case OMPC_num_tasks:
@@ -4915,6 +5440,10 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed:
case OMPC_depend:
case OMPC_threads:
case OMPC_simd:
@@ -4924,11 +5453,19 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPC_to:
case OMPC_from:
case OMPC_use_device_ptr:
+ case OMPC_use_device_addr:
case OMPC_is_device_ptr:
case OMPC_nontemporal:
+ case OMPC_order:
+ case OMPC_destroy:
+ case OMPC_inclusive:
+ case OMPC_exclusive:
+ case OMPC_uses_allocators:
+ case OMPC_affinity:
continue;
case OMPC_allocator:
case OMPC_flush:
+ case OMPC_depobj:
case OMPC_threadprivate:
case OMPC_uniform:
case OMPC_unknown:
@@ -4939,6 +5476,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPC_atomic_default_mem_order:
case OMPC_device_type:
case OMPC_match:
+ default:
llvm_unreachable("Unexpected clause");
}
for (Stmt *CC : C->children()) {
@@ -4946,14 +5484,15 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
DSAChecker.Visit(CC);
}
}
- for (auto &P : DSAChecker.getVarsWithInheritedDSA())
+ for (const auto &P : DSAChecker.getVarsWithInheritedDSA())
VarsWithInheritedDSA[P.getFirst()] = P.getSecond();
}
for (const auto &P : VarsWithInheritedDSA) {
if (P.getFirst()->isImplicit() || isa<OMPCapturedExprDecl>(P.getFirst()))
continue;
ErrorFound = true;
- if (DSAStack->getDefaultDSA() == DSA_none) {
+ if (DSAStack->getDefaultDSA() == DSA_none ||
+ DSAStack->getDefaultDSA() == DSA_firstprivate) {
Diag(P.second->getExprLoc(), diag::err_omp_no_dsa_for_variable)
<< P.first << P.second->getSourceRange();
Diag(DSAStack->getDefaultDSALocation(), diag::note_omp_default_dsa_none);
@@ -4973,12 +5512,6 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
if (ErrorFound)
return StmtError();
- if (!(Res.getAs<OMPExecutableDirective>()->isStandaloneDirective())) {
- Res.getAs<OMPExecutableDirective>()
- ->getStructuredBlock()
- ->setIsOMPStructuredBlock(true);
- }
-
if (!CurContext->isDependentContext() &&
isOpenMPTargetExecutionDirective(Kind) &&
!(DSAStack->hasRequiresDeclWithClause<OMPUnifiedSharedMemoryClause>() ||
@@ -5166,7 +5699,8 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareSimdDirective(
E->containsUnexpandedParameterPack())
continue;
(void)CheckOpenMPLinearDecl(CanonPVD, E->getExprLoc(), LinKind,
- PVD->getOriginalType());
+ PVD->getOriginalType(),
+ /*IsDeclareSimd=*/true);
continue;
}
}
@@ -5186,7 +5720,7 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareSimdDirective(
E->isInstantiationDependent() || E->containsUnexpandedParameterPack())
continue;
(void)CheckOpenMPLinearDecl(/*D=*/nullptr, E->getExprLoc(), LinKind,
- E->getType());
+ E->getType(), /*IsDeclareSimd=*/true);
continue;
}
Diag(E->getExprLoc(), diag::err_omp_param_or_this_in_clause)
@@ -5264,9 +5798,170 @@ static void setPrototype(Sema &S, FunctionDecl *FD, FunctionDecl *FDWithProto,
FD->setParams(Params);
}
+Sema::OMPDeclareVariantScope::OMPDeclareVariantScope(OMPTraitInfo &TI)
+ : TI(&TI), NameSuffix(TI.getMangledName()) {}
+
+FunctionDecl *
+Sema::ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(Scope *S,
+ Declarator &D) {
+ IdentifierInfo *BaseII = D.getIdentifier();
+ LookupResult Lookup(*this, DeclarationName(BaseII), D.getIdentifierLoc(),
+ LookupOrdinaryName);
+ LookupParsedName(Lookup, S, &D.getCXXScopeSpec());
+
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType FType = TInfo->getType();
+
+ bool IsConstexpr = D.getDeclSpec().getConstexprSpecifier() == CSK_constexpr;
+ bool IsConsteval = D.getDeclSpec().getConstexprSpecifier() == CSK_consteval;
+
+ FunctionDecl *BaseFD = nullptr;
+ for (auto *Candidate : Lookup) {
+ auto *UDecl = dyn_cast<FunctionDecl>(Candidate->getUnderlyingDecl());
+ if (!UDecl)
+ continue;
+
+ // Don't specialize constexpr/consteval functions with
+ // non-constexpr/consteval functions.
+ if (UDecl->isConstexpr() && !IsConstexpr)
+ continue;
+ if (UDecl->isConsteval() && !IsConsteval)
+ continue;
+
+ QualType NewType = Context.mergeFunctionTypes(
+ FType, UDecl->getType(), /* OfBlockPointer */ false,
+ /* Unqualified */ false, /* AllowCXX */ true);
+ if (NewType.isNull())
+ continue;
+
+ // Found a base!
+ BaseFD = UDecl;
+ break;
+ }
+ if (!BaseFD) {
+ BaseFD = cast<FunctionDecl>(ActOnDeclarator(S, D));
+ BaseFD->setImplicit(true);
+ }
+
+ OMPDeclareVariantScope &DVScope = OMPDeclareVariantScopes.back();
+ std::string MangledName;
+ MangledName += D.getIdentifier()->getName();
+ MangledName += getOpenMPVariantManglingSeparatorStr();
+ MangledName += DVScope.NameSuffix;
+ IdentifierInfo &VariantII = Context.Idents.get(MangledName);
+
+ VariantII.setMangledOpenMPVariantName(true);
+ D.SetIdentifier(&VariantII, D.getBeginLoc());
+ return BaseFD;
+}
+
+void Sema::ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
+ FunctionDecl *FD, FunctionDecl *BaseFD) {
+ // Do not mark function as is used to prevent its emission if this is the
+ // only place where it is used.
+ EnterExpressionEvaluationContext Unevaluated(
+ *this, Sema::ExpressionEvaluationContext::Unevaluated);
+
+ Expr *VariantFuncRef = DeclRefExpr::Create(
+ Context, NestedNameSpecifierLoc(), SourceLocation(), FD,
+ /* RefersToEnclosingVariableOrCapture */ false,
+ /* NameLoc */ FD->getLocation(), FD->getType(), ExprValueKind::VK_RValue);
+
+ OMPDeclareVariantScope &DVScope = OMPDeclareVariantScopes.back();
+ auto *OMPDeclareVariantA = OMPDeclareVariantAttr::CreateImplicit(
+ Context, VariantFuncRef, DVScope.TI);
+ BaseFD->addAttr(OMPDeclareVariantA);
+}
+
+ExprResult Sema::ActOnOpenMPCall(ExprResult Call, Scope *Scope,
+ SourceLocation LParenLoc,
+ MultiExprArg ArgExprs,
+ SourceLocation RParenLoc, Expr *ExecConfig) {
+ // The common case is a regular call we do not want to specialize at all. Try
+ // to make that case fast by bailing early.
+ CallExpr *CE = dyn_cast<CallExpr>(Call.get());
+ if (!CE)
+ return Call;
+
+ FunctionDecl *CalleeFnDecl = CE->getDirectCallee();
+ if (!CalleeFnDecl)
+ return Call;
+
+ if (!CalleeFnDecl->hasAttr<OMPDeclareVariantAttr>())
+ return Call;
+
+ ASTContext &Context = getASTContext();
+ OMPContext OMPCtx(getLangOpts().OpenMPIsDevice,
+ Context.getTargetInfo().getTriple());
+
+ SmallVector<Expr *, 4> Exprs;
+ SmallVector<VariantMatchInfo, 4> VMIs;
+ while (CalleeFnDecl) {
+ for (OMPDeclareVariantAttr *A :
+ CalleeFnDecl->specific_attrs<OMPDeclareVariantAttr>()) {
+ Expr *VariantRef = A->getVariantFuncRef();
+
+ VariantMatchInfo VMI;
+ OMPTraitInfo &TI = A->getTraitInfo();
+ TI.getAsVariantMatchInfo(Context, VMI);
+ if (!isVariantApplicableInContext(VMI, OMPCtx, /* DeviceSetOnly */ false))
+ continue;
+
+ VMIs.push_back(VMI);
+ Exprs.push_back(VariantRef);
+ }
+
+ CalleeFnDecl = CalleeFnDecl->getPreviousDecl();
+ }
+
+ ExprResult NewCall;
+ do {
+ int BestIdx = getBestVariantMatchForContext(VMIs, OMPCtx);
+ if (BestIdx < 0)
+ return Call;
+ Expr *BestExpr = cast<DeclRefExpr>(Exprs[BestIdx]);
+ Decl *BestDecl = cast<DeclRefExpr>(BestExpr)->getDecl();
+
+ {
+ // Try to build a (member) call expression for the current best applicable
+ // variant expression. We allow this to fail in which case we continue
+ // with the next best variant expression. The fail case is part of the
+ // implementation defined behavior in the OpenMP standard when it talks
+ // about differences in the function prototypes: "Any differences
+ // that the specific OpenMP context requires in the prototype of the
+ // variant from the base function prototype are implementation defined."
+ // This wording is there to allow the specialized variant to have a
+ // different type than the base function. This is intended and OK but if
+ // we cannot create a call the difference is not in the "implementation
+ // defined range" we allow.
+ Sema::TentativeAnalysisScope Trap(*this);
+
+ if (auto *SpecializedMethod = dyn_cast<CXXMethodDecl>(BestDecl)) {
+ auto *MemberCall = dyn_cast<CXXMemberCallExpr>(CE);
+ BestExpr = MemberExpr::CreateImplicit(
+ Context, MemberCall->getImplicitObjectArgument(),
+ /* IsArrow */ false, SpecializedMethod, Context.BoundMemberTy,
+ MemberCall->getValueKind(), MemberCall->getObjectKind());
+ }
+ NewCall = BuildCallExpr(Scope, BestExpr, LParenLoc, ArgExprs, RParenLoc,
+ ExecConfig);
+ if (NewCall.isUsable())
+ break;
+ }
+
+ VMIs.erase(VMIs.begin() + BestIdx);
+ Exprs.erase(Exprs.begin() + BestIdx);
+ } while (!VMIs.empty());
+
+ if (!NewCall.isUsable())
+ return Call;
+ return PseudoObjectExpr::Create(Context, CE, {NewCall.get()}, 0);
+}
+
Optional<std::pair<FunctionDecl *, Expr *>>
Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
- Expr *VariantRef, SourceRange SR) {
+ Expr *VariantRef, OMPTraitInfo &TI,
+ SourceRange SR) {
if (!DG || DG.get().isNull())
return None;
@@ -5319,12 +6014,41 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
return None;
}
+ auto ShouldDelayChecks = [](Expr *&E, bool) {
+ return E && (E->isTypeDependent() || E->isValueDependent() ||
+ E->containsUnexpandedParameterPack() ||
+ E->isInstantiationDependent());
+ };
// Do not check templates, wait until instantiation.
- if (VariantRef->isTypeDependent() || VariantRef->isValueDependent() ||
- VariantRef->containsUnexpandedParameterPack() ||
- VariantRef->isInstantiationDependent() || FD->isDependentContext())
+ if (FD->isDependentContext() || ShouldDelayChecks(VariantRef, false) ||
+ TI.anyScoreOrCondition(ShouldDelayChecks))
return std::make_pair(FD, VariantRef);
+ // Deal with non-constant score and user condition expressions.
+ auto HandleNonConstantScoresAndConditions = [this](Expr *&E,
+ bool IsScore) -> bool {
+ llvm::APSInt Result;
+ if (!E || E->isIntegerConstantExpr(Result, Context))
+ return false;
+
+ if (IsScore) {
+ // We warn on non-constant scores and pretend they were not present.
+ Diag(E->getExprLoc(), diag::warn_omp_declare_variant_score_not_constant)
+ << E;
+ E = nullptr;
+ } else {
+ // We could replace a non-constant user condition with "false" but we
+ // will soon need to handle these anyway for the dynamic version of
+ // OpenMP context selectors.
+ Diag(E->getExprLoc(),
+ diag::err_omp_declare_variant_user_condition_not_constant)
+ << E;
+ }
+ return true;
+ };
+ if (TI.anyScoreOrCondition(HandleNonConstantScoresAndConditions))
+ return None;
+
// Convert VariantRef expression to the type of the original function to
// resolve possible conflicts.
ExprResult VariantRefCast;
@@ -5355,7 +6079,7 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
ImplicitConversionSequence ICS =
TryImplicitConversion(VariantRef, FnPtrType.getUnqualifiedType(),
/*SuppressUserConversions=*/false,
- /*AllowExplicit=*/false,
+ AllowedExplicit::None,
/*InOverloadResolution=*/false,
/*CStyle=*/false,
/*AllowObjCWritebackConversion=*/false);
@@ -5497,94 +6221,13 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
return std::make_pair(FD, cast<Expr>(DRE));
}
-void Sema::ActOnOpenMPDeclareVariantDirective(
- FunctionDecl *FD, Expr *VariantRef, SourceRange SR,
- ArrayRef<OMPCtxSelectorData> Data) {
- if (Data.empty())
- return;
- SmallVector<Expr *, 4> CtxScores;
- SmallVector<unsigned, 4> CtxSets;
- SmallVector<unsigned, 4> Ctxs;
- SmallVector<StringRef, 4> ImplVendors, DeviceKinds;
- bool IsError = false;
- for (const OMPCtxSelectorData &D : Data) {
- OpenMPContextSelectorSetKind CtxSet = D.CtxSet;
- OpenMPContextSelectorKind Ctx = D.Ctx;
- if (CtxSet == OMP_CTX_SET_unknown || Ctx == OMP_CTX_unknown)
- return;
- Expr *Score = nullptr;
- if (D.Score.isUsable()) {
- Score = D.Score.get();
- if (!Score->isTypeDependent() && !Score->isValueDependent() &&
- !Score->isInstantiationDependent() &&
- !Score->containsUnexpandedParameterPack()) {
- Score =
- PerformOpenMPImplicitIntegerConversion(Score->getExprLoc(), Score)
- .get();
- if (Score)
- Score = VerifyIntegerConstantExpression(Score).get();
- }
- } else {
- // OpenMP 5.0, 2.3.3 Matching and Scoring Context Selectors.
- // The kind, arch, and isa selectors are given the values 2^l, 2^(l+1) and
- // 2^(l+2), respectively, where l is the number of traits in the construct
- // set.
- // TODO: implement correct logic for isa and arch traits.
- // TODO: take the construct context set into account when it is
- // implemented.
- int L = 0; // Currently set the number of traits in construct set to 0,
- // since the construct trait set in not supported yet.
- if (CtxSet == OMP_CTX_SET_device && Ctx == OMP_CTX_kind)
- Score = ActOnIntegerConstant(SourceLocation(), std::pow(2, L)).get();
- else
- Score = ActOnIntegerConstant(SourceLocation(), 0).get();
- }
- switch (Ctx) {
- case OMP_CTX_vendor:
- assert(CtxSet == OMP_CTX_SET_implementation &&
- "Expected implementation context selector set.");
- ImplVendors.append(D.Names.begin(), D.Names.end());
- break;
- case OMP_CTX_kind:
- assert(CtxSet == OMP_CTX_SET_device &&
- "Expected device context selector set.");
- DeviceKinds.append(D.Names.begin(), D.Names.end());
- break;
- case OMP_CTX_unknown:
- llvm_unreachable("Unknown context selector kind.");
- }
- IsError = IsError || !Score;
- CtxSets.push_back(CtxSet);
- Ctxs.push_back(Ctx);
- CtxScores.push_back(Score);
- }
- if (!IsError) {
- auto *NewAttr = OMPDeclareVariantAttr::CreateImplicit(
- Context, VariantRef, CtxScores.begin(), CtxScores.size(),
- CtxSets.begin(), CtxSets.size(), Ctxs.begin(), Ctxs.size(),
- ImplVendors.begin(), ImplVendors.size(), DeviceKinds.begin(),
- DeviceKinds.size(), SR);
- FD->addAttr(NewAttr);
- }
-}
-
-void Sema::markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc,
- FunctionDecl *Func,
- bool MightBeOdrUse) {
- assert(LangOpts.OpenMP && "Expected OpenMP mode.");
-
- if (!Func->isDependentContext() && Func->hasAttrs()) {
- for (OMPDeclareVariantAttr *A :
- Func->specific_attrs<OMPDeclareVariantAttr>()) {
- // TODO: add checks for active OpenMP context where possible.
- Expr *VariantRef = A->getVariantFuncRef();
- auto *DRE = cast<DeclRefExpr>(VariantRef->IgnoreParenImpCasts());
- auto *F = cast<FunctionDecl>(DRE->getDecl());
- if (!F->isDefined() && F->isTemplateInstantiation())
- InstantiateFunctionDefinition(Loc, F->getFirstDecl());
- MarkFunctionReferenced(Loc, F, MightBeOdrUse);
- }
- }
+void Sema::ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD,
+ Expr *VariantRef,
+ OMPTraitInfo &TI,
+ SourceRange SR) {
+ auto *NewAttr =
+ OMPDeclareVariantAttr::CreateImplicit(Context, VariantRef, &TI, SR);
+ FD->addAttr(NewAttr);
}
StmtResult Sema::ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
@@ -5605,6 +6248,7 @@ StmtResult Sema::ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
setFunctionHasBranchProtectedScope();
return OMPParallelDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
+ DSAStack->getTaskgroupReductionRef(),
DSAStack->isCancelRegion());
}
@@ -6300,8 +6944,8 @@ bool OpenMPIterationSpaceChecker::checkAndSetInc(Expr *S) {
static ExprResult
tryBuildCapture(Sema &SemaRef, Expr *Capture,
llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
- if (SemaRef.CurContext->isDependentContext())
- return ExprResult(Capture);
+ if (SemaRef.CurContext->isDependentContext() || Capture->containsErrors())
+ return Capture;
if (Capture->isEvaluatable(SemaRef.Context, Expr::SE_AllowSideEffects))
return SemaRef.PerformImplicitConversion(
Capture->IgnoreImpCasts(), Capture->getType(), Sema::AA_Converting,
@@ -6315,221 +6959,344 @@ tryBuildCapture(Sema &SemaRef, Expr *Capture,
return Res;
}
-/// Build the expression to calculate the number of iterations.
-Expr *OpenMPIterationSpaceChecker::buildNumIterations(
- Scope *S, ArrayRef<LoopIterationSpace> ResultIterSpaces, bool LimitedType,
- llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const {
- ExprResult Diff;
- QualType VarType = LCDecl->getType().getNonReferenceType();
- if (VarType->isIntegerType() || VarType->isPointerType() ||
- SemaRef.getLangOpts().CPlusPlus) {
- Expr *LBVal = LB;
- Expr *UBVal = UB;
- // LB = TestIsLessOp.getValue() ? min(LB(MinVal), LB(MaxVal)) :
- // max(LB(MinVal), LB(MaxVal))
- if (InitDependOnLC) {
- const LoopIterationSpace &IS =
- ResultIterSpaces[ResultIterSpaces.size() - 1 -
- InitDependOnLC.getValueOr(
- CondDependOnLC.getValueOr(0))];
- if (!IS.MinValue || !IS.MaxValue)
- return nullptr;
- // OuterVar = Min
- ExprResult MinValue =
- SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, IS.MinValue);
- if (!MinValue.isUsable())
- return nullptr;
-
- ExprResult LBMinVal = SemaRef.BuildBinOp(S, DefaultLoc, BO_Assign,
- IS.CounterVar, MinValue.get());
- if (!LBMinVal.isUsable())
- return nullptr;
- // OuterVar = Min, LBVal
- LBMinVal =
- SemaRef.BuildBinOp(S, DefaultLoc, BO_Comma, LBMinVal.get(), LBVal);
- if (!LBMinVal.isUsable())
- return nullptr;
- // (OuterVar = Min, LBVal)
- LBMinVal = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, LBMinVal.get());
- if (!LBMinVal.isUsable())
- return nullptr;
-
- // OuterVar = Max
- ExprResult MaxValue =
- SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, IS.MaxValue);
- if (!MaxValue.isUsable())
- return nullptr;
-
- ExprResult LBMaxVal = SemaRef.BuildBinOp(S, DefaultLoc, BO_Assign,
- IS.CounterVar, MaxValue.get());
- if (!LBMaxVal.isUsable())
- return nullptr;
- // OuterVar = Max, LBVal
- LBMaxVal =
- SemaRef.BuildBinOp(S, DefaultLoc, BO_Comma, LBMaxVal.get(), LBVal);
- if (!LBMaxVal.isUsable())
- return nullptr;
- // (OuterVar = Max, LBVal)
- LBMaxVal = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, LBMaxVal.get());
- if (!LBMaxVal.isUsable())
- return nullptr;
-
- Expr *LBMin = tryBuildCapture(SemaRef, LBMinVal.get(), Captures).get();
- Expr *LBMax = tryBuildCapture(SemaRef, LBMaxVal.get(), Captures).get();
- if (!LBMin || !LBMax)
- return nullptr;
- // LB(MinVal) < LB(MaxVal)
- ExprResult MinLessMaxRes =
- SemaRef.BuildBinOp(S, DefaultLoc, BO_LT, LBMin, LBMax);
- if (!MinLessMaxRes.isUsable())
- return nullptr;
- Expr *MinLessMax =
- tryBuildCapture(SemaRef, MinLessMaxRes.get(), Captures).get();
- if (!MinLessMax)
- return nullptr;
- if (TestIsLessOp.getValue()) {
- // LB(MinVal) < LB(MaxVal) ? LB(MinVal) : LB(MaxVal) - min(LB(MinVal),
- // LB(MaxVal))
- ExprResult MinLB = SemaRef.ActOnConditionalOp(DefaultLoc, DefaultLoc,
- MinLessMax, LBMin, LBMax);
- if (!MinLB.isUsable())
- return nullptr;
- LBVal = MinLB.get();
- } else {
- // LB(MinVal) < LB(MaxVal) ? LB(MaxVal) : LB(MinVal) - max(LB(MinVal),
- // LB(MaxVal))
- ExprResult MaxLB = SemaRef.ActOnConditionalOp(DefaultLoc, DefaultLoc,
- MinLessMax, LBMax, LBMin);
- if (!MaxLB.isUsable())
- return nullptr;
- LBVal = MaxLB.get();
- }
+/// Calculate number of iterations, transforming to unsigned, if number of
+/// iterations may be larger than the original type.
+static Expr *
+calculateNumIters(Sema &SemaRef, Scope *S, SourceLocation DefaultLoc,
+ Expr *Lower, Expr *Upper, Expr *Step, QualType LCTy,
+ bool TestIsStrictOp, bool RoundToStep,
+ llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
+ ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
+ if (!NewStep.isUsable())
+ return nullptr;
+ llvm::APSInt LRes, URes, SRes;
+ bool IsLowerConst = Lower->isIntegerConstantExpr(LRes, SemaRef.Context);
+ bool IsStepConst = Step->isIntegerConstantExpr(SRes, SemaRef.Context);
+ bool NoNeedToConvert = IsLowerConst && !RoundToStep &&
+ ((!TestIsStrictOp && LRes.isNonNegative()) ||
+ (TestIsStrictOp && LRes.isStrictlyPositive()));
+ bool NeedToReorganize = false;
+  // Check if any subexpressions in Lower - Step [+ 1] lead to overflow.
+ if (!NoNeedToConvert && IsLowerConst &&
+ (TestIsStrictOp || (RoundToStep && IsStepConst))) {
+ NoNeedToConvert = true;
+ if (RoundToStep) {
+ unsigned BW = LRes.getBitWidth() > SRes.getBitWidth()
+ ? LRes.getBitWidth()
+ : SRes.getBitWidth();
+ LRes = LRes.extend(BW + 1);
+ LRes.setIsSigned(true);
+ SRes = SRes.extend(BW + 1);
+ SRes.setIsSigned(true);
+ LRes -= SRes;
+ NoNeedToConvert = LRes.trunc(BW).extend(BW + 1) == LRes;
+ LRes = LRes.trunc(BW);
+ }
+ if (TestIsStrictOp) {
+ unsigned BW = LRes.getBitWidth();
+ LRes = LRes.extend(BW + 1);
+ LRes.setIsSigned(true);
+ ++LRes;
+ NoNeedToConvert =
+ NoNeedToConvert && LRes.trunc(BW).extend(BW + 1) == LRes;
+ // truncate to the original bitwidth.
+ LRes = LRes.trunc(BW);
+ }
+ NeedToReorganize = NoNeedToConvert;
+ }
+ bool IsUpperConst = Upper->isIntegerConstantExpr(URes, SemaRef.Context);
+ if (NoNeedToConvert && IsLowerConst && IsUpperConst &&
+ (!RoundToStep || IsStepConst)) {
+ unsigned BW = LRes.getBitWidth() > URes.getBitWidth() ? LRes.getBitWidth()
+ : URes.getBitWidth();
+ LRes = LRes.extend(BW + 1);
+ LRes.setIsSigned(true);
+ URes = URes.extend(BW + 1);
+ URes.setIsSigned(true);
+ URes -= LRes;
+ NoNeedToConvert = URes.trunc(BW).extend(BW + 1) == URes;
+ NeedToReorganize = NoNeedToConvert;
+ }
+ // If the boundaries are not constant or (Lower - Step [+ 1]) is not constant
+ // or less than zero (Upper - (Lower - Step [+ 1]) may overflow) - promote to
+ // unsigned.
+ if ((!NoNeedToConvert || (LRes.isNegative() && !IsUpperConst)) &&
+ !LCTy->isDependentType() && LCTy->isIntegerType()) {
+ QualType LowerTy = Lower->getType();
+ QualType UpperTy = Upper->getType();
+ uint64_t LowerSize = SemaRef.Context.getTypeSize(LowerTy);
+ uint64_t UpperSize = SemaRef.Context.getTypeSize(UpperTy);
+ if ((LowerSize <= UpperSize && UpperTy->hasSignedIntegerRepresentation()) ||
+ (LowerSize > UpperSize && LowerTy->hasSignedIntegerRepresentation())) {
+ QualType CastType = SemaRef.Context.getIntTypeForBitwidth(
+ LowerSize > UpperSize ? LowerSize : UpperSize, /*Signed=*/0);
+ Upper =
+ SemaRef
+ .PerformImplicitConversion(
+ SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Upper).get(),
+ CastType, Sema::AA_Converting)
+ .get();
+ Lower = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Lower).get();
+ NewStep = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, NewStep.get());
}
- // UB = TestIsLessOp.getValue() ? max(UB(MinVal), UB(MaxVal)) :
- // min(UB(MinVal), UB(MaxVal))
- if (CondDependOnLC) {
- const LoopIterationSpace &IS =
- ResultIterSpaces[ResultIterSpaces.size() - 1 -
- InitDependOnLC.getValueOr(
- CondDependOnLC.getValueOr(0))];
- if (!IS.MinValue || !IS.MaxValue)
- return nullptr;
- // OuterVar = Min
- ExprResult MinValue =
- SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, IS.MinValue);
- if (!MinValue.isUsable())
- return nullptr;
-
- ExprResult UBMinVal = SemaRef.BuildBinOp(S, DefaultLoc, BO_Assign,
- IS.CounterVar, MinValue.get());
- if (!UBMinVal.isUsable())
- return nullptr;
- // OuterVar = Min, UBVal
- UBMinVal =
- SemaRef.BuildBinOp(S, DefaultLoc, BO_Comma, UBMinVal.get(), UBVal);
- if (!UBMinVal.isUsable())
- return nullptr;
- // (OuterVar = Min, UBVal)
- UBMinVal = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, UBMinVal.get());
- if (!UBMinVal.isUsable())
- return nullptr;
+ }
+ if (!Lower || !Upper || NewStep.isInvalid())
+ return nullptr;
- // OuterVar = Max
- ExprResult MaxValue =
- SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, IS.MaxValue);
- if (!MaxValue.isUsable())
+ ExprResult Diff;
+ // If need to reorganize, then calculate the form as Upper - (Lower - Step [+
+ // 1]).
+ if (NeedToReorganize) {
+ Diff = Lower;
+
+ if (RoundToStep) {
+ // Lower - Step
+ Diff =
+ SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Diff.get(), NewStep.get());
+ if (!Diff.isUsable())
return nullptr;
+ }
- ExprResult UBMaxVal = SemaRef.BuildBinOp(S, DefaultLoc, BO_Assign,
- IS.CounterVar, MaxValue.get());
- if (!UBMaxVal.isUsable())
- return nullptr;
- // OuterVar = Max, UBVal
- UBMaxVal =
- SemaRef.BuildBinOp(S, DefaultLoc, BO_Comma, UBMaxVal.get(), UBVal);
- if (!UBMaxVal.isUsable())
- return nullptr;
- // (OuterVar = Max, UBVal)
- UBMaxVal = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, UBMaxVal.get());
- if (!UBMaxVal.isUsable())
- return nullptr;
+ // Lower - Step [+ 1]
+ if (TestIsStrictOp)
+ Diff = SemaRef.BuildBinOp(
+ S, DefaultLoc, BO_Add, Diff.get(),
+ SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
+ if (!Diff.isUsable())
+ return nullptr;
- Expr *UBMin = tryBuildCapture(SemaRef, UBMinVal.get(), Captures).get();
- Expr *UBMax = tryBuildCapture(SemaRef, UBMaxVal.get(), Captures).get();
- if (!UBMin || !UBMax)
- return nullptr;
- // UB(MinVal) > UB(MaxVal)
- ExprResult MinGreaterMaxRes =
- SemaRef.BuildBinOp(S, DefaultLoc, BO_GT, UBMin, UBMax);
- if (!MinGreaterMaxRes.isUsable())
- return nullptr;
- Expr *MinGreaterMax =
- tryBuildCapture(SemaRef, MinGreaterMaxRes.get(), Captures).get();
- if (!MinGreaterMax)
- return nullptr;
- if (TestIsLessOp.getValue()) {
- // UB(MinVal) > UB(MaxVal) ? UB(MinVal) : UB(MaxVal) - max(UB(MinVal),
- // UB(MaxVal))
- ExprResult MaxUB = SemaRef.ActOnConditionalOp(
- DefaultLoc, DefaultLoc, MinGreaterMax, UBMin, UBMax);
- if (!MaxUB.isUsable())
- return nullptr;
- UBVal = MaxUB.get();
- } else {
- // UB(MinVal) > UB(MaxVal) ? UB(MaxVal) : UB(MinVal) - min(UB(MinVal),
- // UB(MaxVal))
- ExprResult MinUB = SemaRef.ActOnConditionalOp(
- DefaultLoc, DefaultLoc, MinGreaterMax, UBMax, UBMin);
- if (!MinUB.isUsable())
- return nullptr;
- UBVal = MinUB.get();
- }
- }
- // Upper - Lower
- Expr *UBExpr = TestIsLessOp.getValue() ? UBVal : LBVal;
- Expr *LBExpr = TestIsLessOp.getValue() ? LBVal : UBVal;
- Expr *Upper = tryBuildCapture(SemaRef, UBExpr, Captures).get();
- Expr *Lower = tryBuildCapture(SemaRef, LBExpr, Captures).get();
- if (!Upper || !Lower)
+ Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
+ if (!Diff.isUsable())
return nullptr;
+ // Upper - (Lower - Step [+ 1]).
+ Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Diff.get());
+ if (!Diff.isUsable())
+ return nullptr;
+ } else {
Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Lower);
- if (!Diff.isUsable() && VarType->getAsCXXRecordDecl()) {
+ if (!Diff.isUsable() && LCTy->getAsCXXRecordDecl()) {
// BuildBinOp already emitted error, this one is to point user to upper
// and lower bound, and to tell what is passed to 'operator-'.
SemaRef.Diag(Upper->getBeginLoc(), diag::err_omp_loop_diff_cxx)
<< Upper->getSourceRange() << Lower->getSourceRange();
return nullptr;
}
+
+ if (!Diff.isUsable())
+ return nullptr;
+
+ // Upper - Lower [- 1]
+ if (TestIsStrictOp)
+ Diff = SemaRef.BuildBinOp(
+ S, DefaultLoc, BO_Sub, Diff.get(),
+ SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
+ if (!Diff.isUsable())
+ return nullptr;
+
+ if (RoundToStep) {
+ // Upper - Lower [- 1] + Step
+ Diff =
+ SemaRef.BuildBinOp(S, DefaultLoc, BO_Add, Diff.get(), NewStep.get());
+ if (!Diff.isUsable())
+ return nullptr;
+ }
}
+ // Parentheses (for dumping/debugging purposes only).
+ Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
if (!Diff.isUsable())
return nullptr;
- // Upper - Lower [- 1]
- if (TestIsStrictOp)
- Diff = SemaRef.BuildBinOp(
- S, DefaultLoc, BO_Sub, Diff.get(),
- SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
+ // (Upper - Lower [- 1] + Step) / Step or (Upper - Lower) / Step
+ Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(), NewStep.get());
if (!Diff.isUsable())
return nullptr;
- // Upper - Lower [- 1] + Step
- ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
- if (!NewStep.isUsable())
- return nullptr;
- Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Add, Diff.get(), NewStep.get());
- if (!Diff.isUsable())
+ return Diff.get();
+}
+
+/// Build the expression to calculate the number of iterations.
+Expr *OpenMPIterationSpaceChecker::buildNumIterations(
+ Scope *S, ArrayRef<LoopIterationSpace> ResultIterSpaces, bool LimitedType,
+ llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const {
+ QualType VarType = LCDecl->getType().getNonReferenceType();
+ if (!VarType->isIntegerType() && !VarType->isPointerType() &&
+ !SemaRef.getLangOpts().CPlusPlus)
return nullptr;
+ Expr *LBVal = LB;
+ Expr *UBVal = UB;
+ // LB = TestIsLessOp.getValue() ? min(LB(MinVal), LB(MaxVal)) :
+ // max(LB(MinVal), LB(MaxVal))
+ if (InitDependOnLC) {
+ const LoopIterationSpace &IS =
+ ResultIterSpaces[ResultIterSpaces.size() - 1 -
+ InitDependOnLC.getValueOr(
+ CondDependOnLC.getValueOr(0))];
+ if (!IS.MinValue || !IS.MaxValue)
+ return nullptr;
+ // OuterVar = Min
+ ExprResult MinValue =
+ SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, IS.MinValue);
+ if (!MinValue.isUsable())
+ return nullptr;
- // Parentheses (for dumping/debugging purposes only).
- Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
- if (!Diff.isUsable())
+ ExprResult LBMinVal = SemaRef.BuildBinOp(S, DefaultLoc, BO_Assign,
+ IS.CounterVar, MinValue.get());
+ if (!LBMinVal.isUsable())
+ return nullptr;
+ // OuterVar = Min, LBVal
+ LBMinVal =
+ SemaRef.BuildBinOp(S, DefaultLoc, BO_Comma, LBMinVal.get(), LBVal);
+ if (!LBMinVal.isUsable())
+ return nullptr;
+ // (OuterVar = Min, LBVal)
+ LBMinVal = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, LBMinVal.get());
+ if (!LBMinVal.isUsable())
+ return nullptr;
+
+ // OuterVar = Max
+ ExprResult MaxValue =
+ SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, IS.MaxValue);
+ if (!MaxValue.isUsable())
+ return nullptr;
+
+ ExprResult LBMaxVal = SemaRef.BuildBinOp(S, DefaultLoc, BO_Assign,
+ IS.CounterVar, MaxValue.get());
+ if (!LBMaxVal.isUsable())
+ return nullptr;
+ // OuterVar = Max, LBVal
+ LBMaxVal =
+ SemaRef.BuildBinOp(S, DefaultLoc, BO_Comma, LBMaxVal.get(), LBVal);
+ if (!LBMaxVal.isUsable())
+ return nullptr;
+ // (OuterVar = Max, LBVal)
+ LBMaxVal = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, LBMaxVal.get());
+ if (!LBMaxVal.isUsable())
+ return nullptr;
+
+ Expr *LBMin = tryBuildCapture(SemaRef, LBMinVal.get(), Captures).get();
+ Expr *LBMax = tryBuildCapture(SemaRef, LBMaxVal.get(), Captures).get();
+ if (!LBMin || !LBMax)
+ return nullptr;
+ // LB(MinVal) < LB(MaxVal)
+ ExprResult MinLessMaxRes =
+ SemaRef.BuildBinOp(S, DefaultLoc, BO_LT, LBMin, LBMax);
+ if (!MinLessMaxRes.isUsable())
+ return nullptr;
+ Expr *MinLessMax =
+ tryBuildCapture(SemaRef, MinLessMaxRes.get(), Captures).get();
+ if (!MinLessMax)
+ return nullptr;
+ if (TestIsLessOp.getValue()) {
+ // LB(MinVal) < LB(MaxVal) ? LB(MinVal) : LB(MaxVal) - min(LB(MinVal),
+ // LB(MaxVal))
+ ExprResult MinLB = SemaRef.ActOnConditionalOp(DefaultLoc, DefaultLoc,
+ MinLessMax, LBMin, LBMax);
+ if (!MinLB.isUsable())
+ return nullptr;
+ LBVal = MinLB.get();
+ } else {
+ // LB(MinVal) < LB(MaxVal) ? LB(MaxVal) : LB(MinVal) - max(LB(MinVal),
+ // LB(MaxVal))
+ ExprResult MaxLB = SemaRef.ActOnConditionalOp(DefaultLoc, DefaultLoc,
+ MinLessMax, LBMax, LBMin);
+ if (!MaxLB.isUsable())
+ return nullptr;
+ LBVal = MaxLB.get();
+ }
+ }
+ // UB = TestIsLessOp.getValue() ? max(UB(MinVal), UB(MaxVal)) :
+ // min(UB(MinVal), UB(MaxVal))
+ if (CondDependOnLC) {
+ const LoopIterationSpace &IS =
+ ResultIterSpaces[ResultIterSpaces.size() - 1 -
+ InitDependOnLC.getValueOr(
+ CondDependOnLC.getValueOr(0))];
+ if (!IS.MinValue || !IS.MaxValue)
+ return nullptr;
+ // OuterVar = Min
+ ExprResult MinValue =
+ SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, IS.MinValue);
+ if (!MinValue.isUsable())
+ return nullptr;
+
+ ExprResult UBMinVal = SemaRef.BuildBinOp(S, DefaultLoc, BO_Assign,
+ IS.CounterVar, MinValue.get());
+ if (!UBMinVal.isUsable())
+ return nullptr;
+ // OuterVar = Min, UBVal
+ UBMinVal =
+ SemaRef.BuildBinOp(S, DefaultLoc, BO_Comma, UBMinVal.get(), UBVal);
+ if (!UBMinVal.isUsable())
+ return nullptr;
+ // (OuterVar = Min, UBVal)
+ UBMinVal = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, UBMinVal.get());
+ if (!UBMinVal.isUsable())
+ return nullptr;
+
+ // OuterVar = Max
+ ExprResult MaxValue =
+ SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, IS.MaxValue);
+ if (!MaxValue.isUsable())
+ return nullptr;
+
+ ExprResult UBMaxVal = SemaRef.BuildBinOp(S, DefaultLoc, BO_Assign,
+ IS.CounterVar, MaxValue.get());
+ if (!UBMaxVal.isUsable())
+ return nullptr;
+ // OuterVar = Max, UBVal
+ UBMaxVal =
+ SemaRef.BuildBinOp(S, DefaultLoc, BO_Comma, UBMaxVal.get(), UBVal);
+ if (!UBMaxVal.isUsable())
+ return nullptr;
+ // (OuterVar = Max, UBVal)
+ UBMaxVal = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, UBMaxVal.get());
+ if (!UBMaxVal.isUsable())
+ return nullptr;
+
+ Expr *UBMin = tryBuildCapture(SemaRef, UBMinVal.get(), Captures).get();
+ Expr *UBMax = tryBuildCapture(SemaRef, UBMaxVal.get(), Captures).get();
+ if (!UBMin || !UBMax)
+ return nullptr;
+ // UB(MinVal) > UB(MaxVal)
+ ExprResult MinGreaterMaxRes =
+ SemaRef.BuildBinOp(S, DefaultLoc, BO_GT, UBMin, UBMax);
+ if (!MinGreaterMaxRes.isUsable())
+ return nullptr;
+ Expr *MinGreaterMax =
+ tryBuildCapture(SemaRef, MinGreaterMaxRes.get(), Captures).get();
+ if (!MinGreaterMax)
+ return nullptr;
+ if (TestIsLessOp.getValue()) {
+ // UB(MinVal) > UB(MaxVal) ? UB(MinVal) : UB(MaxVal) - max(UB(MinVal),
+ // UB(MaxVal))
+ ExprResult MaxUB = SemaRef.ActOnConditionalOp(
+ DefaultLoc, DefaultLoc, MinGreaterMax, UBMin, UBMax);
+ if (!MaxUB.isUsable())
+ return nullptr;
+ UBVal = MaxUB.get();
+ } else {
+ // UB(MinVal) > UB(MaxVal) ? UB(MaxVal) : UB(MinVal) - min(UB(MinVal),
+ // UB(MaxVal))
+ ExprResult MinUB = SemaRef.ActOnConditionalOp(
+ DefaultLoc, DefaultLoc, MinGreaterMax, UBMax, UBMin);
+ if (!MinUB.isUsable())
+ return nullptr;
+ UBVal = MinUB.get();
+ }
+ }
+ Expr *UBExpr = TestIsLessOp.getValue() ? UBVal : LBVal;
+ Expr *LBExpr = TestIsLessOp.getValue() ? LBVal : UBVal;
+ Expr *Upper = tryBuildCapture(SemaRef, UBExpr, Captures).get();
+ Expr *Lower = tryBuildCapture(SemaRef, LBExpr, Captures).get();
+ if (!Upper || !Lower)
return nullptr;
- // (Upper - Lower [- 1] + Step) / Step
- Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(), NewStep.get());
+ ExprResult Diff =
+ calculateNumIters(SemaRef, S, DefaultLoc, Lower, Upper, Step, VarType,
+ TestIsStrictOp, /*RoundToStep=*/true, Captures);
if (!Diff.isUsable())
return nullptr;
@@ -6603,55 +7370,37 @@ std::pair<Expr *, Expr *> OpenMPIterationSpaceChecker::buildMinMaxValues(
MaxExpr = Upper;
// Build minimum/maximum value based on number of iterations.
- ExprResult Diff;
QualType VarType = LCDecl->getType().getNonReferenceType();
- Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Lower);
+ ExprResult Diff =
+ calculateNumIters(SemaRef, S, DefaultLoc, Lower, Upper, Step, VarType,
+ TestIsStrictOp, /*RoundToStep=*/false, Captures);
if (!Diff.isUsable())
return std::make_pair(nullptr, nullptr);
- // Upper - Lower [- 1]
- if (TestIsStrictOp)
- Diff = SemaRef.BuildBinOp(
- S, DefaultLoc, BO_Sub, Diff.get(),
- SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
+ // ((Upper - Lower [- 1]) / Step) * Step
+ // Parentheses (for dumping/debugging purposes only).
+ Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
if (!Diff.isUsable())
return std::make_pair(nullptr, nullptr);
- // Upper - Lower [- 1] + Step
ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
if (!NewStep.isUsable())
return std::make_pair(nullptr, nullptr);
-
- // Parentheses (for dumping/debugging purposes only).
- Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
- if (!Diff.isUsable())
- return std::make_pair(nullptr, nullptr);
-
- // (Upper - Lower [- 1]) / Step
- Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(), NewStep.get());
+ Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Mul, Diff.get(), NewStep.get());
if (!Diff.isUsable())
return std::make_pair(nullptr, nullptr);
- // ((Upper - Lower [- 1]) / Step) * Step
// Parentheses (for dumping/debugging purposes only).
Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
if (!Diff.isUsable())
return std::make_pair(nullptr, nullptr);
- Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Mul, Diff.get(), NewStep.get());
- if (!Diff.isUsable())
- return std::make_pair(nullptr, nullptr);
-
- // Convert to the original type or ptrdiff_t, if original type is pointer.
- if (!VarType->isAnyPointerType() &&
- !SemaRef.Context.hasSameType(Diff.get()->getType(), VarType)) {
- Diff = SemaRef.PerformImplicitConversion(
- Diff.get(), VarType, Sema::AA_Converting, /*AllowExplicit=*/true);
- } else if (VarType->isAnyPointerType() &&
- !SemaRef.Context.hasSameType(
- Diff.get()->getType(),
- SemaRef.Context.getUnsignedPointerDiffType())) {
+ // Convert to the ptrdiff_t, if original type is pointer.
+ if (VarType->isAnyPointerType() &&
+ !SemaRef.Context.hasSameType(
+ Diff.get()->getType(),
+ SemaRef.Context.getUnsignedPointerDiffType())) {
Diff = SemaRef.PerformImplicitConversion(
Diff.get(), SemaRef.Context.getUnsignedPointerDiffType(),
Sema::AA_Converting, /*AllowExplicit=*/true);
@@ -6659,33 +7408,43 @@ std::pair<Expr *, Expr *> OpenMPIterationSpaceChecker::buildMinMaxValues(
if (!Diff.isUsable())
return std::make_pair(nullptr, nullptr);
- // Parentheses (for dumping/debugging purposes only).
- Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
- if (!Diff.isUsable())
- return std::make_pair(nullptr, nullptr);
-
if (TestIsLessOp.getValue()) {
// MinExpr = Lower;
// MaxExpr = Lower + (((Upper - Lower [- 1]) / Step) * Step)
- Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Add, Lower, Diff.get());
- if (!Diff.isUsable())
- return std::make_pair(nullptr, nullptr);
- Diff = SemaRef.ActOnFinishFullExpr(Diff.get(), /*DiscardedValue*/ false);
+ Diff = SemaRef.BuildBinOp(
+ S, DefaultLoc, BO_Add,
+ SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Lower).get(),
+ Diff.get());
if (!Diff.isUsable())
return std::make_pair(nullptr, nullptr);
- MaxExpr = Diff.get();
} else {
// MaxExpr = Upper;
// MinExpr = Upper - (((Upper - Lower [- 1]) / Step) * Step)
- Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Diff.get());
- if (!Diff.isUsable())
- return std::make_pair(nullptr, nullptr);
- Diff = SemaRef.ActOnFinishFullExpr(Diff.get(), /*DiscardedValue*/ false);
+ Diff = SemaRef.BuildBinOp(
+ S, DefaultLoc, BO_Sub,
+ SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Upper).get(),
+ Diff.get());
if (!Diff.isUsable())
return std::make_pair(nullptr, nullptr);
- MinExpr = Diff.get();
}
+ // Convert to the original type.
+ if (SemaRef.Context.hasSameType(Diff.get()->getType(), VarType))
+ Diff = SemaRef.PerformImplicitConversion(Diff.get(), VarType,
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true);
+ if (!Diff.isUsable())
+ return std::make_pair(nullptr, nullptr);
+
+ Diff = SemaRef.ActOnFinishFullExpr(Diff.get(), /*DiscardedValue=*/false);
+ if (!Diff.isUsable())
+ return std::make_pair(nullptr, nullptr);
+
+ if (TestIsLessOp.getValue())
+ MaxExpr = Diff.get();
+ else
+ MinExpr = Diff.get();
+
return std::make_pair(MinExpr, MaxExpr);
}
@@ -6791,44 +7550,23 @@ Expr *OpenMPIterationSpaceChecker::buildOrderedLoopData(
if (!Cnt)
return nullptr;
}
- ExprResult Diff;
QualType VarType = LCDecl->getType().getNonReferenceType();
- if (VarType->isIntegerType() || VarType->isPointerType() ||
- SemaRef.getLangOpts().CPlusPlus) {
- // Upper - Lower
- Expr *Upper = TestIsLessOp.getValue()
- ? Cnt
- : tryBuildCapture(SemaRef, UB, Captures).get();
- Expr *Lower = TestIsLessOp.getValue()
- ? tryBuildCapture(SemaRef, LB, Captures).get()
- : Cnt;
- if (!Upper || !Lower)
- return nullptr;
-
- Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Lower);
-
- if (!Diff.isUsable() && VarType->getAsCXXRecordDecl()) {
- // BuildBinOp already emitted error, this one is to point user to upper
- // and lower bound, and to tell what is passed to 'operator-'.
- SemaRef.Diag(Upper->getBeginLoc(), diag::err_omp_loop_diff_cxx)
- << Upper->getSourceRange() << Lower->getSourceRange();
- return nullptr;
- }
- }
-
- if (!Diff.isUsable())
+ if (!VarType->isIntegerType() && !VarType->isPointerType() &&
+ !SemaRef.getLangOpts().CPlusPlus)
return nullptr;
-
- // Parentheses (for dumping/debugging purposes only).
- Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
- if (!Diff.isUsable())
+ // Upper - Lower
+ Expr *Upper = TestIsLessOp.getValue()
+ ? Cnt
+ : tryBuildCapture(SemaRef, LB, Captures).get();
+ Expr *Lower = TestIsLessOp.getValue()
+ ? tryBuildCapture(SemaRef, LB, Captures).get()
+ : Cnt;
+ if (!Upper || !Lower)
return nullptr;
- ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
- if (!NewStep.isUsable())
- return nullptr;
- // (Upper - Lower) / Step
- Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(), NewStep.get());
+ ExprResult Diff = calculateNumIters(SemaRef, S, DefaultLoc, Lower, Upper,
+ Step, VarType, /*TestIsStrictOp=*/false,
+ /*RoundToStep=*/false, Captures);
if (!Diff.isUsable())
return nullptr;
@@ -8088,8 +8826,9 @@ Sema::ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
}
setFunctionHasBranchProtectedScope();
- return OMPForDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount,
- Clauses, AStmt, B, DSAStack->isCancelRegion());
+ return OMPForDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
+ DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
}
StmtResult Sema::ActOnOpenMPForSimdDirective(
@@ -8166,6 +8905,7 @@ StmtResult Sema::ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
setFunctionHasBranchProtectedScope();
return OMPSectionsDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
+ DSAStack->getTaskgroupReductionRef(),
DSAStack->isCancelRegion());
}
@@ -8326,9 +9066,9 @@ StmtResult Sema::ActOnOpenMPParallelForDirective(
}
setFunctionHasBranchProtectedScope();
- return OMPParallelForDirective::Create(Context, StartLoc, EndLoc,
- NestedLoopCount, Clauses, AStmt, B,
- DSAStack->isCancelRegion());
+ return OMPParallelForDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
+ DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
}
StmtResult Sema::ActOnOpenMPParallelForSimdDirective(
@@ -8392,8 +9132,9 @@ Sema::ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
setFunctionHasBranchProtectedScope();
- return OMPParallelMasterDirective::Create(Context, StartLoc, EndLoc, Clauses,
- AStmt);
+ return OMPParallelMasterDirective::Create(
+ Context, StartLoc, EndLoc, Clauses, AStmt,
+ DSAStack->getTaskgroupReductionRef());
}
StmtResult
@@ -8432,7 +9173,31 @@ Sema::ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
setFunctionHasBranchProtectedScope();
return OMPParallelSectionsDirective::Create(
- Context, StartLoc, EndLoc, Clauses, AStmt, DSAStack->isCancelRegion());
+ Context, StartLoc, EndLoc, Clauses, AStmt,
+ DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
+}
+
+/// detach and mergeable clauses are mutually exclusive, check for it.
+static bool checkDetachMergeableClauses(Sema &S,
+ ArrayRef<OMPClause *> Clauses) {
+ const OMPClause *PrevClause = nullptr;
+ bool ErrorFound = false;
+ for (const OMPClause *C : Clauses) {
+ if (C->getClauseKind() == OMPC_detach ||
+ C->getClauseKind() == OMPC_mergeable) {
+ if (!PrevClause) {
+ PrevClause = C;
+ } else if (PrevClause->getClauseKind() != C->getClauseKind()) {
+ S.Diag(C->getBeginLoc(), diag::err_omp_clauses_mutually_exclusive)
+ << getOpenMPClauseName(C->getClauseKind())
+ << getOpenMPClauseName(PrevClause->getClauseKind());
+ S.Diag(PrevClause->getBeginLoc(), diag::note_omp_previous_clause)
+ << getOpenMPClauseName(PrevClause->getClauseKind());
+ ErrorFound = true;
+ }
+ }
+ }
+ return ErrorFound;
}
StmtResult Sema::ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
@@ -8441,6 +9206,12 @@ StmtResult Sema::ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
if (!AStmt)
return StmtError();
+ // OpenMP 5.0, 2.10.1 task Construct
+ // If a detach clause appears on the directive, then a mergeable clause cannot
+ // appear on the same directive.
+ if (checkDetachMergeableClauses(*this, Clauses))
+ return StmtError();
+
auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
@@ -8489,10 +9260,94 @@ StmtResult Sema::ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
StmtResult Sema::ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc) {
- assert(Clauses.size() <= 1 && "Extra clauses in flush directive");
+ OMPFlushClause *FC = nullptr;
+ OMPClause *OrderClause = nullptr;
+ for (OMPClause *C : Clauses) {
+ if (C->getClauseKind() == OMPC_flush)
+ FC = cast<OMPFlushClause>(C);
+ else
+ OrderClause = C;
+ }
+ OpenMPClauseKind MemOrderKind = OMPC_unknown;
+ SourceLocation MemOrderLoc;
+ for (const OMPClause *C : Clauses) {
+ if (C->getClauseKind() == OMPC_acq_rel ||
+ C->getClauseKind() == OMPC_acquire ||
+ C->getClauseKind() == OMPC_release) {
+ if (MemOrderKind != OMPC_unknown) {
+ Diag(C->getBeginLoc(), diag::err_omp_several_mem_order_clauses)
+ << getOpenMPDirectiveName(OMPD_flush) << 1
+ << SourceRange(C->getBeginLoc(), C->getEndLoc());
+ Diag(MemOrderLoc, diag::note_omp_previous_mem_order_clause)
+ << getOpenMPClauseName(MemOrderKind);
+ } else {
+ MemOrderKind = C->getClauseKind();
+ MemOrderLoc = C->getBeginLoc();
+ }
+ }
+ }
+ if (FC && OrderClause) {
+ Diag(FC->getLParenLoc(), diag::err_omp_flush_order_clause_and_list)
+ << getOpenMPClauseName(OrderClause->getClauseKind());
+ Diag(OrderClause->getBeginLoc(), diag::note_omp_flush_order_clause_here)
+ << getOpenMPClauseName(OrderClause->getClauseKind());
+ return StmtError();
+ }
return OMPFlushDirective::Create(Context, StartLoc, EndLoc, Clauses);
}
+StmtResult Sema::ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (Clauses.empty()) {
+ Diag(StartLoc, diag::err_omp_depobj_expected);
+ return StmtError();
+ } else if (Clauses[0]->getClauseKind() != OMPC_depobj) {
+ Diag(Clauses[0]->getBeginLoc(), diag::err_omp_depobj_expected);
+ return StmtError();
+ }
+ // Only depobj expression and another single clause is allowed.
+ if (Clauses.size() > 2) {
+ Diag(Clauses[2]->getBeginLoc(),
+ diag::err_omp_depobj_single_clause_expected);
+ return StmtError();
+ } else if (Clauses.size() < 1) {
+ Diag(Clauses[0]->getEndLoc(), diag::err_omp_depobj_single_clause_expected);
+ return StmtError();
+ }
+ return OMPDepobjDirective::Create(Context, StartLoc, EndLoc, Clauses);
+}
+
+StmtResult Sema::ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ // Check that exactly one clause is specified.
+ if (Clauses.size() != 1) {
+ Diag(Clauses.empty() ? EndLoc : Clauses[1]->getBeginLoc(),
+ diag::err_omp_scan_single_clause_expected);
+ return StmtError();
+ }
+  // Check that scan directive is used in the scope of the OpenMP loop body.
+ if (Scope *S = DSAStack->getCurScope()) {
+ Scope *ParentS = S->getParent();
+ if (!ParentS || ParentS->getParent() != ParentS->getBreakParent() ||
+ !ParentS->getBreakParent()->isOpenMPLoopScope())
+ return StmtError(Diag(StartLoc, diag::err_omp_orphaned_device_directive)
+ << getOpenMPDirectiveName(OMPD_scan) << 5);
+ }
+ // Check that only one instance of scan directives is used in the same outer
+ // region.
+ if (DSAStack->doesParentHasScanDirective()) {
+ Diag(StartLoc, diag::err_omp_several_directives_in_region) << "scan";
+ Diag(DSAStack->getParentScanDirectiveLoc(),
+ diag::note_omp_previous_directive)
+ << "scan";
+ return StmtError();
+ }
+ DSAStack->setParentHasScanDirective(StartLoc);
+ return OMPScanDirective::Create(Context, StartLoc, EndLoc, Clauses);
+}
+
StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
@@ -8555,13 +9410,29 @@ StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation ErrLoc = TC ? TC->getBeginLoc() : StartLoc;
Diag(ErrLoc, diag::err_omp_ordered_directive_with_param)
<< (TC != nullptr);
- Diag(Param->getBeginLoc(), diag::note_omp_ordered_param);
+ Diag(Param->getBeginLoc(), diag::note_omp_ordered_param) << 1;
ErrorFound = true;
}
}
if ((!AStmt && !DependFound) || ErrorFound)
return StmtError();
+ // OpenMP 5.0, 2.17.9, ordered Construct, Restrictions.
+ // During execution of an iteration of a worksharing-loop or a loop nest
+ // within a worksharing-loop, simd, or worksharing-loop SIMD region, a thread
+ // must not execute more than one ordered region corresponding to an ordered
+ // construct without a depend clause.
+ if (!DependFound) {
+ if (DSAStack->doesParentHasOrderedDirective()) {
+ Diag(StartLoc, diag::err_omp_several_directives_in_region) << "ordered";
+ Diag(DSAStack->getParentOrderedDirectiveLoc(),
+ diag::note_omp_previous_directive)
+ << "ordered";
+ return StmtError();
+ }
+ DSAStack->setParentHasOrderedDirective(StartLoc);
+ }
+
if (AStmt) {
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
@@ -8817,6 +9688,8 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc) {
+ // Register location of the first atomic directive.
+ DSAStack->addAtomicDirectiveLoc(StartLoc);
if (!AStmt)
return StmtError();
@@ -8828,6 +9701,8 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
// longjmp() and throw() must not violate the entry/exit criteria.
OpenMPClauseKind AtomicKind = OMPC_unknown;
SourceLocation AtomicKindLoc;
+ OpenMPClauseKind MemOrderKind = OMPC_unknown;
+ SourceLocation MemOrderLoc;
for (const OMPClause *C : Clauses) {
if (C->getClauseKind() == OMPC_read || C->getClauseKind() == OMPC_write ||
C->getClauseKind() == OMPC_update ||
@@ -8835,13 +9710,51 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
if (AtomicKind != OMPC_unknown) {
Diag(C->getBeginLoc(), diag::err_omp_atomic_several_clauses)
<< SourceRange(C->getBeginLoc(), C->getEndLoc());
- Diag(AtomicKindLoc, diag::note_omp_atomic_previous_clause)
+ Diag(AtomicKindLoc, diag::note_omp_previous_mem_order_clause)
<< getOpenMPClauseName(AtomicKind);
} else {
AtomicKind = C->getClauseKind();
AtomicKindLoc = C->getBeginLoc();
}
}
+ if (C->getClauseKind() == OMPC_seq_cst ||
+ C->getClauseKind() == OMPC_acq_rel ||
+ C->getClauseKind() == OMPC_acquire ||
+ C->getClauseKind() == OMPC_release ||
+ C->getClauseKind() == OMPC_relaxed) {
+ if (MemOrderKind != OMPC_unknown) {
+ Diag(C->getBeginLoc(), diag::err_omp_several_mem_order_clauses)
+ << getOpenMPDirectiveName(OMPD_atomic) << 0
+ << SourceRange(C->getBeginLoc(), C->getEndLoc());
+ Diag(MemOrderLoc, diag::note_omp_previous_mem_order_clause)
+ << getOpenMPClauseName(MemOrderKind);
+ } else {
+ MemOrderKind = C->getClauseKind();
+ MemOrderLoc = C->getBeginLoc();
+ }
+ }
+ }
+ // OpenMP 5.0, 2.17.7 atomic Construct, Restrictions
+ // If atomic-clause is read then memory-order-clause must not be acq_rel or
+ // release.
+ // If atomic-clause is write then memory-order-clause must not be acq_rel or
+ // acquire.
+ // If atomic-clause is update or not present then memory-order-clause must not
+ // be acq_rel or acquire.
+ if ((AtomicKind == OMPC_read &&
+ (MemOrderKind == OMPC_acq_rel || MemOrderKind == OMPC_release)) ||
+ ((AtomicKind == OMPC_write || AtomicKind == OMPC_update ||
+ AtomicKind == OMPC_unknown) &&
+ (MemOrderKind == OMPC_acq_rel || MemOrderKind == OMPC_acquire))) {
+ SourceLocation Loc = AtomicKindLoc;
+ if (AtomicKind == OMPC_unknown)
+ Loc = StartLoc;
+ Diag(Loc, diag::err_omp_atomic_incompatible_mem_order_clause)
+ << getOpenMPClauseName(AtomicKind)
+ << (AtomicKind == OMPC_unknown ? 1 : 0)
+ << getOpenMPClauseName(MemOrderKind);
+ Diag(MemOrderLoc, diag::note_omp_previous_mem_order_clause)
+ << getOpenMPClauseName(MemOrderKind);
}
Stmt *Body = CS->getCapturedStmt();
@@ -9338,8 +10251,9 @@ Sema::ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
setFunctionHasBranchProtectedScope();
- return OMPTargetParallelDirective::Create(Context, StartLoc, EndLoc, Clauses,
- AStmt);
+ return OMPTargetParallelDirective::Create(
+ Context, StartLoc, EndLoc, Clauses, AStmt,
+ DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
}
StmtResult Sema::ActOnOpenMPTargetParallelForDirective(
@@ -9391,9 +10305,9 @@ StmtResult Sema::ActOnOpenMPTargetParallelForDirective(
}
setFunctionHasBranchProtectedScope();
- return OMPTargetParallelForDirective::Create(Context, StartLoc, EndLoc,
- NestedLoopCount, Clauses, AStmt,
- B, DSAStack->isCancelRegion());
+ return OMPTargetParallelForDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
+ DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
}
/// Check for existence of a map clause in the list of clauses.
@@ -9418,12 +10332,18 @@ StmtResult Sema::ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- // OpenMP [2.10.1, Restrictions, p. 97]
- // At least one map clause must appear on the directive.
- if (!hasClauses(Clauses, OMPC_map, OMPC_use_device_ptr)) {
+ // OpenMP [2.12.2, target data Construct, Restrictions]
+ // At least one map, use_device_addr or use_device_ptr clause must appear on
+ // the directive.
+ if (!hasClauses(Clauses, OMPC_map, OMPC_use_device_ptr) &&
+ (LangOpts.OpenMP < 50 || !hasClauses(Clauses, OMPC_use_device_addr))) {
+ StringRef Expected;
+ if (LangOpts.OpenMP < 50)
+ Expected = "'map' or 'use_device_ptr'";
+ else
+ Expected = "'map', 'use_device_ptr', or 'use_device_addr'";
Diag(StartLoc, diag::err_omp_no_clause_for_directive)
- << "'map' or 'use_device_ptr'"
- << getOpenMPDirectiveName(OMPD_target_data);
+ << Expected << getOpenMPDirectiveName(OMPD_target_data);
return StmtError();
}
@@ -9604,12 +10524,10 @@ static bool checkGrainsizeNumTasksClauses(Sema &S,
if (!PrevClause)
PrevClause = C;
else if (PrevClause->getClauseKind() != C->getClauseKind()) {
- S.Diag(C->getBeginLoc(),
- diag::err_omp_grainsize_num_tasks_mutually_exclusive)
+ S.Diag(C->getBeginLoc(), diag::err_omp_clauses_mutually_exclusive)
<< getOpenMPClauseName(C->getClauseKind())
<< getOpenMPClauseName(PrevClause->getClauseKind());
- S.Diag(PrevClause->getBeginLoc(),
- diag::note_omp_previous_grainsize_num_tasks)
+ S.Diag(PrevClause->getBeginLoc(), diag::note_omp_previous_clause)
<< getOpenMPClauseName(PrevClause->getClauseKind());
ErrorFound = true;
}
@@ -9678,7 +10596,8 @@ StmtResult Sema::ActOnOpenMPTaskLoopDirective(
setFunctionHasBranchProtectedScope();
return OMPTaskLoopDirective::Create(Context, StartLoc, EndLoc,
- NestedLoopCount, Clauses, AStmt, B);
+ NestedLoopCount, Clauses, AStmt, B,
+ DSAStack->isCancelRegion());
}
StmtResult Sema::ActOnOpenMPTaskLoopSimdDirective(
@@ -9763,7 +10682,8 @@ StmtResult Sema::ActOnOpenMPMasterTaskLoopDirective(
setFunctionHasBranchProtectedScope();
return OMPMasterTaskLoopDirective::Create(Context, StartLoc, EndLoc,
- NestedLoopCount, Clauses, AStmt, B);
+ NestedLoopCount, Clauses, AStmt, B,
+ DSAStack->isCancelRegion());
}
StmtResult Sema::ActOnOpenMPMasterTaskLoopSimdDirective(
@@ -9867,7 +10787,8 @@ StmtResult Sema::ActOnOpenMPParallelMasterTaskLoopDirective(
setFunctionHasBranchProtectedScope();
return OMPParallelMasterTaskLoopDirective::Create(
- Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
+ DSAStack->isCancelRegion());
}
StmtResult Sema::ActOnOpenMPParallelMasterTaskLoopSimdDirective(
@@ -10004,7 +10925,7 @@ StmtResult Sema::ActOnOpenMPDistributeParallelForDirective(
setFunctionHasBranchProtectedScope();
return OMPDistributeParallelForDirective::Create(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
- DSAStack->isCancelRegion());
+ DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
}
StmtResult Sema::ActOnOpenMPDistributeParallelForSimdDirective(
@@ -10301,7 +11222,6 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeSimdDirective(
CS->getCapturedDecl()->setNothrow();
}
-
OMPLoopDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
@@ -10446,7 +11366,7 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeParallelForDirective(
return OMPTeamsDistributeParallelForDirective::Create(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
- DSAStack->isCancelRegion());
+ DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
}
StmtResult Sema::ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
@@ -10575,7 +11495,7 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDistributeParallelForDirective(
setFunctionHasBranchProtectedScope();
return OMPTargetTeamsDistributeParallelForDirective::Create(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
- DSAStack->isCancelRegion());
+ DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
}
StmtResult Sema::ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
@@ -10721,9 +11641,6 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_ordered:
Res = ActOnOpenMPOrderedClause(StartLoc, EndLoc, LParenLoc, Expr);
break;
- case OMPC_device:
- Res = ActOnOpenMPDeviceClause(Expr, StartLoc, LParenLoc, EndLoc);
- break;
case OMPC_num_teams:
Res = ActOnOpenMPNumTeamsClause(Expr, StartLoc, LParenLoc, EndLoc);
break;
@@ -10742,6 +11659,13 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_hint:
Res = ActOnOpenMPHintClause(Expr, StartLoc, LParenLoc, EndLoc);
break;
+ case OMPC_depobj:
+ Res = ActOnOpenMPDepobjClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_detach:
+ Res = ActOnOpenMPDetachClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_device:
case OMPC_if:
case OMPC_default:
case OMPC_proc_bind:
@@ -10768,6 +11692,10 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed:
case OMPC_depend:
case OMPC_threads:
case OMPC_simd:
@@ -10780,6 +11708,7 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_to:
case OMPC_from:
case OMPC_use_device_ptr:
+ case OMPC_use_device_addr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
@@ -10789,6 +11718,13 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_device_type:
case OMPC_match:
case OMPC_nontemporal:
+ case OMPC_order:
+ case OMPC_destroy:
+ case OMPC_inclusive:
+ case OMPC_exclusive:
+ case OMPC_uses_allocators:
+ case OMPC_affinity:
+ default:
llvm_unreachable("Clause is not allowed.");
}
return Res;
@@ -10918,10 +11854,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_teams:
@@ -10939,6 +11879,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_requires:
llvm_unreachable("Unexpected OpenMP directive with if-clause");
case OMPD_unknown:
+ default:
llvm_unreachable("Unknown OpenMP directive");
}
break;
@@ -10988,10 +11929,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_teams:
@@ -11013,6 +11958,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_requires:
llvm_unreachable("Unexpected OpenMP directive with num_threads-clause");
case OMPD_unknown:
+ default:
llvm_unreachable("Unknown OpenMP directive");
}
break;
@@ -11063,10 +12009,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_simd:
@@ -11085,6 +12035,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_requires:
llvm_unreachable("Unexpected OpenMP directive with num_teams-clause");
case OMPD_unknown:
+ default:
llvm_unreachable("Unknown OpenMP directive");
}
break;
@@ -11135,10 +12086,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_simd:
@@ -11157,6 +12112,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_requires:
llvm_unreachable("Unexpected OpenMP directive with thread_limit-clause");
case OMPD_unknown:
+ default:
llvm_unreachable("Unknown OpenMP directive");
}
break;
@@ -11208,10 +12164,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_simd:
@@ -11229,6 +12189,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_requires:
llvm_unreachable("Unexpected OpenMP directive with schedule clause");
case OMPD_unknown:
+ default:
llvm_unreachable("Unknown OpenMP directive");
}
break;
@@ -11280,10 +12241,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_simd:
@@ -11301,6 +12266,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_requires:
llvm_unreachable("Unexpected OpenMP directive with schedule clause");
case OMPD_unknown:
+ default:
llvm_unreachable("Unknown OpenMP directive");
}
break;
@@ -11351,10 +12317,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_simd:
@@ -11373,6 +12343,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_requires:
llvm_unreachable("Unexpected OpenMP directive with num_teams-clause");
case OMPD_unknown:
+ default:
llvm_unreachable("Unknown OpenMP directive");
}
break;
@@ -11425,10 +12396,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
case OMPD_declare_reduction:
case OMPD_declare_mapper:
case OMPD_declare_simd:
case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_simd:
@@ -11447,6 +12422,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_requires:
llvm_unreachable("Unexpected OpenMP directive with grainsize-clause");
case OMPD_unknown:
+ default:
llvm_unreachable("Unknown OpenMP directive");
}
break;
@@ -11474,11 +12450,16 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPC_threadprivate:
case OMPC_allocate:
case OMPC_flush:
+ case OMPC_depobj:
case OMPC_read:
case OMPC_write:
case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed:
case OMPC_depend:
case OMPC_threads:
case OMPC_simd:
@@ -11491,6 +12472,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPC_to:
case OMPC_from:
case OMPC_use_device_ptr:
+ case OMPC_use_device_addr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
@@ -11500,6 +12482,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPC_device_type:
case OMPC_match:
case OMPC_nontemporal:
+ case OMPC_order:
+ case OMPC_destroy:
+ case OMPC_detach:
+ case OMPC_inclusive:
+ case OMPC_exclusive:
+ case OMPC_uses_allocators:
+ case OMPC_affinity:
+ default:
llvm_unreachable("Unexpected OpenMP clause.");
}
return CaptureRegion;
@@ -11747,8 +12737,7 @@ static bool findOMPAllocatorHandleT(Sema &S, SourceLocation Loc,
return true;
// Build the predefined allocator expressions.
bool ErrorFound = false;
- for (int I = OMPAllocateDeclAttr::OMPDefaultMemAlloc;
- I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
+ for (int I = 0; I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
auto AllocatorKind = static_cast<OMPAllocateDeclAttr::AllocatorTypeTy>(I);
StringRef Allocator =
OMPAllocateDeclAttr::ConvertAllocatorTypeTyToStr(AllocatorKind);
@@ -11775,7 +12764,8 @@ static bool findOMPAllocatorHandleT(Sema &S, SourceLocation Loc,
Stack->setAllocator(AllocatorKind, Res.get());
}
if (ErrorFound) {
- S.Diag(Loc, diag::err_implied_omp_allocator_handle_t_not_found);
+ S.Diag(Loc, diag::err_omp_implied_type_not_found)
+ << "omp_allocator_handle_t";
return false;
}
OMPAllocatorHandleT.addConst();
@@ -11852,9 +12842,8 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
OMPClause *Res = nullptr;
switch (Kind) {
case OMPC_default:
- Res =
- ActOnOpenMPDefaultClause(static_cast<OpenMPDefaultClauseKind>(Argument),
- ArgumentLoc, StartLoc, LParenLoc, EndLoc);
+ Res = ActOnOpenMPDefaultClause(static_cast<DefaultKind>(Argument),
+ ArgumentLoc, StartLoc, LParenLoc, EndLoc);
break;
case OMPC_proc_bind:
Res = ActOnOpenMPProcBindClause(static_cast<ProcBindKind>(Argument),
@@ -11865,6 +12854,14 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
static_cast<OpenMPAtomicDefaultMemOrderClauseKind>(Argument),
ArgumentLoc, StartLoc, LParenLoc, EndLoc);
break;
+ case OMPC_order:
+ Res = ActOnOpenMPOrderClause(static_cast<OpenMPOrderClauseKind>(Argument),
+ ArgumentLoc, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_update:
+ Res = ActOnOpenMPUpdateClause(static_cast<OpenMPDependClauseKind>(Argument),
+ ArgumentLoc, StartLoc, LParenLoc, EndLoc);
+ break;
case OMPC_if:
case OMPC_final:
case OMPC_num_threads:
@@ -11891,11 +12888,15 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
case OMPC_threadprivate:
case OMPC_allocate:
case OMPC_flush:
+ case OMPC_depobj:
case OMPC_read:
case OMPC_write:
- case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed:
case OMPC_depend:
case OMPC_device:
case OMPC_threads:
@@ -11915,6 +12916,7 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
case OMPC_to:
case OMPC_from:
case OMPC_use_device_ptr:
+ case OMPC_use_device_addr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
@@ -11923,6 +12925,13 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
case OMPC_device_type:
case OMPC_match:
case OMPC_nontemporal:
+ case OMPC_destroy:
+ case OMPC_detach:
+ case OMPC_inclusive:
+ case OMPC_exclusive:
+ case OMPC_uses_allocators:
+ case OMPC_affinity:
+ default:
llvm_unreachable("Clause is not allowed.");
}
return Res;
@@ -11946,34 +12955,36 @@ getListOfPossibleValues(OpenMPClauseKind K, unsigned First, unsigned Last,
else if (I + Skipped + 1 != Last)
Out << ", ";
}
- return Out.str();
+ return std::string(Out.str());
}
-OMPClause *Sema::ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
+OMPClause *Sema::ActOnOpenMPDefaultClause(DefaultKind Kind,
SourceLocation KindKwLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- if (Kind == OMPC_DEFAULT_unknown) {
- static_assert(OMPC_DEFAULT_unknown > 0,
- "OMPC_DEFAULT_unknown not greater than 0");
+ if (Kind == OMP_DEFAULT_unknown) {
Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
<< getListOfPossibleValues(OMPC_default, /*First=*/0,
- /*Last=*/OMPC_DEFAULT_unknown)
+ /*Last=*/unsigned(OMP_DEFAULT_unknown))
<< getOpenMPClauseName(OMPC_default);
return nullptr;
}
+
switch (Kind) {
- case OMPC_DEFAULT_none:
+ case OMP_DEFAULT_none:
DSAStack->setDefaultDSANone(KindKwLoc);
break;
- case OMPC_DEFAULT_shared:
+ case OMP_DEFAULT_shared:
DSAStack->setDefaultDSAShared(KindKwLoc);
break;
- case OMPC_DEFAULT_unknown:
- llvm_unreachable("Clause kind is not allowed.");
+ case OMP_DEFAULT_firstprivate:
+ DSAStack->setDefaultDSAFirstPrivate(KindKwLoc);
break;
+ default:
+ llvm_unreachable("DSA unexpected in OpenMP default clause");
}
+
return new (Context)
OMPDefaultClause(Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc);
}
@@ -12010,6 +13021,43 @@ OMPClause *Sema::ActOnOpenMPAtomicDefaultMemOrderClause(
LParenLoc, EndLoc);
}
+OMPClause *Sema::ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
+ SourceLocation KindKwLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ if (Kind == OMPC_ORDER_unknown) {
+ static_assert(OMPC_ORDER_unknown > 0,
+ "OMPC_ORDER_unknown not greater than 0");
+ Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
+ << getListOfPossibleValues(OMPC_order, /*First=*/0,
+ /*Last=*/OMPC_ORDER_unknown)
+ << getOpenMPClauseName(OMPC_order);
+ return nullptr;
+ }
+ return new (Context)
+ OMPOrderClause(Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
+ SourceLocation KindKwLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ if (Kind == OMPC_DEPEND_unknown || Kind == OMPC_DEPEND_source ||
+ Kind == OMPC_DEPEND_sink || Kind == OMPC_DEPEND_depobj) {
+ unsigned Except[] = {OMPC_DEPEND_source, OMPC_DEPEND_sink,
+ OMPC_DEPEND_depobj};
+ Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
+ << getListOfPossibleValues(OMPC_depend, /*First=*/0,
+ /*Last=*/OMPC_DEPEND_unknown, Except)
+ << getOpenMPClauseName(OMPC_update);
+ return nullptr;
+ }
+ return OMPUpdateClause::Create(Context, StartLoc, LParenLoc, KindKwLoc, Kind,
+ EndLoc);
+}
+
OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Argument, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
@@ -12047,6 +13095,12 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
StartLoc, LParenLoc, ArgumentLoc[Modifier], ArgumentLoc[DefaultmapKind],
EndLoc);
break;
+ case OMPC_device:
+ assert(Argument.size() == 1 && ArgumentLoc.size() == 1);
+ Res = ActOnOpenMPDeviceClause(
+ static_cast<OpenMPDeviceClauseModifier>(Argument.back()), Expr,
+ StartLoc, LParenLoc, ArgumentLoc.back(), EndLoc);
+ break;
case OMPC_final:
case OMPC_num_threads:
case OMPC_safelen:
@@ -12073,13 +13127,17 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
case OMPC_threadprivate:
case OMPC_allocate:
case OMPC_flush:
+ case OMPC_depobj:
case OMPC_read:
case OMPC_write:
case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed:
case OMPC_depend:
- case OMPC_device:
case OMPC_threads:
case OMPC_simd:
case OMPC_map:
@@ -12095,6 +13153,7 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
case OMPC_to:
case OMPC_from:
case OMPC_use_device_ptr:
+ case OMPC_use_device_addr:
case OMPC_is_device_ptr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
@@ -12104,6 +13163,14 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
case OMPC_device_type:
case OMPC_match:
case OMPC_nontemporal:
+ case OMPC_order:
+ case OMPC_destroy:
+ case OMPC_detach:
+ case OMPC_inclusive:
+ case OMPC_exclusive:
+ case OMPC_uses_allocators:
+ case OMPC_affinity:
+ default:
llvm_unreachable("Clause is not allowed.");
}
return Res;
@@ -12170,7 +13237,9 @@ OMPClause *Sema::ActOnOpenMPScheduleClause(
// OpenMP, 2.7.1, Loop Construct, Restrictions
// The nonmonotonic modifier can only be specified with schedule(dynamic) or
// schedule(guided).
- if ((M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
+ // OpenMP 5.0 does not have this restriction.
+ if (LangOpts.OpenMP < 50 &&
+ (M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic) &&
Kind != OMPC_SCHEDULE_dynamic && Kind != OMPC_SCHEDULE_guided) {
Diag(M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic ? M1Loc : M2Loc,
@@ -12250,6 +13319,18 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_seq_cst:
Res = ActOnOpenMPSeqCstClause(StartLoc, EndLoc);
break;
+ case OMPC_acq_rel:
+ Res = ActOnOpenMPAcqRelClause(StartLoc, EndLoc);
+ break;
+ case OMPC_acquire:
+ Res = ActOnOpenMPAcquireClause(StartLoc, EndLoc);
+ break;
+ case OMPC_release:
+ Res = ActOnOpenMPReleaseClause(StartLoc, EndLoc);
+ break;
+ case OMPC_relaxed:
+ Res = ActOnOpenMPRelaxedClause(StartLoc, EndLoc);
+ break;
case OMPC_threads:
Res = ActOnOpenMPThreadsClause(StartLoc, EndLoc);
break;
@@ -12271,6 +13352,9 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_dynamic_allocators:
Res = ActOnOpenMPDynamicAllocatorsClause(StartLoc, EndLoc);
break;
+ case OMPC_destroy:
+ Res = ActOnOpenMPDestroyClause(StartLoc, EndLoc);
+ break;
case OMPC_if:
case OMPC_final:
case OMPC_num_threads:
@@ -12295,6 +13379,7 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_threadprivate:
case OMPC_allocate:
case OMPC_flush:
+ case OMPC_depobj:
case OMPC_depend:
case OMPC_device:
case OMPC_map:
@@ -12311,11 +13396,19 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_to:
case OMPC_from:
case OMPC_use_device_ptr:
+ case OMPC_use_device_addr:
case OMPC_is_device_ptr:
case OMPC_atomic_default_mem_order:
case OMPC_device_type:
case OMPC_match:
case OMPC_nontemporal:
+ case OMPC_order:
+ case OMPC_detach:
+ case OMPC_inclusive:
+ case OMPC_exclusive:
+ case OMPC_uses_allocators:
+ case OMPC_affinity:
+ default:
llvm_unreachable("Clause is not allowed.");
}
return Res;
@@ -12349,7 +13442,7 @@ OMPClause *Sema::ActOnOpenMPWriteClause(SourceLocation StartLoc,
OMPClause *Sema::ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc) {
- return new (Context) OMPUpdateClause(StartLoc, EndLoc);
+ return OMPUpdateClause::Create(Context, StartLoc, EndLoc);
}
OMPClause *Sema::ActOnOpenMPCaptureClause(SourceLocation StartLoc,
@@ -12362,6 +13455,26 @@ OMPClause *Sema::ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
return new (Context) OMPSeqCstClause(StartLoc, EndLoc);
}
+OMPClause *Sema::ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPAcqRelClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPAcquireClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPAcquireClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPReleaseClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPReleaseClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPRelaxedClause(StartLoc, EndLoc);
+}
+
OMPClause *Sema::ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc) {
return new (Context) OMPThreadsClause(StartLoc, EndLoc);
@@ -12397,14 +13510,19 @@ OMPClause *Sema::ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
return new (Context) OMPDynamicAllocatorsClause(StartLoc, EndLoc);
}
+OMPClause *Sema::ActOnOpenMPDestroyClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPDestroyClause(StartLoc, EndLoc);
+}
+
OMPClause *Sema::ActOnOpenMPVarListClause(
- OpenMPClauseKind Kind, ArrayRef<Expr *> VarList, Expr *TailExpr,
+ OpenMPClauseKind Kind, ArrayRef<Expr *> VarList, Expr *DepModOrTailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
- SourceLocation DepLinMapLastLoc) {
+ SourceLocation ExtraModifierLoc) {
SourceLocation StartLoc = Locs.StartLoc;
SourceLocation LParenLoc = Locs.LParenLoc;
SourceLocation EndLoc = Locs.EndLoc;
@@ -12421,15 +13539,18 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
"Unexpected lastprivate modifier.");
Res = ActOnOpenMPLastprivateClause(
VarList, static_cast<OpenMPLastprivateModifier>(ExtraModifier),
- DepLinMapLastLoc, ColonLoc, StartLoc, LParenLoc, EndLoc);
+ ExtraModifierLoc, ColonLoc, StartLoc, LParenLoc, EndLoc);
break;
case OMPC_shared:
Res = ActOnOpenMPSharedClause(VarList, StartLoc, LParenLoc, EndLoc);
break;
case OMPC_reduction:
- Res = ActOnOpenMPReductionClause(VarList, StartLoc, LParenLoc, ColonLoc,
- EndLoc, ReductionOrMapperIdScopeSpec,
- ReductionOrMapperId);
+ assert(0 <= ExtraModifier && ExtraModifier <= OMPC_REDUCTION_unknown &&
+ "Unexpected lastprivate modifier.");
+ Res = ActOnOpenMPReductionClause(
+ VarList, static_cast<OpenMPReductionClauseModifier>(ExtraModifier),
+ StartLoc, LParenLoc, ExtraModifierLoc, ColonLoc, EndLoc,
+ ReductionOrMapperIdScopeSpec, ReductionOrMapperId);
break;
case OMPC_task_reduction:
Res = ActOnOpenMPTaskReductionClause(VarList, StartLoc, LParenLoc, ColonLoc,
@@ -12445,13 +13566,13 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
assert(0 <= ExtraModifier && ExtraModifier <= OMPC_LINEAR_unknown &&
"Unexpected linear modifier.");
Res = ActOnOpenMPLinearClause(
- VarList, TailExpr, StartLoc, LParenLoc,
- static_cast<OpenMPLinearClauseKind>(ExtraModifier), DepLinMapLastLoc,
+ VarList, DepModOrTailExpr, StartLoc, LParenLoc,
+ static_cast<OpenMPLinearClauseKind>(ExtraModifier), ExtraModifierLoc,
ColonLoc, EndLoc);
break;
case OMPC_aligned:
- Res = ActOnOpenMPAlignedClause(VarList, TailExpr, StartLoc, LParenLoc,
- ColonLoc, EndLoc);
+ Res = ActOnOpenMPAlignedClause(VarList, DepModOrTailExpr, StartLoc,
+ LParenLoc, ColonLoc, EndLoc);
break;
case OMPC_copyin:
Res = ActOnOpenMPCopyinClause(VarList, StartLoc, LParenLoc, EndLoc);
@@ -12466,8 +13587,8 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
assert(0 <= ExtraModifier && ExtraModifier <= OMPC_DEPEND_unknown &&
"Unexpected depend modifier.");
Res = ActOnOpenMPDependClause(
- static_cast<OpenMPDependClauseKind>(ExtraModifier), DepLinMapLastLoc,
- ColonLoc, VarList, StartLoc, LParenLoc, EndLoc);
+ DepModOrTailExpr, static_cast<OpenMPDependClauseKind>(ExtraModifier),
+ ExtraModifierLoc, ColonLoc, VarList, StartLoc, LParenLoc, EndLoc);
break;
case OMPC_map:
assert(0 <= ExtraModifier && ExtraModifier <= OMPC_MAP_unknown &&
@@ -12475,7 +13596,7 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
Res = ActOnOpenMPMapClause(
MapTypeModifiers, MapTypeModifiersLoc, ReductionOrMapperIdScopeSpec,
ReductionOrMapperId, static_cast<OpenMPMapClauseKind>(ExtraModifier),
- IsMapTypeImplicit, DepLinMapLastLoc, ColonLoc, VarList, Locs);
+ IsMapTypeImplicit, ExtraModifierLoc, ColonLoc, VarList, Locs);
break;
case OMPC_to:
Res = ActOnOpenMPToClause(VarList, ReductionOrMapperIdScopeSpec,
@@ -12488,17 +13609,31 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
case OMPC_use_device_ptr:
Res = ActOnOpenMPUseDevicePtrClause(VarList, Locs);
break;
+ case OMPC_use_device_addr:
+ Res = ActOnOpenMPUseDeviceAddrClause(VarList, Locs);
+ break;
case OMPC_is_device_ptr:
Res = ActOnOpenMPIsDevicePtrClause(VarList, Locs);
break;
case OMPC_allocate:
- Res = ActOnOpenMPAllocateClause(TailExpr, VarList, StartLoc, LParenLoc,
- ColonLoc, EndLoc);
+ Res = ActOnOpenMPAllocateClause(DepModOrTailExpr, VarList, StartLoc,
+ LParenLoc, ColonLoc, EndLoc);
break;
case OMPC_nontemporal:
Res = ActOnOpenMPNontemporalClause(VarList, StartLoc, LParenLoc, EndLoc);
break;
+ case OMPC_inclusive:
+ Res = ActOnOpenMPInclusiveClause(VarList, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_exclusive:
+ Res = ActOnOpenMPExclusiveClause(VarList, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_affinity:
+ Res = ActOnOpenMPAffinityClause(StartLoc, LParenLoc, ColonLoc, EndLoc,
+ DepModOrTailExpr, VarList);
+ break;
case OMPC_if:
+ case OMPC_depobj:
case OMPC_final:
case OMPC_num_threads:
case OMPC_safelen:
@@ -12518,6 +13653,10 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
case OMPC_update:
case OMPC_capture:
case OMPC_seq_cst:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed:
case OMPC_device:
case OMPC_threads:
case OMPC_simd:
@@ -12539,6 +13678,11 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
case OMPC_atomic_default_mem_order:
case OMPC_device_type:
case OMPC_match:
+ case OMPC_order:
+ case OMPC_destroy:
+ case OMPC_detach:
+ case OMPC_uses_allocators:
+ default:
llvm_unreachable("Clause is not allowed.");
}
return Res;
@@ -12985,7 +14129,8 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
ExprCaptures.push_back(Ref->getDecl());
}
}
- DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_firstprivate, Ref);
+ if (!IsImplicitClause)
+ DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_firstprivate, Ref);
Vars.push_back((VD || CurContext->isDependentContext())
? RefExpr->IgnoreParens()
: Ref);
@@ -13518,6 +14663,12 @@ struct ReductionData {
SmallVector<Expr *, 8> RHSs;
/// Reduction operation expression.
SmallVector<Expr *, 8> ReductionOps;
+ /// inscan copy operation expressions.
+ SmallVector<Expr *, 8> InscanCopyOps;
+ /// inscan copy temp array expressions for prefix sums.
+ SmallVector<Expr *, 8> InscanCopyArrayTemps;
+ /// inscan copy temp array element expressions for prefix sums.
+ SmallVector<Expr *, 8> InscanCopyArrayElems;
/// Taskgroup descriptors for the corresponding reduction items in
/// in_reduction clauses.
SmallVector<Expr *, 8> TaskgroupDescriptors;
@@ -13525,14 +14676,21 @@ struct ReductionData {
SmallVector<Decl *, 4> ExprCaptures;
/// List of postupdate expressions.
SmallVector<Expr *, 4> ExprPostUpdates;
+ /// Reduction modifier.
+ unsigned RedModifier = 0;
ReductionData() = delete;
/// Reserves required memory for the reduction data.
- ReductionData(unsigned Size) {
+ ReductionData(unsigned Size, unsigned Modifier = 0) : RedModifier(Modifier) {
Vars.reserve(Size);
Privates.reserve(Size);
LHSs.reserve(Size);
RHSs.reserve(Size);
ReductionOps.reserve(Size);
+ if (RedModifier == OMPC_REDUCTION_inscan) {
+ InscanCopyOps.reserve(Size);
+ InscanCopyArrayTemps.reserve(Size);
+ InscanCopyArrayElems.reserve(Size);
+ }
TaskgroupDescriptors.reserve(Size);
ExprCaptures.reserve(Size);
ExprPostUpdates.reserve(Size);
@@ -13546,16 +14704,31 @@ struct ReductionData {
RHSs.emplace_back(nullptr);
ReductionOps.emplace_back(ReductionOp);
TaskgroupDescriptors.emplace_back(nullptr);
+ if (RedModifier == OMPC_REDUCTION_inscan) {
+ InscanCopyOps.push_back(nullptr);
+ InscanCopyArrayTemps.push_back(nullptr);
+ InscanCopyArrayElems.push_back(nullptr);
+ }
}
/// Stores reduction data.
void push(Expr *Item, Expr *Private, Expr *LHS, Expr *RHS, Expr *ReductionOp,
- Expr *TaskgroupDescriptor) {
+ Expr *TaskgroupDescriptor, Expr *CopyOp, Expr *CopyArrayTemp,
+ Expr *CopyArrayElem) {
Vars.emplace_back(Item);
Privates.emplace_back(Private);
LHSs.emplace_back(LHS);
RHSs.emplace_back(RHS);
ReductionOps.emplace_back(ReductionOp);
TaskgroupDescriptors.emplace_back(TaskgroupDescriptor);
+ if (RedModifier == OMPC_REDUCTION_inscan) {
+ InscanCopyOps.push_back(CopyOp);
+ InscanCopyArrayTemps.push_back(CopyArrayTemp);
+ InscanCopyArrayElems.push_back(CopyArrayElem);
+ } else {
+ assert(CopyOp == nullptr && CopyArrayTemp == nullptr &&
+ CopyArrayElem == nullptr &&
+ "Copy operation must be used for inscan reductions only.");
+ }
}
};
} // namespace
@@ -13567,7 +14740,7 @@ static bool checkOMPArraySectionConstantForReduction(
if (Length == nullptr) {
// For array sections of the form [1:] or [:], we would need to analyze
// the lower bound...
- if (OASE->getColonLoc().isValid())
+ if (OASE->getColonLocFirst().isValid())
return false;
// This is an array subscript which has implicit length 1!
@@ -13593,7 +14766,7 @@ static bool checkOMPArraySectionConstantForReduction(
if (Length == nullptr) {
// For array sections of the form [1:] or [:], we would need to analyze
// the lower bound...
- if (OASE->getColonLoc().isValid())
+ if (OASE->getColonLocFirst().isValid())
return false;
// This is an array subscript which has implicit length 1!
@@ -13948,11 +15121,11 @@ static bool actOnOMPReductionKindClause(
if (isOpenMPTargetExecutionDirective(Stack->getCurrentDirective())) {
S.Diag(ELoc, diag::err_omp_reduction_vla_unsupported) << !!OASE;
S.Diag(ELoc, diag::note_vla_unsupported);
+ continue;
} else {
S.targetDiag(ELoc, diag::err_omp_reduction_vla_unsupported) << !!OASE;
S.targetDiag(ELoc, diag::note_vla_unsupported);
}
- continue;
}
// For arrays/array sections only:
// Create pseudo array type for private copy. The size for this array will
@@ -14007,9 +15180,9 @@ static bool actOnOMPReductionKindClause(
if (auto *ComplexTy = OrigType->getAs<ComplexType>())
Type = ComplexTy->getElementType();
if (Type->isRealFloatingType()) {
- llvm::APFloat InitValue =
- llvm::APFloat::getAllOnesValue(Context.getTypeSize(Type),
- /*isIEEE=*/true);
+ llvm::APFloat InitValue = llvm::APFloat::getAllOnesValue(
+ Context.getFloatTypeSemantics(Type),
+ Context.getTypeSize(Type));
Init = FloatingLiteral::Create(Context, InitValue, /*isexact=*/true,
Type, ELoc);
} else if (Type->isScalarType()) {
@@ -14157,6 +15330,53 @@ static bool actOnOMPReductionKindClause(
continue;
}
+ // Add copy operations for inscan reductions.
+ // LHS = RHS;
+ ExprResult CopyOpRes, TempArrayRes, TempArrayElem;
+ if (ClauseKind == OMPC_reduction &&
+ RD.RedModifier == OMPC_REDUCTION_inscan) {
+ ExprResult RHS = S.DefaultLvalueConversion(RHSDRE);
+ CopyOpRes = S.BuildBinOp(Stack->getCurScope(), ELoc, BO_Assign, LHSDRE,
+ RHS.get());
+ if (!CopyOpRes.isUsable())
+ continue;
+ CopyOpRes =
+ S.ActOnFinishFullExpr(CopyOpRes.get(), /*DiscardedValue=*/true);
+ if (!CopyOpRes.isUsable())
+ continue;
+ // For simd directive and simd-based directives in simd mode no need to
+ // construct temp array, need just a single temp element.
+ if (Stack->getCurrentDirective() == OMPD_simd ||
+ (S.getLangOpts().OpenMPSimd &&
+ isOpenMPSimdDirective(Stack->getCurrentDirective()))) {
+ VarDecl *TempArrayVD =
+ buildVarDecl(S, ELoc, PrivateTy, D->getName(),
+ D->hasAttrs() ? &D->getAttrs() : nullptr);
+ // Add a constructor to the temp decl.
+ S.ActOnUninitializedDecl(TempArrayVD);
+ TempArrayRes = buildDeclRefExpr(S, TempArrayVD, PrivateTy, ELoc);
+ } else {
+ // Build temp array for prefix sum.
+ auto *Dim = new (S.Context)
+ OpaqueValueExpr(ELoc, S.Context.getSizeType(), VK_RValue);
+ QualType ArrayTy =
+ S.Context.getVariableArrayType(PrivateTy, Dim, ArrayType::Normal,
+ /*IndexTypeQuals=*/0, {ELoc, ELoc});
+ VarDecl *TempArrayVD =
+ buildVarDecl(S, ELoc, ArrayTy, D->getName(),
+ D->hasAttrs() ? &D->getAttrs() : nullptr);
+ // Add a constructor to the temp decl.
+ S.ActOnUninitializedDecl(TempArrayVD);
+ TempArrayRes = buildDeclRefExpr(S, TempArrayVD, ArrayTy, ELoc);
+ TempArrayElem =
+ S.DefaultFunctionArrayLvalueConversion(TempArrayRes.get());
+ auto *Idx = new (S.Context)
+ OpaqueValueExpr(ELoc, S.Context.getSizeType(), VK_RValue);
+ TempArrayElem = S.CreateBuiltinArraySubscriptExpr(TempArrayElem.get(),
+ ELoc, Idx, ELoc);
+ }
+ }
+
// OpenMP [2.15.4.6, Restrictions, p.2]
// A list item that appears in an in_reduction clause of a task construct
// must appear in a task_reduction clause of a construct associated with a
@@ -14167,8 +15387,8 @@ static bool actOnOMPReductionKindClause(
if (ClauseKind == OMPC_in_reduction) {
SourceRange ParentSR;
BinaryOperatorKind ParentBOK;
- const Expr *ParentReductionOp;
- Expr *ParentBOKTD, *ParentReductionOpTD;
+ const Expr *ParentReductionOp = nullptr;
+ Expr *ParentBOKTD = nullptr, *ParentReductionOpTD = nullptr;
DSAStackTy::DSAVarData ParentBOKDSA =
Stack->getTopMostTaskgroupReductionData(D, ParentSR, ParentBOK,
ParentBOKTD);
@@ -14177,13 +15397,9 @@ static bool actOnOMPReductionKindClause(
D, ParentSR, ParentReductionOp, ParentReductionOpTD);
bool IsParentBOK = ParentBOKDSA.DKind != OMPD_unknown;
bool IsParentReductionOp = ParentReductionOpDSA.DKind != OMPD_unknown;
- if (!IsParentBOK && !IsParentReductionOp) {
- S.Diag(ELoc, diag::err_omp_in_reduction_not_task_reduction);
- continue;
- }
if ((DeclareReductionRef.isUnset() && IsParentReductionOp) ||
- (DeclareReductionRef.isUsable() && IsParentBOK) || BOK != ParentBOK ||
- IsParentReductionOp) {
+ (DeclareReductionRef.isUsable() && IsParentBOK) ||
+ (IsParentBOK && BOK != ParentBOK) || IsParentReductionOp) {
bool EmitError = true;
if (IsParentReductionOp && DeclareReductionRef.isUsable()) {
llvm::FoldingSetNodeID RedId, ParentRedId;
@@ -14206,7 +15422,6 @@ static bool actOnOMPReductionKindClause(
}
}
TaskgroupDescriptor = IsParentBOK ? ParentBOKTD : ParentReductionOpTD;
- assert(TaskgroupDescriptor && "Taskgroup descriptor must be defined.");
}
DeclRefExpr *Ref = nullptr;
@@ -14245,8 +15460,17 @@ static bool actOnOMPReductionKindClause(
}
// All reduction items are still marked as reduction (to do not increase
// code base size).
- Stack->addDSA(D, RefExpr->IgnoreParens(), OMPC_reduction, Ref);
- if (CurrDir == OMPD_taskgroup) {
+ unsigned Modifier = RD.RedModifier;
+ // Consider task_reductions as reductions with task modifier. Required for
+ // correct analysis of in_reduction clauses.
+ if (CurrDir == OMPD_taskgroup && ClauseKind == OMPC_task_reduction)
+ Modifier = OMPC_REDUCTION_task;
+ Stack->addDSA(D, RefExpr->IgnoreParens(), OMPC_reduction, Ref, Modifier);
+ if (Modifier == OMPC_REDUCTION_task &&
+ (CurrDir == OMPD_taskgroup ||
+ ((isOpenMPParallelDirective(CurrDir) ||
+ isOpenMPWorksharingDirective(CurrDir)) &&
+ !isOpenMPSimdDirective(CurrDir)))) {
if (DeclareReductionRef.isUsable())
Stack->addTaskgroupReductionData(D, ReductionIdRange,
DeclareReductionRef.get());
@@ -14254,17 +15478,41 @@ static bool actOnOMPReductionKindClause(
Stack->addTaskgroupReductionData(D, ReductionIdRange, BOK);
}
RD.push(VarsExpr, PrivateDRE, LHSDRE, RHSDRE, ReductionOp.get(),
- TaskgroupDescriptor);
+ TaskgroupDescriptor, CopyOpRes.get(), TempArrayRes.get(),
+ TempArrayElem.get());
}
return RD.Vars.empty();
}
OMPClause *Sema::ActOnOpenMPReductionClause(
- ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc,
- SourceLocation ColonLoc, SourceLocation EndLoc,
+ ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions) {
- ReductionData RD(VarList.size());
+ if (ModifierLoc.isValid() && Modifier == OMPC_REDUCTION_unknown) {
+ Diag(LParenLoc, diag::err_omp_unexpected_clause_value)
+ << getListOfPossibleValues(OMPC_reduction, /*First=*/0,
+ /*Last=*/OMPC_REDUCTION_unknown)
+ << getOpenMPClauseName(OMPC_reduction);
+ return nullptr;
+ }
+ // OpenMP 5.0, 2.19.5.4 reduction Clause, Restrictions
+ // A reduction clause with the inscan reduction-modifier may only appear on a
+ // worksharing-loop construct, a worksharing-loop SIMD construct, a simd
+ // construct, a parallel worksharing-loop construct or a parallel
+ // worksharing-loop SIMD construct.
+ if (Modifier == OMPC_REDUCTION_inscan &&
+ (DSAStack->getCurrentDirective() != OMPD_for &&
+ DSAStack->getCurrentDirective() != OMPD_for_simd &&
+ DSAStack->getCurrentDirective() != OMPD_simd &&
+ DSAStack->getCurrentDirective() != OMPD_parallel_for &&
+ DSAStack->getCurrentDirective() != OMPD_parallel_for_simd)) {
+ Diag(ModifierLoc, diag::err_omp_wrong_inscan_reduction);
+ return nullptr;
+ }
+
+ ReductionData RD(VarList.size(), Modifier);
if (actOnOMPReductionKindClause(*this, DSAStack, OMPC_reduction, VarList,
StartLoc, LParenLoc, ColonLoc, EndLoc,
ReductionIdScopeSpec, ReductionId,
@@ -14272,9 +15520,10 @@ OMPClause *Sema::ActOnOpenMPReductionClause(
return nullptr;
return OMPReductionClause::Create(
- Context, StartLoc, LParenLoc, ColonLoc, EndLoc, RD.Vars,
- ReductionIdScopeSpec.getWithLocInContext(Context), ReductionId,
- RD.Privates, RD.LHSs, RD.RHSs, RD.ReductionOps,
+ Context, StartLoc, LParenLoc, ModifierLoc, ColonLoc, EndLoc, Modifier,
+ RD.Vars, ReductionIdScopeSpec.getWithLocInContext(Context), ReductionId,
+ RD.Privates, RD.LHSs, RD.RHSs, RD.ReductionOps, RD.InscanCopyOps,
+ RD.InscanCopyArrayTemps, RD.InscanCopyArrayElems,
buildPreInits(Context, RD.ExprCaptures),
buildPostUpdate(*this, RD.ExprPostUpdates));
}
@@ -14330,8 +15579,8 @@ bool Sema::CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
}
bool Sema::CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
- OpenMPLinearClauseKind LinKind,
- QualType Type) {
+ OpenMPLinearClauseKind LinKind, QualType Type,
+ bool IsDeclareSimd) {
const auto *VD = dyn_cast_or_null<VarDecl>(D);
// A variable must not have an incomplete type or a reference type.
if (RequireCompleteType(ELoc, Type, diag::err_omp_linear_incomplete_type))
@@ -14347,8 +15596,10 @@ bool Sema::CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
// OpenMP 5.0 [2.19.3, List Item Privatization, Restrictions]
// A variable that is privatized must not have a const-qualified type
// unless it is of class type with a mutable member. This restriction does
- // not apply to the firstprivate clause.
- if (rejectConstNotMutableType(*this, D, Type, OMPC_linear, ELoc))
+ // not apply to the firstprivate clause, nor to the linear clause on
+ // declarative directives (like declare simd).
+ if (!IsDeclareSimd &&
+ rejectConstNotMutableType(*this, D, Type, OMPC_linear, ELoc))
return true;
// A list item must be of integral or pointer type.
@@ -14900,8 +16151,53 @@ OMPClause *Sema::ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
return OMPFlushClause::Create(Context, StartLoc, LParenLoc, EndLoc, VarList);
}
+/// Tries to find omp_depend_t. type.
+static bool findOMPDependT(Sema &S, SourceLocation Loc, DSAStackTy *Stack,
+ bool Diagnose = true) {
+ QualType OMPDependT = Stack->getOMPDependT();
+ if (!OMPDependT.isNull())
+ return true;
+ IdentifierInfo *II = &S.PP.getIdentifierTable().get("omp_depend_t");
+ ParsedType PT = S.getTypeName(*II, Loc, S.getCurScope());
+ if (!PT.getAsOpaquePtr() || PT.get().isNull()) {
+ if (Diagnose)
+ S.Diag(Loc, diag::err_omp_implied_type_not_found) << "omp_depend_t";
+ return false;
+ }
+ Stack->setOMPDependT(PT.get());
+ return true;
+}
+
+OMPClause *Sema::ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ if (!Depobj)
+ return nullptr;
+
+ bool OMPDependTFound = findOMPDependT(*this, StartLoc, DSAStack);
+
+ // OpenMP 5.0, 2.17.10.1 depobj Construct
+ // depobj is an lvalue expression of type omp_depend_t.
+ if (!Depobj->isTypeDependent() && !Depobj->isValueDependent() &&
+ !Depobj->isInstantiationDependent() &&
+ !Depobj->containsUnexpandedParameterPack() &&
+ (OMPDependTFound &&
+ !Context.typesAreCompatible(DSAStack->getOMPDependT(), Depobj->getType(),
+ /*CompareUnqualified=*/true))) {
+ Diag(Depobj->getExprLoc(), diag::err_omp_expected_omp_depend_t_lvalue)
+ << 0 << Depobj->getType() << Depobj->getSourceRange();
+ }
+
+ if (!Depobj->isLValue()) {
+ Diag(Depobj->getExprLoc(), diag::err_omp_expected_omp_depend_t_lvalue)
+ << 1 << Depobj->getSourceRange();
+ }
+
+ return OMPDepobjClause::Create(Context, StartLoc, LParenLoc, EndLoc, Depobj);
+}
+
OMPClause *
-Sema::ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
+Sema::ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc) {
@@ -14911,16 +16207,38 @@ Sema::ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
<< "'source' or 'sink'" << getOpenMPClauseName(OMPC_depend);
return nullptr;
}
- if (DSAStack->getCurrentDirective() != OMPD_ordered &&
+ if ((DSAStack->getCurrentDirective() != OMPD_ordered ||
+ DSAStack->getCurrentDirective() == OMPD_depobj) &&
(DepKind == OMPC_DEPEND_unknown || DepKind == OMPC_DEPEND_source ||
- DepKind == OMPC_DEPEND_sink)) {
- unsigned Except[] = {OMPC_DEPEND_source, OMPC_DEPEND_sink};
+ DepKind == OMPC_DEPEND_sink ||
+ ((LangOpts.OpenMP < 50 ||
+ DSAStack->getCurrentDirective() == OMPD_depobj) &&
+ DepKind == OMPC_DEPEND_depobj))) {
+ SmallVector<unsigned, 3> Except;
+ Except.push_back(OMPC_DEPEND_source);
+ Except.push_back(OMPC_DEPEND_sink);
+ if (LangOpts.OpenMP < 50 || DSAStack->getCurrentDirective() == OMPD_depobj)
+ Except.push_back(OMPC_DEPEND_depobj);
+ std::string Expected = (LangOpts.OpenMP >= 50 && !DepModifier)
+ ? "depend modifier(iterator) or "
+ : "";
Diag(DepLoc, diag::err_omp_unexpected_clause_value)
- << getListOfPossibleValues(OMPC_depend, /*First=*/0,
- /*Last=*/OMPC_DEPEND_unknown, Except)
+ << Expected + getListOfPossibleValues(OMPC_depend, /*First=*/0,
+ /*Last=*/OMPC_DEPEND_unknown,
+ Except)
<< getOpenMPClauseName(OMPC_depend);
return nullptr;
}
+ if (DepModifier &&
+ (DepKind == OMPC_DEPEND_source || DepKind == OMPC_DEPEND_sink)) {
+ Diag(DepModifier->getExprLoc(),
+ diag::err_omp_depend_sink_source_with_modifier);
+ return nullptr;
+ }
+ if (DepModifier &&
+ !DepModifier->getType()->isSpecificBuiltinType(BuiltinType::OMPIterator))
+ Diag(DepModifier->getExprLoc(), diag::err_omp_depend_modifier_not_iterator);
+
SmallVector<Expr *, 8> Vars;
DSAStackTy::OperatorOffsetTy OpsOffs;
llvm::APSInt DepCounter(/*BitWidth=*/32);
@@ -15021,42 +16339,97 @@ Sema::ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
}
OpsOffs.emplace_back(RHS, OOK);
} else {
- // OpenMP 5.0 [2.17.11, Restrictions]
- // List items used in depend clauses cannot be zero-length array sections.
- const auto *OASE = dyn_cast<OMPArraySectionExpr>(SimpleExpr);
- if (OASE) {
- const Expr *Length = OASE->getLength();
- Expr::EvalResult Result;
- if (Length && !Length->isValueDependent() &&
- Length->EvaluateAsInt(Result, Context) &&
- Result.Val.getInt().isNullValue()) {
- Diag(ELoc,
- diag::err_omp_depend_zero_length_array_section_not_allowed)
- << SimpleExpr->getSourceRange();
+ bool OMPDependTFound = LangOpts.OpenMP >= 50;
+ if (OMPDependTFound)
+ OMPDependTFound = findOMPDependT(*this, StartLoc, DSAStack,
+ DepKind == OMPC_DEPEND_depobj);
+ if (DepKind == OMPC_DEPEND_depobj) {
+ // OpenMP 5.0, 2.17.11 depend Clause, Restrictions, C/C++
+ // List items used in depend clauses with the depobj dependence type
+ // must be expressions of the omp_depend_t type.
+ if (!RefExpr->isValueDependent() && !RefExpr->isTypeDependent() &&
+ !RefExpr->isInstantiationDependent() &&
+ !RefExpr->containsUnexpandedParameterPack() &&
+ (OMPDependTFound &&
+ !Context.hasSameUnqualifiedType(DSAStack->getOMPDependT(),
+ RefExpr->getType()))) {
+ Diag(ELoc, diag::err_omp_expected_omp_depend_t_lvalue)
+ << 0 << RefExpr->getType() << RefExpr->getSourceRange();
continue;
}
- }
+ if (!RefExpr->isLValue()) {
+ Diag(ELoc, diag::err_omp_expected_omp_depend_t_lvalue)
+ << 1 << RefExpr->getType() << RefExpr->getSourceRange();
+ continue;
+ }
+ } else {
+ // OpenMP 5.0 [2.17.11, Restrictions]
+ // List items used in depend clauses cannot be zero-length array
+ // sections.
+ QualType ExprTy = RefExpr->getType().getNonReferenceType();
+ const auto *OASE = dyn_cast<OMPArraySectionExpr>(SimpleExpr);
+ if (OASE) {
+ QualType BaseType =
+ OMPArraySectionExpr::getBaseOriginalType(OASE->getBase());
+ if (const auto *ATy = BaseType->getAsArrayTypeUnsafe())
+ ExprTy = ATy->getElementType();
+ else
+ ExprTy = BaseType->getPointeeType();
+ ExprTy = ExprTy.getNonReferenceType();
+ const Expr *Length = OASE->getLength();
+ Expr::EvalResult Result;
+ if (Length && !Length->isValueDependent() &&
+ Length->EvaluateAsInt(Result, Context) &&
+ Result.Val.getInt().isNullValue()) {
+ Diag(ELoc,
+ diag::err_omp_depend_zero_length_array_section_not_allowed)
+ << SimpleExpr->getSourceRange();
+ continue;
+ }
+ }
- auto *ASE = dyn_cast<ArraySubscriptExpr>(SimpleExpr);
- if (!RefExpr->IgnoreParenImpCasts()->isLValue() ||
- (ASE &&
- !ASE->getBase()->getType().getNonReferenceType()->isPointerType() &&
- !ASE->getBase()->getType().getNonReferenceType()->isArrayType())) {
- Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
- << RefExpr->getSourceRange();
- continue;
- }
+ // OpenMP 5.0, 2.17.11 depend Clause, Restrictions, C/C++
+ // List items used in depend clauses with the in, out, inout or
+ // mutexinoutset dependence types cannot be expressions of the
+ // omp_depend_t type.
+ if (!RefExpr->isValueDependent() && !RefExpr->isTypeDependent() &&
+ !RefExpr->isInstantiationDependent() &&
+ !RefExpr->containsUnexpandedParameterPack() &&
+ (OMPDependTFound &&
+ DSAStack->getOMPDependT().getTypePtr() == ExprTy.getTypePtr())) {
+ Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
+ << (LangOpts.OpenMP >= 50 ? 1 : 0) << 1
+ << RefExpr->getSourceRange();
+ continue;
+ }
- ExprResult Res;
- {
- Sema::TentativeAnalysisScope Trap(*this);
- Res = CreateBuiltinUnaryOp(ELoc, UO_AddrOf,
- RefExpr->IgnoreParenImpCasts());
- }
- if (!Res.isUsable() && !isa<OMPArraySectionExpr>(SimpleExpr)) {
- Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
- << RefExpr->getSourceRange();
- continue;
+ auto *ASE = dyn_cast<ArraySubscriptExpr>(SimpleExpr);
+ if (!RefExpr->IgnoreParenImpCasts()->isLValue() ||
+ (ASE && !ASE->getBase()->isTypeDependent() &&
+ !ASE->getBase()
+ ->getType()
+ .getNonReferenceType()
+ ->isPointerType() &&
+ !ASE->getBase()->getType().getNonReferenceType()->isArrayType())) {
+ Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
+ << (LangOpts.OpenMP >= 50 ? 1 : 0)
+ << (LangOpts.OpenMP >= 50 ? 1 : 0) << RefExpr->getSourceRange();
+ continue;
+ }
+
+ ExprResult Res;
+ {
+ Sema::TentativeAnalysisScope Trap(*this);
+ Res = CreateBuiltinUnaryOp(ELoc, UO_AddrOf,
+ RefExpr->IgnoreParenImpCasts());
+ }
+ if (!Res.isUsable() && !isa<OMPArraySectionExpr>(SimpleExpr) &&
+ !isa<OMPArrayShapingExpr>(SimpleExpr)) {
+ Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
+ << (LangOpts.OpenMP >= 50 ? 1 : 0)
+ << (LangOpts.OpenMP >= 50 ? 1 : 0) << RefExpr->getSourceRange();
+ continue;
+ }
}
}
Vars.push_back(RefExpr->IgnoreParenImpCasts());
@@ -15074,24 +16447,40 @@ Sema::ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
return nullptr;
auto *C = OMPDependClause::Create(Context, StartLoc, LParenLoc, EndLoc,
- DepKind, DepLoc, ColonLoc, Vars,
- TotalDepCount.getZExtValue());
+ DepModifier, DepKind, DepLoc, ColonLoc,
+ Vars, TotalDepCount.getZExtValue());
if ((DepKind == OMPC_DEPEND_sink || DepKind == OMPC_DEPEND_source) &&
DSAStack->isParentOrderedRegion())
DSAStack->addDoacrossDependClause(C, OpsOffs);
return C;
}
-OMPClause *Sema::ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
+OMPClause *Sema::ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
+ Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
+ SourceLocation ModifierLoc,
SourceLocation EndLoc) {
+ assert((ModifierLoc.isInvalid() || LangOpts.OpenMP >= 50) &&
+ "Unexpected device modifier in OpenMP < 50.");
+
+ bool ErrorFound = false;
+ if (ModifierLoc.isValid() && Modifier == OMPC_DEVICE_unknown) {
+ std::string Values =
+ getListOfPossibleValues(OMPC_device, /*First=*/0, OMPC_DEVICE_unknown);
+ Diag(ModifierLoc, diag::err_omp_unexpected_clause_value)
+ << Values << getOpenMPClauseName(OMPC_device);
+ ErrorFound = true;
+ }
+
Expr *ValExpr = Device;
Stmt *HelperValStmt = nullptr;
// OpenMP [2.9.1, Restrictions]
// The device expression must evaluate to a non-negative integer value.
- if (!isNonNegativeIntegerValue(ValExpr, *this, OMPC_device,
- /*StrictlyPositive=*/false))
+ ErrorFound = !isNonNegativeIntegerValue(ValExpr, *this, OMPC_device,
+ /*StrictlyPositive=*/false) ||
+ ErrorFound;
+ if (ErrorFound)
return nullptr;
OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
@@ -15104,8 +16493,9 @@ OMPClause *Sema::ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
HelperValStmt = buildPreInits(Context, Captures);
}
- return new (Context) OMPDeviceClause(ValExpr, HelperValStmt, CaptureRegion,
- StartLoc, LParenLoc, EndLoc);
+ return new (Context)
+ OMPDeviceClause(Modifier, ValExpr, HelperValStmt, CaptureRegion, StartLoc,
+ LParenLoc, ModifierLoc, EndLoc);
}
static bool checkTypeMappable(SourceLocation SL, SourceRange SR, Sema &SemaRef,
@@ -15133,7 +16523,8 @@ static bool checkArrayExpressionDoesNotReferToWholeSize(Sema &SemaRef,
// If this is an array subscript, it refers to the whole size if the size of
// the dimension is constant and equals 1. Also, an array section assumes the
// format of an array subscript if no colon is used.
- if (isa<ArraySubscriptExpr>(E) || (OASE && OASE->getColonLoc().isInvalid())) {
+ if (isa<ArraySubscriptExpr>(E) ||
+ (OASE && OASE->getColonLocFirst().isInvalid())) {
if (const auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
return ATy->getSize().getSExtValue() != 1;
// Size can't be evaluated statically.
@@ -15189,7 +16580,8 @@ static bool checkArrayExpressionDoesNotReferToUnitySize(Sema &SemaRef,
// An array subscript always refer to a single element. Also, an array section
// assumes the format of an array subscript if no colon is used.
- if (isa<ArraySubscriptExpr>(E) || (OASE && OASE->getColonLoc().isInvalid()))
+ if (isa<ArraySubscriptExpr>(E) ||
+ (OASE && OASE->getColonLocFirst().isInvalid()))
return false;
assert(OASE && "Expecting array section if not an array subscript.");
@@ -15214,256 +16606,338 @@ static bool checkArrayExpressionDoesNotReferToUnitySize(Sema &SemaRef,
return ConstLength.getSExtValue() != 1;
}
-// Return the expression of the base of the mappable expression or null if it
-// cannot be determined and do all the necessary checks to see if the expression
-// is valid as a standalone mappable expression. In the process, record all the
-// components of the expression.
-static const Expr *checkMapClauseExpressionBase(
- Sema &SemaRef, Expr *E,
- OMPClauseMappableExprCommon::MappableExprComponentList &CurComponents,
- OpenMPClauseKind CKind, bool NoDiagnose) {
- SourceLocation ELoc = E->getExprLoc();
- SourceRange ERange = E->getSourceRange();
-
- // The base of elements of list in a map clause have to be either:
- // - a reference to variable or field.
- // - a member expression.
- // - an array expression.
- //
- // E.g. if we have the expression 'r.S.Arr[:12]', we want to retrieve the
- // reference to 'r'.
- //
- // If we have:
- //
- // struct SS {
- // Bla S;
- // foo() {
- // #pragma omp target map (S.Arr[:12]);
- // }
- // }
- //
- // We want to retrieve the member expression 'this->S';
+// The base of elements of list in a map clause have to be either:
+// - a reference to variable or field.
+// - a member expression.
+// - an array expression.
+//
+// E.g. if we have the expression 'r.S.Arr[:12]', we want to retrieve the
+// reference to 'r'.
+//
+// If we have:
+//
+// struct SS {
+// Bla S;
+// foo() {
+// #pragma omp target map (S.Arr[:12]);
+// }
+// }
+//
+// We want to retrieve the member expression 'this->S';
+// OpenMP 5.0 [2.19.7.1, map Clause, Restrictions, p.2]
+// If a list item is an array section, it must specify contiguous storage.
+//
+// For this restriction it is sufficient that we make sure only references
+// to variables or fields and array expressions, and that no array sections
+// exist except in the rightmost expression (unless they cover the whole
+// dimension of the array). E.g. these would be invalid:
+//
+// r.ArrS[3:5].Arr[6:7]
+//
+// r.ArrS[3:5].x
+//
+// but these would be valid:
+// r.ArrS[3].Arr[6:7]
+//
+// r.ArrS[3].x
+namespace {
+class MapBaseChecker final : public StmtVisitor<MapBaseChecker, bool> {
+ Sema &SemaRef;
+ OpenMPClauseKind CKind = OMPC_unknown;
+ OMPClauseMappableExprCommon::MappableExprComponentList &Components;
+ bool NoDiagnose = false;
const Expr *RelevantExpr = nullptr;
-
- // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, p.2]
- // If a list item is an array section, it must specify contiguous storage.
- //
- // For this restriction it is sufficient that we make sure only references
- // to variables or fields and array expressions, and that no array sections
- // exist except in the rightmost expression (unless they cover the whole
- // dimension of the array). E.g. these would be invalid:
- //
- // r.ArrS[3:5].Arr[6:7]
- //
- // r.ArrS[3:5].x
- //
- // but these would be valid:
- // r.ArrS[3].Arr[6:7]
- //
- // r.ArrS[3].x
-
bool AllowUnitySizeArraySection = true;
bool AllowWholeSizeArraySection = true;
+ SourceLocation ELoc;
+ SourceRange ERange;
- while (!RelevantExpr) {
- E = E->IgnoreParenImpCasts();
+ void emitErrorMsg() {
+ // If nothing else worked, this is not a valid map clause expression.
+ if (SemaRef.getLangOpts().OpenMP < 50) {
+ SemaRef.Diag(ELoc,
+ diag::err_omp_expected_named_var_member_or_array_expression)
+ << ERange;
+ } else {
+ SemaRef.Diag(ELoc, diag::err_omp_non_lvalue_in_map_or_motion_clauses)
+ << getOpenMPClauseName(CKind) << ERange;
+ }
+ }
- if (auto *CurE = dyn_cast<DeclRefExpr>(E)) {
- if (!isa<VarDecl>(CurE->getDecl()))
- return nullptr;
+public:
+ bool VisitDeclRefExpr(DeclRefExpr *DRE) {
+ if (!isa<VarDecl>(DRE->getDecl())) {
+ emitErrorMsg();
+ return false;
+ }
+ assert(!RelevantExpr && "RelevantExpr is expected to be nullptr");
+ RelevantExpr = DRE;
+ // Record the component.
+ Components.emplace_back(DRE, DRE->getDecl());
+ return true;
+ }
- RelevantExpr = CurE;
+ bool VisitMemberExpr(MemberExpr *ME) {
+ Expr *E = ME;
+ Expr *BaseE = ME->getBase()->IgnoreParenCasts();
- // If we got a reference to a declaration, we should not expect any array
- // section before that.
- AllowUnitySizeArraySection = false;
- AllowWholeSizeArraySection = false;
+ if (isa<CXXThisExpr>(BaseE)) {
+ assert(!RelevantExpr && "RelevantExpr is expected to be nullptr");
+ // We found a base expression: this->Val.
+ RelevantExpr = ME;
+ } else {
+ E = BaseE;
+ }
- // Record the component.
- CurComponents.emplace_back(CurE, CurE->getDecl());
- } else if (auto *CurE = dyn_cast<MemberExpr>(E)) {
- Expr *BaseE = CurE->getBase()->IgnoreParenImpCasts();
+ if (!isa<FieldDecl>(ME->getMemberDecl())) {
+ if (!NoDiagnose) {
+ SemaRef.Diag(ELoc, diag::err_omp_expected_access_to_data_field)
+ << ME->getSourceRange();
+ return false;
+ }
+ if (RelevantExpr)
+ return false;
+ return Visit(E);
+ }
- if (isa<CXXThisExpr>(BaseE))
- // We found a base expression: this->Val.
- RelevantExpr = CurE;
- else
- E = BaseE;
+ auto *FD = cast<FieldDecl>(ME->getMemberDecl());
- if (!isa<FieldDecl>(CurE->getMemberDecl())) {
- if (!NoDiagnose) {
- SemaRef.Diag(ELoc, diag::err_omp_expected_access_to_data_field)
- << CurE->getSourceRange();
- return nullptr;
- }
- if (RelevantExpr)
- return nullptr;
- continue;
+ // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C/C++, p.3]
+ // A bit-field cannot appear in a map clause.
+ //
+ if (FD->isBitField()) {
+ if (!NoDiagnose) {
+ SemaRef.Diag(ELoc, diag::err_omp_bit_fields_forbidden_in_clause)
+ << ME->getSourceRange() << getOpenMPClauseName(CKind);
+ return false;
}
+ if (RelevantExpr)
+ return false;
+ return Visit(E);
+ }
- auto *FD = cast<FieldDecl>(CurE->getMemberDecl());
+ // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C++, p.1]
+ // If the type of a list item is a reference to a type T then the type
+ // will be considered to be T for all purposes of this clause.
+ QualType CurType = BaseE->getType().getNonReferenceType();
- // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C/C++, p.3]
- // A bit-field cannot appear in a map clause.
- //
- if (FD->isBitField()) {
- if (!NoDiagnose) {
- SemaRef.Diag(ELoc, diag::err_omp_bit_fields_forbidden_in_clause)
- << CurE->getSourceRange() << getOpenMPClauseName(CKind);
- return nullptr;
- }
- if (RelevantExpr)
- return nullptr;
- continue;
+ // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C/C++, p.2]
+ // A list item cannot be a variable that is a member of a structure with
+ // a union type.
+ //
+ if (CurType->isUnionType()) {
+ if (!NoDiagnose) {
+ SemaRef.Diag(ELoc, diag::err_omp_union_type_not_allowed)
+ << ME->getSourceRange();
+ return false;
}
+ return RelevantExpr || Visit(E);
+ }
- // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C++, p.1]
- // If the type of a list item is a reference to a type T then the type
- // will be considered to be T for all purposes of this clause.
- QualType CurType = BaseE->getType().getNonReferenceType();
+ // If we got a member expression, we should not expect any array section
+ // before that:
+ //
+ // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, p.7]
+ // If a list item is an element of a structure, only the rightmost symbol
+ // of the variable reference can be an array section.
+ //
+ AllowUnitySizeArraySection = false;
+ AllowWholeSizeArraySection = false;
- // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C/C++, p.2]
- // A list item cannot be a variable that is a member of a structure with
- // a union type.
- //
- if (CurType->isUnionType()) {
- if (!NoDiagnose) {
- SemaRef.Diag(ELoc, diag::err_omp_union_type_not_allowed)
- << CurE->getSourceRange();
- return nullptr;
- }
- continue;
+ // Record the component.
+ Components.emplace_back(ME, FD);
+ return RelevantExpr || Visit(E);
+ }
+
+ bool VisitArraySubscriptExpr(ArraySubscriptExpr *AE) {
+ Expr *E = AE->getBase()->IgnoreParenImpCasts();
+
+ if (!E->getType()->isAnyPointerType() && !E->getType()->isArrayType()) {
+ if (!NoDiagnose) {
+ SemaRef.Diag(ELoc, diag::err_omp_expected_base_var_name)
+ << 0 << AE->getSourceRange();
+ return false;
}
+ return RelevantExpr || Visit(E);
+ }
- // If we got a member expression, we should not expect any array section
- // before that:
- //
- // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, p.7]
- // If a list item is an element of a structure, only the rightmost symbol
- // of the variable reference can be an array section.
- //
- AllowUnitySizeArraySection = false;
+    // If we got an array subscript that expresses the whole dimension, we
+    // can have any array expressions before it. If it expresses only part
+    // of the dimension, we can only have unitary-size array expressions.
+ if (checkArrayExpressionDoesNotReferToWholeSize(SemaRef, AE,
+ E->getType()))
AllowWholeSizeArraySection = false;
- // Record the component.
- CurComponents.emplace_back(CurE, FD);
- } else if (auto *CurE = dyn_cast<ArraySubscriptExpr>(E)) {
- E = CurE->getBase()->IgnoreParenImpCasts();
-
- if (!E->getType()->isAnyPointerType() && !E->getType()->isArrayType()) {
- if (!NoDiagnose) {
- SemaRef.Diag(ELoc, diag::err_omp_expected_base_var_name)
- << 0 << CurE->getSourceRange();
- return nullptr;
- }
- continue;
+ if (const auto *TE = dyn_cast<CXXThisExpr>(E->IgnoreParenCasts())) {
+ Expr::EvalResult Result;
+ if (!AE->getIdx()->isValueDependent() &&
+ AE->getIdx()->EvaluateAsInt(Result, SemaRef.getASTContext()) &&
+ !Result.Val.getInt().isNullValue()) {
+ SemaRef.Diag(AE->getIdx()->getExprLoc(),
+ diag::err_omp_invalid_map_this_expr);
+ SemaRef.Diag(AE->getIdx()->getExprLoc(),
+ diag::note_omp_invalid_subscript_on_this_ptr_map);
}
+ assert(!RelevantExpr && "RelevantExpr is expected to be nullptr");
+ RelevantExpr = TE;
+ }
- // If we got an array subscript that express the whole dimension we
- // can have any array expressions before. If it only expressing part of
- // the dimension, we can only have unitary-size array expressions.
- if (checkArrayExpressionDoesNotReferToWholeSize(SemaRef, CurE,
- E->getType()))
- AllowWholeSizeArraySection = false;
+ // Record the component - we don't have any declaration associated.
+ Components.emplace_back(AE, nullptr);
- if (const auto *TE = dyn_cast<CXXThisExpr>(E)) {
- Expr::EvalResult Result;
- if (CurE->getIdx()->EvaluateAsInt(Result, SemaRef.getASTContext())) {
- if (!Result.Val.getInt().isNullValue()) {
- SemaRef.Diag(CurE->getIdx()->getExprLoc(),
- diag::err_omp_invalid_map_this_expr);
- SemaRef.Diag(CurE->getIdx()->getExprLoc(),
- diag::note_omp_invalid_subscript_on_this_ptr_map);
- }
- }
- RelevantExpr = TE;
- }
+ return RelevantExpr || Visit(E);
+ }
- // Record the component - we don't have any declaration associated.
- CurComponents.emplace_back(CurE, nullptr);
- } else if (auto *CurE = dyn_cast<OMPArraySectionExpr>(E)) {
- assert(!NoDiagnose && "Array sections cannot be implicitly mapped.");
- E = CurE->getBase()->IgnoreParenImpCasts();
+ bool VisitOMPArraySectionExpr(OMPArraySectionExpr *OASE) {
+ assert(!NoDiagnose && "Array sections cannot be implicitly mapped.");
+ Expr *E = OASE->getBase()->IgnoreParenImpCasts();
+ QualType CurType =
+ OMPArraySectionExpr::getBaseOriginalType(E).getCanonicalType();
- QualType CurType =
- OMPArraySectionExpr::getBaseOriginalType(E).getCanonicalType();
+ // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C++, p.1]
+ // If the type of a list item is a reference to a type T then the type
+ // will be considered to be T for all purposes of this clause.
+ if (CurType->isReferenceType())
+ CurType = CurType->getPointeeType();
- // OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C++, p.1]
- // If the type of a list item is a reference to a type T then the type
- // will be considered to be T for all purposes of this clause.
- if (CurType->isReferenceType())
- CurType = CurType->getPointeeType();
+ bool IsPointer = CurType->isAnyPointerType();
- bool IsPointer = CurType->isAnyPointerType();
+ if (!IsPointer && !CurType->isArrayType()) {
+ SemaRef.Diag(ELoc, diag::err_omp_expected_base_var_name)
+ << 0 << OASE->getSourceRange();
+ return false;
+ }
- if (!IsPointer && !CurType->isArrayType()) {
- SemaRef.Diag(ELoc, diag::err_omp_expected_base_var_name)
- << 0 << CurE->getSourceRange();
- return nullptr;
- }
+ bool NotWhole =
+ checkArrayExpressionDoesNotReferToWholeSize(SemaRef, OASE, CurType);
+ bool NotUnity =
+ checkArrayExpressionDoesNotReferToUnitySize(SemaRef, OASE, CurType);
- bool NotWhole =
- checkArrayExpressionDoesNotReferToWholeSize(SemaRef, CurE, CurType);
- bool NotUnity =
- checkArrayExpressionDoesNotReferToUnitySize(SemaRef, CurE, CurType);
+ if (AllowWholeSizeArraySection) {
+ // Any array section is currently allowed. Allowing a whole size array
+ // section implies allowing a unity array section as well.
+ //
+ // If this array section refers to the whole dimension we can still
+ // accept other array sections before this one, except if the base is a
+ // pointer. Otherwise, only unitary sections are accepted.
+ if (NotWhole || IsPointer)
+ AllowWholeSizeArraySection = false;
+ } else if (AllowUnitySizeArraySection && NotUnity) {
+ // A unity or whole array section is not allowed and that is not
+ // compatible with the properties of the current array section.
+ SemaRef.Diag(
+ ELoc, diag::err_array_section_does_not_specify_contiguous_storage)
+ << OASE->getSourceRange();
+ return false;
+ }
- if (AllowWholeSizeArraySection) {
- // Any array section is currently allowed. Allowing a whole size array
- // section implies allowing a unity array section as well.
- //
- // If this array section refers to the whole dimension we can still
- // accept other array sections before this one, except if the base is a
- // pointer. Otherwise, only unitary sections are accepted.
- if (NotWhole || IsPointer)
- AllowWholeSizeArraySection = false;
- } else if (AllowUnitySizeArraySection && NotUnity) {
- // A unity or whole array section is not allowed and that is not
- // compatible with the properties of the current array section.
- SemaRef.Diag(
- ELoc, diag::err_array_section_does_not_specify_contiguous_storage)
- << CurE->getSourceRange();
- return nullptr;
+ if (const auto *TE = dyn_cast<CXXThisExpr>(E)) {
+ Expr::EvalResult ResultR;
+ Expr::EvalResult ResultL;
+ if (!OASE->getLength()->isValueDependent() &&
+ OASE->getLength()->EvaluateAsInt(ResultR, SemaRef.getASTContext()) &&
+ !ResultR.Val.getInt().isOneValue()) {
+ SemaRef.Diag(OASE->getLength()->getExprLoc(),
+ diag::err_omp_invalid_map_this_expr);
+ SemaRef.Diag(OASE->getLength()->getExprLoc(),
+ diag::note_omp_invalid_length_on_this_ptr_mapping);
}
-
- if (const auto *TE = dyn_cast<CXXThisExpr>(E)) {
- Expr::EvalResult ResultR;
- Expr::EvalResult ResultL;
- if (CurE->getLength()->EvaluateAsInt(ResultR,
- SemaRef.getASTContext())) {
- if (!ResultR.Val.getInt().isOneValue()) {
- SemaRef.Diag(CurE->getLength()->getExprLoc(),
- diag::err_omp_invalid_map_this_expr);
- SemaRef.Diag(CurE->getLength()->getExprLoc(),
- diag::note_omp_invalid_length_on_this_ptr_mapping);
- }
- }
- if (CurE->getLowerBound() && CurE->getLowerBound()->EvaluateAsInt(
- ResultL, SemaRef.getASTContext())) {
- if (!ResultL.Val.getInt().isNullValue()) {
- SemaRef.Diag(CurE->getLowerBound()->getExprLoc(),
- diag::err_omp_invalid_map_this_expr);
- SemaRef.Diag(CurE->getLowerBound()->getExprLoc(),
- diag::note_omp_invalid_lower_bound_on_this_ptr_mapping);
- }
- }
- RelevantExpr = TE;
+ if (OASE->getLowerBound() && !OASE->getLowerBound()->isValueDependent() &&
+ OASE->getLowerBound()->EvaluateAsInt(ResultL,
+ SemaRef.getASTContext()) &&
+ !ResultL.Val.getInt().isNullValue()) {
+ SemaRef.Diag(OASE->getLowerBound()->getExprLoc(),
+ diag::err_omp_invalid_map_this_expr);
+ SemaRef.Diag(OASE->getLowerBound()->getExprLoc(),
+ diag::note_omp_invalid_lower_bound_on_this_ptr_mapping);
}
+ assert(!RelevantExpr && "RelevantExpr is expected to be nullptr");
+ RelevantExpr = TE;
+ }
- // Record the component - we don't have any declaration associated.
- CurComponents.emplace_back(CurE, nullptr);
- } else {
- if (!NoDiagnose) {
- // If nothing else worked, this is not a valid map clause expression.
- SemaRef.Diag(
- ELoc, diag::err_omp_expected_named_var_member_or_array_expression)
- << ERange;
- }
- return nullptr;
+ // Record the component - we don't have any declaration associated.
+ Components.emplace_back(OASE, nullptr);
+ return RelevantExpr || Visit(E);
+ }
+ bool VisitOMPArrayShapingExpr(OMPArrayShapingExpr *E) {
+ Expr *Base = E->getBase();
+
+ // Record the component - we don't have any declaration associated.
+ Components.emplace_back(E, nullptr);
+
+ return Visit(Base->IgnoreParenImpCasts());
+ }
+
+ bool VisitUnaryOperator(UnaryOperator *UO) {
+ if (SemaRef.getLangOpts().OpenMP < 50 || !UO->isLValue() ||
+ UO->getOpcode() != UO_Deref) {
+ emitErrorMsg();
+ return false;
}
+ if (!RelevantExpr) {
+ // Record the component if haven't found base decl.
+ Components.emplace_back(UO, nullptr);
+ }
+ return RelevantExpr || Visit(UO->getSubExpr()->IgnoreParenImpCasts());
}
+ bool VisitBinaryOperator(BinaryOperator *BO) {
+ if (SemaRef.getLangOpts().OpenMP < 50 || !BO->getType()->isPointerType()) {
+ emitErrorMsg();
+ return false;
+ }
+
+    // Pointer arithmetic is the only thing we expect to happen here, so
+    // after we make sure the binary operator is of pointer type, the only
+    // thing we need to do is visit the subtree that has the same type as
+    // the root (so that we know the other subtree is just an offset).
+ Expr *LE = BO->getLHS()->IgnoreParenImpCasts();
+ Expr *RE = BO->getRHS()->IgnoreParenImpCasts();
+ Components.emplace_back(BO, nullptr);
+ assert((LE->getType().getTypePtr() == BO->getType().getTypePtr() ||
+ RE->getType().getTypePtr() == BO->getType().getTypePtr()) &&
+ "Either LHS or RHS have base decl inside");
+ if (BO->getType().getTypePtr() == LE->getType().getTypePtr())
+ return RelevantExpr || Visit(LE);
+ return RelevantExpr || Visit(RE);
+ }
+ bool VisitCXXThisExpr(CXXThisExpr *CTE) {
+ assert(!RelevantExpr && "RelevantExpr is expected to be nullptr");
+ RelevantExpr = CTE;
+ Components.emplace_back(CTE, nullptr);
+ return true;
+ }
+ bool VisitStmt(Stmt *) {
+ emitErrorMsg();
+ return false;
+ }
+ const Expr *getFoundBase() const {
+ return RelevantExpr;
+ }
+ explicit MapBaseChecker(
+ Sema &SemaRef, OpenMPClauseKind CKind,
+ OMPClauseMappableExprCommon::MappableExprComponentList &Components,
+ bool NoDiagnose, SourceLocation &ELoc, SourceRange &ERange)
+ : SemaRef(SemaRef), CKind(CKind), Components(Components),
+ NoDiagnose(NoDiagnose), ELoc(ELoc), ERange(ERange) {}
+};
+} // namespace
- return RelevantExpr;
+/// Return the expression of the base of the mappable expression or null if it
+/// cannot be determined. Also do all the necessary checks to see if the
+/// expression is valid as a standalone mappable expression. In the process,
+/// record all the components of the expression.
+static const Expr *checkMapClauseExpressionBase(
+ Sema &SemaRef, Expr *E,
+ OMPClauseMappableExprCommon::MappableExprComponentList &CurComponents,
+ OpenMPClauseKind CKind, bool NoDiagnose) {
+ SourceLocation ELoc = E->getExprLoc();
+ SourceRange ERange = E->getSourceRange();
+ MapBaseChecker Checker(SemaRef, CKind, CurComponents, NoDiagnose, ELoc,
+ ERange);
+ if (Checker.Visit(E->IgnoreParens()))
+ return Checker.getFoundBase();
+ return nullptr;
}
// Return true if expression E associated with value VD has conflicts with other
@@ -15520,9 +16994,11 @@ static bool checkMapConflicts(
// variable in map clauses of the same construct.
if (CurrentRegionOnly &&
(isa<ArraySubscriptExpr>(CI->getAssociatedExpression()) ||
- isa<OMPArraySectionExpr>(CI->getAssociatedExpression())) &&
+ isa<OMPArraySectionExpr>(CI->getAssociatedExpression()) ||
+ isa<OMPArrayShapingExpr>(CI->getAssociatedExpression())) &&
(isa<ArraySubscriptExpr>(SI->getAssociatedExpression()) ||
- isa<OMPArraySectionExpr>(SI->getAssociatedExpression()))) {
+ isa<OMPArraySectionExpr>(SI->getAssociatedExpression()) ||
+ isa<OMPArrayShapingExpr>(SI->getAssociatedExpression()))) {
SemaRef.Diag(CI->getAssociatedExpression()->getExprLoc(),
diag::err_omp_multiple_array_items_in_map_clause)
<< CI->getAssociatedExpression()->getSourceRange();
@@ -15554,6 +17030,9 @@ static bool checkMapConflicts(
const Expr *E = OASE->getBase()->IgnoreParenImpCasts();
Type =
OMPArraySectionExpr::getBaseOriginalType(E).getCanonicalType();
+ } else if (const auto *OASE = dyn_cast<OMPArrayShapingExpr>(
+ SI->getAssociatedExpression())) {
+ Type = OASE->getBase()->getType()->getPointeeType();
}
if (Type.isNull() || Type->isAnyPointerType() ||
checkArrayExpressionDoesNotReferToWholeSize(
@@ -15916,10 +17395,15 @@ static void checkMappableExpressionList(
Expr *SimpleExpr = RE->IgnoreParenCasts();
- if (!RE->IgnoreParenImpCasts()->isLValue()) {
- SemaRef.Diag(ELoc,
- diag::err_omp_expected_named_var_member_or_array_expression)
- << RE->getSourceRange();
+ if (!RE->isLValue()) {
+ if (SemaRef.getLangOpts().OpenMP < 50) {
+ SemaRef.Diag(
+ ELoc, diag::err_omp_expected_named_var_member_or_array_expression)
+ << RE->getSourceRange();
+ } else {
+ SemaRef.Diag(ELoc, diag::err_omp_non_lvalue_in_map_or_motion_clauses)
+ << getOpenMPClauseName(CKind) << RE->getSourceRange();
+ }
continue;
}
@@ -16011,6 +17495,7 @@ static void checkMappableExpressionList(
QualType Type;
auto *ASE = dyn_cast<ArraySubscriptExpr>(VE->IgnoreParens());
auto *OASE = dyn_cast<OMPArraySectionExpr>(VE->IgnoreParens());
+ auto *OAShE = dyn_cast<OMPArrayShapingExpr>(VE->IgnoreParens());
if (ASE) {
Type = ASE->getType().getNonReferenceType();
} else if (OASE) {
@@ -16021,6 +17506,8 @@ static void checkMappableExpressionList(
else
Type = BaseType->getPointeeType();
Type = Type.getNonReferenceType();
+ } else if (OAShE) {
+ Type = OAShE->getBase()->getType()->getPointeeType();
} else {
Type = VE->getType();
}
@@ -16064,6 +17551,21 @@ static void checkMappableExpressionList(
continue;
}
+ // target, target data
+ // OpenMP 5.0 [2.12.2, Restrictions, p. 163]
+ // OpenMP 5.0 [2.12.5, Restrictions, p. 174]
+ // A map-type in a map clause must be to, from, tofrom or alloc
+ if ((DKind == OMPD_target_data ||
+ isOpenMPTargetExecutionDirective(DKind)) &&
+ !(MapType == OMPC_MAP_to || MapType == OMPC_MAP_from ||
+ MapType == OMPC_MAP_tofrom || MapType == OMPC_MAP_alloc)) {
+ SemaRef.Diag(StartLoc, diag::err_omp_invalid_map_type_for_directive)
+ << (IsMapTypeImplicit ? 1 : 0)
+ << getOpenMPSimpleClauseTypeName(OMPC_map, MapType)
+ << getOpenMPDirectiveName(DKind);
+ continue;
+ }
+
// OpenMP 4.5 [2.15.5.1, Restrictions, p.3]
// A list item cannot appear in both a map clause and a data-sharing
// attribute clause on the same construct
@@ -16124,7 +17626,7 @@ OMPClause *Sema::ActOnOpenMPMapClause(
OpenMPMapModifierKind Modifiers[] = {OMPC_MAP_MODIFIER_unknown,
OMPC_MAP_MODIFIER_unknown,
OMPC_MAP_MODIFIER_unknown};
- SourceLocation ModifiersLoc[OMPMapClause::NumberOfModifiers];
+ SourceLocation ModifiersLoc[NumberOfOMPMapClauseModifiers];
// Process map-type-modifiers, flag errors for duplicate modifiers.
unsigned Count = 0;
@@ -16134,7 +17636,7 @@ OMPClause *Sema::ActOnOpenMPMapClause(
Diag(MapTypeModifiersLoc[I], diag::err_omp_duplicate_map_type_modifier);
continue;
}
- assert(Count < OMPMapClause::NumberOfModifiers &&
+ assert(Count < NumberOfOMPMapClauseModifiers &&
"Modifiers exceed the allowed number of map type modifiers");
Modifiers[Count] = MapTypeModifiers[I];
ModifiersLoc[Count] = MapTypeModifiersLoc[I];
@@ -16678,6 +18180,69 @@ OMPClause *Sema::ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
OMPHintClause(HintExpr.get(), StartLoc, LParenLoc, EndLoc);
}
+/// Tries to find omp_event_handle_t type.
+static bool findOMPEventHandleT(Sema &S, SourceLocation Loc,
+ DSAStackTy *Stack) {
+ QualType OMPEventHandleT = Stack->getOMPEventHandleT();
+ if (!OMPEventHandleT.isNull())
+ return true;
+ IdentifierInfo *II = &S.PP.getIdentifierTable().get("omp_event_handle_t");
+ ParsedType PT = S.getTypeName(*II, Loc, S.getCurScope());
+ if (!PT.getAsOpaquePtr() || PT.get().isNull()) {
+ S.Diag(Loc, diag::err_omp_implied_type_not_found) << "omp_event_handle_t";
+ return false;
+ }
+ Stack->setOMPEventHandleT(PT.get());
+ return true;
+}
+
+OMPClause *Sema::ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ if (!Evt->isValueDependent() && !Evt->isTypeDependent() &&
+ !Evt->isInstantiationDependent() &&
+ !Evt->containsUnexpandedParameterPack()) {
+ if (!findOMPEventHandleT(*this, Evt->getExprLoc(), DSAStack))
+ return nullptr;
+ // OpenMP 5.0, 2.10.1 task Construct.
+ // event-handle is a variable of the omp_event_handle_t type.
+ auto *Ref = dyn_cast<DeclRefExpr>(Evt->IgnoreParenImpCasts());
+ if (!Ref) {
+ Diag(Evt->getExprLoc(), diag::err_omp_var_expected)
+ << "omp_event_handle_t" << 0 << Evt->getSourceRange();
+ return nullptr;
+ }
+ auto *VD = dyn_cast_or_null<VarDecl>(Ref->getDecl());
+ if (!VD) {
+ Diag(Evt->getExprLoc(), diag::err_omp_var_expected)
+ << "omp_event_handle_t" << 0 << Evt->getSourceRange();
+ return nullptr;
+ }
+ if (!Context.hasSameUnqualifiedType(DSAStack->getOMPEventHandleT(),
+ VD->getType()) ||
+ VD->getType().isConstant(Context)) {
+ Diag(Evt->getExprLoc(), diag::err_omp_var_expected)
+ << "omp_event_handle_t" << 1 << VD->getType()
+ << Evt->getSourceRange();
+ return nullptr;
+ }
+ // OpenMP 5.0, 2.10.1 task Construct
+ // [detach clause]... The event-handle will be considered as if it was
+ // specified on a firstprivate clause.
+ DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(VD, /*FromParent=*/false);
+ if (DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_firstprivate &&
+ DVar.RefExpr) {
+ Diag(Evt->getExprLoc(), diag::err_omp_wrong_dsa)
+ << getOpenMPClauseName(DVar.CKind)
+ << getOpenMPClauseName(OMPC_firstprivate);
+ reportOriginalDsa(*this, DSAStack, VD, DVar);
+ return nullptr;
+ }
+ }
+
+ return new (Context) OMPDetachClause(Evt, StartLoc, LParenLoc, EndLoc);
+}
+
OMPClause *Sema::ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc,
@@ -16758,7 +18323,8 @@ OMPClause *Sema::ActOnOpenMPDefaultmapClause(
}
} else {
bool isDefaultmapModifier = (M != OMPC_DEFAULTMAP_MODIFIER_unknown);
- bool isDefaultmapKind = (Kind != OMPC_DEFAULTMAP_unknown);
+ bool isDefaultmapKind = (Kind != OMPC_DEFAULTMAP_unknown) ||
+ (LangOpts.OpenMP >= 50 && KindLoc.isInvalid());
if (!isDefaultmapKind || !isDefaultmapModifier) {
std::string ModifierValue = "'alloc', 'from', 'to', 'tofrom', "
"'firstprivate', 'none', 'default'";
@@ -16786,7 +18352,14 @@ OMPClause *Sema::ActOnOpenMPDefaultmapClause(
return nullptr;
}
}
- DSAStack->setDefaultDMAAttr(M, Kind, StartLoc);
+ if (Kind == OMPC_DEFAULTMAP_unknown) {
+ // Variable category is not specified - mark all categories.
+ DSAStack->setDefaultDMAAttr(M, OMPC_DEFAULTMAP_aggregate, StartLoc);
+ DSAStack->setDefaultDMAAttr(M, OMPC_DEFAULTMAP_scalar, StartLoc);
+ DSAStack->setDefaultDMAAttr(M, OMPC_DEFAULTMAP_pointer, StartLoc);
+ } else {
+ DSAStack->setDefaultDMAAttr(M, Kind, StartLoc);
+ }
return new (Context)
OMPDefaultmapClause(StartLoc, LParenLoc, MLoc, KindLoc, EndLoc, Kind, M);
@@ -16955,15 +18528,6 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
Diag(FD->getLocation(), diag::note_defined_here) << FD;
return;
}
- // Mark the function as must be emitted for the device.
- Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
- OMPDeclareTargetDeclAttr::getDeviceType(FD);
- if (LangOpts.OpenMPIsDevice && Res.hasValue() && IdLoc.isValid() &&
- *DevTy != OMPDeclareTargetDeclAttr::DT_Host)
- checkOpenMPDeviceFunction(IdLoc, FD, /*CheckForDelayedContext=*/false);
- if (!LangOpts.OpenMPIsDevice && Res.hasValue() && IdLoc.isValid() &&
- *DevTy != OMPDeclareTargetDeclAttr::DT_NoHost)
- checkOpenMPHostFunction(IdLoc, FD, /*CheckCaller=*/false);
}
if (auto *VD = dyn_cast<ValueDecl>(D)) {
// Problem if any with var declared with incomplete type will be reported
@@ -17109,6 +18673,58 @@ OMPClause *Sema::ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
MVLI.VarBaseDeclarations, MVLI.VarComponents);
}
+OMPClause *Sema::ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
+ const OMPVarListLocTy &Locs) {
+ MappableVarListInfo MVLI(VarList);
+
+ for (Expr *RefExpr : VarList) {
+ assert(RefExpr && "NULL expr in OpenMP use_device_addr clause.");
+ SourceLocation ELoc;
+ SourceRange ERange;
+ Expr *SimpleRefExpr = RefExpr;
+ auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange,
+ /*AllowArraySection=*/true);
+ if (Res.second) {
+ // It will be analyzed later.
+ MVLI.ProcessedVarList.push_back(RefExpr);
+ }
+ ValueDecl *D = Res.first;
+ if (!D)
+ continue;
+ auto *VD = dyn_cast<VarDecl>(D);
+
+ // If required, build a capture to implement the privatization initialized
+ // with the current list item value.
+ DeclRefExpr *Ref = nullptr;
+ if (!VD)
+ Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true);
+ MVLI.ProcessedVarList.push_back(VD ? RefExpr->IgnoreParens() : Ref);
+
+ // We need to add a data sharing attribute for this variable to make sure it
+ // is correctly captured. A variable that shows up in a use_device_addr has
+ // similar properties of a first private variable.
+ DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_firstprivate, Ref);
+
+ // Create a mappable component for the list item. List items in this clause
+ // only need a component.
+ MVLI.VarBaseDeclarations.push_back(D);
+ MVLI.VarComponents.emplace_back();
+ Expr *Component = SimpleRefExpr;
+ if (VD && (isa<OMPArraySectionExpr>(RefExpr->IgnoreParenImpCasts()) ||
+ isa<ArraySubscriptExpr>(RefExpr->IgnoreParenImpCasts())))
+ Component = DefaultFunctionArrayLvalueConversion(SimpleRefExpr).get();
+ MVLI.VarComponents.back().push_back(
+ OMPClauseMappableExprCommon::MappableComponent(Component, D));
+ }
+
+ if (MVLI.ProcessedVarList.empty())
+ return nullptr;
+
+ return OMPUseDeviceAddrClause::Create(Context, Locs, MVLI.ProcessedVarList,
+ MVLI.VarBaseDeclarations,
+ MVLI.VarComponents);
+}
+
OMPClause *Sema::ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs) {
MappableVarListInfo MVLI(VarList);
@@ -17248,6 +18864,8 @@ OMPClause *Sema::ActOnOpenMPAllocateClause(
if (Vars.empty())
return nullptr;
+ if (Allocator)
+ DSAStack->addInnerAllocatorExpr(Allocator);
return OMPAllocateClause::Create(Context, StartLoc, LParenLoc, Allocator,
ColonLoc, EndLoc, Vars);
}
@@ -17290,3 +18908,266 @@ OMPClause *Sema::ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
return OMPNontemporalClause::Create(Context, StartLoc, LParenLoc, EndLoc,
Vars);
}
+
+OMPClause *Sema::ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
+                                            SourceLocation StartLoc,
+                                            SourceLocation LParenLoc,
+                                            SourceLocation EndLoc) {
+  SmallVector<Expr *, 8> Vars; // List items accepted for the clause.
+  for (Expr *RefExpr : VarList) {
+    assert(RefExpr && "NULL expr in OpenMP inclusive clause.");
+    SourceLocation ELoc;
+    SourceRange ERange;
+    Expr *SimpleRefExpr = RefExpr;
+    auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange,
+                              /*AllowArraySection=*/true);
+    if (Res.second)
+      // It will be analyzed later.
+      Vars.push_back(RefExpr);
+    ValueDecl *D = Res.first;
+    if (!D)
+      continue;
+
+    const DSAStackTy::DSAVarData DVar =
+        DSAStack->getTopDSA(D, /*FromParent=*/true);
+    // OpenMP 5.0, 2.9.6, scan Directive, Restrictions.
+    // A list item that appears in the inclusive or exclusive clause must appear
+    // in a reduction clause with the inscan modifier on the enclosing
+    // worksharing-loop, worksharing-loop SIMD, or simd construct.
+    if (DVar.CKind != OMPC_reduction ||
+        DVar.Modifier != OMPC_REDUCTION_inscan)
+      Diag(ELoc, diag::err_omp_inclusive_exclusive_not_reduction)
+          << RefExpr->getSourceRange();
+
+    if (DSAStack->getParentDirective() != OMPD_unknown) // Enclosing directive?
+      DSAStack->markDeclAsUsedInScanDirective(D);
+    Vars.push_back(RefExpr);
+  }
+
+  if (Vars.empty())
+    return nullptr;
+
+  return OMPInclusiveClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars);
+}
+
+OMPClause *Sema::ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
+                                            SourceLocation StartLoc,
+                                            SourceLocation LParenLoc,
+                                            SourceLocation EndLoc) {
+  SmallVector<Expr *, 8> Vars; // List items accepted for the clause.
+  for (Expr *RefExpr : VarList) {
+    assert(RefExpr && "NULL expr in OpenMP exclusive clause.");
+    SourceLocation ELoc;
+    SourceRange ERange;
+    Expr *SimpleRefExpr = RefExpr;
+    auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange,
+                              /*AllowArraySection=*/true);
+    if (Res.second)
+      // It will be analyzed later.
+      Vars.push_back(RefExpr);
+    ValueDecl *D = Res.first;
+    if (!D)
+      continue;
+
+    OpenMPDirectiveKind ParentDirective = DSAStack->getParentDirective();
+    DSAStackTy::DSAVarData DVar;
+    if (ParentDirective != OMPD_unknown)
+      DVar = DSAStack->getTopDSA(D, /*FromParent=*/true);
+    // OpenMP 5.0, 2.9.6, scan Directive, Restrictions.
+    // A list item that appears in the inclusive or exclusive clause must appear
+    // in a reduction clause with the inscan modifier on the enclosing
+    // worksharing-loop, worksharing-loop SIMD, or simd construct.
+    if (ParentDirective == OMPD_unknown || DVar.CKind != OMPC_reduction ||
+        DVar.Modifier != OMPC_REDUCTION_inscan) {
+      Diag(ELoc, diag::err_omp_inclusive_exclusive_not_reduction)
+          << RefExpr->getSourceRange();
+    } else {
+      DSAStack->markDeclAsUsedInScanDirective(D);
+    }
+    Vars.push_back(RefExpr);
+  }
+
+  if (Vars.empty())
+    return nullptr;
+
+  return OMPExclusiveClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars);
+}
+
+/// Tries to find the omp_alloctrait_t type; diagnoses if the lookup fails.
+static bool findOMPAlloctraitT(Sema &S, SourceLocation Loc, DSAStackTy *Stack) {
+  QualType OMPAlloctraitT = Stack->getOMPAlloctraitT();
+  if (!OMPAlloctraitT.isNull()) // Type was already found and cached.
+    return true;
+  IdentifierInfo &II = S.PP.getIdentifierTable().get("omp_alloctrait_t");
+  ParsedType PT = S.getTypeName(II, Loc, S.getCurScope());
+  if (!PT.getAsOpaquePtr() || PT.get().isNull()) {
+    S.Diag(Loc, diag::err_omp_implied_type_not_found) << "omp_alloctrait_t";
+    return false;
+  }
+  Stack->setOMPAlloctraitT(PT.get()); // Cache the type for later queries.
+  return true;
+}
+
+OMPClause *Sema::ActOnOpenMPUsesAllocatorClause(
+    SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc,
+    ArrayRef<UsesAllocatorsData> Data) { // Check each allocator/traits pair.
+  // OpenMP [2.12.5, target Construct]
+  // allocator is an identifier of omp_allocator_handle_t type.
+  if (!findOMPAllocatorHandleT(*this, StartLoc, DSAStack))
+    return nullptr;
+  // OpenMP [2.12.5, target Construct]
+  // allocator-traits-array is an identifier of const omp_alloctrait_t * type.
+  if (llvm::any_of(
+          Data,
+          [](const UsesAllocatorsData &D) { return D.AllocatorTraits; }) &&
+      !findOMPAlloctraitT(*this, StartLoc, DSAStack))
+    return nullptr;
+  llvm::SmallSet<CanonicalDeclPtr<Decl>, 4> PredefinedAllocators;
+  for (int I = 0; I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
+    auto AllocatorKind = static_cast<OMPAllocateDeclAttr::AllocatorTypeTy>(I);
+    StringRef Allocator =
+        OMPAllocateDeclAttr::ConvertAllocatorTypeTyToStr(AllocatorKind);
+    DeclarationName AllocatorName = &Context.Idents.get(Allocator);
+    PredefinedAllocators.insert(LookupSingleName(
+        TUScope, AllocatorName, StartLoc, Sema::LookupAnyName));
+  }
+
+  SmallVector<OMPUsesAllocatorsClause::Data, 4> NewData; // Checked entries.
+  for (const UsesAllocatorsData &D : Data) {
+    Expr *AllocatorExpr = nullptr;
+    // Check allocator expression.
+    if (D.Allocator->isTypeDependent()) { // Defer checks to instantiation.
+      AllocatorExpr = D.Allocator;
+    } else {
+      // A non-predefined allocator gets a new handle assigned to it, so it
+      // must be a non-const lvalue of omp_allocator_handle_t type.
+      AllocatorExpr = D.Allocator->IgnoreParenImpCasts();
+      auto *DRE = dyn_cast<DeclRefExpr>(AllocatorExpr);
+      bool IsPredefinedAllocator = false;
+      if (DRE)
+        IsPredefinedAllocator = PredefinedAllocators.count(DRE->getDecl());
+      if (!DRE ||
+          !(Context.hasSameUnqualifiedType(
+                AllocatorExpr->getType(), DSAStack->getOMPAllocatorHandleT()) ||
+            Context.typesAreCompatible(AllocatorExpr->getType(),
+                                       DSAStack->getOMPAllocatorHandleT(),
+                                       /*CompareUnqualified=*/true)) ||
+          (!IsPredefinedAllocator &&
+           (AllocatorExpr->getType().isConstant(Context) ||
+            !AllocatorExpr->isLValue()))) {
+        Diag(D.Allocator->getExprLoc(), diag::err_omp_var_expected)
+            << "omp_allocator_handle_t" << (DRE ? 1 : 0)
+            << AllocatorExpr->getType() << D.Allocator->getSourceRange();
+        continue;
+      }
+      // OpenMP [2.12.5, target Construct]
+      // Predefined allocators appearing in a uses_allocators clause cannot have
+      // traits specified.
+      if (IsPredefinedAllocator && D.AllocatorTraits) {
+        Diag(D.AllocatorTraits->getExprLoc(),
+             diag::err_omp_predefined_allocator_with_traits)
+            << D.AllocatorTraits->getSourceRange();
+        Diag(D.Allocator->getExprLoc(), diag::note_omp_predefined_allocator)
+            << cast<NamedDecl>(DRE->getDecl())->getName()
+            << D.Allocator->getSourceRange();
+        continue;
+      }
+      // OpenMP [2.12.5, target Construct]
+      // Non-predefined allocators appearing in a uses_allocators clause must
+      // have traits specified.
+      if (!IsPredefinedAllocator && !D.AllocatorTraits) {
+        Diag(D.Allocator->getExprLoc(),
+             diag::err_omp_nonpredefined_allocator_without_traits);
+        continue;
+      }
+      // No allocator traits - just convert it to rvalue.
+      if (!D.AllocatorTraits)
+        AllocatorExpr = DefaultLvalueConversion(AllocatorExpr).get();
+      DSAStack->addUsesAllocatorsDecl(
+          DRE->getDecl(),
+          IsPredefinedAllocator
+              ? DSAStackTy::UsesAllocatorsDeclKind::PredefinedAllocator
+              : DSAStackTy::UsesAllocatorsDeclKind::UserDefinedAllocator);
+    }
+    Expr *AllocatorTraitsExpr = nullptr;
+    if (D.AllocatorTraits) {
+      if (D.AllocatorTraits->isTypeDependent()) { // Defer to instantiation.
+        AllocatorTraitsExpr = D.AllocatorTraits;
+      } else {
+        // OpenMP [2.12.5, target Construct]
+        // Arrays that contain allocator traits that appear in a uses_allocators
+        // clause must be constant arrays, have constant values and be defined
+        // in the same scope as the construct in which the clause appears.
+        AllocatorTraitsExpr = D.AllocatorTraits->IgnoreParenImpCasts();
+        // Check that traits expr is a constant array.
+        QualType TraitTy;
+        if (const ArrayType *Ty =
+                AllocatorTraitsExpr->getType()->getAsArrayTypeUnsafe())
+          if (const auto *ConstArrayTy = dyn_cast<ConstantArrayType>(Ty))
+            TraitTy = ConstArrayTy->getElementType();
+        if (TraitTy.isNull() ||
+            !(Context.hasSameUnqualifiedType(TraitTy,
+                                             DSAStack->getOMPAlloctraitT()) ||
+              Context.typesAreCompatible(TraitTy, DSAStack->getOMPAlloctraitT(),
+                                         /*CompareUnqualified=*/true))) {
+          Diag(D.AllocatorTraits->getExprLoc(),
+               diag::err_omp_expected_array_alloctraits)
+              << AllocatorTraitsExpr->getType();
+          continue;
+        }
+        // Do not map by default allocator traits if it is a standalone
+        // variable.
+        if (auto *DRE = dyn_cast<DeclRefExpr>(AllocatorTraitsExpr))
+          DSAStack->addUsesAllocatorsDecl(
+              DRE->getDecl(),
+              DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait);
+      }
+    }
+    OMPUsesAllocatorsClause::Data &NewD = NewData.emplace_back();
+    NewD.Allocator = AllocatorExpr;
+    NewD.AllocatorTraits = AllocatorTraitsExpr;
+    NewD.LParenLoc = D.LParenLoc;
+    NewD.RParenLoc = D.RParenLoc;
+  }
+  return OMPUsesAllocatorsClause::Create(Context, StartLoc, LParenLoc, EndLoc,
+                                         NewData);
+}
+
+OMPClause *Sema::ActOnOpenMPAffinityClause(
+    SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc,
+    SourceLocation EndLoc, Expr *Modifier, ArrayRef<Expr *> Locators) {
+  SmallVector<Expr *, 8> Vars; // Locator items accepted for the clause.
+  for (Expr *RefExpr : Locators) {
+    assert(RefExpr && "NULL expr in OpenMP affinity clause.");
+    if (isa<DependentScopeDeclRefExpr>(RefExpr) || RefExpr->isTypeDependent()) {
+      // It will be analyzed later.
+      Vars.push_back(RefExpr);
+      continue;
+    }
+
+    SourceLocation ELoc = RefExpr->getExprLoc();
+    Expr *SimpleExpr = RefExpr->IgnoreParenImpCasts();
+
+    if (!SimpleExpr->isLValue()) {
+      Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
+          << 1 << 0 << RefExpr->getSourceRange();
+      continue;
+    }
+
+    ExprResult Res;
+    {
+      Sema::TentativeAnalysisScope Trap(*this); // Suppress diagnostics.
+      Res = CreateBuiltinUnaryOp(ELoc, UO_AddrOf, SimpleExpr);
+    }
+    if (!Res.isUsable() && !isa<OMPArraySectionExpr>(SimpleExpr) &&
+        !isa<OMPArrayShapingExpr>(SimpleExpr)) {
+      Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
+          << 1 << 0 << RefExpr->getSourceRange();
+      continue;
+    }
+    Vars.push_back(SimpleExpr);
+  }
+
+  return OMPAffinityClause::Create(Context, StartLoc, LParenLoc, ColonLoc,
+                                   EndLoc, Modifier, Vars);
+}
diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp
index 0fd932fac970..8635397f4806 100644
--- a/clang/lib/Sema/SemaOverload.cpp
+++ b/clang/lib/Sema/SemaOverload.cpp
@@ -10,10 +10,10 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/Overload.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
@@ -21,9 +21,11 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Overload.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateDeduction.h"
@@ -38,6 +40,8 @@
using namespace clang;
using namespace sema;
+using AllowedExplicit = Sema::AllowedExplicit;
+
static bool functionHasPassObjectSizeParams(const FunctionDecl *FD) {
return llvm::any_of(FD->parameters(), [](const ParmVarDecl *P) {
return P->hasAttr<PassObjectSizeAttr>();
@@ -91,10 +95,9 @@ static OverloadingResult
IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
UserDefinedConversionSequence& User,
OverloadCandidateSet& Conversions,
- bool AllowExplicit,
+ AllowedExplicit AllowExplicit,
bool AllowObjCConversionOnExplicit);
-
static ImplicitConversionSequence::CompareKind
CompareStandardConversionSequences(Sema &S, SourceLocation Loc,
const StandardConversionSequence& SCS1,
@@ -229,7 +232,6 @@ bool StandardConversionSequence::isPointerConversionToBool() const {
getFromType()->isMemberPointerType() ||
getFromType()->isObjCObjectPointerType() ||
getFromType()->isBlockPointerType() ||
- getFromType()->isNullPtrType() ||
First == ICK_Array_To_Pointer || First == ICK_Function_To_Pointer))
return true;
@@ -327,9 +329,8 @@ NarrowingKind StandardConversionSequence::getNarrowingKind(
goto FloatingIntegralConversion;
if (FromType->isIntegralOrUnscopedEnumerationType())
goto IntegralConversion;
- // Boolean conversions can be from pointers and pointers to members
- // [conv.bool], and those aren't considered narrowing conversions.
- return NK_Not_Narrowing;
+ // -- from a pointer type or pointer-to-member type to bool, or
+ return NK_Type_Narrowing;
// -- from a floating-point type to an integer type, or
//
@@ -1317,7 +1318,7 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
static ImplicitConversionSequence
TryUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
bool SuppressUserConversions,
- bool AllowExplicit,
+ AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion,
@@ -1420,7 +1421,7 @@ TryUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
static ImplicitConversionSequence
TryImplicitConversion(Sema &S, Expr *From, QualType ToType,
bool SuppressUserConversions,
- bool AllowExplicit,
+ AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion,
@@ -1475,13 +1476,12 @@ TryImplicitConversion(Sema &S, Expr *From, QualType ToType,
ImplicitConversionSequence
Sema::TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
- bool AllowExplicit,
+ AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion) {
- return ::TryImplicitConversion(*this, From, ToType,
- SuppressUserConversions, AllowExplicit,
- InOverloadResolution, CStyle,
+ return ::TryImplicitConversion(*this, From, ToType, SuppressUserConversions,
+ AllowExplicit, InOverloadResolution, CStyle,
AllowObjCWritebackConversion,
/*AllowObjCConversionOnExplicit=*/false);
}
@@ -1514,10 +1514,10 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
From->getType(), From);
ICS = ::TryImplicitConversion(*this, From, ToType,
/*SuppressUserConversions=*/false,
- AllowExplicit,
+ AllowExplicit ? AllowedExplicit::All
+ : AllowedExplicit::None,
/*InOverloadResolution=*/false,
- /*CStyle=*/false,
- AllowObjCWritebackConversion,
+ /*CStyle=*/false, AllowObjCWritebackConversion,
/*AllowObjCConversionOnExplicit=*/false);
return PerformImplicitConversion(From, ToType, ICS, Action);
}
@@ -1653,9 +1653,13 @@ static bool IsVectorConversion(Sema &S, QualType FromType,
// 1)vector types are equivalent AltiVec and GCC vector types
// 2)lax vector conversions are permitted and the vector types are of the
// same size
+ // 3)the destination type does not have the ARM MVE strict-polymorphism
+ // attribute, which inhibits lax vector conversion for overload resolution
+ // only
if (ToType->isVectorType() && FromType->isVectorType()) {
if (S.Context.areCompatibleVectorTypes(FromType, ToType) ||
- S.isLaxVectorConversion(FromType, ToType)) {
+ (S.isLaxVectorConversion(FromType, ToType) &&
+ !ToType->hasAttr(attr::ArmMveStrictPolymorphism))) {
ICK = ICK_Vector_Conversion;
return true;
}
@@ -1844,8 +1848,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
(FromType->isArithmeticType() ||
FromType->isAnyPointerType() ||
FromType->isBlockPointerType() ||
- FromType->isMemberPointerType() ||
- FromType->isNullPtrType())) {
+ FromType->isMemberPointerType())) {
// Boolean conversions (C++ 4.12).
SCS.Second = ICK_Boolean_Conversion;
FromType = S.Context.BoolTy;
@@ -1867,6 +1870,10 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
// FIXME: disable conversions between long double and __float128 if
// their representation is different until there is back end support
// We of course allow this conversion if long double is really double.
+
+ // Conversions between bfloat and other floats are not permitted.
+ if (FromType == S.Context.BFloat16Ty || ToType == S.Context.BFloat16Ty)
+ return false;
if (&S.Context.getFloatTypeSemantics(FromType) !=
&S.Context.getFloatTypeSemantics(ToType)) {
bool Float128AndLongDouble = ((FromType == S.Context.Float128Ty &&
@@ -1885,6 +1892,10 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
ToType->isIntegralType(S.Context)) ||
(FromType->isIntegralOrUnscopedEnumerationType() &&
ToType->isRealFloatingType())) {
+ // Conversions between bfloat and int are not permitted.
+ if (FromType->isBFloat16Type() || ToType->isBFloat16Type())
+ return false;
+
// Floating-integral conversions (C++ 4.9).
SCS.Second = ICK_Floating_Integral;
FromType = ToType.getUnqualifiedType();
@@ -3000,13 +3011,13 @@ bool Sema::CheckPointerConversion(Expr *From, QualType ToType,
// We must have a derived-to-base conversion. Check an
// ambiguous or inaccessible conversion.
unsigned InaccessibleID = 0;
- unsigned AmbigiousID = 0;
+ unsigned AmbiguousID = 0;
if (Diagnose) {
InaccessibleID = diag::err_upcast_to_inaccessible_base;
- AmbigiousID = diag::err_ambiguous_derived_to_base_conv;
+ AmbiguousID = diag::err_ambiguous_derived_to_base_conv;
}
if (CheckDerivedToBaseConversion(
- FromPointeeType, ToPointeeType, InaccessibleID, AmbigiousID,
+ FromPointeeType, ToPointeeType, InaccessibleID, AmbiguousID,
From->getExprLoc(), From->getSourceRange(), DeclarationName(),
&BasePath, IgnoreBaseAccess))
return true;
@@ -3176,7 +3187,7 @@ static bool isNonTrivialObjCLifetimeConversion(Qualifiers FromQuals,
/// FromType and \p ToType is permissible, given knowledge about whether every
/// outer layer is const-qualified.
static bool isQualificationConversionStep(QualType FromType, QualType ToType,
- bool CStyle,
+ bool CStyle, bool IsTopLevel,
bool &PreviousToQualsIncludeConst,
bool &ObjCLifetimeConversion) {
Qualifiers FromQuals = FromType.getQualifiers();
@@ -3213,11 +3224,15 @@ static bool isQualificationConversionStep(QualType FromType, QualType ToType,
if (!CStyle && !ToQuals.compatiblyIncludes(FromQuals))
return false;
- // For a C-style cast, just require the address spaces to overlap.
- // FIXME: Does "superset" also imply the representation of a pointer is the
- // same? We're assuming that it does here and in compatiblyIncludes.
- if (CStyle && !ToQuals.isAddressSpaceSupersetOf(FromQuals) &&
- !FromQuals.isAddressSpaceSupersetOf(ToQuals))
+ // If address spaces mismatch:
+ // - in top level it is only valid to convert to addr space that is a
+ // superset in all cases apart from C-style casts where we allow
+ // conversions between overlapping address spaces.
+ // - in non-top levels it is not a valid conversion.
+ if (ToQuals.getAddressSpace() != FromQuals.getAddressSpace() &&
+ (!IsTopLevel ||
+ !(ToQuals.isAddressSpaceSupersetOf(FromQuals) ||
+ (CStyle && FromQuals.isAddressSpaceSupersetOf(ToQuals)))))
return false;
// -- if the cv 1,j and cv 2,j are different, then const is in
@@ -3258,9 +3273,9 @@ Sema::IsQualificationConversion(QualType FromType, QualType ToType,
bool PreviousToQualsIncludeConst = true;
bool UnwrappedAnyPointer = false;
while (Context.UnwrapSimilarTypes(FromType, ToType)) {
- if (!isQualificationConversionStep(FromType, ToType, CStyle,
- PreviousToQualsIncludeConst,
- ObjCLifetimeConversion))
+ if (!isQualificationConversionStep(
+ FromType, ToType, CStyle, !UnwrappedAnyPointer,
+ PreviousToQualsIncludeConst, ObjCLifetimeConversion))
return false;
UnwrappedAnyPointer = true;
}
@@ -3393,9 +3408,10 @@ static OverloadingResult
IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
UserDefinedConversionSequence &User,
OverloadCandidateSet &CandidateSet,
- bool AllowExplicit,
+ AllowedExplicit AllowExplicit,
bool AllowObjCConversionOnExplicit) {
- assert(AllowExplicit || !AllowObjCConversionOnExplicit);
+ assert(AllowExplicit != AllowedExplicit::None ||
+ !AllowObjCConversionOnExplicit);
CandidateSet.clear(OverloadCandidateSet::CSK_InitByUserDefinedConversion);
// Whether we will only visit constructors.
@@ -3428,7 +3444,8 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
if (InitListExpr *InitList = dyn_cast<InitListExpr>(From)) {
// But first, see if there is an init-list-constructor that will work.
OverloadingResult Result = IsInitializerListConstructorConversion(
- S, From, ToType, ToRecordDecl, User, CandidateSet, AllowExplicit);
+ S, From, ToType, ToRecordDecl, User, CandidateSet,
+ AllowExplicit == AllowedExplicit::All);
if (Result != OR_No_Viable_Function)
return Result;
// Never mind.
@@ -3467,14 +3484,16 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
Info.ConstructorTmpl, Info.FoundDecl,
/*ExplicitArgs*/ nullptr, llvm::makeArrayRef(Args, NumArgs),
CandidateSet, SuppressUserConversions,
- /*PartialOverloading*/ false, AllowExplicit);
+ /*PartialOverloading*/ false,
+ AllowExplicit == AllowedExplicit::All);
else
// Allow one user-defined conversion when user specifies a
// From->ToType conversion via an static cast (c-style, etc).
S.AddOverloadCandidate(Info.Constructor, Info.FoundDecl,
llvm::makeArrayRef(Args, NumArgs),
CandidateSet, SuppressUserConversions,
- /*PartialOverloading*/ false, AllowExplicit);
+ /*PartialOverloading*/ false,
+ AllowExplicit == AllowedExplicit::All);
}
}
}
@@ -3507,11 +3526,12 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
if (ConvTemplate)
S.AddTemplateConversionCandidate(
ConvTemplate, FoundDecl, ActingContext, From, ToType,
- CandidateSet, AllowObjCConversionOnExplicit, AllowExplicit);
+ CandidateSet, AllowObjCConversionOnExplicit,
+ AllowExplicit != AllowedExplicit::None);
else
- S.AddConversionCandidate(
- Conv, FoundDecl, ActingContext, From, ToType, CandidateSet,
- AllowObjCConversionOnExplicit, AllowExplicit);
+ S.AddConversionCandidate(Conv, FoundDecl, ActingContext, From, ToType,
+ CandidateSet, AllowObjCConversionOnExplicit,
+ AllowExplicit != AllowedExplicit::None);
}
}
}
@@ -3597,7 +3617,7 @@ Sema::DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType) {
OverloadCandidateSet::CSK_Normal);
OverloadingResult OvResult =
IsUserDefinedConversion(*this, From, ToType, ICS.UserDefined,
- CandidateSet, false, false);
+ CandidateSet, AllowedExplicit::None, false);
if (!(OvResult == OR_Ambiguous ||
(OvResult == OR_No_Viable_Function && !CandidateSet.empty())))
@@ -4499,7 +4519,7 @@ Sema::CompareReferenceRelationship(SourceLocation Loc,
// If we find a qualifier mismatch, the types are not reference-compatible,
// but are still be reference-related if they're similar.
bool ObjCLifetimeConversion = false;
- if (!isQualificationConversionStep(T2, T1, /*CStyle=*/false,
+ if (!isQualificationConversionStep(T2, T1, /*CStyle=*/false, TopLevel,
PreviousToQualsIncludeConst,
ObjCLifetimeConversion))
return (ConvertedReferent || Context.hasSimilarType(T1, T2))
@@ -4689,7 +4709,7 @@ TryReferenceInit(Sema &S, Expr *Init, QualType DeclType,
Sema::ReferenceConversions::NestedQualification)
? ICK_Qualification
: ICK_Identity;
- ICS.Standard.FromTypePtr = T2.getAsOpaquePtr();
+ ICS.Standard.setFromType(T2);
ICS.Standard.setToType(0, T2);
ICS.Standard.setToType(1, T1);
ICS.Standard.setToType(2, T1);
@@ -4858,7 +4878,7 @@ TryReferenceInit(Sema &S, Expr *Init, QualType DeclType,
// cv-qualification is subsumed by the initialization itself
// and does not constitute a conversion.
ICS = TryImplicitConversion(S, Init, T1, SuppressUserConversions,
- /*AllowExplicit=*/false,
+ AllowedExplicit::None,
/*InOverloadResolution=*/false,
/*CStyle=*/false,
/*AllowObjCWritebackConversion=*/false,
@@ -5027,7 +5047,7 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
if (ToType->isRecordType() && !ToType->isAggregateType()) {
// This function can deal with initializer lists.
return TryUserDefinedConversion(S, From, ToType, SuppressUserConversions,
- /*AllowExplicit=*/false,
+ AllowedExplicit::None,
InOverloadResolution, /*CStyle=*/false,
AllowObjCWritebackConversion,
/*AllowObjCConversionOnExplicit=*/false);
@@ -5179,7 +5199,7 @@ TryCopyInitialization(Sema &S, Expr *From, QualType ToType,
return TryImplicitConversion(S, From, ToType,
SuppressUserConversions,
- /*AllowExplicit=*/false,
+ AllowedExplicit::None,
InOverloadResolution,
/*CStyle=*/false,
AllowObjCWritebackConversion,
@@ -5425,9 +5445,20 @@ Sema::PerformObjectArgumentInitialization(Expr *From,
/// expression From to bool (C++0x [conv]p3).
static ImplicitConversionSequence
TryContextuallyConvertToBool(Sema &S, Expr *From) {
+ // C++ [dcl.init]/17.8:
+ // - Otherwise, if the initialization is direct-initialization, the source
+ // type is std::nullptr_t, and the destination type is bool, the initial
+ // value of the object being initialized is false.
+ if (From->getType()->isNullPtrType())
+ return ImplicitConversionSequence::getNullptrToBool(From->getType(),
+ S.Context.BoolTy,
+ From->isGLValue());
+
+ // All other direct-initialization of bool is equivalent to an implicit
+ // conversion to bool in which explicit conversions are permitted.
return TryImplicitConversion(S, From, S.Context.BoolTy,
/*SuppressUserConversions=*/false,
- /*AllowExplicit=*/true,
+ AllowedExplicit::Conversions,
/*InOverloadResolution=*/false,
/*CStyle=*/false,
/*AllowObjCWritebackConversion=*/false,
@@ -5699,7 +5730,7 @@ TryContextuallyConvertToObjCPointer(Sema &S, Expr *From) {
= TryImplicitConversion(S, From, Ty,
// FIXME: Are these flags correct?
/*SuppressUserConversions=*/false,
- /*AllowExplicit=*/true,
+ AllowedExplicit::Conversions,
/*InOverloadResolution=*/false,
/*CStyle=*/false,
/*AllowObjCWritebackConversion=*/false,
@@ -6291,9 +6322,9 @@ void Sema::AddOverloadCandidate(
return;
}
- if (Expr *RequiresClause = Function->getTrailingRequiresClause()) {
+ if (Function->getTrailingRequiresClause()) {
ConstraintSatisfaction Satisfaction;
- if (CheckConstraintSatisfaction(RequiresClause, Satisfaction) ||
+ if (CheckFunctionConstraints(Function, Satisfaction) ||
!Satisfaction.IsSatisfied) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_constraints_not_satisfied;
@@ -6333,7 +6364,8 @@ void Sema::AddOverloadCandidate(
}
}
- if (EnableIfAttr *FailedAttr = CheckEnableIf(Function, Args)) {
+ if (EnableIfAttr *FailedAttr =
+ CheckEnableIf(Function, CandidateSet.getLocation(), Args)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_enable_if;
Candidate.DeductionFailure.Data = FailedAttr;
@@ -6439,11 +6471,10 @@ Sema::SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance,
return nullptr;
}
-static bool
-convertArgsForAvailabilityChecks(Sema &S, FunctionDecl *Function, Expr *ThisArg,
- ArrayRef<Expr *> Args, Sema::SFINAETrap &Trap,
- bool MissingImplicitThis, Expr *&ConvertedThis,
- SmallVectorImpl<Expr *> &ConvertedArgs) {
+static bool convertArgsForAvailabilityChecks(
+ Sema &S, FunctionDecl *Function, Expr *ThisArg, SourceLocation CallLoc,
+ ArrayRef<Expr *> Args, Sema::SFINAETrap &Trap, bool MissingImplicitThis,
+ Expr *&ConvertedThis, SmallVectorImpl<Expr *> &ConvertedArgs) {
if (ThisArg) {
CXXMethodDecl *Method = cast<CXXMethodDecl>(Function);
assert(!isa<CXXConstructorDecl>(Method) &&
@@ -6488,17 +6519,9 @@ convertArgsForAvailabilityChecks(Sema &S, FunctionDecl *Function, Expr *ThisArg,
if (!Function->isVariadic() && Args.size() < Function->getNumParams()) {
for (unsigned i = Args.size(), e = Function->getNumParams(); i != e; ++i) {
ParmVarDecl *P = Function->getParamDecl(i);
- Expr *DefArg = P->hasUninstantiatedDefaultArg()
- ? P->getUninstantiatedDefaultArg()
- : P->getDefaultArg();
- // This can only happen in code completion, i.e. when PartialOverloading
- // is true.
- if (!DefArg)
+ if (!P->hasDefaultArg())
return false;
- ExprResult R =
- S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
- S.Context, Function->getParamDecl(i)),
- SourceLocation(), DefArg);
+ ExprResult R = S.BuildCXXDefaultArgExpr(CallLoc, Function, P);
if (R.isInvalid())
return false;
ConvertedArgs.push_back(R.get());
@@ -6510,7 +6533,9 @@ convertArgsForAvailabilityChecks(Sema &S, FunctionDecl *Function, Expr *ThisArg,
return true;
}
-EnableIfAttr *Sema::CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
+EnableIfAttr *Sema::CheckEnableIf(FunctionDecl *Function,
+ SourceLocation CallLoc,
+ ArrayRef<Expr *> Args,
bool MissingImplicitThis) {
auto EnableIfAttrs = Function->specific_attrs<EnableIfAttr>();
if (EnableIfAttrs.begin() == EnableIfAttrs.end())
@@ -6521,7 +6546,7 @@ EnableIfAttr *Sema::CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
// FIXME: We should look into making enable_if late-parsed.
Expr *DiscardedThis;
if (!convertArgsForAvailabilityChecks(
- *this, Function, /*ThisArg=*/nullptr, Args, Trap,
+ *this, Function, /*ThisArg=*/nullptr, CallLoc, Args, Trap,
/*MissingImplicitThis=*/true, DiscardedThis, ConvertedArgs))
return *EnableIfAttrs.begin();
@@ -6808,9 +6833,9 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
return;
}
- if (Expr *RequiresClause = Method->getTrailingRequiresClause()) {
+ if (Method->getTrailingRequiresClause()) {
ConstraintSatisfaction Satisfaction;
- if (CheckConstraintSatisfaction(RequiresClause, Satisfaction) ||
+ if (CheckFunctionConstraints(Method, Satisfaction) ||
!Satisfaction.IsSatisfied) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_constraints_not_satisfied;
@@ -6851,7 +6876,8 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
}
}
- if (EnableIfAttr *FailedAttr = CheckEnableIf(Method, Args, true)) {
+ if (EnableIfAttr *FailedAttr =
+ CheckEnableIf(Method, CandidateSet.getLocation(), Args, true)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_enable_if;
Candidate.DeductionFailure.Data = FailedAttr;
@@ -7204,10 +7230,9 @@ void Sema::AddConversionCandidate(
return;
}
- Expr *RequiresClause = Conversion->getTrailingRequiresClause();
- if (RequiresClause) {
+ if (Conversion->getTrailingRequiresClause()) {
ConstraintSatisfaction Satisfaction;
- if (CheckConstraintSatisfaction(RequiresClause, Satisfaction) ||
+ if (CheckFunctionConstraints(Conversion, Satisfaction) ||
!Satisfaction.IsSatisfied) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_constraints_not_satisfied;
@@ -7305,7 +7330,8 @@ void Sema::AddConversionCandidate(
"Can only end up with a standard conversion sequence or failure");
}
- if (EnableIfAttr *FailedAttr = CheckEnableIf(Conversion, None)) {
+ if (EnableIfAttr *FailedAttr =
+ CheckEnableIf(Conversion, CandidateSet.getLocation(), None)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_enable_if;
Candidate.DeductionFailure.Data = FailedAttr;
@@ -7475,7 +7501,8 @@ void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
}
}
- if (EnableIfAttr *FailedAttr = CheckEnableIf(Conversion, None)) {
+ if (EnableIfAttr *FailedAttr =
+ CheckEnableIf(Conversion, CandidateSet.getLocation(), None)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_enable_if;
Candidate.DeductionFailure.Data = FailedAttr;
@@ -7665,6 +7692,10 @@ class BuiltinCandidateTypeSet {
/// candidates.
TypeSet VectorTypes;
+ /// The set of matrix types that will be used in the built-in
+ /// candidates.
+ TypeSet MatrixTypes;
+
/// A flag indicating non-record types are viable candidates
bool HasNonRecordTypes;
@@ -7722,9 +7753,11 @@ public:
/// enumeration_end - Past the last enumeration type found;
iterator enumeration_end() { return EnumerationTypes.end(); }
- iterator vector_begin() { return VectorTypes.begin(); }
- iterator vector_end() { return VectorTypes.end(); }
+ llvm::iterator_range<iterator> vector_types() { return VectorTypes; }
+
+ llvm::iterator_range<iterator> matrix_types() { return MatrixTypes; }
+ bool containsMatrixType(QualType Ty) const { return MatrixTypes.count(Ty); }
bool hasNonRecordTypes() { return HasNonRecordTypes; }
bool hasArithmeticOrEnumeralTypes() { return HasArithmeticOrEnumeralTypes; }
bool hasNullPtrType() const { return HasNullPtrType; }
@@ -7899,6 +7932,11 @@ BuiltinCandidateTypeSet::AddTypesConvertedFrom(QualType Ty,
// extension.
HasArithmeticOrEnumeralTypes = true;
VectorTypes.insert(Ty);
+ } else if (Ty->isMatrixType()) {
+    // Similar to vector types, we treat matrix types as arithmetic types in
+    // many contexts as an extension.
+ HasArithmeticOrEnumeralTypes = true;
+ MatrixTypes.insert(Ty);
} else if (Ty->isNullPtrType()) {
HasNullPtrType = true;
} else if (AllowUserConversions && TyRec) {
@@ -8127,6 +8165,13 @@ class BuiltinOperatorOverloadBuilder {
}
+ /// Helper to add an overload candidate for a binary builtin with types \p L
+ /// and \p R.
+ /// Forwards to Sema::AddBuiltinCandidate using the builder's \p Args and
+ /// \p CandidateSet.
+ void AddCandidate(QualType L, QualType R) {
+ QualType LandR[2] = {L, R};
+ S.AddBuiltinCandidate(LandR, Args, CandidateSet);
+ }
+
public:
BuiltinOperatorOverloadBuilder(
Sema &S, ArrayRef<Expr *> Args,
@@ -8254,13 +8299,8 @@ public:
}
// Extension: We also add these operators for vector types.
- for (BuiltinCandidateTypeSet::iterator
- Vec = CandidateTypes[0].vector_begin(),
- VecEnd = CandidateTypes[0].vector_end();
- Vec != VecEnd; ++Vec) {
- QualType VecTy = *Vec;
+ for (QualType VecTy : CandidateTypes[0].vector_types())
S.AddBuiltinCandidate(&VecTy, Args, CandidateSet);
- }
}
// C++ [over.built]p8:
@@ -8294,13 +8334,8 @@ public:
}
// Extension: We also add this operator for vector types.
- for (BuiltinCandidateTypeSet::iterator
- Vec = CandidateTypes[0].vector_begin(),
- VecEnd = CandidateTypes[0].vector_end();
- Vec != VecEnd; ++Vec) {
- QualType VecTy = *Vec;
+ for (QualType VecTy : CandidateTypes[0].vector_types())
S.AddBuiltinCandidate(&VecTy, Args, CandidateSet);
- }
}
// C++ [over.match.oper]p16:
@@ -8380,7 +8415,7 @@ public:
// We interpret "same parameter-type-list" as applying to the
// "synthesized candidate, with the order of the two parameters
// reversed", not to the original function.
- bool Reversed = C->RewriteKind & CRK_Reversed;
+ bool Reversed = C->isReversed();
QualType FirstParamType = C->Function->getParamDecl(Reversed ? 1 : 0)
->getType()
.getUnqualifiedType();
@@ -8531,17 +8566,31 @@ public:
// Extension: Add the binary operators ==, !=, <, <=, >=, >, *, /, and the
// conditional operator for vector types.
- for (BuiltinCandidateTypeSet::iterator
- Vec1 = CandidateTypes[0].vector_begin(),
- Vec1End = CandidateTypes[0].vector_end();
- Vec1 != Vec1End; ++Vec1) {
- for (BuiltinCandidateTypeSet::iterator
- Vec2 = CandidateTypes[1].vector_begin(),
- Vec2End = CandidateTypes[1].vector_end();
- Vec2 != Vec2End; ++Vec2) {
- QualType LandR[2] = { *Vec1, *Vec2 };
+ for (QualType Vec1Ty : CandidateTypes[0].vector_types())
+ for (QualType Vec2Ty : CandidateTypes[1].vector_types()) {
+ QualType LandR[2] = {Vec1Ty, Vec2Ty};
S.AddBuiltinCandidate(LandR, Args, CandidateSet);
}
+ }
+
+ /// Add binary operator overloads for each candidate matrix type M1, M2:
+ /// * (M1, M1) -> M1
+ /// * (M1, M1.getElementType()) -> M1
+ /// * (M2.getElementType(), M2) -> M2
+ /// * (M2, M2) -> M2 // Only if M2 is not part of CandidateTypes[0].
+ void addMatrixBinaryArithmeticOverloads() {
+ // Matrix types only participate when some arithmetic/enumeral candidate
+ // type exists, mirroring the other arithmetic overload builders.
+ if (!HasArithmeticOrEnumeralCandidateType)
+ return;
+
+ for (QualType M1 : CandidateTypes[0].matrix_types()) {
+ AddCandidate(M1, cast<MatrixType>(M1)->getElementType());
+ AddCandidate(M1, M1);
+ }
+
+ for (QualType M2 : CandidateTypes[1].matrix_types()) {
+ AddCandidate(cast<MatrixType>(M2)->getElementType(), M2);
+ // Skip (M2, M2) if the first loop already added it for this type, to
+ // avoid emitting duplicate candidates.
+ if (!CandidateTypes[0].containsMatrixType(M2))
+ AddCandidate(M2, M2);
}
}
@@ -8802,30 +8851,23 @@ public:
}
// Extension: Add the binary operators =, +=, -=, *=, /= for vector types.
- for (BuiltinCandidateTypeSet::iterator
- Vec1 = CandidateTypes[0].vector_begin(),
- Vec1End = CandidateTypes[0].vector_end();
- Vec1 != Vec1End; ++Vec1) {
- for (BuiltinCandidateTypeSet::iterator
- Vec2 = CandidateTypes[1].vector_begin(),
- Vec2End = CandidateTypes[1].vector_end();
- Vec2 != Vec2End; ++Vec2) {
+ for (QualType Vec1Ty : CandidateTypes[0].vector_types())
+ // NOTE(review): the inner loop must iterate CandidateTypes[1] to match
+ // the replaced iterator-based code above (Vec2 came from
+ // CandidateTypes[1]); using [0] here would change which candidates are
+ // generated for the RHS.
+ for (QualType Vec2Ty : CandidateTypes[1].vector_types()) {
QualType ParamTypes[2];
- ParamTypes[1] = *Vec2;
+ ParamTypes[1] = Vec2Ty;
// Add this built-in operator as a candidate (VQ is empty).
- ParamTypes[0] = S.Context.getLValueReferenceType(*Vec1);
+ ParamTypes[0] = S.Context.getLValueReferenceType(Vec1Ty);
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/isEqualOp);
// Add this built-in operator as a candidate (VQ is 'volatile').
if (VisibleTypeConversionsQuals.hasVolatile()) {
- ParamTypes[0] = S.Context.getVolatileType(*Vec1);
+ ParamTypes[0] = S.Context.getVolatileType(Vec1Ty);
ParamTypes[0] = S.Context.getLValueReferenceType(ParamTypes[0]);
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/isEqualOp);
}
}
- }
}
// C++ [over.built]p22:
@@ -9118,14 +9160,17 @@ void Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
} else {
OpBuilder.addBinaryPlusOrMinusPointerOverloads(Op);
OpBuilder.addGenericBinaryArithmeticOverloads();
+ OpBuilder.addMatrixBinaryArithmeticOverloads();
}
break;
case OO_Star: // '*' is either unary or binary
if (Args.size() == 1)
OpBuilder.addUnaryStarPointerOverloads();
- else
+ else {
OpBuilder.addGenericBinaryArithmeticOverloads();
+ OpBuilder.addMatrixBinaryArithmeticOverloads();
+ }
break;
case OO_Slash:
@@ -9270,17 +9315,31 @@ Sema::AddArgumentDependentLookupCandidates(DeclarationName Name,
if (ExplicitTemplateArgs)
continue;
- AddOverloadCandidate(FD, FoundDecl, Args, CandidateSet,
- /*SuppressUserConversions=*/false, PartialOverloading,
- /*AllowExplicit*/ true,
- /*AllowExplicitConversions*/ false,
- ADLCallKind::UsesADL);
+ AddOverloadCandidate(
+ FD, FoundDecl, Args, CandidateSet, /*SuppressUserConversions=*/false,
+ PartialOverloading, /*AllowExplicit=*/true,
+ /*AllowExplicitConversions=*/false, ADLCallKind::UsesADL);
+ if (CandidateSet.getRewriteInfo().shouldAddReversed(Context, FD)) {
+ AddOverloadCandidate(
+ FD, FoundDecl, {Args[1], Args[0]}, CandidateSet,
+ /*SuppressUserConversions=*/false, PartialOverloading,
+ /*AllowExplicit=*/true, /*AllowExplicitConversions=*/false,
+ ADLCallKind::UsesADL, None, OverloadCandidateParamOrder::Reversed);
+ }
} else {
+ auto *FTD = cast<FunctionTemplateDecl>(*I);
AddTemplateOverloadCandidate(
- cast<FunctionTemplateDecl>(*I), FoundDecl, ExplicitTemplateArgs, Args,
- CandidateSet,
+ FTD, FoundDecl, ExplicitTemplateArgs, Args, CandidateSet,
/*SuppressUserConversions=*/false, PartialOverloading,
- /*AllowExplicit*/true, ADLCallKind::UsesADL);
+ /*AllowExplicit=*/true, ADLCallKind::UsesADL);
+ if (CandidateSet.getRewriteInfo().shouldAddReversed(
+ Context, FTD->getTemplatedDecl())) {
+ AddTemplateOverloadCandidate(
+ FTD, FoundDecl, ExplicitTemplateArgs, {Args[1], Args[0]},
+ CandidateSet, /*SuppressUserConversions=*/false, PartialOverloading,
+ /*AllowExplicit=*/true, ADLCallKind::UsesADL,
+ OverloadCandidateParamOrder::Reversed);
+ }
}
}
}
@@ -9338,16 +9397,22 @@ static Comparison compareEnableIfAttrs(const Sema &S, const FunctionDecl *Cand1,
return Comparison::Equal;
}
-static bool isBetterMultiversionCandidate(const OverloadCandidate &Cand1,
- const OverloadCandidate &Cand2) {
+static Comparison
+isBetterMultiversionCandidate(const OverloadCandidate &Cand1,
+ const OverloadCandidate &Cand2) {
if (!Cand1.Function || !Cand1.Function->isMultiVersion() || !Cand2.Function ||
!Cand2.Function->isMultiVersion())
- return false;
+ return Comparison::Equal;
- // If Cand1 is invalid, it cannot be a better match, if Cand2 is invalid, this
- // is obviously better.
- if (Cand1.Function->isInvalidDecl()) return false;
- if (Cand2.Function->isInvalidDecl()) return true;
+ // If both are invalid, they are equal. If one of them is invalid, the other
+ // is better.
+ if (Cand1.Function->isInvalidDecl()) {
+ if (Cand2.Function->isInvalidDecl())
+ return Comparison::Equal;
+ return Comparison::Worse;
+ }
+ if (Cand2.Function->isInvalidDecl())
+ return Comparison::Better;
// If this is a cpu_dispatch/cpu_specific multiversion situation, prefer
// cpu_dispatch, else arbitrarily based on the identifiers.
@@ -9357,16 +9422,18 @@ static bool isBetterMultiversionCandidate(const OverloadCandidate &Cand1,
const auto *Cand2CPUSpec = Cand2.Function->getAttr<CPUSpecificAttr>();
if (!Cand1CPUDisp && !Cand2CPUDisp && !Cand1CPUSpec && !Cand2CPUSpec)
- return false;
+ return Comparison::Equal;
if (Cand1CPUDisp && !Cand2CPUDisp)
- return true;
+ return Comparison::Better;
if (Cand2CPUDisp && !Cand1CPUDisp)
- return false;
+ return Comparison::Worse;
if (Cand1CPUSpec && Cand2CPUSpec) {
if (Cand1CPUSpec->cpus_size() != Cand2CPUSpec->cpus_size())
- return Cand1CPUSpec->cpus_size() < Cand2CPUSpec->cpus_size();
+ return Cand1CPUSpec->cpus_size() < Cand2CPUSpec->cpus_size()
+ ? Comparison::Better
+ : Comparison::Worse;
std::pair<CPUSpecificAttr::cpus_iterator, CPUSpecificAttr::cpus_iterator>
FirstDiff = std::mismatch(
@@ -9379,11 +9446,56 @@ static bool isBetterMultiversionCandidate(const OverloadCandidate &Cand1,
assert(FirstDiff.first != Cand1CPUSpec->cpus_end() &&
"Two different cpu-specific versions should not have the same "
"identifier list, otherwise they'd be the same decl!");
- return (*FirstDiff.first)->getName() < (*FirstDiff.second)->getName();
+ return (*FirstDiff.first)->getName() < (*FirstDiff.second)->getName()
+ ? Comparison::Better
+ : Comparison::Worse;
}
llvm_unreachable("No way to get here unless both had cpu_dispatch");
}
+/// Compute the type of the implicit object parameter for the given function,
+/// if any. Returns None if there is no implicit object parameter, and a null
+/// QualType if there is a 'matches anything' implicit object parameter.
+static Optional<QualType> getImplicitObjectParamType(ASTContext &Context,
+ const FunctionDecl *F) {
+ // Non-member functions and constructors have no implicit object parameter.
+ if (!isa<CXXMethodDecl>(F) || isa<CXXConstructorDecl>(F))
+ return llvm::None;
+
+ auto *M = cast<CXXMethodDecl>(F);
+ // Static member functions' object parameters match all types.
+ if (M->isStatic())
+ return QualType();
+
+ // Model the object parameter as a reference to the 'this' object type,
+ // with lvalue/rvalue-ness taken from the method's ref-qualifier.
+ QualType T = M->getThisObjectType();
+ if (M->getRefQualifier() == RQ_RValue)
+ return Context.getRValueReferenceType(T);
+ return Context.getLValueReferenceType(T);
+}
+
+/// Determine whether the first \p NumParams parameter types of \p F1 and
+/// \p F2 are the same, counting the implicit object parameter (if any) as the
+/// first parameter. A null parameter type (a static member's 'matches
+/// anything' object parameter) compares equal to anything.
+static bool haveSameParameterTypes(ASTContext &Context, const FunctionDecl *F1,
+ const FunctionDecl *F2, unsigned NumParams) {
+ if (declaresSameEntity(F1, F2))
+ return true;
+
+ auto NextParam = [&](const FunctionDecl *F, unsigned &I, bool First) {
+ if (First) {
+ if (Optional<QualType> T = getImplicitObjectParamType(Context, F))
+ return *T;
+ }
+ assert(I < F->getNumParams());
+ return F->getParamDecl(I++)->getType();
+ };
+
+ unsigned I1 = 0, I2 = 0;
+ for (unsigned I = 0; I != NumParams; ++I) {
+ QualType T1 = NextParam(F1, I1, I == 0);
+ QualType T2 = NextParam(F2, I2, I == 0);
+ // Fix: the original tested !T1.isNull() twice; the second operand must
+ // check T2, so a null ('matches anything') type on either side skips the
+ // comparison instead of wrongly comparing against a null QualType.
+ if (!T1.isNull() && !T2.isNull() && !Context.hasSameUnqualifiedType(T1, T2))
+ return false;
+ }
+ return true;
+}
+
/// isBetterOverloadCandidate - Determines whether the first overload
/// candidate is a better candidate than the second (C++ 13.3.3p1).
bool clang::isBetterOverloadCandidate(
@@ -9451,18 +9563,20 @@ bool clang::isBetterOverloadCandidate(
break;
case ImplicitConversionSequence::Worse:
- if (Cand1.Function && Cand1.Function == Cand2.Function &&
- (Cand2.RewriteKind & CRK_Reversed) != 0) {
+ if (Cand1.Function && Cand2.Function &&
+ Cand1.isReversed() != Cand2.isReversed() &&
+ haveSameParameterTypes(S.Context, Cand1.Function, Cand2.Function,
+ NumArgs)) {
// Work around large-scale breakage caused by considering reversed
// forms of operator== in C++20:
//
- // When comparing a function against its reversed form, if we have a
- // better conversion for one argument and a worse conversion for the
- // other, we prefer the non-reversed form.
+ // When comparing a function against a reversed function with the same
+ // parameter types, if we have a better conversion for one argument and
+ // a worse conversion for the other, the implicit conversion sequences
+ // are treated as being equally good.
//
- // This prevents a conversion function from being considered ambiguous
- // with its own reversed form in various where it's only incidentally
- // heterogeneous.
+ // This prevents a comparison function from being considered ambiguous
+ // with a reversed form that is written in the same way.
//
// We diagnose this as an extension from CreateOverloadedBinOp.
HasWorseConversion = true;
@@ -9480,10 +9594,8 @@ bool clang::isBetterOverloadCandidate(
// -- for some argument j, ICSj(F1) is a better conversion sequence than
// ICSj(F2), or, if not that,
- if (HasBetterConversion)
+ if (HasBetterConversion && !HasWorseConversion)
return true;
- if (HasWorseConversion)
- return false;
// -- the context is an initialization by user-defined conversion
// (see 8.5, 13.3.1.5) and the standard conversion sequence
@@ -9540,14 +9652,13 @@ bool clang::isBetterOverloadCandidate(
// according to the partial ordering rules described in 14.5.5.2, or,
// if not that,
if (Cand1IsSpecialization && Cand2IsSpecialization) {
- if (FunctionTemplateDecl *BetterTemplate
- = S.getMoreSpecializedTemplate(Cand1.Function->getPrimaryTemplate(),
- Cand2.Function->getPrimaryTemplate(),
- Loc,
- isa<CXXConversionDecl>(Cand1.Function)? TPOC_Conversion
- : TPOC_Call,
- Cand1.ExplicitCallArguments,
- Cand2.ExplicitCallArguments))
+ if (FunctionTemplateDecl *BetterTemplate = S.getMoreSpecializedTemplate(
+ Cand1.Function->getPrimaryTemplate(),
+ Cand2.Function->getPrimaryTemplate(), Loc,
+ isa<CXXConversionDecl>(Cand1.Function) ? TPOC_Conversion
+ : TPOC_Call,
+ Cand1.ExplicitCallArguments, Cand2.ExplicitCallArguments,
+ Cand1.isReversed() ^ Cand2.isReversed()))
return BetterTemplate == Cand1.Function->getPrimaryTemplate();
}
@@ -9566,17 +9677,15 @@ bool clang::isBetterOverloadCandidate(
if (RC1 && RC2) {
bool AtLeastAsConstrained1, AtLeastAsConstrained2;
if (S.IsAtLeastAsConstrained(Cand1.Function, {RC1}, Cand2.Function,
- {RC2}, AtLeastAsConstrained1))
- return false;
- if (!AtLeastAsConstrained1)
- return false;
- if (S.IsAtLeastAsConstrained(Cand2.Function, {RC2}, Cand1.Function,
+ {RC2}, AtLeastAsConstrained1) ||
+ S.IsAtLeastAsConstrained(Cand2.Function, {RC2}, Cand1.Function,
{RC1}, AtLeastAsConstrained2))
return false;
- if (!AtLeastAsConstrained2)
- return true;
- } else if (RC1 || RC2)
+ if (AtLeastAsConstrained1 != AtLeastAsConstrained2)
+ return AtLeastAsConstrained1;
+ } else if (RC1 || RC2) {
return RC1 != nullptr;
+ }
}
}
@@ -9646,7 +9755,8 @@ bool clang::isBetterOverloadCandidate(
if (HasPS1 != HasPS2 && HasPS1)
return true;
- return isBetterMultiversionCandidate(Cand1, Cand2);
+ Comparison MV = isBetterMultiversionCandidate(Cand1, Cand2);
+ return MV == Comparison::Better;
}
/// Determine whether two declarations are "equivalent" for the purposes of
@@ -9947,9 +10057,9 @@ static bool checkAddressOfFunctionIsAvailable(Sema &S, const FunctionDecl *FD,
return false;
}
- if (const Expr *RC = FD->getTrailingRequiresClause()) {
+ if (FD->getTrailingRequiresClause()) {
ConstraintSatisfaction Satisfaction;
- if (S.CheckConstraintSatisfaction(RC, Satisfaction))
+ if (S.CheckFunctionConstraints(FD, Satisfaction, Loc))
return false;
if (!Satisfaction.IsSatisfied) {
if (Complain) {
@@ -10974,8 +11084,7 @@ static void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
<< (unsigned)FnKindPair.first << (unsigned)ocs_non_template
<< FnDesc /* Ignored */;
ConstraintSatisfaction Satisfaction;
- if (S.CheckConstraintSatisfaction(Fn->getTrailingRequiresClause(),
- Satisfaction))
+ if (S.CheckFunctionConstraints(Fn, Satisfaction))
break;
S.DiagnoseUnsatisfiedConstraint(Satisfaction);
}
@@ -11275,7 +11384,7 @@ CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand,
unsigned ConvIdx = 0;
unsigned ArgIdx = 0;
ArrayRef<QualType> ParamTypes;
- bool Reversed = Cand->RewriteKind & CRK_Reversed;
+ bool Reversed = Cand->isReversed();
if (Cand->IsSurrogate) {
QualType ConvType
@@ -12688,9 +12797,7 @@ bool Sema::buildOverloadedCallSet(Scope *S, Expr *Fn,
// base classes.
CallExpr *CE = CallExpr::Create(Context, Fn, Args, Context.DependentTy,
VK_RValue, RParenLoc);
- CE->setTypeDependent(true);
- CE->setValueDependent(true);
- CE->setInstantiationDependent(true);
+ CE->markDependentForPostponedNameLookup();
*Result = CE;
return true;
}
@@ -12703,6 +12810,42 @@ bool Sema::buildOverloadedCallSet(Scope *S, Expr *Fn,
return false;
}
+// Guess at what the return type for an unresolvable overload should be.
+// \param CS the overload candidate set that failed to resolve.
+// \param Best iterator produced by overload resolution; may be CS.end().
+// \returns the common return type of the considered candidates, or a null
+// QualType if the candidates disagree (or none has a usable return type).
+static QualType chooseRecoveryType(OverloadCandidateSet &CS,
+ OverloadCandidateSet::iterator *Best) {
+ // Unset Result: no return type seen yet. Null QualType in Result:
+ // candidates disagreed, so no single recovery type exists.
+ llvm::Optional<QualType> Result;
+ // Adjust Type after seeing a candidate.
+ auto ConsiderCandidate = [&](const OverloadCandidate &Candidate) {
+ if (!Candidate.Function)
+ return;
+ QualType T = Candidate.Function->getReturnType();
+ if (T.isNull())
+ return;
+ if (!Result)
+ Result = T;
+ else if (Result != T)
+ Result = QualType();
+ };
+
+ // Look for an unambiguous type from a progressively larger subset.
+ // e.g. if types disagree, but all *viable* overloads return int, choose int.
+ //
+ // First, consider only the best candidate.
+ if (Best && *Best != CS.end())
+ ConsiderCandidate(**Best);
+ // Next, consider only viable candidates.
+ if (!Result)
+ for (const auto &C : CS)
+ if (C.Viable)
+ ConsiderCandidate(C);
+ // Finally, consider all candidates.
+ if (!Result)
+ for (const auto &C : CS)
+ ConsiderCandidate(C);
+
+ return Result.getValueOr(QualType());
+}
+
/// FinishOverloadedCallExpr - given an OverloadCandidateSet, builds and returns
/// the completed call expression. If overload resolution fails, emits
/// diagnostics and returns ExprError()
@@ -12792,8 +12935,11 @@ static ExprResult FinishOverloadedCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
}
}
- // Overload resolution failed.
- return ExprError();
+ // Overload resolution failed, try to recover.
+ SmallVector<Expr *, 8> SubExprs = {Fn};
+ SubExprs.append(Args.begin(), Args.end());
+ return SemaRef.CreateRecoveryExpr(Fn->getBeginLoc(), RParenLoc, SubExprs,
+ chooseRecoveryType(*CandidateSet, Best));
}
static void markUnaddressableCandidatesUnviable(Sema &S,
@@ -12893,8 +13039,9 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
if (Input->isTypeDependent()) {
if (Fns.empty())
- return new (Context) UnaryOperator(Input, Opc, Context.DependentTy,
- VK_RValue, OK_Ordinary, OpLoc, false);
+ return UnaryOperator::Create(Context, Input, Opc, Context.DependentTy,
+ VK_RValue, OK_Ordinary, OpLoc, false,
+ CurFPFeatureOverrides());
CXXRecordDecl *NamingClass = nullptr; // lookup ignores member operators
UnresolvedLookupExpr *Fn = UnresolvedLookupExpr::Create(
@@ -12902,7 +13049,7 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
/*ADL*/ true, IsOverloaded(Fns), Fns.begin(), Fns.end());
return CXXOperatorCallExpr::Create(Context, Op, Fn, ArgsArray,
Context.DependentTy, VK_RValue, OpLoc,
- FPOptions());
+ CurFPFeatureOverrides());
}
// Build an empty overload set.
@@ -12976,7 +13123,7 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Args[0] = Input;
CallExpr *TheCall = CXXOperatorCallExpr::Create(
Context, Op, FnExpr.get(), ArgsArray, ResultTy, VK, OpLoc,
- FPOptions(), Best->IsADLCandidate);
+ CurFPFeatureOverrides(), Best->IsADLCandidate);
if (CheckCallReturnType(FnDecl->getReturnType(), OpLoc, TheCall, FnDecl))
return ExprError();
@@ -12984,8 +13131,7 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
if (CheckFunctionCall(FnDecl, TheCall,
FnDecl->getType()->castAs<FunctionProtoType>()))
return ExprError();
-
- return MaybeBindToTemporary(TheCall);
+ return CheckForImmediateInvocation(MaybeBindToTemporary(TheCall), FnDecl);
} else {
// We matched a built-in operator. Convert the arguments, then
// break out so that we will build the appropriate built-in
@@ -13134,7 +13280,7 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
Expr *Args[2] = { LHS, RHS };
LHS=RHS=nullptr; // Please use only Args instead of LHS/RHS couple
- if (!getLangOpts().CPlusPlus2a)
+ if (!getLangOpts().CPlusPlus20)
AllowRewrittenCandidates = false;
OverloadedOperatorKind Op = BinaryOperator::getOverloadedOperator(Opc);
@@ -13146,14 +13292,13 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
// If there are no functions to store, just build a dependent
// BinaryOperator or CompoundAssignment.
if (Opc <= BO_Assign || Opc > BO_OrAssign)
- return new (Context) BinaryOperator(
- Args[0], Args[1], Opc, Context.DependentTy, VK_RValue, OK_Ordinary,
- OpLoc, FPFeatures);
-
- return new (Context) CompoundAssignOperator(
- Args[0], Args[1], Opc, Context.DependentTy, VK_LValue, OK_Ordinary,
- Context.DependentTy, Context.DependentTy, OpLoc,
- FPFeatures);
+ return BinaryOperator::Create(
+ Context, Args[0], Args[1], Opc, Context.DependentTy, VK_RValue,
+ OK_Ordinary, OpLoc, CurFPFeatureOverrides());
+ return CompoundAssignOperator::Create(
+ Context, Args[0], Args[1], Opc, Context.DependentTy, VK_LValue,
+ OK_Ordinary, OpLoc, CurFPFeatureOverrides(), Context.DependentTy,
+ Context.DependentTy);
}
// FIXME: save results of ADL from here?
@@ -13166,7 +13311,7 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
/*ADL*/ PerformADL, IsOverloaded(Fns), Fns.begin(), Fns.end());
return CXXOperatorCallExpr::Create(Context, Op, Fn, Args,
Context.DependentTy, VK_RValue, OpLoc,
- FPFeatures);
+ CurFPFeatureOverrides());
}
// Always do placeholder-like conversions on the RHS.
@@ -13210,7 +13355,7 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
// We found a built-in operator or an overloaded operator.
FunctionDecl *FnDecl = Best->Function;
- bool IsReversed = (Best->RewriteKind & CRK_Reversed);
+ bool IsReversed = Best->isReversed();
if (IsReversed)
std::swap(Args[0], Args[1]);
@@ -13227,36 +13372,56 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
// resolution for an operator@, its return type shall be cv bool
if (Best->RewriteKind && ChosenOp == OO_EqualEqual &&
!FnDecl->getReturnType()->isBooleanType()) {
- Diag(OpLoc, diag::err_ovl_rewrite_equalequal_not_bool)
+ bool IsExtension =
+ FnDecl->getReturnType()->isIntegralOrUnscopedEnumerationType();
+ Diag(OpLoc, IsExtension ? diag::ext_ovl_rewrite_equalequal_not_bool
+ : diag::err_ovl_rewrite_equalequal_not_bool)
<< FnDecl->getReturnType() << BinaryOperator::getOpcodeStr(Opc)
<< Args[0]->getSourceRange() << Args[1]->getSourceRange();
Diag(FnDecl->getLocation(), diag::note_declared_at);
- return ExprError();
+ if (!IsExtension)
+ return ExprError();
}
if (AllowRewrittenCandidates && !IsReversed &&
- CandidateSet.getRewriteInfo().shouldAddReversed(ChosenOp)) {
- // We could have reversed this operator, but didn't. Check if the
+ CandidateSet.getRewriteInfo().isReversible()) {
+ // We could have reversed this operator, but didn't. Check if some
// reversed form was a viable candidate, and if so, if it had a
// better conversion for either parameter. If so, this call is
// formally ambiguous, and allowing it is an extension.
+ llvm::SmallVector<FunctionDecl*, 4> AmbiguousWith;
for (OverloadCandidate &Cand : CandidateSet) {
- if (Cand.Viable && Cand.Function == FnDecl &&
- Cand.RewriteKind & CRK_Reversed) {
+ if (Cand.Viable && Cand.Function && Cand.isReversed() &&
+ haveSameParameterTypes(Context, Cand.Function, FnDecl, 2)) {
for (unsigned ArgIdx = 0; ArgIdx < 2; ++ArgIdx) {
if (CompareImplicitConversionSequences(
*this, OpLoc, Cand.Conversions[ArgIdx],
Best->Conversions[ArgIdx]) ==
ImplicitConversionSequence::Better) {
- Diag(OpLoc, diag::ext_ovl_ambiguous_oper_binary_reversed)
- << BinaryOperator::getOpcodeStr(Opc)
- << Args[0]->getType() << Args[1]->getType()
- << Args[0]->getSourceRange() << Args[1]->getSourceRange();
- Diag(FnDecl->getLocation(),
- diag::note_ovl_ambiguous_oper_binary_reversed_candidate);
+ AmbiguousWith.push_back(Cand.Function);
+ break;
}
}
- break;
+ }
+ }
+
+ if (!AmbiguousWith.empty()) {
+ bool AmbiguousWithSelf =
+ AmbiguousWith.size() == 1 &&
+ declaresSameEntity(AmbiguousWith.front(), FnDecl);
+ Diag(OpLoc, diag::ext_ovl_ambiguous_oper_binary_reversed)
+ << BinaryOperator::getOpcodeStr(Opc)
+ << Args[0]->getType() << Args[1]->getType() << AmbiguousWithSelf
+ << Args[0]->getSourceRange() << Args[1]->getSourceRange();
+ if (AmbiguousWithSelf) {
+ Diag(FnDecl->getLocation(),
+ diag::note_ovl_ambiguous_oper_binary_reversed_self);
+ } else {
+ Diag(FnDecl->getLocation(),
+ diag::note_ovl_ambiguous_oper_binary_selected_candidate);
+ for (auto *F : AmbiguousWith)
+ Diag(F->getLocation(),
+ diag::note_ovl_ambiguous_oper_binary_reversed_candidate);
}
}
}
@@ -13315,7 +13480,7 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create(
Context, ChosenOp, FnExpr.get(), Args, ResultTy, VK, OpLoc,
- FPFeatures, Best->IsADLCandidate);
+ CurFPFeatureOverrides(), Best->IsADLCandidate);
if (CheckCallReturnType(FnDecl->getReturnType(), OpLoc, TheCall,
FnDecl))
@@ -13341,6 +13506,10 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
if (R.isInvalid())
return ExprError();
+ R = CheckForImmediateInvocation(R, FnDecl);
+ if (R.isInvalid())
+ return ExprError();
+
// For a rewritten candidate, we've already reversed the arguments
// if needed. Perform the rest of the rewrite now.
if ((Best->RewriteKind & CRK_DifferentOperator) ||
@@ -13580,10 +13749,10 @@ ExprResult Sema::BuildSynthesizedThreeWayComparison(
// Build a PseudoObjectExpr to model the rewriting of an <=> operator, and to
// bind the OpaqueValueExprs before they're (repeatedly) used.
- Expr *SyntacticForm = new (Context)
- BinaryOperator(OrigLHS, OrigRHS, BO_Cmp, Result.get()->getType(),
- Result.get()->getValueKind(),
- Result.get()->getObjectKind(), OpLoc, FPFeatures);
+ Expr *SyntacticForm = BinaryOperator::Create(
+ Context, OrigLHS, OrigRHS, BO_Cmp, Result.get()->getType(),
+ Result.get()->getValueKind(), Result.get()->getObjectKind(), OpLoc,
+ CurFPFeatureOverrides());
Expr *SemanticForm[] = {LHS, RHS, Result.get()};
return PseudoObjectExpr::Create(Context, SyntacticForm, SemanticForm, 2);
}
@@ -13614,7 +13783,7 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
return CXXOperatorCallExpr::Create(Context, OO_Subscript, Fn, Args,
Context.DependentTy, VK_RValue, RLoc,
- FPOptions());
+ CurFPFeatureOverrides());
}
// Handle placeholders on both operands.
@@ -13687,10 +13856,9 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
ExprValueKind VK = Expr::getValueKindForType(ResultTy);
ResultTy = ResultTy.getNonLValueExprType(Context);
- CXXOperatorCallExpr *TheCall =
- CXXOperatorCallExpr::Create(Context, OO_Subscript, FnExpr.get(),
- Args, ResultTy, VK, RLoc, FPOptions());
-
+ CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create(
+ Context, OO_Subscript, FnExpr.get(), Args, ResultTy, VK, RLoc,
+ CurFPFeatureOverrides());
if (CheckCallReturnType(FnDecl->getReturnType(), LLoc, TheCall, FnDecl))
return ExprError();
@@ -14000,7 +14168,8 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
// resolution process, we still need to handle the enable_if attribute. Do
// that here, so it will not hide previous -- and more relevant -- errors.
if (auto *MemE = dyn_cast<MemberExpr>(NakedMemExpr)) {
- if (const EnableIfAttr *Attr = CheckEnableIf(Method, Args, true)) {
+ if (const EnableIfAttr *Attr =
+ CheckEnableIf(Method, LParenLoc, Args, true)) {
Diag(MemE->getMemberLoc(),
diag::err_ovl_no_viable_member_function_in_call)
<< Method << Method->getSourceRange();
@@ -14039,7 +14208,8 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
MemExpr->getMemberLoc());
}
- return MaybeBindToTemporary(TheCall);
+ return CheckForImmediateInvocation(MaybeBindToTemporary(TheCall),
+ TheCall->getMethodDecl());
}
/// BuildCallToObjectOfClassType - Build a call to an object of class
@@ -14310,9 +14480,9 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
ExprValueKind VK = Expr::getValueKindForType(ResultTy);
ResultTy = ResultTy.getNonLValueExprType(Context);
- CXXOperatorCallExpr *TheCall =
- CXXOperatorCallExpr::Create(Context, OO_Call, NewFn.get(), MethodArgs,
- ResultTy, VK, RParenLoc, FPOptions());
+ CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create(
+ Context, OO_Call, NewFn.get(), MethodArgs, ResultTy, VK, RParenLoc,
+ CurFPFeatureOverrides());
if (CheckCallReturnType(Method->getReturnType(), LParenLoc, TheCall, Method))
return true;
@@ -14320,7 +14490,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
if (CheckFunctionCall(Method, TheCall, Proto))
return true;
- return MaybeBindToTemporary(TheCall);
+ return CheckForImmediateInvocation(MaybeBindToTemporary(TheCall), Method);
}
/// BuildOverloadedArrowExpr - Build a call to an overloaded @c operator->
@@ -14428,8 +14598,9 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
QualType ResultTy = Method->getReturnType();
ExprValueKind VK = Expr::getValueKindForType(ResultTy);
ResultTy = ResultTy.getNonLValueExprType(Context);
- CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create(
- Context, OO_Arrow, FnExpr.get(), Base, ResultTy, VK, OpLoc, FPOptions());
+ CXXOperatorCallExpr *TheCall =
+ CXXOperatorCallExpr::Create(Context, OO_Arrow, FnExpr.get(), Base,
+ ResultTy, VK, OpLoc, CurFPFeatureOverrides());
if (CheckCallReturnType(Method->getReturnType(), OpLoc, TheCall, Method))
return ExprError();
@@ -14515,7 +14686,7 @@ ExprResult Sema::BuildLiteralOperatorCall(LookupResult &R,
if (CheckFunctionCall(FD, UDL, nullptr))
return ExprError();
- return MaybeBindToTemporary(UDL);
+ return CheckForImmediateInvocation(MaybeBindToTemporary(UDL), FD);
}
/// Build a call to 'begin' or 'end' for a C++11 for-range statement. If the
@@ -14676,9 +14847,9 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
if (Context.getTargetInfo().getCXXABI().isMicrosoft())
(void)isCompleteType(UnOp->getOperatorLoc(), MemPtrType);
- return new (Context) UnaryOperator(SubExpr, UO_AddrOf, MemPtrType,
- VK_RValue, OK_Ordinary,
- UnOp->getOperatorLoc(), false);
+ return UnaryOperator::Create(
+ Context, SubExpr, UO_AddrOf, MemPtrType, VK_RValue, OK_Ordinary,
+ UnOp->getOperatorLoc(), false, CurFPFeatureOverrides());
}
}
Expr *SubExpr = FixOverloadedFunctionReference(UnOp->getSubExpr(),
@@ -14686,10 +14857,10 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
if (SubExpr == UnOp->getSubExpr())
return UnOp;
- return new (Context) UnaryOperator(SubExpr, UO_AddrOf,
- Context.getPointerType(SubExpr->getType()),
- VK_RValue, OK_Ordinary,
- UnOp->getOperatorLoc(), false);
+ return UnaryOperator::Create(Context, SubExpr, UO_AddrOf,
+ Context.getPointerType(SubExpr->getType()),
+ VK_RValue, OK_Ordinary, UnOp->getOperatorLoc(),
+ false, CurFPFeatureOverrides());
}
if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
diff --git a/clang/lib/Sema/SemaPseudoObject.cpp b/clang/lib/Sema/SemaPseudoObject.cpp
index 5587e0d24c7f..d17599a6ed14 100644
--- a/clang/lib/Sema/SemaPseudoObject.cpp
+++ b/clang/lib/Sema/SemaPseudoObject.cpp
@@ -127,12 +127,10 @@ namespace {
if (UnaryOperator *uop = dyn_cast<UnaryOperator>(e)) {
assert(uop->getOpcode() == UO_Extension);
e = rebuild(uop->getSubExpr());
- return new (S.Context) UnaryOperator(e, uop->getOpcode(),
- uop->getType(),
- uop->getValueKind(),
- uop->getObjectKind(),
- uop->getOperatorLoc(),
- uop->canOverflow());
+ return UnaryOperator::Create(
+ S.Context, e, uop->getOpcode(), uop->getType(), uop->getValueKind(),
+ uop->getObjectKind(), uop->getOperatorLoc(), uop->canOverflow(),
+ S.CurFPFeatureOverrides());
}
if (GenericSelectionExpr *gse = dyn_cast<GenericSelectionExpr>(e)) {
@@ -167,16 +165,11 @@ namespace {
Expr *&rebuiltExpr = ce->isConditionTrue() ? LHS : RHS;
rebuiltExpr = rebuild(rebuiltExpr);
- return new (S.Context) ChooseExpr(ce->getBuiltinLoc(),
- ce->getCond(),
- LHS, RHS,
- rebuiltExpr->getType(),
- rebuiltExpr->getValueKind(),
- rebuiltExpr->getObjectKind(),
- ce->getRParenLoc(),
- ce->isConditionTrue(),
- rebuiltExpr->isTypeDependent(),
- rebuiltExpr->isValueDependent());
+ return new (S.Context)
+ ChooseExpr(ce->getBuiltinLoc(), ce->getCond(), LHS, RHS,
+ rebuiltExpr->getType(), rebuiltExpr->getValueKind(),
+ rebuiltExpr->getObjectKind(), ce->getRParenLoc(),
+ ce->isConditionTrue());
}
llvm_unreachable("bad expression to rebuild!");
@@ -453,11 +446,11 @@ PseudoOpBuilder::buildAssignmentOperation(Scope *Sc, SourceLocation opcLoc,
ExprResult result;
if (opcode == BO_Assign) {
result = semanticRHS;
- syntactic = new (S.Context) BinaryOperator(syntacticLHS, capturedRHS,
- opcode, capturedRHS->getType(),
- capturedRHS->getValueKind(),
- OK_Ordinary, opcLoc,
- FPOptions());
+ syntactic = BinaryOperator::Create(S.Context, syntacticLHS, capturedRHS,
+ opcode, capturedRHS->getType(),
+ capturedRHS->getValueKind(), OK_Ordinary,
+ opcLoc, S.CurFPFeatureOverrides());
+
} else {
ExprResult opLHS = buildGet();
if (opLHS.isInvalid()) return ExprError();
@@ -468,14 +461,11 @@ PseudoOpBuilder::buildAssignmentOperation(Scope *Sc, SourceLocation opcLoc,
result = S.BuildBinOp(Sc, opcLoc, nonCompound, opLHS.get(), semanticRHS);
if (result.isInvalid()) return ExprError();
- syntactic =
- new (S.Context) CompoundAssignOperator(syntacticLHS, capturedRHS, opcode,
- result.get()->getType(),
- result.get()->getValueKind(),
- OK_Ordinary,
- opLHS.get()->getType(),
- result.get()->getType(),
- opcLoc, FPOptions());
+ syntactic = CompoundAssignOperator::Create(
+ S.Context, syntacticLHS, capturedRHS, opcode, result.get()->getType(),
+ result.get()->getValueKind(), OK_Ordinary, opcLoc,
+ S.CurFPFeatureOverrides(), opLHS.get()->getType(),
+ result.get()->getType());
}
// The result of the assignment, if not void, is the value set into
@@ -536,12 +526,14 @@ PseudoOpBuilder::buildIncDecOperation(Scope *Sc, SourceLocation opcLoc,
(result.get()->isTypeDependent() || CanCaptureValue(result.get())))
setResultToLastSemantic();
- UnaryOperator *syntactic = new (S.Context) UnaryOperator(
- syntacticOp, opcode, resultType, VK_LValue, OK_Ordinary, opcLoc,
- !resultType->isDependentType()
- ? S.Context.getTypeSize(resultType) >=
- S.Context.getTypeSize(S.Context.IntTy)
- : false);
+ UnaryOperator *syntactic =
+ UnaryOperator::Create(S.Context, syntacticOp, opcode, resultType,
+ VK_LValue, OK_Ordinary, opcLoc,
+ !resultType->isDependentType()
+ ? S.Context.getTypeSize(resultType) >=
+ S.Context.getTypeSize(S.Context.IntTy)
+ : false,
+ S.CurFPFeatureOverrides());
return complete(syntactic);
}
@@ -590,7 +582,7 @@ bool ObjCPropertyOpBuilder::isWeakProperty() const {
QualType T;
if (RefExpr->isExplicitProperty()) {
const ObjCPropertyDecl *Prop = RefExpr->getExplicitProperty();
- if (Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak)
+ if (Prop->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak)
return true;
T = Prop->getType();
@@ -1561,8 +1553,9 @@ ExprResult Sema::checkPseudoObjectIncDec(Scope *Sc, SourceLocation opcLoc,
UnaryOperatorKind opcode, Expr *op) {
// Do nothing if the operand is dependent.
if (op->isTypeDependent())
- return new (Context) UnaryOperator(op, opcode, Context.DependentTy,
- VK_RValue, OK_Ordinary, opcLoc, false);
+ return UnaryOperator::Create(Context, op, opcode, Context.DependentTy,
+ VK_RValue, OK_Ordinary, opcLoc, false,
+ CurFPFeatureOverrides());
assert(UnaryOperator::isIncrementDecrementOp(opcode));
Expr *opaqueRef = op->IgnoreParens();
@@ -1591,9 +1584,9 @@ ExprResult Sema::checkPseudoObjectAssignment(Scope *S, SourceLocation opcLoc,
Expr *LHS, Expr *RHS) {
// Do nothing if either argument is dependent.
if (LHS->isTypeDependent() || RHS->isTypeDependent())
- return new (Context) BinaryOperator(LHS, RHS, opcode, Context.DependentTy,
- VK_RValue, OK_Ordinary, opcLoc,
- FPOptions());
+ return BinaryOperator::Create(Context, LHS, RHS, opcode,
+ Context.DependentTy, VK_RValue, OK_Ordinary,
+ opcLoc, CurFPFeatureOverrides());
// Filter out non-overload placeholder types in the RHS.
if (RHS->getType()->isNonOverloadPlaceholderType()) {
@@ -1646,28 +1639,30 @@ Expr *Sema::recreateSyntacticForm(PseudoObjectExpr *E) {
Expr *syntax = E->getSyntacticForm();
if (UnaryOperator *uop = dyn_cast<UnaryOperator>(syntax)) {
Expr *op = stripOpaqueValuesFromPseudoObjectRef(*this, uop->getSubExpr());
- return new (Context) UnaryOperator(
- op, uop->getOpcode(), uop->getType(), uop->getValueKind(),
- uop->getObjectKind(), uop->getOperatorLoc(), uop->canOverflow());
+ return UnaryOperator::Create(Context, op, uop->getOpcode(), uop->getType(),
+ uop->getValueKind(), uop->getObjectKind(),
+ uop->getOperatorLoc(), uop->canOverflow(),
+ CurFPFeatureOverrides());
} else if (CompoundAssignOperator *cop
= dyn_cast<CompoundAssignOperator>(syntax)) {
Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(*this, cop->getLHS());
Expr *rhs = cast<OpaqueValueExpr>(cop->getRHS())->getSourceExpr();
- return new (Context) CompoundAssignOperator(lhs, rhs, cop->getOpcode(),
- cop->getType(),
- cop->getValueKind(),
- cop->getObjectKind(),
- cop->getComputationLHSType(),
- cop->getComputationResultType(),
- cop->getOperatorLoc(),
- FPOptions());
+ return CompoundAssignOperator::Create(
+ Context, lhs, rhs, cop->getOpcode(), cop->getType(),
+ cop->getValueKind(), cop->getObjectKind(), cop->getOperatorLoc(),
+ CurFPFeatureOverrides(), cop->getComputationLHSType(),
+ cop->getComputationResultType());
+
} else if (BinaryOperator *bop = dyn_cast<BinaryOperator>(syntax)) {
Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(*this, bop->getLHS());
Expr *rhs = cast<OpaqueValueExpr>(bop->getRHS())->getSourceExpr();
- return new (Context) BinaryOperator(lhs, rhs, bop->getOpcode(),
- bop->getType(), bop->getValueKind(),
- bop->getObjectKind(),
- bop->getOperatorLoc(), FPOptions());
+ return BinaryOperator::Create(Context, lhs, rhs, bop->getOpcode(),
+ bop->getType(), bop->getValueKind(),
+ bop->getObjectKind(), bop->getOperatorLoc(),
+ CurFPFeatureOverrides());
+
+ } else if (isa<CallExpr>(syntax)) {
+ return syntax;
} else {
assert(syntax->hasPlaceholderType(BuiltinType::PseudoObject));
return stripOpaqueValuesFromPseudoObjectRef(*this, syntax);
diff --git a/clang/lib/Sema/SemaSYCL.cpp b/clang/lib/Sema/SemaSYCL.cpp
new file mode 100644
index 000000000000..db7603b42f7b
--- /dev/null
+++ b/clang/lib/Sema/SemaSYCL.cpp
@@ -0,0 +1,49 @@
+//===- SemaSYCL.cpp - Semantic Analysis for SYCL constructs ---------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This implements Semantic Analysis for SYCL constructs.
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaDiagnostic.h"
+
+using namespace clang;
+
+// -----------------------------------------------------------------------------
+// SYCL device specific diagnostics implementation
+// -----------------------------------------------------------------------------
+
+Sema::DeviceDiagBuilder Sema::SYCLDiagIfDeviceCode(SourceLocation Loc,
+ unsigned DiagID) {
+ assert(getLangOpts().SYCLIsDevice &&
+ "Should only be called during SYCL compilation");
+ FunctionDecl *FD = dyn_cast<FunctionDecl>(getCurLexicalContext());
+ DeviceDiagBuilder::Kind DiagKind = [this, FD] {
+ if (!FD)
+ return DeviceDiagBuilder::K_Nop;
+ if (getEmissionStatus(FD) == Sema::FunctionEmissionStatus::Emitted)
+ return DeviceDiagBuilder::K_ImmediateWithCallStack;
+ return DeviceDiagBuilder::K_Deferred;
+ }();
+ return DeviceDiagBuilder(DiagKind, Loc, DiagID, FD, *this);
+}
+
+bool Sema::checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee) {
+ assert(getLangOpts().SYCLIsDevice &&
+ "Should only be called during SYCL compilation");
+ assert(Callee && "Callee may not be null.");
+
+ // Errors in unevaluated context don't need to be generated,
+ // so we can safely skip them.
+ if (isUnevaluatedContext() || isConstantEvaluated())
+ return true;
+
+ DeviceDiagBuilder::Kind DiagKind = DeviceDiagBuilder::K_Nop;
+
+ return DiagKind != DeviceDiagBuilder::K_Immediate &&
+ DiagKind != DeviceDiagBuilder::K_ImmediateWithCallStack;
+}
diff --git a/clang/lib/Sema/SemaStmt.cpp b/clang/lib/Sema/SemaStmt.cpp
index d6c3af9e84c8..73f3183c163f 100644
--- a/clang/lib/Sema/SemaStmt.cpp
+++ b/clang/lib/Sema/SemaStmt.cpp
@@ -334,6 +334,11 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
}
} else if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
const Expr *Source = POE->getSyntacticForm();
+ // Handle the actually selected call of an OpenMP specialized call.
+ if (LangOpts.OpenMP && isa<CallExpr>(Source) &&
+ POE->getNumSemanticExprs() == 1 &&
+ isa<CallExpr>(POE->getSemanticExpr(0)))
+ return DiagnoseUnusedExprResult(POE->getSemanticExpr(0));
if (isa<ObjCSubscriptRefExpr>(Source))
DiagID = diag::warn_unused_container_subscript_expr;
else
@@ -365,7 +370,10 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
}
}
- if (E->isGLValue() && E->getType().isVolatileQualified()) {
+ // Tell the user to assign it into a variable to force a volatile load if this
+ // isn't an array.
+ if (E->isGLValue() && E->getType().isVolatileQualified() &&
+ !E->getType()->isArrayType()) {
Diag(Loc, diag::warn_unused_volatile) << R1 << R2;
return;
}
@@ -389,6 +397,11 @@ StmtResult Sema::ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr) {
const unsigned NumElts = Elts.size();
+ // Mark the current function as usng floating point constrained intrinsics
+ if (getCurFPFeatures().isFPConstrained())
+ if (FunctionDecl *F = dyn_cast<FunctionDecl>(CurContext))
+ F->setUsesFPIntrin(true);
+
// If we're in C89 mode, check that we don't have any decls after stmts. If
// so, emit an extension diagnostic.
if (!getLangOpts().C99 && !getLangOpts().CPlusPlus) {
@@ -464,7 +477,9 @@ Sema::ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val) {
return ER;
};
- ExprResult Converted = CorrectDelayedTyposInExpr(Val, CheckAndFinish);
+ ExprResult Converted = CorrectDelayedTyposInExpr(
+ Val, /*InitDecl=*/nullptr, /*RecoverUncorrectedTypos=*/false,
+ CheckAndFinish);
if (Converted.get() == Val.get())
Converted = CheckAndFinish(Val.get());
return Converted;
@@ -730,11 +745,11 @@ StmtResult Sema::ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
if (CondExpr && !CondExpr->isTypeDependent()) {
// We have already converted the expression to an integral or enumeration
- // type, when we parsed the switch condition. If we don't have an
- // appropriate type now, enter the switch scope but remember that it's
- // invalid.
- assert(CondExpr->getType()->isIntegralOrEnumerationType() &&
- "invalid condition type");
+ // type, when we parsed the switch condition. There are cases where we don't
+ // have an appropriate type, e.g. a typo-expr Cond was corrected to an
+ // inappropriate-type expr, we just return an error.
+ if (!CondExpr->getType()->isIntegralOrEnumerationType())
+ return StmtError();
if (CondExpr->isKnownToHaveBooleanValue()) {
// switch(bool_expr) {...} is often a programmer error, e.g.
// switch(n && mask) { ... } // Doh - should be "n & mask".
@@ -1313,8 +1328,9 @@ Sema::DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
}
}
-StmtResult Sema::ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
- Stmt *Body) {
+StmtResult Sema::ActOnWhileStmt(SourceLocation WhileLoc,
+ SourceLocation LParenLoc, ConditionResult Cond,
+ SourceLocation RParenLoc, Stmt *Body) {
if (Cond.isInvalid())
return StmtError();
@@ -1329,7 +1345,7 @@ StmtResult Sema::ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
getCurCompoundScope().setHasEmptyLoopBodies();
return WhileStmt::Create(Context, CondVal.first, CondVal.second, Body,
- WhileLoc);
+ WhileLoc, LParenLoc, RParenLoc);
}
StmtResult
@@ -1387,10 +1403,9 @@ namespace {
Simple = false;
}
- // Any Stmt not whitelisted will cause the condition to be marked complex.
- void VisitStmt(Stmt *S) {
- Simple = false;
- }
+ // Any Stmt not explicitly listed will cause the condition to be marked
+ // complex.
+ void VisitStmt(Stmt *S) { Simple = false; }
void VisitBinaryOperator(BinaryOperator *E) {
Visit(E->getLHS());
@@ -2114,18 +2129,22 @@ StmtResult Sema::ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
return StmtError();
}
+ // This function is responsible for attaching an initializer to LoopVar. We
+ // must call ActOnInitializerError if we fail to do so.
Decl *LoopVar = DS->getSingleDecl();
if (LoopVar->isInvalidDecl() || !Range ||
DiagnoseUnexpandedParameterPack(Range, UPPC_Expression)) {
- LoopVar->setInvalidDecl();
+ ActOnInitializerError(LoopVar);
return StmtError();
}
// Build the coroutine state immediately and not later during template
// instantiation
if (!CoawaitLoc.isInvalid()) {
- if (!ActOnCoroutineBodyStart(S, CoawaitLoc, "co_await"))
+ if (!ActOnCoroutineBodyStart(S, CoawaitLoc, "co_await")) {
+ ActOnInitializerError(LoopVar);
return StmtError();
+ }
}
// Build auto && __range = range-init
@@ -2137,7 +2156,7 @@ StmtResult Sema::ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
std::string("__range") + DepthStr);
if (FinishForRangeVarDecl(*this, RangeVar, Range, RangeLoc,
diag::err_for_range_deduction_failure)) {
- LoopVar->setInvalidDecl();
+ ActOnInitializerError(LoopVar);
return StmtError();
}
@@ -2146,14 +2165,20 @@ StmtResult Sema::ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
BuildDeclaratorGroup(MutableArrayRef<Decl *>((Decl **)&RangeVar, 1));
StmtResult RangeDecl = ActOnDeclStmt(RangeGroup, RangeLoc, RangeLoc);
if (RangeDecl.isInvalid()) {
- LoopVar->setInvalidDecl();
+ ActOnInitializerError(LoopVar);
return StmtError();
}
- return BuildCXXForRangeStmt(
+ StmtResult R = BuildCXXForRangeStmt(
ForLoc, CoawaitLoc, InitStmt, ColonLoc, RangeDecl.get(),
/*BeginStmt=*/nullptr, /*EndStmt=*/nullptr,
/*Cond=*/nullptr, /*Inc=*/nullptr, DS, RParenLoc, Kind);
+ if (R.isInvalid()) {
+ ActOnInitializerError(LoopVar);
+ return StmtError();
+ }
+
+ return R;
}
/// Create the initialization, compare, and increment steps for
@@ -2336,22 +2361,6 @@ static StmtResult RebuildForRangeWithDereference(Sema &SemaRef, Scope *S,
AdjustedRange.get(), RParenLoc, Sema::BFRK_Rebuild);
}
-namespace {
-/// RAII object to automatically invalidate a declaration if an error occurs.
-struct InvalidateOnErrorScope {
- InvalidateOnErrorScope(Sema &SemaRef, Decl *D, bool Enabled)
- : Trap(SemaRef.Diags), D(D), Enabled(Enabled) {}
- ~InvalidateOnErrorScope() {
- if (Enabled && Trap.hasErrorOccurred())
- D->setInvalidDecl();
- }
-
- DiagnosticErrorTrap Trap;
- Decl *D;
- bool Enabled;
-};
-}
-
/// BuildCXXForRangeStmt - Build or instantiate a C++11 for-range statement.
StmtResult Sema::BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc, Stmt *InitStmt,
@@ -2378,11 +2387,6 @@ StmtResult Sema::BuildCXXForRangeStmt(SourceLocation ForLoc,
DeclStmt *LoopVarDS = cast<DeclStmt>(LoopVarDecl);
VarDecl *LoopVar = cast<VarDecl>(LoopVarDS->getSingleDecl());
- // If we hit any errors, mark the loop variable as invalid if its type
- // contains 'auto'.
- InvalidateOnErrorScope Invalidate(*this, LoopVar,
- LoopVar->getType()->isUndeducedType());
-
StmtResult BeginDeclStmt = Begin;
StmtResult EndDeclStmt = End;
ExprResult NotEqExpr = Cond, IncrExpr = Inc;
@@ -2664,7 +2668,8 @@ StmtResult Sema::BuildCXXForRangeStmt(SourceLocation ForLoc,
// trying to determine whether this would be a valid range.
if (!LoopVar->isInvalidDecl() && Kind != BFRK_Check) {
AddInitializerToDecl(LoopVar, DerefExpr.get(), /*DirectInit=*/false);
- if (LoopVar->isInvalidDecl())
+ if (LoopVar->isInvalidDecl() ||
+ (LoopVar->getInit() && LoopVar->getInit()->containsErrors()))
NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
}
}
@@ -2741,22 +2746,24 @@ static void DiagnoseForRangeReferenceVariableCopies(Sema &SemaRef,
E = E->IgnoreImpCasts();
}
- bool ReturnsReference = false;
+ QualType ReferenceReturnType;
if (isa<UnaryOperator>(E)) {
- ReturnsReference = true;
+ ReferenceReturnType = SemaRef.Context.getLValueReferenceType(E->getType());
} else {
const CXXOperatorCallExpr *Call = cast<CXXOperatorCallExpr>(E);
const FunctionDecl *FD = Call->getDirectCallee();
QualType ReturnType = FD->getReturnType();
- ReturnsReference = ReturnType->isReferenceType();
+ if (ReturnType->isReferenceType())
+ ReferenceReturnType = ReturnType;
}
- if (ReturnsReference) {
+ if (!ReferenceReturnType.isNull()) {
// Loop variable creates a temporary. Suggest either to go with
// non-reference loop variable to indicate a copy is made, or
- // the correct time to bind a const reference.
- SemaRef.Diag(VD->getLocation(), diag::warn_for_range_const_reference_copy)
- << VD << VariableType << E->getType();
+ // the correct type to bind a const reference.
+ SemaRef.Diag(VD->getLocation(),
+ diag::warn_for_range_const_ref_binds_temp_built_from_ref)
+ << VD << VariableType << ReferenceReturnType;
QualType NonReferenceType = VariableType.getNonReferenceType();
NonReferenceType.removeLocalConst();
QualType NewReferenceType =
@@ -2769,7 +2776,7 @@ static void DiagnoseForRangeReferenceVariableCopies(Sema &SemaRef,
// Suggest removing the reference from the loop variable.
// If the type is a rvalue reference do not warn since that changes the
// semantic of the code.
- SemaRef.Diag(VD->getLocation(), diag::warn_for_range_variable_always_copy)
+ SemaRef.Diag(VD->getLocation(), diag::warn_for_range_ref_binds_ret_temp)
<< VD << RangeInitType;
QualType NonReferenceType = VariableType.getNonReferenceType();
NonReferenceType.removeLocalConst();
@@ -2821,7 +2828,7 @@ static void DiagnoseForRangeConstVariableCopies(Sema &SemaRef,
// Suggest changing from a const variable to a const reference variable
// if doing so will prevent a copy.
SemaRef.Diag(VD->getLocation(), diag::warn_for_range_copy)
- << VD << VariableType << InitExpr->getType();
+ << VD << VariableType;
SemaRef.Diag(VD->getBeginLoc(), diag::note_use_reference_type)
<< SemaRef.Context.getLValueReferenceType(VariableType)
<< VD->getSourceRange()
@@ -2838,9 +2845,13 @@ static void DiagnoseForRangeConstVariableCopies(Sema &SemaRef,
/// Suggest "const foo &x" to prevent the copy.
static void DiagnoseForRangeVariableCopies(Sema &SemaRef,
const CXXForRangeStmt *ForStmt) {
- if (SemaRef.Diags.isIgnored(diag::warn_for_range_const_reference_copy,
- ForStmt->getBeginLoc()) &&
- SemaRef.Diags.isIgnored(diag::warn_for_range_variable_always_copy,
+ if (SemaRef.inTemplateInstantiation())
+ return;
+
+ if (SemaRef.Diags.isIgnored(
+ diag::warn_for_range_const_ref_binds_temp_built_from_ref,
+ ForStmt->getBeginLoc()) &&
+ SemaRef.Diags.isIgnored(diag::warn_for_range_ref_binds_ret_temp,
ForStmt->getBeginLoc()) &&
SemaRef.Diags.isIgnored(diag::warn_for_range_copy,
ForStmt->getBeginLoc())) {
@@ -2860,6 +2871,9 @@ static void DiagnoseForRangeVariableCopies(Sema &SemaRef,
if (!InitExpr)
return;
+ if (InitExpr->getExprLoc().isMacroID())
+ return;
+
if (VariableType->isReferenceType()) {
DiagnoseForRangeReferenceVariableCopies(SemaRef, VD,
ForStmt->getRangeInit()->getType());
@@ -3286,6 +3300,7 @@ Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
assert(AT && "lost auto type from lambda return type");
if (DeduceFunctionTypeFromReturnExpr(FD, ReturnLoc, RetValExp, AT)) {
FD->setInvalidDecl();
+ // FIXME: preserve the ill-formed return expression.
return StmtError();
}
CurCap->ReturnType = FnRetType = FD->getReturnType();
@@ -3616,6 +3631,12 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
if (isa<CXXBoolLiteralExpr>(RetValExp))
Diag(ReturnLoc, diag::warn_main_returns_bool_literal)
<< RetValExp->getSourceRange();
+ if (FD->hasAttr<CmseNSEntryAttr>() && RetValExp) {
+ if (const auto *RT = dyn_cast<RecordType>(FnRetType.getCanonicalType())) {
+ if (RT->getDecl()->isOrContainsUnion())
+ Diag(RetValExp->getBeginLoc(), diag::warn_cmse_nonsecure_union) << 1;
+ }
+ }
} else if (ObjCMethodDecl *MD = getCurMethodDecl()) {
FnRetType = MD->getReturnType();
isObjCMethod = true;
diff --git a/clang/lib/Sema/SemaStmtAsm.cpp b/clang/lib/Sema/SemaStmtAsm.cpp
index 93faf2d151f9..10fa24682f9c 100644
--- a/clang/lib/Sema/SemaStmtAsm.cpp
+++ b/clang/lib/Sema/SemaStmtAsm.cpp
@@ -296,6 +296,14 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
checkExprMemoryConstraintCompat(*this, OutputExpr, Info, false))
return StmtError();
+ // Disallow _ExtInt, since the backends tend to have difficulties with
+ // non-normal sizes.
+ if (OutputExpr->getType()->isExtIntType())
+ return StmtError(
+ Diag(OutputExpr->getBeginLoc(), diag::err_asm_invalid_type)
+ << OutputExpr->getType() << 0 /*Input*/
+ << OutputExpr->getSourceRange());
+
OutputConstraintInfos.push_back(Info);
// If this is dependent, just continue.
@@ -420,6 +428,12 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
}
}
+ if (InputExpr->getType()->isExtIntType())
+ return StmtError(
+ Diag(InputExpr->getBeginLoc(), diag::err_asm_invalid_type)
+ << InputExpr->getType() << 1 /*Output*/
+ << InputExpr->getSourceRange());
+
InputConstraintInfos.push_back(Info);
const Type *Ty = Exprs[i]->getType().getTypePtr();
@@ -478,10 +492,10 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
// Look for the correct constraint index.
unsigned ConstraintIdx = Piece.getOperandNo();
+ unsigned NumOperands = NS->getNumOutputs() + NS->getNumInputs();
// Labels are the last in the Exprs list.
- if (NS->isAsmGoto() && ConstraintIdx >= NS->getNumInputs())
+ if (NS->isAsmGoto() && ConstraintIdx >= NumOperands)
continue;
- unsigned NumOperands = NS->getNumOutputs() + NS->getNumInputs();
// Look for the (ConstraintIdx - NumOperands + 1)th constraint with
// modifier '+'.
if (ConstraintIdx >= NumOperands) {
@@ -892,6 +906,15 @@ StmtResult Sema::ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
SourceLocation EndLoc) {
bool IsSimple = (NumOutputs != 0 || NumInputs != 0);
setFunctionHasBranchProtectedScope();
+
+ for (uint64_t I = 0; I < NumOutputs + NumInputs; ++I) {
+ if (Exprs[I]->getType()->isExtIntType())
+ return StmtError(
+ Diag(Exprs[I]->getBeginLoc(), diag::err_asm_invalid_type)
+ << Exprs[I]->getType() << (I < NumOutputs)
+ << Exprs[I]->getSourceRange());
+ }
+
MSAsmStmt *NS =
new (Context) MSAsmStmt(Context, AsmLoc, LBraceLoc, IsSimple,
/*IsVolatile*/ true, AsmToks, NumOutputs, NumInputs,
diff --git a/clang/lib/Sema/SemaStmtAttr.cpp b/clang/lib/Sema/SemaStmtAttr.cpp
index 3d91893b4065..e9d3c755eb23 100644
--- a/clang/lib/Sema/SemaStmtAttr.cpp
+++ b/clang/lib/Sema/SemaStmtAttr.cpp
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/AST/ASTContext.h"
#include "clang/Basic/SourceManager.h"
@@ -170,6 +171,44 @@ static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const ParsedAttr &A,
return LoopHintAttr::CreateImplicit(S.Context, Option, State, ValueExpr, A);
}
+namespace {
+class CallExprFinder : public ConstEvaluatedExprVisitor<CallExprFinder> {
+ bool FoundCallExpr = false;
+
+public:
+ typedef ConstEvaluatedExprVisitor<CallExprFinder> Inherited;
+
+ CallExprFinder(Sema &S, const Stmt *St) : Inherited(S.Context) { Visit(St); }
+
+ bool foundCallExpr() { return FoundCallExpr; }
+
+ void VisitCallExpr(const CallExpr *E) { FoundCallExpr = true; }
+
+ void Visit(const Stmt *St) {
+ if (!St)
+ return;
+ ConstEvaluatedExprVisitor<CallExprFinder>::Visit(St);
+ }
+};
+} // namespace
+
+static Attr *handleNoMergeAttr(Sema &S, Stmt *St, const ParsedAttr &A,
+ SourceRange Range) {
+ NoMergeAttr NMA(S.Context, A);
+ if (S.CheckAttrNoArgs(A))
+ return nullptr;
+
+ CallExprFinder CEF(S, St);
+
+ if (!CEF.foundCallExpr()) {
+ S.Diag(St->getBeginLoc(), diag::warn_nomerge_attribute_ignored_in_stmt)
+ << NMA.getSpelling();
+ return nullptr;
+ }
+
+ return ::new (S.Context) NoMergeAttr(S.Context, A);
+}
+
static void
CheckForIncompatibleAttributes(Sema &S,
const SmallVectorImpl<const Attr *> &Attrs) {
@@ -335,6 +374,8 @@ static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const ParsedAttr &A,
return handleOpenCLUnrollHint(S, St, A, Range);
case ParsedAttr::AT_Suppress:
return handleSuppressAttr(S, St, A, Range);
+ case ParsedAttr::AT_NoMerge:
+ return handleNoMergeAttr(S, St, A, Range);
default:
// if we're here, then we parsed a known attribute, but didn't recognize
// it as a statement attribute => it is declaration attribute
diff --git a/clang/lib/Sema/SemaTemplate.cpp b/clang/lib/Sema/SemaTemplate.cpp
index 1184446796eb..c05ed0b14e3e 100644
--- a/clang/lib/Sema/SemaTemplate.cpp
+++ b/clang/lib/Sema/SemaTemplate.cpp
@@ -46,6 +46,47 @@ clang::getTemplateParamsRange(TemplateParameterList const * const *Ps,
return SourceRange(Ps[0]->getTemplateLoc(), Ps[N-1]->getRAngleLoc());
}
+unsigned Sema::getTemplateDepth(Scope *S) const {
+ unsigned Depth = 0;
+
+ // Each template parameter scope represents one level of template parameter
+ // depth.
+ for (Scope *TempParamScope = S->getTemplateParamParent(); TempParamScope;
+ TempParamScope = TempParamScope->getParent()->getTemplateParamParent()) {
+ ++Depth;
+ }
+
+ // Note that there are template parameters with the given depth.
+ auto ParamsAtDepth = [&](unsigned D) { Depth = std::max(Depth, D + 1); };
+
+ // Look for parameters of an enclosing generic lambda. We don't create a
+ // template parameter scope for these.
+ for (FunctionScopeInfo *FSI : getFunctionScopes()) {
+ if (auto *LSI = dyn_cast<LambdaScopeInfo>(FSI)) {
+ if (!LSI->TemplateParams.empty()) {
+ ParamsAtDepth(LSI->AutoTemplateParameterDepth);
+ break;
+ }
+ if (LSI->GLTemplateParameterList) {
+ ParamsAtDepth(LSI->GLTemplateParameterList->getDepth());
+ break;
+ }
+ }
+ }
+
+ // Look for parameters of an enclosing terse function template. We don't
+ // create a template parameter scope for these either.
+ for (const InventedTemplateParameterInfo &Info :
+ getInventedParameterInfos()) {
+ if (!Info.TemplateParams.empty()) {
+ ParamsAtDepth(Info.AutoTemplateParameterDepth);
+ break;
+ }
+ }
+
+ return Depth;
+}
+
/// \brief Determine whether the declaration found is acceptable as the name
/// of a template and, if so, return that template declaration. Otherwise,
/// returns null.
@@ -132,7 +173,8 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
ParsedType ObjectTypePtr,
bool EnteringContext,
TemplateTy &TemplateResult,
- bool &MemberOfUnknownSpecialization) {
+ bool &MemberOfUnknownSpecialization,
+ bool Disambiguation) {
assert(getLangOpts().CPlusPlus && "No template names in C!");
DeclarationName TName;
@@ -162,7 +204,8 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
LookupResult R(*this, TName, Name.getBeginLoc(), LookupOrdinaryName);
if (LookupTemplateName(R, S, SS, ObjectType, EnteringContext,
MemberOfUnknownSpecialization, SourceLocation(),
- &AssumedTemplate))
+ &AssumedTemplate,
+ /*AllowTypoCorrection=*/!Disambiguation))
return TNK_Non_template;
if (AssumedTemplate != AssumedTemplateKind::None) {
@@ -328,11 +371,15 @@ bool Sema::LookupTemplateName(LookupResult &Found,
QualType ObjectType,
bool EnteringContext,
bool &MemberOfUnknownSpecialization,
- SourceLocation TemplateKWLoc,
- AssumedTemplateKind *ATK) {
+ RequiredTemplateKind RequiredTemplate,
+ AssumedTemplateKind *ATK,
+ bool AllowTypoCorrection) {
if (ATK)
*ATK = AssumedTemplateKind::None;
+ if (SS.isInvalid())
+ return true;
+
Found.setTemplateNameLookup(true);
// Determine where to perform name lookup
@@ -342,7 +389,7 @@ bool Sema::LookupTemplateName(LookupResult &Found,
if (!ObjectType.isNull()) {
// This nested-name-specifier occurs in a member access expression, e.g.,
// x->B::f, and we are looking into the type of the object.
- assert(!SS.isSet() && "ObjectType and scope specifier cannot coexist");
+ assert(SS.isEmpty() && "ObjectType and scope specifier cannot coexist");
LookupCtx = computeDeclContext(ObjectType);
IsDependent = !LookupCtx && ObjectType->isDependentType();
assert((IsDependent || !ObjectType->isIncompleteType() ||
@@ -368,11 +415,11 @@ bool Sema::LookupTemplateName(LookupResult &Found,
Found.clear();
return false;
}
- } else if (SS.isSet()) {
+ } else if (SS.isNotEmpty()) {
// This nested-name-specifier occurs after another nested-name-specifier,
// so long into the context associated with the prior nested-name-specifier.
LookupCtx = computeDeclContext(SS, EnteringContext);
- IsDependent = !LookupCtx;
+ IsDependent = !LookupCtx && isDependentScopeSpecifier(SS);
// The declaration context must be complete.
if (LookupCtx && RequireCompleteDeclContext(SS, LookupCtx))
@@ -399,7 +446,7 @@ bool Sema::LookupTemplateName(LookupResult &Found,
IsDependent |= Found.wasNotFoundInCurrentInstantiation();
}
- if (!SS.isSet() && (ObjectType.isNull() || Found.empty())) {
+ if (SS.isEmpty() && (ObjectType.isNull() || Found.empty())) {
// C++ [basic.lookup.classref]p1:
// In a class member access expression (5.2.5), if the . or -> token is
// immediately followed by an identifier followed by a <, the
@@ -426,7 +473,8 @@ bool Sema::LookupTemplateName(LookupResult &Found,
if (Found.isAmbiguous())
return false;
- if (ATK && !SS.isSet() && ObjectType.isNull() && TemplateKWLoc.isInvalid()) {
+ if (ATK && SS.isEmpty() && ObjectType.isNull() &&
+ !RequiredTemplate.hasTemplateKeyword()) {
// C++2a [temp.names]p2:
// A name is also considered to refer to a template if it is an
// unqualified-id followed by a < and name lookup finds either one or more
@@ -436,7 +484,7 @@ bool Sema::LookupTemplateName(LookupResult &Found,
// all language modes, and diagnose the empty lookup in ActOnCallExpr if we
// successfully form a call to an undeclared template-id.
bool AllFunctions =
- getLangOpts().CPlusPlus2a &&
+ getLangOpts().CPlusPlus20 &&
std::all_of(Found.begin(), Found.end(), [](NamedDecl *ND) {
return isa<FunctionDecl>(ND->getUnderlyingDecl());
});
@@ -452,8 +500,9 @@ bool Sema::LookupTemplateName(LookupResult &Found,
}
}
- if (Found.empty() && !IsDependent) {
- // If we did not find any names, attempt to correct any typos.
+ if (Found.empty() && !IsDependent && AllowTypoCorrection) {
+ // If we did not find any names, and this is not a disambiguation, attempt
+ // to correct any typos.
DeclarationName Name = Found.getLookupName();
Found.clear();
// Simple filter callback that, for keywords, only accepts the C++ *_cast
@@ -497,9 +546,11 @@ bool Sema::LookupTemplateName(LookupResult &Found,
// If a 'template' keyword was used, a lookup that finds only non-template
// names is an error.
- if (ExampleLookupResult && TemplateKWLoc.isValid()) {
+ if (ExampleLookupResult && RequiredTemplate) {
Diag(Found.getNameLoc(), diag::err_template_kw_refers_to_non_template)
- << Found.getLookupName() << SS.getRange();
+ << Found.getLookupName() << SS.getRange()
+ << RequiredTemplate.hasTemplateKeyword()
+ << RequiredTemplate.getTemplateKeywordLoc();
Diag(ExampleLookupResult->getUnderlyingDecl()->getLocation(),
diag::note_template_kw_refers_to_non_template)
<< Found.getLookupName();
@@ -1050,7 +1101,8 @@ makeTemplateArgumentListInfo(Sema &S, TemplateIdAnnotation &TemplateId) {
return TemplateArgs;
}
-bool Sema::ActOnTypeConstraint(TemplateIdAnnotation *TypeConstr,
+bool Sema::ActOnTypeConstraint(const CXXScopeSpec &SS,
+ TemplateIdAnnotation *TypeConstr,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc) {
ConceptDecl *CD =
@@ -1080,14 +1132,57 @@ bool Sema::ActOnTypeConstraint(TemplateIdAnnotation *TypeConstr,
makeTemplateArgumentListInfo(*this, *TypeConstr);
}
return AttachTypeConstraint(
- TypeConstr->SS.isSet() ? TypeConstr->SS.getWithLocInContext(Context) :
- NestedNameSpecifierLoc(),
+ SS.isSet() ? SS.getWithLocInContext(Context) : NestedNameSpecifierLoc(),
DeclarationNameInfo(DeclarationName(TypeConstr->Name),
TypeConstr->TemplateNameLoc), CD,
TypeConstr->LAngleLoc.isValid() ? &TemplateArgs : nullptr,
ConstrainedParameter, EllipsisLoc);
}
+/// Build the immediately-declared constraint expression for a constrained
+/// template parameter: a concept-id naming \p NamedConcept whose first
+/// template argument is \p ConstrainedType, followed by whatever additional
+/// arguments the \p Appender callback supplies. If \p EllipsisLoc is valid
+/// (the constrained parameter is a pack), the concept-id is wrapped in a
+/// (E' && ...) fold-expression per C++2a [temp.param]p4.
+template<typename ArgumentLocAppender>
+static ExprResult formImmediatelyDeclaredConstraint(
+ Sema &S, NestedNameSpecifierLoc NS, DeclarationNameInfo NameInfo,
+ ConceptDecl *NamedConcept, SourceLocation LAngleLoc,
+ SourceLocation RAngleLoc, QualType ConstrainedType,
+ SourceLocation ParamNameLoc, ArgumentLocAppender Appender,
+ SourceLocation EllipsisLoc) {
+
+ // The constrained entity itself is always the first template argument of
+ // the concept-id.
+ TemplateArgumentListInfo ConstraintArgs;
+ ConstraintArgs.addArgument(
+ S.getTrivialTemplateArgumentLoc(TemplateArgument(ConstrainedType),
+ /*NTTPType=*/QualType(), ParamNameLoc));
+
+ ConstraintArgs.setRAngleLoc(RAngleLoc);
+ ConstraintArgs.setLAngleLoc(LAngleLoc);
+ // Let the caller append any explicitly-written arguments after the
+ // constrained type.
+ Appender(ConstraintArgs);
+
+ // C++2a [temp.param]p4:
+ // [...] This constraint-expression E is called the immediately-declared
+ // constraint of T. [...]
+ CXXScopeSpec SS;
+ SS.Adopt(NS);
+ ExprResult ImmediatelyDeclaredConstraint = S.CheckConceptTemplateId(
+ SS, /*TemplateKWLoc=*/SourceLocation(), NameInfo,
+ /*FoundDecl=*/NamedConcept, NamedConcept, &ConstraintArgs);
+ if (ImmediatelyDeclaredConstraint.isInvalid() || !EllipsisLoc.isValid())
+ return ImmediatelyDeclaredConstraint;
+
+ // C++2a [temp.param]p4:
+ // [...] If T is not a pack, then E is E', otherwise E is (E' && ...).
+ //
+ // We have the following case:
+ //
+ // template<typename T> concept C1 = true;
+ // template<C1... T> struct s1;
+ //
+ // The constraint: (C1<T> && ...)
+ return S.BuildCXXFoldExpr(/*LParenLoc=*/SourceLocation(),
+ ImmediatelyDeclaredConstraint.get(), BO_LAnd,
+ EllipsisLoc, /*RHS=*/nullptr,
+ /*RParenLoc=*/SourceLocation(),
+ /*NumExpansions=*/None);
+}
+
/// Attach a type-constraint to a template parameter.
/// \returns true if an error occured. This can happen if the
/// immediately-declared constraint could not be formed (e.g. incorrect number
@@ -1106,51 +1201,21 @@ bool Sema::AttachTypeConstraint(NestedNameSpecifierLoc NS,
*TemplateArgs) : nullptr;
QualType ParamAsArgument(ConstrainedParameter->getTypeForDecl(), 0);
- TemplateArgumentListInfo ConstraintArgs;
- ConstraintArgs.addArgument(
- TemplateArgumentLoc(
- TemplateArgument(ParamAsArgument),
- TemplateArgumentLocInfo(
- Context.getTrivialTypeSourceInfo(ParamAsArgument,
- ConstrainedParameter->getLocation()))));
- if (TemplateArgs) {
- ConstraintArgs.setRAngleLoc(TemplateArgs->getRAngleLoc());
- ConstraintArgs.setLAngleLoc(TemplateArgs->getLAngleLoc());
- for (const TemplateArgumentLoc &ArgLoc : TemplateArgs->arguments())
- ConstraintArgs.addArgument(ArgLoc);
- }
- // C++2a [temp.param]p4:
- // [...] This constraint-expression E is called the immediately-declared
- // constraint of T. [...]
- CXXScopeSpec SS;
- SS.Adopt(NS);
- ExprResult ImmediatelyDeclaredConstraint = CheckConceptTemplateId(SS,
- /*TemplateKWLoc=*/SourceLocation(), NameInfo, /*FoundDecl=*/NamedConcept,
- NamedConcept, &ConstraintArgs);
+ ExprResult ImmediatelyDeclaredConstraint =
+ formImmediatelyDeclaredConstraint(
+ *this, NS, NameInfo, NamedConcept,
+ TemplateArgs ? TemplateArgs->getLAngleLoc() : SourceLocation(),
+ TemplateArgs ? TemplateArgs->getRAngleLoc() : SourceLocation(),
+ ParamAsArgument, ConstrainedParameter->getLocation(),
+ [&] (TemplateArgumentListInfo &ConstraintArgs) {
+ if (TemplateArgs)
+ for (const auto &ArgLoc : TemplateArgs->arguments())
+ ConstraintArgs.addArgument(ArgLoc);
+ }, EllipsisLoc);
if (ImmediatelyDeclaredConstraint.isInvalid())
return true;
- if (ConstrainedParameter->isParameterPack()) {
- // C++2a [temp.param]p4:
- // [...] If T is not a pack, then E is E', otherwise E is (E' && ...).
- //
- // We have the following case:
- //
- // template<typename T> concept C1 = true;
- // template<C1... T> struct s1;
- //
- // The constraint: (C1<T> && ...)
- ImmediatelyDeclaredConstraint =
- BuildCXXFoldExpr(/*LParenLoc=*/SourceLocation(),
- ImmediatelyDeclaredConstraint.get(), BO_LAnd,
- EllipsisLoc, /*RHS=*/nullptr,
- /*RParenLoc=*/SourceLocation(),
- /*NumExpansions=*/None).get();
- if (ImmediatelyDeclaredConstraint.isInvalid())
- return true;
- }
-
ConstrainedParameter->setTypeConstraint(NS, NameInfo,
/*FoundDecl=*/NamedConcept,
NamedConcept, ArgsAsWritten,
@@ -1158,6 +1223,38 @@ bool Sema::AttachTypeConstraint(NestedNameSpecifierLoc NS,
return false;
}
+/// Attach the type-constraint from a constrained-auto placeholder type to a
+/// non-type template parameter (e.g. 'template<C auto N>').
+/// \returns true on error: either the placeholder form is unsupported (only
+/// plain 'auto' is accepted, not 'decltype(auto)' or a mismatched type), or
+/// the immediately-declared constraint could not be formed.
+bool Sema::AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *NTTP,
+ SourceLocation EllipsisLoc) {
+ if (NTTP->getType() != TL.getType() ||
+ TL.getAutoKeyword() != AutoTypeKeyword::Auto) {
+ Diag(NTTP->getTypeSourceInfo()->getTypeLoc().getBeginLoc(),
+ diag::err_unsupported_placeholder_constraint)
+ << NTTP->getTypeSourceInfo()->getTypeLoc().getSourceRange();
+ return true;
+ }
+ // FIXME: Concepts: This should be the type of the placeholder, but this is
+ // unclear in the wording right now.
+ DeclRefExpr *Ref = BuildDeclRefExpr(NTTP, NTTP->getType(), VK_RValue,
+ NTTP->getLocation());
+ if (!Ref)
+ return true;
+ // The constrained type is decltype(NTTP); the concept's written arguments
+ // (if any) come from the AutoTypeLoc and are appended after it.
+ ExprResult ImmediatelyDeclaredConstraint =
+ formImmediatelyDeclaredConstraint(
+ *this, TL.getNestedNameSpecifierLoc(), TL.getConceptNameInfo(),
+ TL.getNamedConcept(), TL.getLAngleLoc(), TL.getRAngleLoc(),
+ BuildDecltypeType(Ref, NTTP->getLocation()), NTTP->getLocation(),
+ [&] (TemplateArgumentListInfo &ConstraintArgs) {
+ for (unsigned I = 0, C = TL.getNumArgs(); I != C; ++I)
+ ConstraintArgs.addArgument(TL.getArgLoc(I));
+ }, EllipsisLoc);
+ if (ImmediatelyDeclaredConstraint.isInvalid() ||
+ !ImmediatelyDeclaredConstraint.isUsable())
+ return true;
+
+ NTTP->setPlaceholderTypeConstraint(ImmediatelyDeclaredConstraint.get());
+ return false;
+}
+
/// Check that the type of a non-type template parameter is
/// well-formed.
///
@@ -1242,11 +1339,11 @@ NamedDecl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
// Check that we have valid decl-specifiers specified.
auto CheckValidDeclSpecifiers = [this, &D] {
// C++ [temp.param]
- // p1
+ // p1
// template-parameter:
// ...
// parameter-declaration
- // p2
+ // p2
// ... A storage class shall not be specified in a template-parameter
// declaration.
// [dcl.typedef]p1:
@@ -1319,6 +1416,11 @@ NamedDecl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
TInfo);
Param->setAccess(AS_public);
+ if (AutoTypeLoc TL = TInfo->getTypeLoc().getContainedAutoTypeLoc())
+ if (TL.isConstrained())
+ if (AttachTypeConstraint(TL, Param, D.getEllipsisLoc()))
+ Invalid = true;
+
if (Invalid)
Param->setInvalidDecl();
@@ -1844,16 +1946,46 @@ namespace {
/// constructor to a deduction guide.
class ExtractTypeForDeductionGuide
: public TreeTransform<ExtractTypeForDeductionGuide> {
+ llvm::SmallVectorImpl<TypedefNameDecl *> &MaterializedTypedefs;
+
public:
typedef TreeTransform<ExtractTypeForDeductionGuide> Base;
- ExtractTypeForDeductionGuide(Sema &SemaRef) : Base(SemaRef) {}
+ ExtractTypeForDeductionGuide(
+ Sema &SemaRef,
+ llvm::SmallVectorImpl<TypedefNameDecl *> &MaterializedTypedefs)
+ : Base(SemaRef), MaterializedTypedefs(MaterializedTypedefs) {}
TypeSourceInfo *transform(TypeSourceInfo *TSI) { return TransformType(TSI); }
QualType TransformTypedefType(TypeLocBuilder &TLB, TypedefTypeLoc TL) {
- return TransformType(
- TLB,
- TL.getTypedefNameDecl()->getTypeSourceInfo()->getTypeLoc());
+ ASTContext &Context = SemaRef.getASTContext();
+ TypedefNameDecl *OrigDecl = TL.getTypedefNameDecl();
+ TypeLocBuilder InnerTLB;
+ QualType Transformed =
+ TransformType(InnerTLB, OrigDecl->getTypeSourceInfo()->getTypeLoc());
+ TypeSourceInfo *TSI =
+ TransformType(InnerTLB.getTypeSourceInfo(Context, Transformed));
+
+ TypedefNameDecl *Decl = nullptr;
+
+ if (isa<TypeAliasDecl>(OrigDecl))
+ Decl = TypeAliasDecl::Create(
+ Context, Context.getTranslationUnitDecl(), OrigDecl->getBeginLoc(),
+ OrigDecl->getLocation(), OrigDecl->getIdentifier(), TSI);
+ else {
+ assert(isa<TypedefDecl>(OrigDecl) && "Not a Type alias or typedef");
+ Decl = TypedefDecl::Create(
+ Context, Context.getTranslationUnitDecl(), OrigDecl->getBeginLoc(),
+ OrigDecl->getLocation(), OrigDecl->getIdentifier(), TSI);
+ }
+
+ MaterializedTypedefs.push_back(Decl);
+
+ QualType TDTy = Context.getTypedefType(Decl);
+ TypedefTypeLoc TypedefTL = TLB.push<TypedefTypeLoc>(TDTy);
+ TypedefTL.setNameLoc(TL.getNameLoc());
+
+ return TDTy;
}
};
@@ -1905,6 +2037,7 @@ struct ConvertConstructorToDeductionGuideTransform {
// a list of substituted template arguments as we go.
for (NamedDecl *Param : *InnerParams) {
MultiLevelTemplateArgumentList Args;
+ Args.setKind(TemplateSubstitutionKind::Rewrite);
Args.addOuterTemplateArguments(SubstArgs);
Args.addOuterRetainedLevel();
NamedDecl *NewParam = transformTemplateParameter(Param, Args);
@@ -1924,6 +2057,7 @@ struct ConvertConstructorToDeductionGuideTransform {
// substitute references to the old parameters into references to the
// new ones.
MultiLevelTemplateArgumentList Args;
+ Args.setKind(TemplateSubstitutionKind::Rewrite);
if (FTD) {
Args.addOuterTemplateArguments(SubstArgs);
Args.addOuterRetainedLevel();
@@ -1938,14 +2072,16 @@ struct ConvertConstructorToDeductionGuideTransform {
// new ones.
TypeLocBuilder TLB;
SmallVector<ParmVarDecl*, 8> Params;
- QualType NewType = transformFunctionProtoType(TLB, FPTL, Params, Args);
+ SmallVector<TypedefNameDecl *, 4> MaterializedTypedefs;
+ QualType NewType = transformFunctionProtoType(TLB, FPTL, Params, Args,
+ MaterializedTypedefs);
if (NewType.isNull())
return nullptr;
TypeSourceInfo *NewTInfo = TLB.getTypeSourceInfo(SemaRef.Context, NewType);
return buildDeductionGuide(TemplateParams, CD->getExplicitSpecifier(),
NewTInfo, CD->getBeginLoc(), CD->getLocation(),
- CD->getEndLoc());
+ CD->getEndLoc(), MaterializedTypedefs);
}
/// Build a deduction guide with the specified parameter types.
@@ -1996,12 +2132,14 @@ private:
if (const auto *TC = TTP->getTypeConstraint()) {
TemplateArgumentListInfo TransformedArgs;
const auto *ArgsAsWritten = TC->getTemplateArgsAsWritten();
- if (SemaRef.Subst(ArgsAsWritten->getTemplateArgs(),
+ if (!ArgsAsWritten ||
+ SemaRef.Subst(ArgsAsWritten->getTemplateArgs(),
ArgsAsWritten->NumTemplateArgs, TransformedArgs,
Args))
SemaRef.AttachTypeConstraint(
TC->getNestedNameSpecifierLoc(), TC->getConceptNameInfo(),
- TC->getNamedConcept(), &TransformedArgs, NewTTP,
+ TC->getNamedConcept(), ArgsAsWritten ? &TransformedArgs : nullptr,
+ NewTTP,
NewTTP->isParameterPack()
? cast<CXXFoldExpr>(TC->getImmediatelyDeclaredConstraint())
->getEllipsisLoc()
@@ -2038,16 +2176,18 @@ private:
return NewParam;
}
- QualType transformFunctionProtoType(TypeLocBuilder &TLB,
- FunctionProtoTypeLoc TL,
- SmallVectorImpl<ParmVarDecl*> &Params,
- MultiLevelTemplateArgumentList &Args) {
+ QualType transformFunctionProtoType(
+ TypeLocBuilder &TLB, FunctionProtoTypeLoc TL,
+ SmallVectorImpl<ParmVarDecl *> &Params,
+ MultiLevelTemplateArgumentList &Args,
+ SmallVectorImpl<TypedefNameDecl *> &MaterializedTypedefs) {
SmallVector<QualType, 4> ParamTypes;
const FunctionProtoType *T = TL.getTypePtr();
// -- The types of the function parameters are those of the constructor.
for (auto *OldParam : TL.getParams()) {
- ParmVarDecl *NewParam = transformFunctionTypeParam(OldParam, Args);
+ ParmVarDecl *NewParam =
+ transformFunctionTypeParam(OldParam, Args, MaterializedTypedefs);
if (!NewParam)
return QualType();
ParamTypes.push_back(NewParam->getType());
@@ -2089,9 +2229,9 @@ private:
return Result;
}
- ParmVarDecl *
- transformFunctionTypeParam(ParmVarDecl *OldParam,
- MultiLevelTemplateArgumentList &Args) {
+ ParmVarDecl *transformFunctionTypeParam(
+ ParmVarDecl *OldParam, MultiLevelTemplateArgumentList &Args,
+ llvm::SmallVectorImpl<TypedefNameDecl *> &MaterializedTypedefs) {
TypeSourceInfo *OldDI = OldParam->getTypeSourceInfo();
TypeSourceInfo *NewDI;
if (auto PackTL = OldDI->getTypeLoc().getAs<PackExpansionTypeLoc>()) {
@@ -2114,15 +2254,22 @@ private:
// members of the current instantiations with the definitions of those
// typedefs, avoiding triggering instantiation of the deduced type during
// deduction.
- NewDI = ExtractTypeForDeductionGuide(SemaRef).transform(NewDI);
+ NewDI = ExtractTypeForDeductionGuide(SemaRef, MaterializedTypedefs)
+ .transform(NewDI);
// Resolving a wording defect, we also inherit default arguments from the
// constructor.
ExprResult NewDefArg;
if (OldParam->hasDefaultArg()) {
- NewDefArg = SemaRef.SubstExpr(OldParam->getDefaultArg(), Args);
- if (NewDefArg.isInvalid())
- return nullptr;
+ // We don't care what the value is (we won't use it); just create a
+ // placeholder to indicate there is a default argument.
+ QualType ParamTy = NewDI->getType();
+ NewDefArg = new (SemaRef.Context)
+ OpaqueValueExpr(OldParam->getDefaultArg()->getBeginLoc(),
+ ParamTy.getNonLValueExprType(SemaRef.Context),
+ ParamTy->isLValueReferenceType() ? VK_LValue :
+ ParamTy->isRValueReferenceType() ? VK_XValue :
+ VK_RValue);
}
ParmVarDecl *NewParam = ParmVarDecl::Create(SemaRef.Context, DC,
@@ -2139,10 +2286,11 @@ private:
return NewParam;
}
- NamedDecl *buildDeductionGuide(TemplateParameterList *TemplateParams,
- ExplicitSpecifier ES, TypeSourceInfo *TInfo,
- SourceLocation LocStart, SourceLocation Loc,
- SourceLocation LocEnd) {
+ FunctionTemplateDecl *buildDeductionGuide(
+ TemplateParameterList *TemplateParams, ExplicitSpecifier ES,
+ TypeSourceInfo *TInfo, SourceLocation LocStart, SourceLocation Loc,
+ SourceLocation LocEnd,
+ llvm::ArrayRef<TypedefNameDecl *> MaterializedTypedefs = {}) {
DeclarationNameInfo Name(DeductionGuideName, Loc);
ArrayRef<ParmVarDecl *> Params =
TInfo->getTypeLoc().castAs<FunctionProtoTypeLoc>().getParams();
@@ -2156,6 +2304,8 @@ private:
for (auto *Param : Params)
Param->setDeclContext(Guide);
+ for (auto *TD : MaterializedTypedefs)
+ TD->setDeclContext(Guide);
auto *GuideTemplate = FunctionTemplateDecl::Create(
SemaRef.Context, DC, Loc, DeductionGuideName, TemplateParams, Guide);
@@ -2762,7 +2912,7 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS,
TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend,
- bool &IsMemberSpecialization, bool &Invalid) {
+ bool &IsMemberSpecialization, bool &Invalid, bool SuppressDiagnostic) {
IsMemberSpecialization = false;
Invalid = false;
@@ -2870,8 +3020,9 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
auto CheckExplicitSpecialization = [&](SourceRange Range, bool Recovery) {
if (SawNonEmptyTemplateParameterList) {
- Diag(DeclLoc, diag::err_specialize_member_of_template)
- << !Recovery << Range;
+ if (!SuppressDiagnostic)
+ Diag(DeclLoc, diag::err_specialize_member_of_template)
+ << !Recovery << Range;
Invalid = true;
IsMemberSpecialization = false;
return true;
@@ -2892,9 +3043,10 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
else
ExpectedTemplateLoc = DeclStartLoc;
- Diag(DeclLoc, diag::err_template_spec_needs_header)
- << Range
- << FixItHint::CreateInsertion(ExpectedTemplateLoc, "template<> ");
+ if (!SuppressDiagnostic)
+ Diag(DeclLoc, diag::err_template_spec_needs_header)
+ << Range
+ << FixItHint::CreateInsertion(ExpectedTemplateLoc, "template<> ");
return false;
};
@@ -2984,12 +3136,13 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
if (ParamIdx < ParamLists.size()) {
if (ParamLists[ParamIdx]->size() > 0) {
// The header has template parameters when it shouldn't. Complain.
- Diag(ParamLists[ParamIdx]->getTemplateLoc(),
- diag::err_template_param_list_matches_nontemplate)
- << T
- << SourceRange(ParamLists[ParamIdx]->getLAngleLoc(),
- ParamLists[ParamIdx]->getRAngleLoc())
- << getRangeOfTypeInNestedNameSpecifier(Context, T, SS);
+ if (!SuppressDiagnostic)
+ Diag(ParamLists[ParamIdx]->getTemplateLoc(),
+ diag::err_template_param_list_matches_nontemplate)
+ << T
+ << SourceRange(ParamLists[ParamIdx]->getLAngleLoc(),
+ ParamLists[ParamIdx]->getRAngleLoc())
+ << getRangeOfTypeInNestedNameSpecifier(Context, T, SS);
Invalid = true;
return nullptr;
}
@@ -3025,7 +3178,7 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
if (ExpectedTemplateParams &&
!TemplateParameterListsAreEqual(ParamLists[ParamIdx],
ExpectedTemplateParams,
- true, TPL_TemplateMatch))
+ !SuppressDiagnostic, TPL_TemplateMatch))
Invalid = true;
if (!Invalid &&
@@ -3037,9 +3190,10 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
continue;
}
- Diag(DeclLoc, diag::err_template_spec_needs_template_parameters)
- << T
- << getRangeOfTypeInNestedNameSpecifier(Context, T, SS);
+ if (!SuppressDiagnostic)
+ Diag(DeclLoc, diag::err_template_spec_needs_template_parameters)
+ << T
+ << getRangeOfTypeInNestedNameSpecifier(Context, T, SS);
Invalid = true;
continue;
}
@@ -3075,16 +3229,18 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
AllExplicitSpecHeaders = false;
}
- Diag(ParamLists[ParamIdx]->getTemplateLoc(),
- AllExplicitSpecHeaders ? diag::warn_template_spec_extra_headers
- : diag::err_template_spec_extra_headers)
- << SourceRange(ParamLists[ParamIdx]->getTemplateLoc(),
- ParamLists[ParamLists.size() - 2]->getRAngleLoc());
+ if (!SuppressDiagnostic)
+ Diag(ParamLists[ParamIdx]->getTemplateLoc(),
+ AllExplicitSpecHeaders ? diag::warn_template_spec_extra_headers
+ : diag::err_template_spec_extra_headers)
+ << SourceRange(ParamLists[ParamIdx]->getTemplateLoc(),
+ ParamLists[ParamLists.size() - 2]->getRAngleLoc());
// If there was a specialization somewhere, such that 'template<>' is
// not required, and there were any 'template<>' headers, note where the
// specialization occurred.
- if (ExplicitSpecLoc.isValid() && HasAnyExplicitSpecHeader)
+ if (ExplicitSpecLoc.isValid() && HasAnyExplicitSpecHeader &&
+ !SuppressDiagnostic)
Diag(ExplicitSpecLoc,
diag::note_explicit_template_spec_does_not_need_header)
<< NestedTypes.back();
@@ -3360,6 +3516,10 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
DTN->getIdentifier(),
TemplateArgs);
+ if (Name.getAsAssumedTemplateName() &&
+ resolveAssumedTemplateNameAsType(/*Scope*/nullptr, Name, TemplateLoc))
+ return QualType();
+
TemplateDecl *Template = Name.getAsTemplateDecl();
if (!Template || isa<FunctionTemplateDecl>(Template) ||
isa<VarTemplateDecl>(Template) || isa<ConceptDecl>(Template)) {
@@ -3399,9 +3559,8 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
// Only substitute for the innermost template argument list.
MultiLevelTemplateArgumentList TemplateArgLists;
TemplateArgLists.addOuterTemplateArguments(&StackTemplateArgs);
- unsigned Depth = AliasTemplate->getTemplateParameters()->getDepth();
- for (unsigned I = 0; I < Depth; ++I)
- TemplateArgLists.addOuterTemplateArguments(None);
+ TemplateArgLists.addOuterRetainedLevels(
+ AliasTemplate->getTemplateParameters()->getDepth());
LocalInstantiationScope Scope(*this);
InstantiatingTemplate Inst(*this, TemplateLoc, Template);
@@ -3707,6 +3866,9 @@ TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc) {
+ if (SS.isInvalid())
+ return TypeResult(true);
+
TemplateName Template = TemplateD.get();
// Translate the parser's template argument list in our AST format.
@@ -4044,7 +4206,7 @@ DeclResult Sema::ActOnVarTemplateSpecialization(
if (isSameAsPrimaryTemplate(VarTemplate->getTemplateParameters(),
Converted) &&
- (!Context.getLangOpts().ConceptsTS ||
+ (!Context.getLangOpts().CPlusPlus20 ||
!TemplateParams->hasAssociatedConstraints())) {
// C++ [temp.class.spec]p9b3:
//
@@ -4510,21 +4672,28 @@ Sema::BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
return BuildTemplateIdExpr(SS, TemplateKWLoc, R, /*ADL*/ false, TemplateArgs);
}
-/// Form a dependent template name.
+/// Form a template name from a name that is syntactically required to name a
+/// template, either due to use of the 'template' keyword or because a name in
+/// this syntactic context is assumed to name a template (C++ [temp.names]p2-4).
+///
+/// This action forms a template name given the name of the template and its
+/// optional scope specifier. This is used when the 'template' keyword is used
+/// or when the parsing context unambiguously treats a following '<' as
+/// introducing a template argument list. Note that this may produce a
+/// non-dependent template name if we can perform the lookup now and identify
+/// the named template.
///
-/// This action forms a dependent template name given the template
-/// name and its (presumably dependent) scope specifier. For
-/// example, given "MetaFun::template apply", the scope specifier \p
-/// SS will be "MetaFun::", \p TemplateKWLoc contains the location
+/// For example, given "x.MetaFun::template apply", the scope specifier
+/// \p SS will be "MetaFun::", \p TemplateKWLoc contains the location
/// of the "template" keyword, and "apply" is the \p Name.
-TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
- CXXScopeSpec &SS,
- SourceLocation TemplateKWLoc,
- const UnqualifiedId &Name,
- ParsedType ObjectType,
- bool EnteringContext,
- TemplateTy &Result,
- bool AllowInjectedClassName) {
+TemplateNameKind Sema::ActOnTemplateName(Scope *S,
+ CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ const UnqualifiedId &Name,
+ ParsedType ObjectType,
+ bool EnteringContext,
+ TemplateTy &Result,
+ bool AllowInjectedClassName) {
if (TemplateKWLoc.isValid() && S && !S->getTemplateParamParent())
Diag(TemplateKWLoc,
getLangOpts().CPlusPlus11 ?
@@ -4532,95 +4701,115 @@ TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
diag::ext_template_outside_of_template)
<< FixItHint::CreateRemoval(TemplateKWLoc);
+ if (SS.isInvalid())
+ return TNK_Non_template;
+
+ // Figure out where isTemplateName is going to look.
DeclContext *LookupCtx = nullptr;
- if (SS.isSet())
+ if (SS.isNotEmpty())
LookupCtx = computeDeclContext(SS, EnteringContext);
- if (!LookupCtx && ObjectType)
- LookupCtx = computeDeclContext(ObjectType.get());
- if (LookupCtx) {
- // C++0x [temp.names]p5:
- // If a name prefixed by the keyword template is not the name of
- // a template, the program is ill-formed. [Note: the keyword
- // template may not be applied to non-template members of class
- // templates. -end note ] [ Note: as is the case with the
- // typename prefix, the template prefix is allowed in cases
- // where it is not strictly necessary; i.e., when the
- // nested-name-specifier or the expression on the left of the ->
- // or . is not dependent on a template-parameter, or the use
- // does not appear in the scope of a template. -end note]
- //
- // Note: C++03 was more strict here, because it banned the use of
- // the "template" keyword prior to a template-name that was not a
- // dependent name. C++ DR468 relaxed this requirement (the
- // "template" keyword is now permitted). We follow the C++0x
- // rules, even in C++03 mode with a warning, retroactively applying the DR.
- bool MemberOfUnknownSpecialization;
- TemplateNameKind TNK = isTemplateName(S, SS, TemplateKWLoc.isValid(), Name,
- ObjectType, EnteringContext, Result,
- MemberOfUnknownSpecialization);
- if (TNK == TNK_Non_template && MemberOfUnknownSpecialization) {
- // This is a dependent template. Handle it below.
- } else if (TNK == TNK_Non_template) {
- // Do the lookup again to determine if this is a "nothing found" case or
- // a "not a template" case. FIXME: Refactor isTemplateName so we don't
- // need to do this.
- DeclarationNameInfo DNI = GetNameFromUnqualifiedId(Name);
- LookupResult R(*this, DNI.getName(), Name.getBeginLoc(),
- LookupOrdinaryName);
- bool MOUS;
- if (!LookupTemplateName(R, S, SS, ObjectType.get(), EnteringContext,
- MOUS, TemplateKWLoc) && !R.isAmbiguous())
+ else if (ObjectType)
+ LookupCtx = computeDeclContext(GetTypeFromParser(ObjectType));
+
+ // C++0x [temp.names]p5:
+ // If a name prefixed by the keyword template is not the name of
+ // a template, the program is ill-formed. [Note: the keyword
+ // template may not be applied to non-template members of class
+ // templates. -end note ] [ Note: as is the case with the
+ // typename prefix, the template prefix is allowed in cases
+ // where it is not strictly necessary; i.e., when the
+ // nested-name-specifier or the expression on the left of the ->
+ // or . is not dependent on a template-parameter, or the use
+ // does not appear in the scope of a template. -end note]
+ //
+ // Note: C++03 was more strict here, because it banned the use of
+ // the "template" keyword prior to a template-name that was not a
+ // dependent name. C++ DR468 relaxed this requirement (the
+ // "template" keyword is now permitted). We follow the C++0x
+ // rules, even in C++03 mode with a warning, retroactively applying the DR.
+ bool MemberOfUnknownSpecialization;
+ TemplateNameKind TNK = isTemplateName(S, SS, TemplateKWLoc.isValid(), Name,
+ ObjectType, EnteringContext, Result,
+ MemberOfUnknownSpecialization);
+ if (TNK != TNK_Non_template) {
+ // We resolved this to a (non-dependent) template name. Return it.
+ auto *LookupRD = dyn_cast_or_null<CXXRecordDecl>(LookupCtx);
+ if (!AllowInjectedClassName && SS.isNotEmpty() && LookupRD &&
+ Name.getKind() == UnqualifiedIdKind::IK_Identifier &&
+ Name.Identifier && LookupRD->getIdentifier() == Name.Identifier) {
+ // C++14 [class.qual]p2:
+ // In a lookup in which function names are not ignored and the
+ // nested-name-specifier nominates a class C, if the name specified
+ // [...] is the injected-class-name of C, [...] the name is instead
+ // considered to name the constructor
+ //
+ // We don't get here if naming the constructor would be valid, so we
+ // just reject immediately and recover by treating the
+ // injected-class-name as naming the template.
+ Diag(Name.getBeginLoc(),
+ diag::ext_out_of_line_qualified_id_type_names_constructor)
+ << Name.Identifier
+ << 0 /*injected-class-name used as template name*/
+ << TemplateKWLoc.isValid();
+ }
+ return TNK;
+ }
+
+ if (!MemberOfUnknownSpecialization) {
+ // Didn't find a template name, and the lookup wasn't dependent.
+ // Do the lookup again to determine if this is a "nothing found" case or
+ // a "not a template" case. FIXME: Refactor isTemplateName so we don't
+ // need to do this.
+ DeclarationNameInfo DNI = GetNameFromUnqualifiedId(Name);
+ LookupResult R(*this, DNI.getName(), Name.getBeginLoc(),
+ LookupOrdinaryName);
+ bool MOUS;
+ // Tell LookupTemplateName that we require a template so that it diagnoses
+ // cases where it finds a non-template.
+ RequiredTemplateKind RTK = TemplateKWLoc.isValid()
+ ? RequiredTemplateKind(TemplateKWLoc)
+ : TemplateNameIsRequired;
+ if (!LookupTemplateName(R, S, SS, ObjectType.get(), EnteringContext, MOUS,
+ RTK, nullptr, /*AllowTypoCorrection=*/false) &&
+ !R.isAmbiguous()) {
+ if (LookupCtx)
Diag(Name.getBeginLoc(), diag::err_no_member)
<< DNI.getName() << LookupCtx << SS.getRange();
- return TNK_Non_template;
- } else {
- // We found something; return it.
- auto *LookupRD = dyn_cast<CXXRecordDecl>(LookupCtx);
- if (!AllowInjectedClassName && SS.isSet() && LookupRD &&
- Name.getKind() == UnqualifiedIdKind::IK_Identifier &&
- Name.Identifier && LookupRD->getIdentifier() == Name.Identifier) {
- // C++14 [class.qual]p2:
- // In a lookup in which function names are not ignored and the
- // nested-name-specifier nominates a class C, if the name specified
- // [...] is the injected-class-name of C, [...] the name is instead
- // considered to name the constructor
- //
- // We don't get here if naming the constructor would be valid, so we
- // just reject immediately and recover by treating the
- // injected-class-name as naming the template.
- Diag(Name.getBeginLoc(),
- diag::ext_out_of_line_qualified_id_type_names_constructor)
- << Name.Identifier
- << 0 /*injected-class-name used as template name*/
- << 1 /*'template' keyword was used*/;
- }
- return TNK;
+ else
+ Diag(Name.getBeginLoc(), diag::err_undeclared_use)
+ << DNI.getName() << SS.getRange();
}
+ return TNK_Non_template;
}
NestedNameSpecifier *Qualifier = SS.getScopeRep();
switch (Name.getKind()) {
case UnqualifiedIdKind::IK_Identifier:
- Result = TemplateTy::make(Context.getDependentTemplateName(Qualifier,
- Name.Identifier));
+ Result = TemplateTy::make(
+ Context.getDependentTemplateName(Qualifier, Name.Identifier));
return TNK_Dependent_template_name;
case UnqualifiedIdKind::IK_OperatorFunctionId:
- Result = TemplateTy::make(Context.getDependentTemplateName(Qualifier,
- Name.OperatorFunctionId.Operator));
+ Result = TemplateTy::make(Context.getDependentTemplateName(
+ Qualifier, Name.OperatorFunctionId.Operator));
return TNK_Function_template;
case UnqualifiedIdKind::IK_LiteralOperatorId:
- llvm_unreachable("literal operator id cannot have a dependent scope");
+ // This is a kind of template name, but can never occur in a dependent
+ // scope (literal operators can only be declared at namespace scope).
+ break;
default:
break;
}
- Diag(Name.getBeginLoc(), diag::err_template_kw_refers_to_non_template)
+ // This name cannot possibly name a dependent template. Diagnose this now
+ // rather than building a dependent template name that can never be valid.
+ Diag(Name.getBeginLoc(),
+ diag::err_template_kw_refers_to_dependent_non_template)
<< GetNameFromUnqualifiedId(Name).getName() << Name.getSourceRange()
- << TemplateKWLoc;
+ << TemplateKWLoc.isValid() << TemplateKWLoc;
return TNK_Non_template;
}
@@ -4655,10 +4844,7 @@ bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
CXXScopeSpec SS;
DeclarationNameInfo NameInfo;
- if (DeclRefExpr *ArgExpr = dyn_cast<DeclRefExpr>(Arg.getAsExpr())) {
- SS.Adopt(ArgExpr->getQualifierLoc());
- NameInfo = ArgExpr->getNameInfo();
- } else if (DependentScopeDeclRefExpr *ArgExpr =
+ if (DependentScopeDeclRefExpr *ArgExpr =
dyn_cast<DependentScopeDeclRefExpr>(Arg.getAsExpr())) {
SS.Adopt(ArgExpr->getQualifierLoc());
NameInfo = ArgExpr->getNameInfo();
@@ -4677,6 +4863,7 @@ bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
if (Result.getAsSingle<TypeDecl>() ||
Result.getResultKind() ==
LookupResult::NotFoundInCurrentInstantiation) {
+ assert(SS.getScopeRep() && "dependent scope expr must has a scope!");
// Suggest that the user add 'typename' before the NNS.
SourceLocation Loc = AL.getSourceRange().getBegin();
Diag(Loc, getLangOpts().MSVCCompat
@@ -5716,6 +5903,11 @@ bool UnnamedLocalNoLinkageFinder::VisitDependentSizedExtVectorType(
return Visit(T->getElementType());
}
+// A dependent-sized matrix type has no linkage of its own; recurse into its
+// element type.
+bool UnnamedLocalNoLinkageFinder::VisitDependentSizedMatrixType(
+ const DependentSizedMatrixType *T) {
+ return Visit(T->getElementType());
+}
+
bool UnnamedLocalNoLinkageFinder::VisitDependentAddressSpaceType(
const DependentAddressSpaceType *T) {
return Visit(T->getPointeeType());
@@ -5734,6 +5926,11 @@ bool UnnamedLocalNoLinkageFinder::VisitExtVectorType(const ExtVectorType* T) {
return Visit(T->getElementType());
}
+// A constant matrix type has no linkage of its own; recurse into its element
+// type.
+bool UnnamedLocalNoLinkageFinder::VisitConstantMatrixType(
+ const ConstantMatrixType *T) {
+ return Visit(T->getElementType());
+}
+
bool UnnamedLocalNoLinkageFinder::VisitFunctionProtoType(
const FunctionProtoType* T) {
for (const auto &A : T->param_types()) {
@@ -5815,7 +6012,9 @@ bool UnnamedLocalNoLinkageFinder::VisitDependentNameType(
bool UnnamedLocalNoLinkageFinder::VisitDependentTemplateSpecializationType(
const DependentTemplateSpecializationType* T) {
- return VisitNestedNameSpecifier(T->getQualifier());
+ if (auto *Q = T->getQualifier())
+ return VisitNestedNameSpecifier(Q);
+ return false;
}
bool UnnamedLocalNoLinkageFinder::VisitPackExpansionType(
@@ -5845,6 +6044,15 @@ bool UnnamedLocalNoLinkageFinder::VisitPipeType(const PipeType* T) {
return false;
}
+bool UnnamedLocalNoLinkageFinder::VisitExtIntType(const ExtIntType *T) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitDependentExtIntType(
+ const DependentExtIntType *T) {
+ return false;
+}
+
bool UnnamedLocalNoLinkageFinder::VisitTagDecl(const TagDecl *Tag) {
if (Tag->getDeclContext()->isFunctionOrMethod()) {
S.Diag(SR.getBegin(),
@@ -5869,6 +6077,7 @@ bool UnnamedLocalNoLinkageFinder::VisitTagDecl(const TagDecl *Tag) {
bool UnnamedLocalNoLinkageFinder::VisitNestedNameSpecifier(
NestedNameSpecifier *NNS) {
+ assert(NNS);
if (NNS->getPrefix() && VisitNestedNameSpecifier(NNS->getPrefix()))
return true;
@@ -6177,8 +6386,11 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
Arg = subst->getReplacement()->IgnoreImpCasts();
}
- DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Arg);
- ValueDecl *Entity = DRE ? DRE->getDecl() : nullptr;
+ ValueDecl *Entity = nullptr;
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Arg))
+ Entity = DRE->getDecl();
+ else if (CXXUuidofExpr *CUE = dyn_cast<CXXUuidofExpr>(Arg))
+ Entity = CUE->getGuidDecl();
// If our parameter has pointer type, check for a null template value.
if (ParamType->isPointerType() || ParamType->isNullPtrType()) {
@@ -6205,16 +6417,7 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
return false;
}
- if (isa<CXXUuidofExpr>(Arg)) {
- if (CheckTemplateArgumentIsCompatibleWithParameter(S, Param, ParamType,
- ArgIn, Arg, ArgType))
- return true;
-
- Converted = TemplateArgument(ArgIn);
- return false;
- }
-
- if (!DRE) {
+ if (!Entity) {
S.Diag(Arg->getBeginLoc(), diag::err_template_arg_not_decl_ref)
<< Arg->getSourceRange();
S.Diag(Param->getLocation(), diag::note_template_param_here);
@@ -6241,13 +6444,14 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
FunctionDecl *Func = dyn_cast<FunctionDecl>(Entity);
VarDecl *Var = dyn_cast<VarDecl>(Entity);
+ MSGuidDecl *Guid = dyn_cast<MSGuidDecl>(Entity);
// A non-type template argument must refer to an object or function.
- if (!Func && !Var) {
+ if (!Func && !Var && !Guid) {
// We found something, but we don't know specifically what it is.
S.Diag(Arg->getBeginLoc(), diag::err_template_arg_not_object_or_func)
<< Arg->getSourceRange();
- S.Diag(DRE->getDecl()->getLocation(), diag::note_template_arg_refers_here);
+ S.Diag(Entity->getLocation(), diag::note_template_arg_refers_here);
return true;
}
@@ -6268,30 +6472,7 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
return true;
}
- if (Func) {
- // If the template parameter has pointer type, the function decays.
- if (ParamType->isPointerType() && !AddressTaken)
- ArgType = S.Context.getPointerType(Func->getType());
- else if (AddressTaken && ParamType->isReferenceType()) {
- // If we originally had an address-of operator, but the
- // parameter has reference type, complain and (if things look
- // like they will work) drop the address-of operator.
- if (!S.Context.hasSameUnqualifiedType(Func->getType(),
- ParamType.getNonReferenceType())) {
- S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
- << ParamType;
- S.Diag(Param->getLocation(), diag::note_template_param_here);
- return true;
- }
-
- S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
- << ParamType
- << FixItHint::CreateRemoval(AddrOpLoc);
- S.Diag(Param->getLocation(), diag::note_template_param_here);
-
- ArgType = Func->getType();
- }
- } else {
+ if (Var) {
// A value of reference type is not an object.
if (Var->getType()->isReferenceType()) {
S.Diag(Arg->getBeginLoc(), diag::err_template_arg_reference_var)
@@ -6307,50 +6488,53 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
S.Diag(Var->getLocation(), diag::note_template_arg_refers_here);
return true;
}
+ }
- // If the template parameter has pointer type, we must have taken
- // the address of this object.
- if (ParamType->isReferenceType()) {
- if (AddressTaken) {
- // If we originally had an address-of operator, but the
- // parameter has reference type, complain and (if things look
- // like they will work) drop the address-of operator.
- if (!S.Context.hasSameUnqualifiedType(Var->getType(),
- ParamType.getNonReferenceType())) {
- S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
- << ParamType;
- S.Diag(Param->getLocation(), diag::note_template_param_here);
- return true;
- }
+ if (AddressTaken && ParamType->isReferenceType()) {
+ // If we originally had an address-of operator, but the
+ // parameter has reference type, complain and (if things look
+ // like they will work) drop the address-of operator.
+ if (!S.Context.hasSameUnqualifiedType(Entity->getType(),
+ ParamType.getNonReferenceType())) {
+ S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
+ << ParamType;
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
- S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
- << ParamType
- << FixItHint::CreateRemoval(AddrOpLoc);
- S.Diag(Param->getLocation(), diag::note_template_param_here);
+ S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
+ << ParamType
+ << FixItHint::CreateRemoval(AddrOpLoc);
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
- ArgType = Var->getType();
- }
- } else if (!AddressTaken && ParamType->isPointerType()) {
- if (Var->getType()->isArrayType()) {
- // Array-to-pointer decay.
- ArgType = S.Context.getArrayDecayedType(Var->getType());
- } else {
- // If the template parameter has pointer type but the address of
- // this object was not taken, complain and (possibly) recover by
- // taking the address of the entity.
- ArgType = S.Context.getPointerType(Var->getType());
- if (!S.Context.hasSameUnqualifiedType(ArgType, ParamType)) {
- S.Diag(Arg->getBeginLoc(), diag::err_template_arg_not_address_of)
- << ParamType;
- S.Diag(Param->getLocation(), diag::note_template_param_here);
- return true;
- }
+ ArgType = Entity->getType();
+ }
+ // If the template parameter has pointer type, either we must have taken the
+ // address or the argument must decay to a pointer.
+ if (!AddressTaken && ParamType->isPointerType()) {
+ if (Func) {
+ // Function-to-pointer decay.
+ ArgType = S.Context.getPointerType(Func->getType());
+ } else if (Entity->getType()->isArrayType()) {
+ // Array-to-pointer decay.
+ ArgType = S.Context.getArrayDecayedType(Entity->getType());
+ } else {
+ // If the template parameter has pointer type but the address of
+ // this object was not taken, complain and (possibly) recover by
+ // taking the address of the entity.
+ ArgType = S.Context.getPointerType(Entity->getType());
+ if (!S.Context.hasSameUnqualifiedType(ArgType, ParamType)) {
S.Diag(Arg->getBeginLoc(), diag::err_template_arg_not_address_of)
- << ParamType << FixItHint::CreateInsertion(Arg->getBeginLoc(), "&");
-
+ << ParamType;
S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
}
+
+ S.Diag(Arg->getBeginLoc(), diag::err_template_arg_not_address_of)
+ << ParamType << FixItHint::CreateInsertion(Arg->getBeginLoc(), "&");
+
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
}
}
@@ -6530,7 +6714,12 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
DeductionArg = PE->getPattern();
if (DeduceAutoType(
Context.getTrivialTypeSourceInfo(ParamType, Param->getLocation()),
- DeductionArg, ParamType, Depth) == DAR_Failed) {
+ DeductionArg, ParamType, Depth,
+ // We do not check constraints right now because the
+ // immediately-declared constraint of the auto type is also an
+ // associated constraint, and will be checked along with the other
+ // associated constraints after checking the template argument list.
+ /*IgnoreConstraints=*/true) == DAR_Failed) {
Diag(Arg->getExprLoc(),
diag::err_non_type_template_parm_type_deduction_failure)
<< Param->getDeclName() << Param->getType() << Arg->getType()
@@ -6670,12 +6859,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// -- a predefined __func__ variable
APValue::LValueBase Base = Value.getLValueBase();
auto *VD = const_cast<ValueDecl *>(Base.dyn_cast<const ValueDecl *>());
- if (Base && !VD) {
- auto *E = Base.dyn_cast<const Expr *>();
- if (E && isa<CXXUuidofExpr>(E)) {
- Converted = TemplateArgument(ArgResult.get()->IgnoreImpCasts());
- break;
- }
+ if (Base && (!VD || isa<LifetimeExtendedTemporaryDecl>(VD))) {
Diag(Arg->getBeginLoc(), diag::err_template_arg_not_decl_ref)
<< Arg->getSourceRange();
return ExprError();
@@ -6762,7 +6946,9 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType IntegerType = ParamType;
if (const EnumType *Enum = IntegerType->getAs<EnumType>())
IntegerType = Enum->getDecl()->getIntegerType();
- Value = Value.extOrTrunc(Context.getTypeSize(IntegerType));
+ Value = Value.extOrTrunc(IntegerType->isExtIntType()
+ ? Context.getIntWidth(IntegerType)
+ : Context.getTypeSize(IntegerType));
Converted = TemplateArgument(Context, Value,
Context.getCanonicalType(ParamType));
@@ -6856,7 +7042,9 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// Coerce the template argument's value to the value it will have
// based on the template parameter's type.
- unsigned AllowedBits = Context.getTypeSize(IntegerType);
+ unsigned AllowedBits = IntegerType->isExtIntType()
+ ? Context.getIntWidth(IntegerType)
+ : Context.getTypeSize(IntegerType);
if (Value.getBitWidth() != AllowedBits)
Value = Value.extOrTrunc(AllowedBits);
Value.setIsSigned(IntegerType->isSignedIntegerOrEnumerationType());
@@ -7102,6 +7290,11 @@ bool Sema::CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
// [temp.constr.order].
SmallVector<const Expr *, 3> ParamsAC, TemplateAC;
Params->getAssociatedConstraints(ParamsAC);
+ // C++2a[temp.arg.template]p3
+ // [...] In this comparison, if P is unconstrained, the constraints on A
+ // are not considered.
+ if (ParamsAC.empty())
+ return false;
Template->getAssociatedConstraints(TemplateAC);
bool IsParamAtLeastAsConstrained;
if (IsAtLeastAsConstrained(Param, ParamsAC, Template, TemplateAC,
@@ -7872,13 +8065,11 @@ bool Sema::CheckTemplatePartialSpecializationArgs(
DeclResult Sema::ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
- SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
- const ParsedAttributesView &Attr,
+ SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
+ TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody) {
assert(TUK != TUK_Reference && "References are not specializations");
- CXXScopeSpec &SS = TemplateId.SS;
-
// NOTE: KWLoc is the location of the tag keyword. This will instead
// store the location of the outermost template keyword in the declaration.
SourceLocation TemplateKWLoc = TemplateParameterLists.size() > 0
@@ -8048,7 +8239,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
if (Context.hasSameType(CanonType,
ClassTemplate->getInjectedClassNameSpecialization()) &&
- (!Context.getLangOpts().ConceptsTS ||
+ (!Context.getLangOpts().CPlusPlus20 ||
!TemplateParams->hasAssociatedConstraints())) {
// C++ [temp.class.spec]p9b3:
//
@@ -8274,7 +8465,7 @@ Decl *Sema::ActOnConceptDefinition(Scope *S,
ConceptDecl *NewDecl = ConceptDecl::Create(Context, DC, NameLoc, Name,
TemplateParameterLists.front(),
ConstraintExpr);
-
+
if (NewDecl->hasAssociatedConstraints()) {
// C++2a [temp.concept]p4:
// A concept shall not have associated constraints.
@@ -10012,24 +10203,12 @@ Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
<< FixItHint::CreateRemoval(TypenameLoc);
NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context);
+ TypeSourceInfo *TSI = nullptr;
QualType T = CheckTypenameType(TypenameLoc.isValid()? ETK_Typename : ETK_None,
- TypenameLoc, QualifierLoc, II, IdLoc);
+ TypenameLoc, QualifierLoc, II, IdLoc, &TSI,
+ /*DeducedTSTContext=*/true);
if (T.isNull())
return true;
-
- TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T);
- if (isa<DependentNameType>(T)) {
- DependentNameTypeLoc TL = TSI->getTypeLoc().castAs<DependentNameTypeLoc>();
- TL.setElaboratedKeywordLoc(TypenameLoc);
- TL.setQualifierLoc(QualifierLoc);
- TL.setNameLoc(IdLoc);
- } else {
- ElaboratedTypeLoc TL = TSI->getTypeLoc().castAs<ElaboratedTypeLoc>();
- TL.setElaboratedKeywordLoc(TypenameLoc);
- TL.setQualifierLoc(QualifierLoc);
- TL.getNamedTypeLoc().castAs<TypeSpecTypeLoc>().setNameLoc(IdLoc);
- }
-
return CreateParsedType(T, TSI);
}
@@ -10166,6 +10345,35 @@ static bool isEnableIf(NestedNameSpecifierLoc NNS, const IdentifierInfo &II,
return true;
}
+QualType
+Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
+ SourceLocation KeywordLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ const IdentifierInfo &II,
+ SourceLocation IILoc,
+ TypeSourceInfo **TSI,
+ bool DeducedTSTContext) {
+ QualType T = CheckTypenameType(Keyword, KeywordLoc, QualifierLoc, II, IILoc,
+ DeducedTSTContext);
+ if (T.isNull())
+ return QualType();
+
+ *TSI = Context.CreateTypeSourceInfo(T);
+ if (isa<DependentNameType>(T)) {
+ DependentNameTypeLoc TL =
+ (*TSI)->getTypeLoc().castAs<DependentNameTypeLoc>();
+ TL.setElaboratedKeywordLoc(KeywordLoc);
+ TL.setQualifierLoc(QualifierLoc);
+ TL.setNameLoc(IILoc);
+ } else {
+ ElaboratedTypeLoc TL = (*TSI)->getTypeLoc().castAs<ElaboratedTypeLoc>();
+ TL.setElaboratedKeywordLoc(KeywordLoc);
+ TL.setQualifierLoc(QualifierLoc);
+ TL.getNamedTypeLoc().castAs<TypeSpecTypeLoc>().setNameLoc(IILoc);
+ }
+ return T;
+}
+
/// Build the type that describes a C++ typename specifier,
/// e.g., "typename T::type".
QualType
@@ -10173,32 +10381,38 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
- SourceLocation IILoc) {
+ SourceLocation IILoc, bool DeducedTSTContext) {
CXXScopeSpec SS;
SS.Adopt(QualifierLoc);
- DeclContext *Ctx = computeDeclContext(SS);
- if (!Ctx) {
- // If the nested-name-specifier is dependent and couldn't be
- // resolved to a type, build a typename type.
- assert(QualifierLoc.getNestedNameSpecifier()->isDependent());
- return Context.getDependentNameType(Keyword,
- QualifierLoc.getNestedNameSpecifier(),
- &II);
+ DeclContext *Ctx = nullptr;
+ if (QualifierLoc) {
+ Ctx = computeDeclContext(SS);
+ if (!Ctx) {
+ // If the nested-name-specifier is dependent and couldn't be
+ // resolved to a type, build a typename type.
+ assert(QualifierLoc.getNestedNameSpecifier()->isDependent());
+ return Context.getDependentNameType(Keyword,
+ QualifierLoc.getNestedNameSpecifier(),
+ &II);
+ }
+
+ // If the nested-name-specifier refers to the current instantiation,
+ // the "typename" keyword itself is superfluous. In C++03, the
+ // program is actually ill-formed. However, DR 382 (in C++0x CD1)
+ // allows such extraneous "typename" keywords, and we retroactively
+ // apply this DR to C++03 code with only a warning. In any case we continue.
+
+ if (RequireCompleteDeclContext(SS, Ctx))
+ return QualType();
}
- // If the nested-name-specifier refers to the current instantiation,
- // the "typename" keyword itself is superfluous. In C++03, the
- // program is actually ill-formed. However, DR 382 (in C++0x CD1)
- // allows such extraneous "typename" keywords, and we retroactively
- // apply this DR to C++03 code with only a warning. In any case we continue.
-
- if (RequireCompleteDeclContext(SS, Ctx))
- return QualType();
-
DeclarationName Name(&II);
LookupResult Result(*this, Name, IILoc, LookupOrdinaryName);
- LookupQualifiedName(Result, Ctx, SS);
+ if (Ctx)
+ LookupQualifiedName(Result, Ctx, SS);
+ else
+ LookupName(Result, CurScope);
unsigned DiagID = 0;
Decl *Referenced = nullptr;
switch (Result.getResultKind()) {
@@ -10207,7 +10421,7 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
// a more specific diagnostic.
SourceRange CondRange;
Expr *Cond = nullptr;
- if (isEnableIf(QualifierLoc, II, CondRange, Cond)) {
+ if (Ctx && isEnableIf(QualifierLoc, II, CondRange, Cond)) {
// If we have a condition, narrow it down to the specific failed
// condition.
if (Cond) {
@@ -10223,12 +10437,14 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
return QualType();
}
- Diag(CondRange.getBegin(), diag::err_typename_nested_not_found_enable_if)
+ Diag(CondRange.getBegin(),
+ diag::err_typename_nested_not_found_enable_if)
<< Ctx << CondRange;
return QualType();
}
- DiagID = diag::err_typename_nested_not_found;
+ DiagID = Ctx ? diag::err_typename_nested_not_found
+ : diag::err_unknown_typename;
break;
}
@@ -10294,6 +10510,19 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
// is a placeholder for a deduced class type [...].
if (getLangOpts().CPlusPlus17) {
if (auto *TD = getAsTypeTemplateDecl(Result.getFoundDecl())) {
+ if (!DeducedTSTContext) {
+ QualType T(QualifierLoc
+ ? QualifierLoc.getNestedNameSpecifier()->getAsType()
+ : nullptr, 0);
+ if (!T.isNull())
+ Diag(IILoc, diag::err_dependent_deduced_tst)
+ << (int)getTemplateNameKindForDiagnostics(TemplateName(TD)) << T;
+ else
+ Diag(IILoc, diag::err_deduced_tst)
+ << (int)getTemplateNameKindForDiagnostics(TemplateName(TD));
+ Diag(TD->getLocation(), diag::note_template_decl_here);
+ return QualType();
+ }
return Context.getElaboratedType(
Keyword, QualifierLoc.getNestedNameSpecifier(),
Context.getDeducedTemplateSpecializationType(TemplateName(TD),
@@ -10301,12 +10530,14 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
}
}
- DiagID = diag::err_typename_nested_not_type;
+ DiagID = Ctx ? diag::err_typename_nested_not_type
+ : diag::err_typename_not_type;
Referenced = Result.getFoundDecl();
break;
case LookupResult::FoundOverloaded:
- DiagID = diag::err_typename_nested_not_type;
+ DiagID = Ctx ? diag::err_typename_nested_not_type
+ : diag::err_typename_not_type;
Referenced = *Result.begin();
break;
@@ -10318,9 +10549,14 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
// type. Emit an appropriate diagnostic and return an error.
SourceRange FullRange(KeywordLoc.isValid() ? KeywordLoc : SS.getBeginLoc(),
IILoc);
- Diag(IILoc, DiagID) << FullRange << Name << Ctx;
+ if (Ctx)
+ Diag(IILoc, DiagID) << FullRange << Name << Ctx;
+ else
+ Diag(IILoc, DiagID) << FullRange << Name;
if (Referenced)
- Diag(Referenced->getLocation(), diag::note_typename_refers_here)
+ Diag(Referenced->getLocation(),
+ Ctx ? diag::note_typename_member_refers_here
+ : diag::note_typename_refers_here)
<< Name;
return QualType();
}
@@ -10515,7 +10751,7 @@ Sema::getTemplateArgumentBindingsText(const TemplateParameterList *Params,
}
Out << ']';
- return Out.str();
+ return std::string(Out.str());
}
void Sema::MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
diff --git a/clang/lib/Sema/SemaTemplateDeduction.cpp b/clang/lib/Sema/SemaTemplateDeduction.cpp
index 1b9f1b2144d1..5392be57a3aa 100644
--- a/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -355,7 +355,7 @@ checkDeducedTemplateArguments(ASTContext &Context,
TemplateArgument Merged = checkDeducedTemplateArguments(
Context, DeducedTemplateArgument(*XA, X.wasDeducedFromArrayBound()),
DeducedTemplateArgument(*YA, Y.wasDeducedFromArrayBound()));
- if (Merged.isNull())
+ if (Merged.isNull() && !(XA->isNull() && YA->isNull()))
return DeducedTemplateArgument();
NewPack.push_back(Merged);
}
@@ -724,38 +724,49 @@ private:
// Compute the set of template parameter indices that correspond to
// parameter packs expanded by the pack expansion.
llvm::SmallBitVector SawIndices(TemplateParams->size());
+ llvm::SmallVector<TemplateArgument, 4> ExtraDeductions;
auto AddPack = [&](unsigned Index) {
if (SawIndices[Index])
return;
SawIndices[Index] = true;
addPack(Index);
+
+ // Deducing a parameter pack that is a pack expansion also constrains the
+ // packs appearing in that parameter to have the same deduced arity. Also,
+ // in C++17 onwards, deducing a non-type template parameter deduces its
+ // type, so we need to collect the pending deduced values for those packs.
+ if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(
+ TemplateParams->getParam(Index))) {
+ if (!NTTP->isExpandedParameterPack())
+ if (auto *Expansion = dyn_cast<PackExpansionType>(NTTP->getType()))
+ ExtraDeductions.push_back(Expansion->getPattern());
+ }
+ // FIXME: Also collect the unexpanded packs in any type and template
+ // parameter packs that are pack expansions.
};
- // First look for unexpanded packs in the pattern.
- SmallVector<UnexpandedParameterPack, 2> Unexpanded;
- S.collectUnexpandedParameterPacks(Pattern, Unexpanded);
- for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
- unsigned Depth, Index;
- std::tie(Depth, Index) = getDepthAndIndex(Unexpanded[I]);
- if (Depth == Info.getDeducedDepth())
- AddPack(Index);
- }
+ auto Collect = [&](TemplateArgument Pattern) {
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ S.collectUnexpandedParameterPacks(Pattern, Unexpanded);
+ for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
+ unsigned Depth, Index;
+ std::tie(Depth, Index) = getDepthAndIndex(Unexpanded[I]);
+ if (Depth == Info.getDeducedDepth())
+ AddPack(Index);
+ }
+ };
+
+ // Look for unexpanded packs in the pattern.
+ Collect(Pattern);
assert(!Packs.empty() && "Pack expansion without unexpanded packs?");
unsigned NumNamedPacks = Packs.size();
- // We can also have deduced template parameters that do not actually
- // appear in the pattern, but can be deduced by it (the type of a non-type
- // template parameter pack, in particular). These won't have prevented us
- // from partially expanding the pack.
- llvm::SmallBitVector Used(TemplateParams->size());
- MarkUsedTemplateParameters(S.Context, Pattern, /*OnlyDeduced*/true,
- Info.getDeducedDepth(), Used);
- for (int Index = Used.find_first(); Index != -1;
- Index = Used.find_next(Index))
- if (TemplateParams->getParam(Index)->isParameterPack())
- AddPack(Index);
+ // Also look for unexpanded packs that are indirectly deduced by deducing
+ // the sizes of the packs in this pattern.
+ while (!ExtraDeductions.empty())
+ Collect(ExtraDeductions.pop_back_val());
return NumNamedPacks;
}
@@ -1505,6 +1516,7 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
+ case Type::ExtInt:
if (TDF & TDF_SkipNonDependent)
return Sema::TDK_Success;
@@ -1808,7 +1820,7 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
// If this is a base class, try to perform template argument
// deduction from it.
if (NextT != RecordT) {
- TemplateDeductionInfo BaseInfo(Info.getLocation());
+ TemplateDeductionInfo BaseInfo(TemplateDeductionInfo::ForBase, Info);
Sema::TemplateDeductionResult BaseResult =
DeduceTemplateArguments(S, TemplateParams, SpecParam,
QualType(NextT, 0), BaseInfo, Deduced);
@@ -2046,6 +2058,101 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
// (clang extension)
//
+ // T __attribute__((matrix_type(<integral constant>,
+ // <integral constant>)))
+ case Type::ConstantMatrix: {
+ const ConstantMatrixType *MatrixArg = dyn_cast<ConstantMatrixType>(Arg);
+ if (!MatrixArg)
+ return Sema::TDK_NonDeducedMismatch;
+
+ const ConstantMatrixType *MatrixParam = cast<ConstantMatrixType>(Param);
+ // Check that the dimensions are the same
+ if (MatrixParam->getNumRows() != MatrixArg->getNumRows() ||
+ MatrixParam->getNumColumns() != MatrixArg->getNumColumns()) {
+ return Sema::TDK_NonDeducedMismatch;
+ }
+ // Perform deduction on element types.
+ return DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, MatrixParam->getElementType(),
+ MatrixArg->getElementType(), Info, Deduced, TDF);
+ }
+
+ case Type::DependentSizedMatrix: {
+ const MatrixType *MatrixArg = dyn_cast<MatrixType>(Arg);
+ if (!MatrixArg)
+ return Sema::TDK_NonDeducedMismatch;
+
+ // Check the element type of the matrixes.
+ const DependentSizedMatrixType *MatrixParam =
+ cast<DependentSizedMatrixType>(Param);
+ if (Sema::TemplateDeductionResult Result =
+ DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, MatrixParam->getElementType(),
+ MatrixArg->getElementType(), Info, Deduced, TDF))
+ return Result;
+
+ // Try to deduce a matrix dimension.
+ auto DeduceMatrixArg =
+ [&S, &Info, &Deduced, &TemplateParams](
+ Expr *ParamExpr, const MatrixType *Arg,
+ unsigned (ConstantMatrixType::*GetArgDimension)() const,
+ Expr *(DependentSizedMatrixType::*GetArgDimensionExpr)() const) {
+ const auto *ArgConstMatrix = dyn_cast<ConstantMatrixType>(Arg);
+ const auto *ArgDepMatrix = dyn_cast<DependentSizedMatrixType>(Arg);
+ if (!ParamExpr->isValueDependent()) {
+ llvm::APSInt ParamConst(
+ S.Context.getTypeSize(S.Context.getSizeType()));
+ if (!ParamExpr->isIntegerConstantExpr(ParamConst, S.Context))
+ return Sema::TDK_NonDeducedMismatch;
+
+ if (ArgConstMatrix) {
+ if ((ArgConstMatrix->*GetArgDimension)() == ParamConst)
+ return Sema::TDK_Success;
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
+ Expr *ArgExpr = (ArgDepMatrix->*GetArgDimensionExpr)();
+ llvm::APSInt ArgConst(
+ S.Context.getTypeSize(S.Context.getSizeType()));
+ if (!ArgExpr->isValueDependent() &&
+ ArgExpr->isIntegerConstantExpr(ArgConst, S.Context) &&
+ ArgConst == ParamConst)
+ return Sema::TDK_Success;
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
+ NonTypeTemplateParmDecl *NTTP =
+ getDeducedParameterFromExpr(Info, ParamExpr);
+ if (!NTTP)
+ return Sema::TDK_Success;
+
+ if (ArgConstMatrix) {
+ llvm::APSInt ArgConst(
+ S.Context.getTypeSize(S.Context.getSizeType()));
+ ArgConst = (ArgConstMatrix->*GetArgDimension)();
+ return DeduceNonTypeTemplateArgument(
+ S, TemplateParams, NTTP, ArgConst, S.Context.getSizeType(),
+ /*ArrayBound=*/true, Info, Deduced);
+ }
+
+ return DeduceNonTypeTemplateArgument(
+ S, TemplateParams, NTTP, (ArgDepMatrix->*GetArgDimensionExpr)(),
+ Info, Deduced);
+ };
+
+ auto Result = DeduceMatrixArg(MatrixParam->getRowExpr(), MatrixArg,
+ &ConstantMatrixType::getNumRows,
+ &DependentSizedMatrixType::getRowExpr);
+ if (Result)
+ return Result;
+
+ return DeduceMatrixArg(MatrixParam->getColumnExpr(), MatrixArg,
+ &ConstantMatrixType::getNumColumns,
+ &DependentSizedMatrixType::getColumnExpr);
+ }
+
+ // (clang extension)
+ //
// T __attribute__(((address_space(N))))
case Type::DependentAddressSpace: {
const DependentAddressSpaceType *AddressSpaceParam =
@@ -2096,6 +2203,33 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
return Sema::TDK_NonDeducedMismatch;
}
+ case Type::DependentExtInt: {
+ const auto *IntParam = cast<DependentExtIntType>(Param);
+
+ if (const auto *IntArg = dyn_cast<ExtIntType>(Arg)){
+ if (IntParam->isUnsigned() != IntArg->isUnsigned())
+ return Sema::TDK_NonDeducedMismatch;
+
+ NonTypeTemplateParmDecl *NTTP =
+ getDeducedParameterFromExpr(Info, IntParam->getNumBitsExpr());
+ if (!NTTP)
+ return Sema::TDK_Success;
+
+ llvm::APSInt ArgSize(S.Context.getTypeSize(S.Context.IntTy), false);
+ ArgSize = IntArg->getNumBits();
+
+ return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP, ArgSize,
+ S.Context.IntTy, true, Info,
+ Deduced);
+ }
+
+ if (const auto *IntArg = dyn_cast<DependentExtIntType>(Arg)) {
+ if (IntParam->isUnsigned() != IntArg->isUnsigned())
+ return Sema::TDK_NonDeducedMismatch;
+ return Sema::TDK_Success;
+ }
+ return Sema::TDK_NonDeducedMismatch;
+ }
case Type::TypeOfExpr:
case Type::TypeOf:
@@ -2478,7 +2612,7 @@ Sema::getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
case TemplateArgument::Template:
case TemplateArgument::TemplateExpansion: {
NestedNameSpecifierLocBuilder Builder;
- TemplateName Template = Arg.getAsTemplate();
+ TemplateName Template = Arg.getAsTemplateOrTemplatePattern();
if (DependentTemplateName *DTN = Template.getAsDependentTemplateName())
Builder.MakeTrivial(Context, DTN->getQualifier(), Loc);
else if (QualifiedTemplateName *QTN =
@@ -2504,27 +2638,10 @@ Sema::getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
}
TemplateArgumentLoc
-Sema::getIdentityTemplateArgumentLoc(Decl *TemplateParm,
+Sema::getIdentityTemplateArgumentLoc(NamedDecl *TemplateParm,
SourceLocation Location) {
- if (auto *TTP = dyn_cast<TemplateTypeParmDecl>(TemplateParm))
- return getTrivialTemplateArgumentLoc(
- TemplateArgument(
- Context.getTemplateTypeParmType(TTP->getDepth(), TTP->getIndex(),
- TTP->isParameterPack(), TTP)),
- QualType(), Location.isValid() ? Location : TTP->getLocation());
- else if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(TemplateParm))
- return getTrivialTemplateArgumentLoc(TemplateArgument(TemplateName(TTP)),
- QualType(),
- Location.isValid() ? Location :
- TTP->getLocation());
- auto *NTTP = cast<NonTypeTemplateParmDecl>(TemplateParm);
- CXXScopeSpec SS;
- DeclarationNameInfo Info(NTTP->getDeclName(),
- Location.isValid() ? Location : NTTP->getLocation());
- Expr *E = BuildDeclarationNameExpr(SS, Info, NTTP).get();
- return getTrivialTemplateArgumentLoc(TemplateArgument(E), NTTP->getType(),
- Location.isValid() ? Location :
- NTTP->getLocation());
+ return getTrivialTemplateArgumentLoc(
+ Context.getInjectedTemplateArg(TemplateParm), QualType(), Location);
}
/// Convert the given deduced template argument and add it to the set of
@@ -2754,8 +2871,8 @@ CheckDeducedArgumentConstraints(Sema& S, TemplateDeclT *Template,
/// Complete template argument deduction for a partial specialization.
template <typename T>
-static typename std::enable_if<IsPartialSpecialization<T>::value,
- Sema::TemplateDeductionResult>::type
+static std::enable_if_t<IsPartialSpecialization<T>::value,
+ Sema::TemplateDeductionResult>
FinishTemplateArgumentDeduction(
Sema &S, T *Partial, bool IsPartialOrdering,
const TemplateArgumentList &TemplateArgs,
@@ -2924,8 +3041,13 @@ Sema::DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
if (Trap.hasErrorOccurred())
return Sema::TDK_SubstitutionFailure;
- return ::FinishTemplateArgumentDeduction(
- *this, Partial, /*IsPartialOrdering=*/false, TemplateArgs, Deduced, Info);
+ TemplateDeductionResult Result;
+ runWithSufficientStackSpace(Info.getLocation(), [&] {
+ Result = ::FinishTemplateArgumentDeduction(*this, Partial,
+ /*IsPartialOrdering=*/false,
+ TemplateArgs, Deduced, Info);
+ });
+ return Result;
}
/// Perform template argument deduction to determine whether
@@ -2965,8 +3087,13 @@ Sema::DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
if (Trap.hasErrorOccurred())
return Sema::TDK_SubstitutionFailure;
- return ::FinishTemplateArgumentDeduction(
- *this, Partial, /*IsPartialOrdering=*/false, TemplateArgs, Deduced, Info);
+ TemplateDeductionResult Result;
+ runWithSufficientStackSpace(Info.getLocation(), [&] {
+ Result = ::FinishTemplateArgumentDeduction(*this, Partial,
+ /*IsPartialOrdering=*/false,
+ TemplateArgs, Deduced, Info);
+ });
+ return Result;
}
/// Determine whether the given type T is a simple-template-id type.
@@ -3446,13 +3573,16 @@ Sema::TemplateDeductionResult Sema::FinishTemplateArgumentDeduction(
// ([temp.constr.decl]), those constraints are checked for satisfaction
// ([temp.constr.constr]). If the constraints are not satisfied, type
// deduction fails.
- if (CheckInstantiatedFunctionTemplateConstraints(Info.getLocation(),
- Specialization, Builder, Info.AssociatedConstraintsSatisfaction))
- return TDK_MiscellaneousDeductionFailure;
+ if (!PartialOverloading ||
+ (Builder.size() == FunctionTemplate->getTemplateParameters()->size())) {
+ if (CheckInstantiatedFunctionTemplateConstraints(Info.getLocation(),
+ Specialization, Builder, Info.AssociatedConstraintsSatisfaction))
+ return TDK_MiscellaneousDeductionFailure;
- if (!Info.AssociatedConstraintsSatisfaction.IsSatisfied) {
- Info.reset(TemplateArgumentList::CreateCopy(Context, Builder));
- return TDK_ConstraintsNotSatisfied;
+ if (!Info.AssociatedConstraintsSatisfaction.IsSatisfied) {
+ Info.reset(TemplateArgumentList::CreateCopy(Context, Builder));
+ return TDK_ConstraintsNotSatisfied;
+ }
}
if (OriginalCallArgs) {
@@ -3912,13 +4042,12 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
SmallVector<QualType, 8> ParamTypes;
unsigned NumExplicitlySpecified = 0;
if (ExplicitTemplateArgs) {
- TemplateDeductionResult Result =
- SubstituteExplicitTemplateArguments(FunctionTemplate,
- *ExplicitTemplateArgs,
- Deduced,
- ParamTypes,
- nullptr,
- Info);
+ TemplateDeductionResult Result;
+ runWithSufficientStackSpace(Info.getLocation(), [&] {
+ Result = SubstituteExplicitTemplateArguments(
+ FunctionTemplate, *ExplicitTemplateArgs, Deduced, ParamTypes, nullptr,
+ Info);
+ });
if (Result)
return Result;
@@ -4020,12 +4149,16 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
// that is needed when the accessibility of template arguments is checked.
DeclContext *CallingCtx = CurContext;
- return FinishTemplateArgumentDeduction(
- FunctionTemplate, Deduced, NumExplicitlySpecified, Specialization, Info,
- &OriginalCallArgs, PartialOverloading, [&, CallingCtx]() {
- ContextRAII SavedContext(*this, CallingCtx);
- return CheckNonDependent(ParamTypesForArgChecking);
- });
+ TemplateDeductionResult Result;
+ runWithSufficientStackSpace(Info.getLocation(), [&] {
+ Result = FinishTemplateArgumentDeduction(
+ FunctionTemplate, Deduced, NumExplicitlySpecified, Specialization, Info,
+ &OriginalCallArgs, PartialOverloading, [&, CallingCtx]() {
+ ContextRAII SavedContext(*this, CallingCtx);
+ return CheckNonDependent(ParamTypesForArgChecking);
+ });
+ });
+ return Result;
}
QualType Sema::adjustCCAndNoReturn(QualType ArgFunctionType,
@@ -4111,11 +4244,13 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
unsigned NumExplicitlySpecified = 0;
SmallVector<QualType, 4> ParamTypes;
if (ExplicitTemplateArgs) {
- if (TemplateDeductionResult Result
- = SubstituteExplicitTemplateArguments(FunctionTemplate,
- *ExplicitTemplateArgs,
- Deduced, ParamTypes,
- &FunctionType, Info))
+ TemplateDeductionResult Result;
+ runWithSufficientStackSpace(Info.getLocation(), [&] {
+ Result = SubstituteExplicitTemplateArguments(
+ FunctionTemplate, *ExplicitTemplateArgs, Deduced, ParamTypes,
+ &FunctionType, Info);
+ });
+ if (Result)
return Result;
NumExplicitlySpecified = Deduced.size();
@@ -4157,10 +4292,13 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
return Result;
}
- if (TemplateDeductionResult Result
- = FinishTemplateArgumentDeduction(FunctionTemplate, Deduced,
- NumExplicitlySpecified,
- Specialization, Info))
+ TemplateDeductionResult Result;
+ runWithSufficientStackSpace(Info.getLocation(), [&] {
+ Result = FinishTemplateArgumentDeduction(FunctionTemplate, Deduced,
+ NumExplicitlySpecified,
+ Specialization, Info);
+ });
+ if (Result)
return Result;
// If the function has a deduced return type, deduce it now, so we can check
@@ -4317,9 +4455,11 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *ConversionTemplate,
LocalInstantiationScope InstScope(*this);
// Finish template argument deduction.
FunctionDecl *ConversionSpecialized = nullptr;
- TemplateDeductionResult Result
- = FinishTemplateArgumentDeduction(ConversionTemplate, Deduced, 0,
- ConversionSpecialized, Info);
+ TemplateDeductionResult Result;
+ runWithSufficientStackSpace(Info.getLocation(), [&] {
+ Result = FinishTemplateArgumentDeduction(ConversionTemplate, Deduced, 0,
+ ConversionSpecialized, Info);
+ });
Specialization = cast_or_null<CXXConversionDecl>(ConversionSpecialized);
return Result;
}
@@ -4404,9 +4544,10 @@ namespace {
QualType Result = SemaRef.Context.getAutoType(
Replacement, TL.getTypePtr()->getKeyword(), Replacement.isNull(),
- ReplacementIsPack);
+ ReplacementIsPack, TL.getTypePtr()->getTypeConstraintConcept(),
+ TL.getTypePtr()->getTypeConstraintArguments());
auto NewTL = TLB.push<AutoTypeLoc>(Result);
- NewTL.setNameLoc(TL.getNameLoc());
+ NewTL.copy(TL);
return Result;
}
@@ -4441,9 +4582,10 @@ namespace {
Sema::DeduceAutoResult
Sema::DeduceAutoType(TypeSourceInfo *Type, Expr *&Init, QualType &Result,
- Optional<unsigned> DependentDeductionDepth) {
+ Optional<unsigned> DependentDeductionDepth,
+ bool IgnoreConstraints) {
return DeduceAutoType(Type->getTypeLoc(), Init, Result,
- DependentDeductionDepth);
+ DependentDeductionDepth, IgnoreConstraints);
}
/// Attempt to produce an informative diagostic explaining why auto deduction
@@ -4471,6 +4613,49 @@ static bool diagnoseAutoDeductionFailure(Sema &S,
}
}
+static Sema::DeduceAutoResult
+CheckDeducedPlaceholderConstraints(Sema &S, const AutoType &Type,
+ AutoTypeLoc TypeLoc, QualType Deduced) {
+ ConstraintSatisfaction Satisfaction;
+ ConceptDecl *Concept = Type.getTypeConstraintConcept();
+ TemplateArgumentListInfo TemplateArgs(TypeLoc.getLAngleLoc(),
+ TypeLoc.getRAngleLoc());
+ TemplateArgs.addArgument(
+ TemplateArgumentLoc(TemplateArgument(Deduced),
+ S.Context.getTrivialTypeSourceInfo(
+ Deduced, TypeLoc.getNameLoc())));
+ for (unsigned I = 0, C = TypeLoc.getNumArgs(); I != C; ++I)
+ TemplateArgs.addArgument(TypeLoc.getArgLoc(I));
+
+ llvm::SmallVector<TemplateArgument, 4> Converted;
+ if (S.CheckTemplateArgumentList(Concept, SourceLocation(), TemplateArgs,
+ /*PartialTemplateArgs=*/false, Converted))
+ return Sema::DAR_FailedAlreadyDiagnosed;
+ if (S.CheckConstraintSatisfaction(Concept, {Concept->getConstraintExpr()},
+ Converted, TypeLoc.getLocalSourceRange(),
+ Satisfaction))
+ return Sema::DAR_FailedAlreadyDiagnosed;
+ if (!Satisfaction.IsSatisfied) {
+ std::string Buf;
+ llvm::raw_string_ostream OS(Buf);
+ OS << "'" << Concept->getName();
+ if (TypeLoc.hasExplicitTemplateArgs()) {
+ OS << "<";
+ for (const auto &Arg : Type.getTypeConstraintArguments())
+ Arg.print(S.getPrintingPolicy(), OS);
+ OS << ">";
+ }
+ OS << "'";
+ OS.flush();
+ S.Diag(TypeLoc.getConceptNameLoc(),
+ diag::err_placeholder_constraints_not_satisfied)
+ << Deduced << Buf << TypeLoc.getLocalSourceRange();
+ S.DiagnoseUnsatisfiedConstraint(Satisfaction);
+ return Sema::DAR_FailedAlreadyDiagnosed;
+ }
+ return Sema::DAR_Succeeded;
+}
+
/// Deduce the type for an auto type-specifier (C++11 [dcl.spec.auto]p6)
///
/// Note that this is done even if the initializer is dependent. (This is
@@ -4485,9 +4670,14 @@ static bool diagnoseAutoDeductionFailure(Sema &S,
/// dependent cases. This is necessary for template partial ordering with
/// 'auto' template parameters. The value specified is the template
/// parameter depth at which we should perform 'auto' deduction.
+/// \param IgnoreConstraints Set if we should not fail if the deduced type does
+/// not satisfy the type-constraint in the auto type.
Sema::DeduceAutoResult
Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result,
- Optional<unsigned> DependentDeductionDepth) {
+ Optional<unsigned> DependentDeductionDepth,
+ bool IgnoreConstraints) {
+ if (Init->containsErrors())
+ return DAR_FailedAlreadyDiagnosed;
if (Init->getType()->isNonOverloadPlaceholderType()) {
ExprResult NonPlaceholder = CheckPlaceholderExpr(Init);
if (NonPlaceholder.isInvalid())
@@ -4528,6 +4718,14 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result,
return DAR_FailedAlreadyDiagnosed;
// FIXME: Support a non-canonical deduced type for 'auto'.
Deduced = Context.getCanonicalType(Deduced);
+ if (AT->isConstrained() && !IgnoreConstraints) {
+ auto ConstraintsResult =
+ CheckDeducedPlaceholderConstraints(*this, *AT,
+ Type.getContainedAutoTypeLoc(),
+ Deduced);
+ if (ConstraintsResult != DAR_Succeeded)
+ return ConstraintsResult;
+ }
Result = SubstituteDeducedTypeTransform(*this, Deduced).Apply(Type);
if (Result.isNull())
return DAR_FailedAlreadyDiagnosed;
@@ -4635,6 +4833,17 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result,
return DAR_FailedAlreadyDiagnosed;
}
+ if (const auto *AT = Type.getType()->getAs<AutoType>()) {
+ if (AT->isConstrained() && !IgnoreConstraints) {
+ auto ConstraintsResult =
+ CheckDeducedPlaceholderConstraints(*this, *AT,
+ Type.getContainedAutoTypeLoc(),
+ DeducedType);
+ if (ConstraintsResult != DAR_Succeeded)
+ return ConstraintsResult;
+ }
+ }
+
Result = SubstituteDeducedTypeTransform(*this, DeducedType).Apply(Type);
if (Result.isNull())
return DAR_FailedAlreadyDiagnosed;
@@ -4787,7 +4996,10 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
TemplatePartialOrderingContext TPOC,
- unsigned NumCallArguments1) {
+ unsigned NumCallArguments1,
+ bool Reversed) {
+ assert(!Reversed || TPOC == TPOC_Call);
+
FunctionDecl *FD1 = FT1->getTemplatedDecl();
FunctionDecl *FD2 = FT2->getTemplatedDecl();
const FunctionProtoType *Proto1 = FD1->getType()->getAs<FunctionProtoType>();
@@ -4836,6 +5048,12 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
} else if (!Method1 && Method2 && !Method2->isStatic()) {
// Compare 'this' from Method2 against first parameter from Method1.
AddImplicitObjectParameterType(S.Context, Method2, Args2);
+ } else if (Method1 && Method2 && Reversed) {
+ // Compare 'this' from Method1 against second parameter from Method2
+ // and 'this' from Method2 against second parameter from Method1.
+ AddImplicitObjectParameterType(S.Context, Method1, Args1);
+ AddImplicitObjectParameterType(S.Context, Method2, Args2);
+ ++NumComparedArguments;
}
Args1.insert(Args1.end(), Proto1->param_type_begin(),
@@ -4850,6 +5068,8 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
Args1.resize(NumComparedArguments);
if (Args2.size() > NumComparedArguments)
Args2.resize(NumComparedArguments);
+ if (Reversed)
+ std::reverse(Args2.begin(), Args2.end());
if (DeduceTemplateArguments(S, TemplateParams, Args2.data(), Args2.size(),
Args1.data(), Args1.size(), Info, Deduced,
TDF_None, /*PartialOrdering=*/true))
@@ -4968,6 +5188,10 @@ static bool isVariadicFunctionTemplate(FunctionTemplateDecl *FunTmpl) {
/// \param NumCallArguments2 The number of arguments in the call to FT2, used
/// only when \c TPOC is \c TPOC_Call.
///
+/// \param Reversed If \c true, exactly one of FT1 and FT2 is an overload
+/// candidate with a reversed parameter order. In this case, the corresponding
+/// P/A pairs between FT1 and FT2 are reversed.
+///
/// \returns the more specialized function template. If neither
/// template is more specialized, returns NULL.
FunctionTemplateDecl *
@@ -4976,7 +5200,8 @@ Sema::getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
- unsigned NumCallArguments2) {
+ unsigned NumCallArguments2,
+ bool Reversed) {
auto JudgeByConstraints = [&] () -> FunctionTemplateDecl * {
llvm::SmallVector<const Expr *, 3> AC1, AC2;
@@ -4993,9 +5218,9 @@ Sema::getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
};
bool Better1 = isAtLeastAsSpecializedAs(*this, Loc, FT1, FT2, TPOC,
- NumCallArguments1);
+ NumCallArguments1, Reversed);
bool Better2 = isAtLeastAsSpecializedAs(*this, Loc, FT2, FT1, TPOC,
- NumCallArguments2);
+ NumCallArguments2, Reversed);
if (Better1 != Better2) // We have a clear winner
return Better1 ? FT1 : FT2;
@@ -5174,14 +5399,15 @@ static bool isAtLeastAsSpecializedAs(Sema &S, QualType T1, QualType T2,
Sema::InstantiatingTemplate Inst(S, Info.getLocation(), P2, DeducedArgs,
Info);
auto *TST1 = T1->castAs<TemplateSpecializationType>();
- if (FinishTemplateArgumentDeduction(
- S, P2, /*IsPartialOrdering=*/true,
- TemplateArgumentList(TemplateArgumentList::OnStack,
- TST1->template_arguments()),
- Deduced, Info))
- return false;
-
- return true;
+ bool AtLeastAsSpecialized;
+ S.runWithSufficientStackSpace(Info.getLocation(), [&] {
+ AtLeastAsSpecialized = !FinishTemplateArgumentDeduction(
+ S, P2, /*IsPartialOrdering=*/true,
+ TemplateArgumentList(TemplateArgumentList::OnStack,
+ TST1->template_arguments()),
+ Deduced, Info);
+ });
+ return AtLeastAsSpecialized;
}
/// Returns the more specialized class template partial specialization
@@ -5616,6 +5842,24 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
break;
}
+ case Type::ConstantMatrix: {
+ const ConstantMatrixType *MatType = cast<ConstantMatrixType>(T);
+ MarkUsedTemplateParameters(Ctx, MatType->getElementType(), OnlyDeduced,
+ Depth, Used);
+ break;
+ }
+
+ case Type::DependentSizedMatrix: {
+ const DependentSizedMatrixType *MatType = cast<DependentSizedMatrixType>(T);
+ MarkUsedTemplateParameters(Ctx, MatType->getElementType(), OnlyDeduced,
+ Depth, Used);
+ MarkUsedTemplateParameters(Ctx, MatType->getRowExpr(), OnlyDeduced, Depth,
+ Used);
+ MarkUsedTemplateParameters(Ctx, MatType->getColumnExpr(), OnlyDeduced,
+ Depth, Used);
+ break;
+ }
+
case Type::FunctionProto: {
const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
MarkUsedTemplateParameters(Ctx, Proto->getReturnType(), OnlyDeduced, Depth,
@@ -5771,6 +6015,11 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
cast<DeducedType>(T)->getDeducedType(),
OnlyDeduced, Depth, Used);
break;
+ case Type::DependentExtInt:
+ MarkUsedTemplateParameters(Ctx,
+ cast<DependentExtIntType>(T)->getNumBitsExpr(),
+ OnlyDeduced, Depth, Used);
+ break;
// None of these types have any template parameters in them.
case Type::Builtin:
@@ -5783,6 +6032,7 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
case Type::ObjCObjectPointer:
case Type::UnresolvedUsing:
case Type::Pipe:
+ case Type::ExtInt:
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base)
diff --git a/clang/lib/Sema/SemaTemplateInstantiate.cpp b/clang/lib/Sema/SemaTemplateInstantiate.cpp
index af41e231134d..11e03c517d01 100644
--- a/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -9,7 +9,6 @@
//
//===----------------------------------------------------------------------===/
-#include "clang/Sema/SemaInternal.h"
#include "TreeTransform.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
@@ -18,11 +17,15 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/PrettyDeclStackTrace.h"
+#include "clang/AST/TypeVisitor.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Stack.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/SemaConcept.h"
+#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/TemplateInstCallback.h"
@@ -199,8 +202,10 @@ bool Sema::CodeSynthesisContext::isInstantiationRecord() const {
case DeducedTemplateArgumentSubstitution:
case PriorTemplateArgumentSubstitution:
case ConstraintsCheck:
+ case NestedRequirementConstraintsCheck:
return true;
+ case RequirementInstantiation:
case DefaultTemplateArgumentChecking:
case DeclaringSpecialMember:
case DeclaringImplicitEqualityComparison:
@@ -210,6 +215,8 @@ bool Sema::CodeSynthesisContext::isInstantiationRecord() const {
case ParameterMappingSubstitution:
case ConstraintNormalization:
case RewritingOperatorAsSpaceship:
+ case InitializingStructuredBinding:
+ case MarkingClassDllexported:
return false;
// This function should never be called when Kind's value is Memoization.
@@ -247,7 +254,7 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(
Inst.InstantiationRange = InstantiationRange;
SemaRef.pushCodeSynthesisContext(Inst);
- AlreadyInstantiating =
+ AlreadyInstantiating = !Inst.Entity ? false :
!SemaRef.InstantiatingSpecializations
.insert(std::make_pair(Inst.Entity->getCanonicalDecl(), Inst.Kind))
.second;
@@ -366,6 +373,26 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(
Sema::InstantiatingTemplate::InstantiatingTemplate(
Sema &SemaRef, SourceLocation PointOfInstantiation,
+ concepts::Requirement *Req, sema::TemplateDeductionInfo &DeductionInfo,
+ SourceRange InstantiationRange)
+ : InstantiatingTemplate(
+ SemaRef, CodeSynthesisContext::RequirementInstantiation,
+ PointOfInstantiation, InstantiationRange, /*Entity=*/nullptr,
+ /*Template=*/nullptr, /*TemplateArgs=*/None, &DeductionInfo) {}
+
+
+Sema::InstantiatingTemplate::InstantiatingTemplate(
+ Sema &SemaRef, SourceLocation PointOfInstantiation,
+ concepts::NestedRequirement *Req, ConstraintsCheck,
+ SourceRange InstantiationRange)
+ : InstantiatingTemplate(
+ SemaRef, CodeSynthesisContext::NestedRequirementConstraintsCheck,
+ PointOfInstantiation, InstantiationRange, /*Entity=*/nullptr,
+ /*Template=*/nullptr, /*TemplateArgs=*/None) {}
+
+
+Sema::InstantiatingTemplate::InstantiatingTemplate(
+ Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, NamedDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange)
: InstantiatingTemplate(
@@ -446,8 +473,9 @@ void Sema::InstantiatingTemplate::Clear() {
if (!Invalid) {
if (!AlreadyInstantiating) {
auto &Active = SemaRef.CodeSynthesisContexts.back();
- SemaRef.InstantiatingSpecializations.erase(
- std::make_pair(Active.Entity, Active.Kind));
+ if (Active.Entity)
+ SemaRef.InstantiatingSpecializations.erase(
+ std::make_pair(Active.Entity, Active.Kind));
}
atTemplateEnd(SemaRef.TemplateInstCallbacks, SemaRef,
@@ -684,6 +712,18 @@ void Sema::PrintInstantiationStack() {
<< Active->InstantiationRange;
break;
+ case CodeSynthesisContext::RequirementInstantiation:
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_template_requirement_instantiation_here)
+ << Active->InstantiationRange;
+ break;
+
+ case CodeSynthesisContext::NestedRequirementConstraintsCheck:
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_nested_requirement_here)
+ << Active->InstantiationRange;
+ break;
+
case CodeSynthesisContext::DeclaringSpecialMember:
Diags.Report(Active->PointOfInstantiation,
diag::note_in_declaration_of_implicit_special_member)
@@ -722,26 +762,47 @@ void Sema::PrintInstantiationStack() {
diag::note_rewriting_operator_as_spaceship);
break;
+ case CodeSynthesisContext::InitializingStructuredBinding:
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_in_binding_decl_init)
+ << cast<BindingDecl>(Active->Entity);
+ break;
+
+ case CodeSynthesisContext::MarkingClassDllexported:
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_due_to_dllexported_class)
+ << cast<CXXRecordDecl>(Active->Entity) << !getLangOpts().CPlusPlus11;
+ break;
+
case CodeSynthesisContext::Memoization:
break;
-
+
case CodeSynthesisContext::ConstraintsCheck: {
unsigned DiagID = 0;
+ if (!Active->Entity) {
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_nested_requirement_here)
+ << Active->InstantiationRange;
+ break;
+ }
if (isa<ConceptDecl>(Active->Entity))
DiagID = diag::note_concept_specialization_here;
else if (isa<TemplateDecl>(Active->Entity))
DiagID = diag::note_checking_constraints_for_template_id_here;
else if (isa<VarTemplatePartialSpecializationDecl>(Active->Entity))
DiagID = diag::note_checking_constraints_for_var_spec_id_here;
- else {
- assert(isa<ClassTemplatePartialSpecializationDecl>(Active->Entity));
+ else if (isa<ClassTemplatePartialSpecializationDecl>(Active->Entity))
DiagID = diag::note_checking_constraints_for_class_spec_id_here;
+ else {
+ assert(isa<FunctionDecl>(Active->Entity));
+ DiagID = diag::note_checking_constraints_for_function_here;
}
SmallVector<char, 128> TemplateArgsStr;
llvm::raw_svector_ostream OS(TemplateArgsStr);
cast<NamedDecl>(Active->Entity)->printName(OS);
- printTemplateArgumentList(OS, Active->template_arguments(),
- getPrintingPolicy());
+ if (!isa<FunctionDecl>(Active->Entity))
+ printTemplateArgumentList(OS, Active->template_arguments(),
+ getPrintingPolicy());
Diags.Report(Active->PointOfInstantiation, DiagID) << OS.str()
<< Active->InstantiationRange;
break;
@@ -788,6 +849,7 @@ Optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
case CodeSynthesisContext::ConstraintsCheck:
case CodeSynthesisContext::ParameterMappingSubstitution:
case CodeSynthesisContext::ConstraintNormalization:
+ case CodeSynthesisContext::NestedRequirementConstraintsCheck:
// This is a template instantiation, so there is no SFINAE.
return None;
@@ -802,9 +864,10 @@ Optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
case CodeSynthesisContext::ExplicitTemplateArgumentSubstitution:
case CodeSynthesisContext::DeducedTemplateArgumentSubstitution:
case CodeSynthesisContext::ConstraintSubstitution:
- // We're either substituting explicitly-specified template arguments
- // or deduced template arguments or a constraint expression, so SFINAE
- // applies.
+ case CodeSynthesisContext::RequirementInstantiation:
+ // We're either substituting explicitly-specified template arguments,
+ // deduced template arguments, a constraint expression or a requirement
+ // in a requires expression, so SFINAE applies.
assert(Active->DeductionInfo && "Missing deduction info pointer");
return Active->DeductionInfo;
@@ -812,6 +875,8 @@ Optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
case CodeSynthesisContext::DeclaringImplicitEqualityComparison:
case CodeSynthesisContext::DefiningSynthesizedFunction:
case CodeSynthesisContext::RewritingOperatorAsSpaceship:
+ case CodeSynthesisContext::InitializingStructuredBinding:
+ case CodeSynthesisContext::MarkingClassDllexported:
// This happens in a context unrelated to template instantiation, so
// there is no SFINAE.
return None;
@@ -874,6 +939,10 @@ namespace {
this->Entity = Entity;
}
+ unsigned TransformTemplateDepth(unsigned Depth) {
+ return TemplateArgs.getNewDepth(Depth);
+ }
+
bool TryExpandParameterPacks(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
@@ -1010,6 +1079,8 @@ namespace {
NonTypeTemplateParmDecl *D);
ExprResult TransformSubstNonTypeTemplateParmPackExpr(
SubstNonTypeTemplateParmPackExpr *E);
+ ExprResult TransformSubstNonTypeTemplateParmExpr(
+ SubstNonTypeTemplateParmExpr *E);
/// Rebuild a DeclRefExpr for a VarDecl reference.
ExprResult RebuildVarDeclRefExpr(VarDecl *PD, SourceLocation Loc);
@@ -1056,6 +1127,41 @@ namespace {
return TreeTransform<TemplateInstantiator>::TransformLambdaExpr(E);
}
+ ExprResult TransformRequiresExpr(RequiresExpr *E) {
+ LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true);
+ return TreeTransform<TemplateInstantiator>::TransformRequiresExpr(E);
+ }
+
+ bool TransformRequiresExprRequirements(
+ ArrayRef<concepts::Requirement *> Reqs,
+ SmallVectorImpl<concepts::Requirement *> &Transformed) {
+ bool SatisfactionDetermined = false;
+ for (concepts::Requirement *Req : Reqs) {
+ concepts::Requirement *TransReq = nullptr;
+ if (!SatisfactionDetermined) {
+ if (auto *TypeReq = dyn_cast<concepts::TypeRequirement>(Req))
+ TransReq = TransformTypeRequirement(TypeReq);
+ else if (auto *ExprReq = dyn_cast<concepts::ExprRequirement>(Req))
+ TransReq = TransformExprRequirement(ExprReq);
+ else
+ TransReq = TransformNestedRequirement(
+ cast<concepts::NestedRequirement>(Req));
+ if (!TransReq)
+ return true;
+ if (!TransReq->isDependent() && !TransReq->isSatisfied())
+ // [expr.prim.req]p6
+ // [...] The substitution and semantic constraint checking
+ // proceeds in lexical order and stops when a condition that
+ // determines the result of the requires-expression is
+ // encountered. [..]
+ SatisfactionDetermined = true;
+ } else
+ TransReq = Req;
+ Transformed.push_back(TransReq);
+ }
+ return false;
+ }
+
TemplateParameterList *TransformTemplateParameterList(
TemplateParameterList *OrigTPL) {
if (!OrigTPL || !OrigTPL->size()) return OrigTPL;
@@ -1065,6 +1171,14 @@ namespace {
/* DeclContext *Owner */ Owner, TemplateArgs);
return DeclInstantiator.SubstTemplateParams(OrigTPL);
}
+
+ concepts::TypeRequirement *
+ TransformTypeRequirement(concepts::TypeRequirement *Req);
+ concepts::ExprRequirement *
+ TransformExprRequirement(concepts::ExprRequirement *Req);
+ concepts::NestedRequirement *
+ TransformNestedRequirement(concepts::NestedRequirement *Req);
+
private:
ExprResult transformNonTypeTemplateParmRef(NonTypeTemplateParmDecl *parm,
SourceLocation loc,
@@ -1248,6 +1362,19 @@ TemplateName TemplateInstantiator::TransformTemplateName(
TemplateArgument Arg = TemplateArgs(TTP->getDepth(), TTP->getPosition());
+ if (TemplateArgs.isRewrite()) {
+ // We're rewriting the template parameter as a reference to another
+ // template parameter.
+ if (Arg.getKind() == TemplateArgument::Pack) {
+ assert(Arg.pack_size() == 1 && Arg.pack_begin()->isPackExpansion() &&
+ "unexpected pack arguments in template rewrite");
+ Arg = Arg.pack_begin()->getPackExpansionPattern();
+ }
+ assert(Arg.getKind() == TemplateArgument::Template &&
+ "unexpected nontype template argument kind in template rewrite");
+ return Arg.getAsTemplate();
+ }
+
if (TTP->isParameterPack()) {
assert(Arg.getKind() == TemplateArgument::Pack &&
"Missing argument pack");
@@ -1287,11 +1414,47 @@ TemplateName TemplateInstantiator::TransformTemplateName(
AllowInjectedClassName);
}
+static ExprResult TransformUniqueStableName(TemplateInstantiator &TI,
+ PredefinedExpr *E) {
+ if (E->getIdentKind() == PredefinedExpr::UniqueStableNameType) {
+ TypeSourceInfo *Info =
+ TI.getDerived().TransformType(E->getTypeSourceInfo());
+
+ if (!Info)
+ return ExprError();
+
+ if (!TI.getDerived().AlwaysRebuild() && Info == E->getTypeSourceInfo())
+ return E;
+
+ return TI.getSema().BuildUniqueStableName(E->getLocation(), Info);
+ }
+
+ if (E->getIdentKind() == PredefinedExpr::UniqueStableNameExpr) {
+ EnterExpressionEvaluationContext Unevaluated(
+ TI.getSema(), Sema::ExpressionEvaluationContext::Unevaluated);
+ ExprResult SubExpr = TI.getDerived().TransformExpr(E->getExpr());
+
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!TI.getDerived().AlwaysRebuild() && SubExpr.get() == E->getExpr())
+ return E;
+
+ return TI.getSema().BuildUniqueStableName(E->getLocation(), SubExpr.get());
+ }
+
+ llvm_unreachable("Only valid for UniqueStableNameType/Expr");
+}
+
ExprResult
TemplateInstantiator::TransformPredefinedExpr(PredefinedExpr *E) {
if (!E->isTypeDependent())
return E;
+ if (E->getIdentKind() == PredefinedExpr::UniqueStableNameType ||
+ E->getIdentKind() == PredefinedExpr::UniqueStableNameExpr)
+ return TransformUniqueStableName(*this, E);
+
return getSema().BuildPredefinedExpr(E->getLocation(), E->getIdentKind());
}
@@ -1308,19 +1471,18 @@ TemplateInstantiator::TransformTemplateParmRefExpr(DeclRefExpr *E,
TemplateArgument Arg = TemplateArgs(NTTP->getDepth(), NTTP->getPosition());
- if (TemplateArgs.getNumLevels() != TemplateArgs.getNumSubstitutedLevels()) {
- // We're performing a partial substitution, so the substituted argument
- // could be dependent. As a result we can't create a SubstNonType*Expr
- // node now, since that represents a fully-substituted argument.
- // FIXME: We should have some AST representation for this.
+ if (TemplateArgs.isRewrite()) {
+ // We're rewriting the template parameter as a reference to another
+ // template parameter.
if (Arg.getKind() == TemplateArgument::Pack) {
- // FIXME: This won't work for alias templates.
assert(Arg.pack_size() == 1 && Arg.pack_begin()->isPackExpansion() &&
- "unexpected pack arguments in partial substitution");
+ "unexpected pack arguments in template rewrite");
Arg = Arg.pack_begin()->getPackExpansionPattern();
}
assert(Arg.getKind() == TemplateArgument::Expression &&
- "unexpected nontype template argument kind in partial substitution");
+ "unexpected nontype template argument kind in template rewrite");
+ // FIXME: This can lead to the same subexpression appearing multiple times
+ // in a complete expression.
return Arg.getAsExpr();
}
@@ -1445,6 +1607,44 @@ TemplateInstantiator::TransformSubstNonTypeTemplateParmPackExpr(
Arg);
}
+ExprResult
+TemplateInstantiator::TransformSubstNonTypeTemplateParmExpr(
+ SubstNonTypeTemplateParmExpr *E) {
+ ExprResult SubstReplacement = TransformExpr(E->getReplacement());
+ if (SubstReplacement.isInvalid())
+ return true;
+ QualType SubstType = TransformType(E->getType());
+ if (SubstType.isNull())
+ return true;
+ // The type may have been previously dependent and not now, which means we
+ // might have to implicit cast the argument to the new type, for example:
+ // template<auto T, decltype(T) U>
+ // concept C = sizeof(U) == 4;
+ // void foo() requires C<2, 'a'> { }
+ // When normalizing foo(), we first form the normalized constraints of C:
+ // AtomicExpr(sizeof(U) == 4,
+ // U=SubstNonTypeTemplateParmExpr(Param=U,
+ // Expr=DeclRef(U),
+ // Type=decltype(T)))
+ // Then we substitute T = 2, U = 'a' into the parameter mapping, and need to
+ // produce:
+ // AtomicExpr(sizeof(U) == 4,
+ // U=SubstNonTypeTemplateParmExpr(Param=U,
+ // Expr=ImpCast(
+ // decltype(2),
+ // SubstNTTPE(Param=U, Expr='a',
+ // Type=char)),
+ // Type=decltype(2)))
+ // The call to CheckTemplateArgument here produces the ImpCast.
+ TemplateArgument Converted;
+ if (SemaRef.CheckTemplateArgument(E->getParameter(), SubstType,
+ SubstReplacement.get(),
+ Converted).isInvalid())
+ return true;
+ return transformNonTypeTemplateParmRef(E->getParameter(),
+ E->getExprLoc(), Converted);
+}
+
ExprResult TemplateInstantiator::RebuildVarDeclRefExpr(VarDecl *PD,
SourceLocation Loc) {
DeclarationNameInfo NameInfo(PD->getDeclName(), Loc);
@@ -1594,6 +1794,24 @@ TemplateInstantiator::TransformTemplateTypeParmType(TypeLocBuilder &TLB,
TemplateArgument Arg = TemplateArgs(T->getDepth(), T->getIndex());
+ if (TemplateArgs.isRewrite()) {
+ // We're rewriting the template parameter as a reference to another
+ // template parameter.
+ if (Arg.getKind() == TemplateArgument::Pack) {
+ assert(Arg.pack_size() == 1 && Arg.pack_begin()->isPackExpansion() &&
+ "unexpected pack arguments in template rewrite");
+ Arg = Arg.pack_begin()->getPackExpansionPattern();
+ }
+ assert(Arg.getKind() == TemplateArgument::Type &&
+ "unexpected nontype template argument kind in template rewrite");
+ QualType NewT = Arg.getAsType();
+ assert(isa<TemplateTypeParmType>(NewT) &&
+ "type parm not rewritten to type parm");
+ auto NewTL = TLB.push<TemplateTypeParmTypeLoc>(NewT);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return NewT;
+ }
+
if (T->isParameterPack()) {
assert(Arg.getKind() == TemplateArgument::Pack &&
"Missing argument pack");
@@ -1669,6 +1887,163 @@ TemplateInstantiator::TransformSubstTemplateTypeParmPackType(
return Result;
}
+template<typename EntityPrinter>
+static concepts::Requirement::SubstitutionDiagnostic *
+createSubstDiag(Sema &S, TemplateDeductionInfo &Info, EntityPrinter Printer) {
+ SmallString<128> Message;
+ SourceLocation ErrorLoc;
+ if (Info.hasSFINAEDiagnostic()) {
+ PartialDiagnosticAt PDA(SourceLocation(),
+ PartialDiagnostic::NullDiagnostic{});
+ Info.takeSFINAEDiagnostic(PDA);
+ PDA.second.EmitToString(S.getDiagnostics(), Message);
+ ErrorLoc = PDA.first;
+ } else {
+ ErrorLoc = Info.getLocation();
+ }
+ char *MessageBuf = new (S.Context) char[Message.size()];
+ std::copy(Message.begin(), Message.end(), MessageBuf);
+ SmallString<128> Entity;
+ llvm::raw_svector_ostream OS(Entity);
+ Printer(OS);
+ char *EntityBuf = new (S.Context) char[Entity.size()];
+ std::copy(Entity.begin(), Entity.end(), EntityBuf);
+ return new (S.Context) concepts::Requirement::SubstitutionDiagnostic{
+ StringRef(EntityBuf, Entity.size()), ErrorLoc,
+ StringRef(MessageBuf, Message.size())};
+}
+
+concepts::TypeRequirement *
+TemplateInstantiator::TransformTypeRequirement(concepts::TypeRequirement *Req) {
+ if (!Req->isDependent() && !AlwaysRebuild())
+ return Req;
+ if (Req->isSubstitutionFailure()) {
+ if (AlwaysRebuild())
+ return RebuildTypeRequirement(
+ Req->getSubstitutionDiagnostic());
+ return Req;
+ }
+
+ Sema::SFINAETrap Trap(SemaRef);
+ TemplateDeductionInfo Info(Req->getType()->getTypeLoc().getBeginLoc());
+ Sema::InstantiatingTemplate TypeInst(SemaRef,
+ Req->getType()->getTypeLoc().getBeginLoc(), Req, Info,
+ Req->getType()->getTypeLoc().getSourceRange());
+ if (TypeInst.isInvalid())
+ return nullptr;
+ TypeSourceInfo *TransType = TransformType(Req->getType());
+ if (!TransType || Trap.hasErrorOccurred())
+ return RebuildTypeRequirement(createSubstDiag(SemaRef, Info,
+ [&] (llvm::raw_ostream& OS) {
+ Req->getType()->getType().print(OS, SemaRef.getPrintingPolicy());
+ }));
+ return RebuildTypeRequirement(TransType);
+}
+
+concepts::ExprRequirement *
+TemplateInstantiator::TransformExprRequirement(concepts::ExprRequirement *Req) {
+ if (!Req->isDependent() && !AlwaysRebuild())
+ return Req;
+
+ Sema::SFINAETrap Trap(SemaRef);
+ TemplateDeductionInfo Info(Req->getExpr()->getBeginLoc());
+
+ llvm::PointerUnion<Expr *, concepts::Requirement::SubstitutionDiagnostic *>
+ TransExpr;
+ if (Req->isExprSubstitutionFailure())
+ TransExpr = Req->getExprSubstitutionDiagnostic();
+ else {
+ Sema::InstantiatingTemplate ExprInst(SemaRef, Req->getExpr()->getBeginLoc(),
+ Req, Info,
+ Req->getExpr()->getSourceRange());
+ if (ExprInst.isInvalid())
+ return nullptr;
+ ExprResult TransExprRes = TransformExpr(Req->getExpr());
+ if (TransExprRes.isInvalid() || Trap.hasErrorOccurred())
+ TransExpr = createSubstDiag(SemaRef, Info,
+ [&] (llvm::raw_ostream& OS) {
+ Req->getExpr()->printPretty(OS, nullptr,
+ SemaRef.getPrintingPolicy());
+ });
+ else
+ TransExpr = TransExprRes.get();
+ }
+
+ llvm::Optional<concepts::ExprRequirement::ReturnTypeRequirement> TransRetReq;
+ const auto &RetReq = Req->getReturnTypeRequirement();
+ if (RetReq.isEmpty())
+ TransRetReq.emplace();
+ else if (RetReq.isSubstitutionFailure())
+ TransRetReq.emplace(RetReq.getSubstitutionDiagnostic());
+ else if (RetReq.isTypeConstraint()) {
+ TemplateParameterList *OrigTPL =
+ RetReq.getTypeConstraintTemplateParameterList();
+ Sema::InstantiatingTemplate TPLInst(SemaRef, OrigTPL->getTemplateLoc(),
+ Req, Info, OrigTPL->getSourceRange());
+ if (TPLInst.isInvalid())
+ return nullptr;
+ TemplateParameterList *TPL =
+ TransformTemplateParameterList(OrigTPL);
+ if (!TPL)
+ TransRetReq.emplace(createSubstDiag(SemaRef, Info,
+ [&] (llvm::raw_ostream& OS) {
+ RetReq.getTypeConstraint()->getImmediatelyDeclaredConstraint()
+ ->printPretty(OS, nullptr, SemaRef.getPrintingPolicy());
+ }));
+ else {
+ TPLInst.Clear();
+ TransRetReq.emplace(TPL);
+ }
+ }
+ assert(TransRetReq.hasValue() &&
+ "All code paths leading here must set TransRetReq");
+ if (Expr *E = TransExpr.dyn_cast<Expr *>())
+ return RebuildExprRequirement(E, Req->isSimple(), Req->getNoexceptLoc(),
+ std::move(*TransRetReq));
+ return RebuildExprRequirement(
+ TransExpr.get<concepts::Requirement::SubstitutionDiagnostic *>(),
+ Req->isSimple(), Req->getNoexceptLoc(), std::move(*TransRetReq));
+}
+
+concepts::NestedRequirement *
+TemplateInstantiator::TransformNestedRequirement(
+ concepts::NestedRequirement *Req) {
+ if (!Req->isDependent() && !AlwaysRebuild())
+ return Req;
+ if (Req->isSubstitutionFailure()) {
+ if (AlwaysRebuild())
+ return RebuildNestedRequirement(
+ Req->getSubstitutionDiagnostic());
+ return Req;
+ }
+ Sema::InstantiatingTemplate ReqInst(SemaRef,
+ Req->getConstraintExpr()->getBeginLoc(), Req,
+ Sema::InstantiatingTemplate::ConstraintsCheck{},
+ Req->getConstraintExpr()->getSourceRange());
+
+ ExprResult TransConstraint;
+ TemplateDeductionInfo Info(Req->getConstraintExpr()->getBeginLoc());
+ {
+ EnterExpressionEvaluationContext ContextRAII(
+ SemaRef, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+ Sema::SFINAETrap Trap(SemaRef);
+ Sema::InstantiatingTemplate ConstrInst(SemaRef,
+ Req->getConstraintExpr()->getBeginLoc(), Req, Info,
+ Req->getConstraintExpr()->getSourceRange());
+ if (ConstrInst.isInvalid())
+ return nullptr;
+ TransConstraint = TransformExpr(Req->getConstraintExpr());
+ if (TransConstraint.isInvalid() || Trap.hasErrorOccurred())
+ return RebuildNestedRequirement(createSubstDiag(SemaRef, Info,
+ [&] (llvm::raw_ostream& OS) {
+ Req->getConstraintExpr()->printPretty(OS, nullptr,
+ SemaRef.getPrintingPolicy());
+ }));
+ }
+ return RebuildNestedRequirement(TransConstraint.get());
+}
+
+
/// Perform substitution on the type T with a given set of template
/// arguments.
///
@@ -1858,6 +2233,94 @@ void Sema::SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
UpdateExceptionSpec(New, ESI);
}
+namespace {
+
+ struct GetContainedInventedTypeParmVisitor :
+ public TypeVisitor<GetContainedInventedTypeParmVisitor,
+ TemplateTypeParmDecl *> {
+ using TypeVisitor<GetContainedInventedTypeParmVisitor,
+ TemplateTypeParmDecl *>::Visit;
+
+ TemplateTypeParmDecl *Visit(QualType T) {
+ if (T.isNull())
+ return nullptr;
+ return Visit(T.getTypePtr());
+ }
+ // The deduced type itself.
+ TemplateTypeParmDecl *VisitTemplateTypeParmType(
+ const TemplateTypeParmType *T) {
+ if (!T->getDecl() || !T->getDecl()->isImplicit())
+ return nullptr;
+ return T->getDecl();
+ }
+
+ // Only these types can contain 'auto' types, and subsequently be replaced
+ // by references to invented parameters.
+
+ TemplateTypeParmDecl *VisitElaboratedType(const ElaboratedType *T) {
+ return Visit(T->getNamedType());
+ }
+
+ TemplateTypeParmDecl *VisitPointerType(const PointerType *T) {
+ return Visit(T->getPointeeType());
+ }
+
+ TemplateTypeParmDecl *VisitBlockPointerType(const BlockPointerType *T) {
+ return Visit(T->getPointeeType());
+ }
+
+ TemplateTypeParmDecl *VisitReferenceType(const ReferenceType *T) {
+ return Visit(T->getPointeeTypeAsWritten());
+ }
+
+ TemplateTypeParmDecl *VisitMemberPointerType(const MemberPointerType *T) {
+ return Visit(T->getPointeeType());
+ }
+
+ TemplateTypeParmDecl *VisitArrayType(const ArrayType *T) {
+ return Visit(T->getElementType());
+ }
+
+ TemplateTypeParmDecl *VisitDependentSizedExtVectorType(
+ const DependentSizedExtVectorType *T) {
+ return Visit(T->getElementType());
+ }
+
+ TemplateTypeParmDecl *VisitVectorType(const VectorType *T) {
+ return Visit(T->getElementType());
+ }
+
+ TemplateTypeParmDecl *VisitFunctionProtoType(const FunctionProtoType *T) {
+ return VisitFunctionType(T);
+ }
+
+ TemplateTypeParmDecl *VisitFunctionType(const FunctionType *T) {
+ return Visit(T->getReturnType());
+ }
+
+ TemplateTypeParmDecl *VisitParenType(const ParenType *T) {
+ return Visit(T->getInnerType());
+ }
+
+ TemplateTypeParmDecl *VisitAttributedType(const AttributedType *T) {
+ return Visit(T->getModifiedType());
+ }
+
+ TemplateTypeParmDecl *VisitMacroQualifiedType(const MacroQualifiedType *T) {
+ return Visit(T->getUnderlyingType());
+ }
+
+ TemplateTypeParmDecl *VisitAdjustedType(const AdjustedType *T) {
+ return Visit(T->getOriginalType());
+ }
+
+ TemplateTypeParmDecl *VisitPackExpansionType(const PackExpansionType *T) {
+ return Visit(T->getPattern());
+ }
+ };
+
+} // namespace
+
ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
@@ -1905,6 +2368,46 @@ ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
return nullptr;
}
+ // In abbreviated templates, TemplateTypeParmDecls with possible
+ // TypeConstraints are created when the parameter list is originally parsed.
+ // The TypeConstraints can therefore reference other functions parameters in
+ // the abbreviated function template, which is why we must instantiate them
+ // here, when the instantiated versions of those referenced parameters are in
+ // scope.
+ if (TemplateTypeParmDecl *TTP =
+ GetContainedInventedTypeParmVisitor().Visit(OldDI->getType())) {
+ if (const TypeConstraint *TC = TTP->getTypeConstraint()) {
+ auto *Inst = cast_or_null<TemplateTypeParmDecl>(
+ FindInstantiatedDecl(TTP->getLocation(), TTP, TemplateArgs));
+ // We will first get here when instantiating the abbreviated function
+ // template's described function, but we might also get here later.
+ // Make sure we do not instantiate the TypeConstraint more than once.
+ if (Inst && !Inst->getTypeConstraint()) {
+ // TODO: Concepts: do not instantiate the constraint (delayed constraint
+ // substitution)
+ const ASTTemplateArgumentListInfo *TemplArgInfo
+ = TC->getTemplateArgsAsWritten();
+ TemplateArgumentListInfo InstArgs;
+
+ if (TemplArgInfo) {
+ InstArgs.setLAngleLoc(TemplArgInfo->LAngleLoc);
+ InstArgs.setRAngleLoc(TemplArgInfo->RAngleLoc);
+ if (Subst(TemplArgInfo->getTemplateArgs(),
+ TemplArgInfo->NumTemplateArgs, InstArgs, TemplateArgs))
+ return nullptr;
+ }
+ if (AttachTypeConstraint(
+ TC->getNestedNameSpecifierLoc(), TC->getConceptNameInfo(),
+ TC->getNamedConcept(), &InstArgs, Inst,
+ TTP->isParameterPack()
+ ? cast<CXXFoldExpr>(TC->getImmediatelyDeclaredConstraint())
+ ->getEllipsisLoc()
+ : SourceLocation()))
+ return nullptr;
+ }
+ }
+ }
+
ParmVarDecl *NewParm = CheckParameter(Context.getTranslationUnitDecl(),
OldParm->getInnerLocStart(),
OldParm->getLocation(),
@@ -1923,7 +2426,7 @@ ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
UnparsedDefaultArgInstantiations[OldParm].push_back(NewParm);
} else if (Expr *Arg = OldParm->getDefaultArg()) {
FunctionDecl *OwningFunc = cast<FunctionDecl>(OldParm->getDeclContext());
- if (OwningFunc->isLexicallyWithinFunctionOrMethod()) {
+ if (OwningFunc->isInLocalScopeForInstantiation()) {
// Instantiate default arguments for methods of local classes (DR1484)
// and non-defining declarations.
Sema::ContextRAII SavedContext(*this, OwningFunc);
@@ -1932,7 +2435,12 @@ ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
if (NewArg.isUsable()) {
// It would be nice if we still had this.
SourceLocation EqualLoc = NewArg.get()->getBeginLoc();
- SetParamDefaultArgument(NewParm, NewArg.get(), EqualLoc);
+ ExprResult Result =
+ ConvertParamDefaultArgument(NewParm, NewArg.get(), EqualLoc);
+ if (Result.isInvalid())
+ return nullptr;
+
+ SetParamDefaultArgument(NewParm, Result.getAs<Expr>(), EqualLoc);
}
} else {
// FIXME: if we non-lazily instantiated non-dependent default args for
@@ -3101,6 +3609,12 @@ LocalInstantiationScope::findInstantiationOf(const Decl *D) {
if (isa<EnumDecl>(D))
return nullptr;
+ // Materialized typedefs/type alias for implicit deduction guides may require
+ // instantiation.
+ if (isa<TypedefNameDecl>(D) &&
+ isa<CXXDeductionGuideDecl>(D->getDeclContext()))
+ return nullptr;
+
// If we didn't find the decl, then we either have a sema bug, or we have a
// forward reference to a label declaration. Return null to indicate that
// we have an uninstantiated label.
@@ -3152,6 +3666,13 @@ void LocalInstantiationScope::MakeInstantiatedLocalArgPack(const Decl *D) {
ArgumentPacks.push_back(Pack);
}
+bool LocalInstantiationScope::isLocalPackExpansion(const Decl *D) {
+ for (DeclArgumentPack *Pack : ArgumentPacks)
+ if (std::find(Pack->begin(), Pack->end(), D) != Pack->end())
+ return true;
+ return false;
+}
+
void LocalInstantiationScope::SetPartiallySubstitutedPack(NamedDecl *Pack,
const TemplateArgument *ExplicitArgs,
unsigned NumExplicitArgs) {
diff --git a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index 64500d0a26d5..2efb7acb9724 100644
--- a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -8,7 +8,7 @@
// This file implements C++ template instantiation for declarations.
//
//===----------------------------------------------------------------------===/
-#include "clang/Sema/SemaInternal.h"
+
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTMutationListener.h"
@@ -19,8 +19,11 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateInstCallback.h"
#include "llvm/Support/TimeProfiler.h"
@@ -393,50 +396,39 @@ static void instantiateOMPDeclareVariantAttr(
VariantFuncRef = Subst(E);
}
+ // Copy the template version of the OMPTraitInfo and run substitute on all
+ // score and condition expressiosn.
+ OMPTraitInfo &TI = S.getASTContext().getNewOMPTraitInfo();
+ TI = *Attr.getTraitInfos();
+
+ // Try to substitute template parameters in score and condition expressions.
+ auto SubstScoreOrConditionExpr = [&S, Subst](Expr *&E, bool) {
+ if (E) {
+ EnterExpressionEvaluationContext Unevaluated(
+ S, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+ ExprResult ER = Subst(E);
+ if (ER.isUsable())
+ E = ER.get();
+ else
+ return true;
+ }
+ return false;
+ };
+ if (TI.anyScoreOrCondition(SubstScoreOrConditionExpr))
+ return;
+
// Check function/variant ref.
Optional<std::pair<FunctionDecl *, Expr *>> DeclVarData =
- S.checkOpenMPDeclareVariantFunction(
- S.ConvertDeclToDeclGroup(New), VariantFuncRef.get(), Attr.getRange());
+ S.checkOpenMPDeclareVariantFunction(S.ConvertDeclToDeclGroup(New),
+ VariantFuncRef.get(), TI,
+ Attr.getRange());
+
if (!DeclVarData)
return;
- SmallVector<Sema::OMPCtxSelectorData, 4> Data;
- for (unsigned I = 0, E = Attr.scores_size(); I < E; ++I) {
- ExprResult Score;
- if (Expr *E = *std::next(Attr.scores_begin(), I))
- Score = Subst(E);
- // Instantiate the attribute.
- auto CtxSet = static_cast<OpenMPContextSelectorSetKind>(
- *std::next(Attr.ctxSelectorSets_begin(), I));
- auto Ctx = static_cast<OpenMPContextSelectorKind>(
- *std::next(Attr.ctxSelectors_begin(), I));
- switch (CtxSet) {
- case OMP_CTX_SET_implementation:
- switch (Ctx) {
- case OMP_CTX_vendor:
- Data.emplace_back(CtxSet, Ctx, Score, Attr.implVendors());
- break;
- case OMP_CTX_kind:
- case OMP_CTX_unknown:
- llvm_unreachable("Unexpected context selector kind.");
- }
- break;
- case OMP_CTX_SET_device:
- switch (Ctx) {
- case OMP_CTX_kind:
- Data.emplace_back(CtxSet, Ctx, Score, Attr.deviceKinds());
- break;
- case OMP_CTX_vendor:
- case OMP_CTX_unknown:
- llvm_unreachable("Unexpected context selector kind.");
- }
- break;
- case OMP_CTX_SET_unknown:
- llvm_unreachable("Unexpected context selector set kind.");
- }
- }
+
S.ActOnOpenMPDeclareVariantDirective(DeclVarData.getValue().first,
- DeclVarData.getValue().second,
- Attr.getRange(), Data);
+ DeclVarData.getValue().second, TI,
+ Attr.getRange());
}
static void instantiateDependentAMDGPUFlatWorkGroupSizeAttr(
@@ -706,6 +698,10 @@ TemplateDeclInstantiator::VisitExternCContextDecl(ExternCContextDecl *D) {
llvm_unreachable("extern \"C\" context cannot be instantiated");
}
+Decl *TemplateDeclInstantiator::VisitMSGuidDecl(MSGuidDecl *D) {
+ llvm_unreachable("GUID declaration cannot be instantiated");
+}
+
Decl *
TemplateDeclInstantiator::VisitLabelDecl(LabelDecl *D) {
LabelDecl *Inst = LabelDecl::Create(SemaRef.Context, Owner, D->getLocation(),
@@ -1837,6 +1833,23 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
return nullptr;
QualType T = adjustFunctionTypeForInstantiation(SemaRef.Context, D, TInfo);
+ if (TemplateParams && TemplateParams->size()) {
+ auto *LastParam =
+ dyn_cast<TemplateTypeParmDecl>(TemplateParams->asArray().back());
+ if (LastParam && LastParam->isImplicit() &&
+ LastParam->hasTypeConstraint()) {
+ // In abbreviated templates, the type-constraints of invented template
+ // type parameters are instantiated with the function type, invalidating
+ // the TemplateParameterList which relied on the template type parameter
+ // not having a type constraint. Recreate the TemplateParameterList with
+ // the updated parameter list.
+ TemplateParams = TemplateParameterList::Create(
+ SemaRef.Context, TemplateParams->getTemplateLoc(),
+ TemplateParams->getLAngleLoc(), TemplateParams->asArray(),
+ TemplateParams->getRAngleLoc(), TemplateParams->getRequiresClause());
+ }
+ }
+
NestedNameSpecifierLoc QualifierLoc = D->getQualifierLoc();
if (QualifierLoc) {
QualifierLoc = SemaRef.SubstNestedNameSpecifierLoc(QualifierLoc,
@@ -1848,6 +1861,8 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
// FIXME: Concepts: Do not substitute into constraint expressions
Expr *TrailingRequiresClause = D->getTrailingRequiresClause();
if (TrailingRequiresClause) {
+ EnterExpressionEvaluationContext ConstantEvaluated(
+ SemaRef, Sema::ExpressionEvaluationContext::Unevaluated);
ExprResult SubstRC = SemaRef.SubstExpr(TrailingRequiresClause,
TemplateArgs);
if (SubstRC.isInvalid())
@@ -1896,6 +1911,7 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
D->hasWrittenPrototype(), D->getConstexprKind(),
TrailingRequiresClause);
Function->setRangeEnd(D->getSourceRange().getEnd());
+ Function->setUsesFPIntrin(D->usesFPIntrin());
}
if (D->isInlined())
@@ -2029,7 +2045,7 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
// Look only into the namespace where the friend would be declared to
// find a previous declaration. This is the innermost enclosing namespace,
// as described in ActOnFriendFunctionDecl.
- SemaRef.LookupQualifiedName(Previous, DC);
+ SemaRef.LookupQualifiedName(Previous, DC->getRedeclContext());
// In C++, the previous declaration we find might be a tag type
// (class or enum). In this case, the new declaration will hide the
@@ -2175,6 +2191,23 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
return nullptr;
QualType T = adjustFunctionTypeForInstantiation(SemaRef.Context, D, TInfo);
+ if (TemplateParams && TemplateParams->size()) {
+ auto *LastParam =
+ dyn_cast<TemplateTypeParmDecl>(TemplateParams->asArray().back());
+ if (LastParam && LastParam->isImplicit() &&
+ LastParam->hasTypeConstraint()) {
+ // In abbreviated templates, the type-constraints of invented template
+ // type parameters are instantiated with the function type, invalidating
+ // the TemplateParameterList which relied on the template type parameter
+ // not having a type constraint. Recreate the TemplateParameterList with
+ // the updated parameter list.
+ TemplateParams = TemplateParameterList::Create(
+ SemaRef.Context, TemplateParams->getTemplateLoc(),
+ TemplateParams->getLAngleLoc(), TemplateParams->asArray(),
+ TemplateParams->getRAngleLoc(), TemplateParams->getRequiresClause());
+ }
+ }
+
NestedNameSpecifierLoc QualifierLoc = D->getQualifierLoc();
if (QualifierLoc) {
QualifierLoc = SemaRef.SubstNestedNameSpecifierLoc(QualifierLoc,
@@ -2186,6 +2219,11 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
// FIXME: Concepts: Do not substitute into constraint expressions
Expr *TrailingRequiresClause = D->getTrailingRequiresClause();
if (TrailingRequiresClause) {
+ EnterExpressionEvaluationContext ConstantEvaluated(
+ SemaRef, Sema::ExpressionEvaluationContext::Unevaluated);
+ auto *ThisContext = dyn_cast_or_null<CXXRecordDecl>(Owner);
+ Sema::CXXThisScopeRAII ThisScope(SemaRef, ThisContext,
+ D->getMethodQualifiers(), ThisContext);
ExprResult SubstRC = SemaRef.SubstExpr(TrailingRequiresClause,
TemplateArgs);
if (SubstRC.isInvalid())
@@ -2518,28 +2556,34 @@ Decl *TemplateDeclInstantiator::VisitTemplateTypeParmDecl(
Inst->setAccess(AS_public);
Inst->setImplicit(D->isImplicit());
if (auto *TC = D->getTypeConstraint()) {
- // TODO: Concepts: do not instantiate the constraint (delayed constraint
- // substitution)
- const ASTTemplateArgumentListInfo *TemplArgInfo
- = TC->getTemplateArgsAsWritten();
- TemplateArgumentListInfo InstArgs;
-
- if (TemplArgInfo) {
- InstArgs.setLAngleLoc(TemplArgInfo->LAngleLoc);
- InstArgs.setRAngleLoc(TemplArgInfo->RAngleLoc);
- if (SemaRef.Subst(TemplArgInfo->getTemplateArgs(),
- TemplArgInfo->NumTemplateArgs,
- InstArgs, TemplateArgs))
+ if (!D->isImplicit()) {
+ // Invented template parameter type constraints will be instantiated with
+ // the corresponding auto-typed parameter as it might reference other
+ // parameters.
+
+ // TODO: Concepts: do not instantiate the constraint (delayed constraint
+ // substitution)
+ const ASTTemplateArgumentListInfo *TemplArgInfo
+ = TC->getTemplateArgsAsWritten();
+ TemplateArgumentListInfo InstArgs;
+
+ if (TemplArgInfo) {
+ InstArgs.setLAngleLoc(TemplArgInfo->LAngleLoc);
+ InstArgs.setRAngleLoc(TemplArgInfo->RAngleLoc);
+ if (SemaRef.Subst(TemplArgInfo->getTemplateArgs(),
+ TemplArgInfo->NumTemplateArgs,
+ InstArgs, TemplateArgs))
+ return nullptr;
+ }
+ if (SemaRef.AttachTypeConstraint(
+ TC->getNestedNameSpecifierLoc(), TC->getConceptNameInfo(),
+ TC->getNamedConcept(), &InstArgs, Inst,
+ D->isParameterPack()
+ ? cast<CXXFoldExpr>(TC->getImmediatelyDeclaredConstraint())
+ ->getEllipsisLoc()
+ : SourceLocation()))
return nullptr;
}
- if (SemaRef.AttachTypeConstraint(
- TC->getNestedNameSpecifierLoc(), TC->getConceptNameInfo(),
- TC->getNamedConcept(), &InstArgs, Inst,
- D->isParameterPack()
- ? cast<CXXFoldExpr>(TC->getImmediatelyDeclaredConstraint())
- ->getEllipsisLoc()
- : SourceLocation()))
- return nullptr;
}
if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited()) {
TypeSourceInfo *InstantiatedDefaultArg =
@@ -2685,6 +2729,16 @@ Decl *TemplateDeclInstantiator::VisitNonTypeTemplateParmDecl(
D->getDepth() - TemplateArgs.getNumSubstitutedLevels(),
D->getPosition(), D->getIdentifier(), T, D->isParameterPack(), DI);
+ if (AutoTypeLoc AutoLoc = DI->getTypeLoc().getContainedAutoTypeLoc())
+ if (AutoLoc.isConstrained())
+ if (SemaRef.AttachTypeConstraint(
+ AutoLoc, Param,
+ IsExpandedParameterPack
+ ? DI->getTypeLoc().getAs<PackExpansionTypeLoc>()
+ .getEllipsisLoc()
+ : SourceLocation()))
+ Invalid = true;
+
Param->setAccess(AS_public);
Param->setImplicit(D->isImplicit());
if (Invalid)
@@ -3571,6 +3625,9 @@ Decl *TemplateDeclInstantiator::VisitVarTemplateSpecializationDecl(
if (InsertPos)
VarTemplate->AddSpecialization(Var, InsertPos);
+ if (SemaRef.getLangOpts().OpenCL)
+ SemaRef.deduceOpenCLAddressSpace(Var);
+
// Substitute the nested name specifier, if any.
if (SubstQualifier(D, Var))
return nullptr;
@@ -3600,6 +3657,12 @@ Decl *TemplateDeclInstantiator::VisitConceptDecl(ConceptDecl *D) {
llvm_unreachable("Concept definitions cannot reside inside a template");
}
+Decl *
+TemplateDeclInstantiator::VisitRequiresExprBodyDecl(RequiresExprBodyDecl *D) {
+ return RequiresExprBodyDecl::Create(SemaRef.Context, D->getDeclContext(),
+ D->getBeginLoc());
+}
+
Decl *TemplateDeclInstantiator::VisitDecl(Decl *D) {
llvm_unreachable("Unexpected decl");
}
@@ -3659,6 +3722,8 @@ FunctionDecl *Sema::SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
// access and function-definition and in the same class scope as the
// three-way comparison operator function
MultiLevelTemplateArgumentList NoTemplateArgs;
+ NoTemplateArgs.setKind(TemplateSubstitutionKind::Rewrite);
+ NoTemplateArgs.addOuterRetainedLevels(RD->getTemplateDepth());
TemplateDeclInstantiator Instantiator(*this, RD, NoTemplateArgs);
Decl *R;
if (auto *MD = dyn_cast<CXXMethodDecl>(Spaceship)) {
@@ -3713,6 +3778,8 @@ TemplateDeclInstantiator::SubstTemplateParams(TemplateParameterList *L) {
// checking satisfaction.
Expr *InstRequiresClause = nullptr;
if (Expr *E = L->getRequiresClause()) {
+ EnterExpressionEvaluationContext ConstantEvaluated(
+ SemaRef, Sema::ExpressionEvaluationContext::Unevaluated);
ExprResult Res = SemaRef.SubstExpr(E, TemplateArgs);
if (Res.isInvalid() || !Res.isUsable()) {
return nullptr;
@@ -4163,6 +4230,94 @@ static bool addInstantiatedParametersToScope(Sema &S, FunctionDecl *Function,
return false;
}
+bool Sema::InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
+ ParmVarDecl *Param) {
+ assert(Param->hasUninstantiatedDefaultArg());
+ Expr *UninstExpr = Param->getUninstantiatedDefaultArg();
+
+ EnterExpressionEvaluationContext EvalContext(
+ *this, ExpressionEvaluationContext::PotentiallyEvaluated, Param);
+
+ // Instantiate the expression.
+ //
+ // FIXME: Pass in a correct Pattern argument, otherwise
+ // getTemplateInstantiationArgs uses the lexical context of FD, e.g.
+ //
+ // template<typename T>
+ // struct A {
+ // static int FooImpl();
+ //
+ // template<typename Tp>
+ // // bug: default argument A<T>::FooImpl() is evaluated with 2-level
+ // // template argument list [[T], [Tp]], should be [[Tp]].
+ // friend A<Tp> Foo(int a);
+ // };
+ //
+ // template<typename T>
+ // A<T> Foo(int a = A<T>::FooImpl());
+ MultiLevelTemplateArgumentList TemplateArgs
+ = getTemplateInstantiationArgs(FD, nullptr, /*RelativeToPrimary=*/true);
+
+ InstantiatingTemplate Inst(*this, CallLoc, Param,
+ TemplateArgs.getInnermost());
+ if (Inst.isInvalid())
+ return true;
+ if (Inst.isAlreadyInstantiating()) {
+ Diag(Param->getBeginLoc(), diag::err_recursive_default_argument) << FD;
+ Param->setInvalidDecl();
+ return true;
+ }
+
+ ExprResult Result;
+ {
+ // C++ [dcl.fct.default]p5:
+ // The names in the [default argument] expression are bound, and
+ // the semantic constraints are checked, at the point where the
+ // default argument expression appears.
+ ContextRAII SavedContext(*this, FD);
+ LocalInstantiationScope Local(*this);
+
+ FunctionDecl *Pattern = FD->getTemplateInstantiationPattern(
+ /*ForDefinition*/ false);
+ if (addInstantiatedParametersToScope(*this, FD, Pattern, Local,
+ TemplateArgs))
+ return true;
+
+ runWithSufficientStackSpace(CallLoc, [&] {
+ Result = SubstInitializer(UninstExpr, TemplateArgs,
+ /*DirectInit*/false);
+ });
+ }
+ if (Result.isInvalid())
+ return true;
+
+ // Check the expression as an initializer for the parameter.
+ InitializedEntity Entity
+ = InitializedEntity::InitializeParameter(Context, Param);
+ InitializationKind Kind = InitializationKind::CreateCopy(
+ Param->getLocation(),
+ /*FIXME:EqualLoc*/ UninstExpr->getBeginLoc());
+ Expr *ResultE = Result.getAs<Expr>();
+
+ InitializationSequence InitSeq(*this, Entity, Kind, ResultE);
+ Result = InitSeq.Perform(*this, Entity, Kind, ResultE);
+ if (Result.isInvalid())
+ return true;
+
+ Result =
+ ActOnFinishFullExpr(Result.getAs<Expr>(), Param->getOuterLocStart(),
+ /*DiscardedValue*/ false);
+ if (Result.isInvalid())
+ return true;
+
+ // Remember the instantiated default argument.
+ Param->setDefaultArg(Result.getAs<Expr>());
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->DefaultArgumentInstantiated(Param);
+
+ return false;
+}
+
void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Decl) {
const FunctionProtoType *Proto = Decl->getType()->castAs<FunctionProtoType>();
@@ -4193,6 +4348,10 @@ void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
MultiLevelTemplateArgumentList TemplateArgs =
getTemplateInstantiationArgs(Decl, nullptr, /*RelativeToPrimary*/true);
+ // FIXME: We can't use getTemplateInstantiationPattern(false) in general
+ // here, because for a non-defining friend declaration in a class template,
+ // we don't store enough information to map back to the friend declaration in
+ // the template.
FunctionDecl *Template = Proto->getExceptionSpecTemplate();
if (addInstantiatedParametersToScope(*this, Decl, Template, Scope,
TemplateArgs)) {
@@ -4224,24 +4383,29 @@ bool Sema::CheckInstantiatedFunctionTemplateConstraints(
Sema::ContextRAII savedContext(*this, Decl);
LocalInstantiationScope Scope(*this);
- MultiLevelTemplateArgumentList MLTAL =
- getTemplateInstantiationArgs(Decl, nullptr, /*RelativeToPrimary*/true);
-
// If this is not an explicit specialization - we need to get the instantiated
// version of the template arguments and add them to scope for the
// substitution.
if (Decl->isTemplateInstantiation()) {
InstantiatingTemplate Inst(*this, Decl->getPointOfInstantiation(),
InstantiatingTemplate::ConstraintsCheck{}, Decl->getPrimaryTemplate(),
- MLTAL.getInnermost(), SourceRange());
+ TemplateArgs, SourceRange());
if (Inst.isInvalid())
return true;
- if (addInstantiatedParametersToScope(*this, Decl,
- Decl->getTemplateInstantiationPattern(),
- Scope, MLTAL))
+ MultiLevelTemplateArgumentList MLTAL(
+ *Decl->getTemplateSpecializationArgs());
+ if (addInstantiatedParametersToScope(
+ *this, Decl, Decl->getPrimaryTemplate()->getTemplatedDecl(),
+ Scope, MLTAL))
return true;
}
-
+ Qualifiers ThisQuals;
+ CXXRecordDecl *Record = nullptr;
+ if (auto *Method = dyn_cast<CXXMethodDecl>(Decl)) {
+ ThisQuals = Method->getMethodQualifiers();
+ Record = Method->getParent();
+ }
+ CXXThisScopeRAII ThisScope(*this, Record, ThisQuals, Record != nullptr);
return CheckConstraintSatisfaction(Template, TemplateAC, TemplateArgs,
PointOfInstantiation, Satisfaction);
}
@@ -4297,7 +4461,7 @@ TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
EPI.ExceptionSpec.Type != EST_None &&
EPI.ExceptionSpec.Type != EST_DynamicNone &&
EPI.ExceptionSpec.Type != EST_BasicNoexcept &&
- !Tmpl->isLexicallyWithinFunctionOrMethod()) {
+ !Tmpl->isInLocalScopeForInstantiation()) {
FunctionDecl *ExceptionSpecTemplate = Tmpl;
if (EPI.ExceptionSpec.Type == EST_Uninstantiated)
ExceptionSpecTemplate = EPI.ExceptionSpec.SourceTemplate;
@@ -4734,6 +4898,9 @@ VarTemplateSpecializationDecl *Sema::CompleteVarTemplateSpecializationDecl(
// Instantiate the initializer.
InstantiateVariableInitializer(VarSpec, PatternDecl, TemplateArgs);
+ if (getLangOpts().OpenCL)
+ deduceOpenCLAddressSpace(VarSpec);
+
return VarSpec;
}
@@ -4774,6 +4941,7 @@ void Sema::BuildVariableInstantiation(
NewVar->setCXXForRangeDecl(OldVar->isCXXForRangeDecl());
NewVar->setObjCForDecl(OldVar->isObjCForDecl());
NewVar->setConstexpr(OldVar->isConstexpr());
+ MaybeAddCUDAConstantAttr(NewVar);
NewVar->setInitCapture(OldVar->isInitCapture());
NewVar->setPreviousDeclInSameBlockScope(
OldVar->isPreviousDeclInSameBlockScope());
@@ -5537,6 +5705,20 @@ DeclContext *Sema::FindInstantiatedContext(SourceLocation Loc, DeclContext* DC,
} else return DC;
}
+/// Determine whether the given context is dependent on template parameters at
+/// level \p Level or below.
+///
+/// Sometimes we only substitute an inner set of template arguments and leave
+/// the outer templates alone. In such cases, contexts dependent only on the
+/// outer levels are not effectively dependent.
+static bool isDependentContextAtLevel(DeclContext *DC, unsigned Level) {
+ if (!DC->isDependentContext())
+ return false;
+ if (!Level)
+ return true;
+ return cast<Decl>(DC)->getTemplateDepth() > Level;
+}
+
/// Find the instantiation of the given declaration within the
/// current instantiation.
///
@@ -5567,6 +5749,10 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext) {
DeclContext *ParentDC = D->getDeclContext();
+ // Determine whether our parent context depends on any of the tempalte
+ // arguments we're currently substituting.
+ bool ParentDependsOnArgs = isDependentContextAtLevel(
+ ParentDC, TemplateArgs.getNumRetainedOuterLevels());
// FIXME: Parmeters of pointer to functions (y below) that are themselves
// parameters (p below) can have their ParentDC set to the translation-unit
// - thus we can not consistently check if the ParentDC of such a parameter
@@ -5583,15 +5769,14 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
// - as long as we have a ParmVarDecl whose parent is non-dependent and
// whose type is not instantiation dependent, do nothing to the decl
// - otherwise find its instantiated decl.
- if (isa<ParmVarDecl>(D) && !ParentDC->isDependentContext() &&
+ if (isa<ParmVarDecl>(D) && !ParentDependsOnArgs &&
!cast<ParmVarDecl>(D)->getType()->isInstantiationDependentType())
return D;
if (isa<ParmVarDecl>(D) || isa<NonTypeTemplateParmDecl>(D) ||
isa<TemplateTypeParmDecl>(D) || isa<TemplateTemplateParmDecl>(D) ||
- ((ParentDC->isFunctionOrMethod() ||
- isa<OMPDeclareReductionDecl>(ParentDC) ||
- isa<OMPDeclareMapperDecl>(ParentDC)) &&
- ParentDC->isDependentContext()) ||
+ (ParentDependsOnArgs && (ParentDC->isFunctionOrMethod() ||
+ isa<OMPDeclareReductionDecl>(ParentDC) ||
+ isa<OMPDeclareMapperDecl>(ParentDC))) ||
(isa<CXXRecordDecl>(D) && cast<CXXRecordDecl>(D)->isLambda())) {
// D is a local of some kind. Look into the map of local
// declarations to their instantiations.
@@ -5634,6 +5819,9 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
bool NeedInstantiate = false;
if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D))
NeedInstantiate = RD->isLocalClass();
+ else if (isa<TypedefNameDecl>(D) &&
+ isa<CXXDeductionGuideDecl>(D->getDeclContext()))
+ NeedInstantiate = true;
else
NeedInstantiate = isa<EnumDecl>(D);
if (NeedInstantiate) {
@@ -5742,7 +5930,7 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
// anonymous unions in class templates).
}
- if (!ParentDC->isDependentContext())
+ if (!ParentDependsOnArgs)
return D;
ParentDC = FindInstantiatedContext(Loc, ParentDC, TemplateArgs);
@@ -5811,10 +5999,11 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
if (!Result) {
if (isa<UsingShadowDecl>(D)) {
// UsingShadowDecls can instantiate to nothing because of using hiding.
- } else if (Diags.hasErrorOccurred()) {
- // We've already complained about something, so most likely this
- // declaration failed to instantiate. There's no point in complaining
- // further, since this is normal in invalid code.
+ } else if (Diags.hasUncompilableErrorOccurred()) {
+ // We've already complained about some ill-formed code, so most likely
+ // this declaration failed to instantiate. There's no point in
+ // complaining further, since this is normal in invalid code.
+ // FIXME: Use more fine-grained 'invalid' tracking for this.
} else if (IsBeingInstantiated) {
// The class in which this member exists is currently being
// instantiated, and we haven't gotten around to instantiating this
@@ -5854,6 +6043,7 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
/// Performs template instantiation for all implicit template
/// instantiations we have seen until this point.
void Sema::PerformPendingInstantiations(bool LocalOnly) {
+ std::deque<PendingImplicitInstantiation> delayedPCHInstantiations;
while (!PendingLocalImplicitInstantiations.empty() ||
(!LocalOnly && !PendingInstantiations.empty())) {
PendingImplicitInstantiation Inst;
@@ -5884,6 +6074,10 @@ void Sema::PerformPendingInstantiations(bool LocalOnly) {
if (Function->isDefined())
Function->setInstantiationIsPending(false);
}
+ // Definition of a PCH-ed template declaration may be available only in the TU.
+ if (!LocalOnly && LangOpts.PCHInstantiateTemplates &&
+ TUKind == TU_Prefix && Function->instantiationIsPending())
+ delayedPCHInstantiations.push_back(Inst);
continue;
}
@@ -5929,6 +6123,9 @@ void Sema::PerformPendingInstantiations(bool LocalOnly) {
InstantiateVariableDefinition(/*FIXME:*/ Inst.second, Var, true,
DefinitionRequired, true);
}
+
+ if (!LocalOnly && LangOpts.PCHInstantiateTemplates)
+ PendingInstantiations.swap(delayedPCHInstantiations);
}
void Sema::PerformDependentDiagnostics(const DeclContext *Pattern,
diff --git a/clang/lib/Sema/SemaTemplateVariadic.cpp b/clang/lib/Sema/SemaTemplateVariadic.cpp
index d947d6d282be..7b77d1cb482a 100644
--- a/clang/lib/Sema/SemaTemplateVariadic.cpp
+++ b/clang/lib/Sema/SemaTemplateVariadic.cpp
@@ -847,6 +847,7 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
case TST_typeofExpr:
case TST_decltype:
+ case TST_extint:
if (DS.getRepAsExpr() &&
DS.getRepAsExpr()->containsUnexpandedParameterPack())
return true;
@@ -880,6 +881,7 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
case TST_auto:
case TST_auto_type:
case TST_decltype_auto:
+ case TST_BFloat16:
#define GENERIC_IMAGE_TYPE(ImgType, Id) case TST_##ImgType##_t:
#include "clang/Basic/OpenCLImageTypes.def"
case TST_unknown_anytype:
@@ -940,7 +942,7 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
if (Expr *TRC = D.getTrailingRequiresClause())
if (TRC->containsUnexpandedParameterPack())
return true;
-
+
return false;
}
diff --git a/clang/lib/Sema/SemaType.cpp b/clang/lib/Sema/SemaType.cpp
index 3884fdae8fe7..b8f7f1a58159 100644
--- a/clang/lib/Sema/SemaType.cpp
+++ b/clang/lib/Sema/SemaType.cpp
@@ -27,6 +27,7 @@
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
@@ -34,6 +35,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
@@ -128,6 +130,7 @@ static void diagnoseBadTypeAttribute(Sema &S, const ParsedAttr &attr,
case ParsedAttr::AT_NSReturnsRetained: \
case ParsedAttr::AT_NoReturn: \
case ParsedAttr::AT_Regparm: \
+ case ParsedAttr::AT_CmseNSCall: \
case ParsedAttr::AT_AnyX86NoCallerSavedRegisters: \
case ParsedAttr::AT_AnyX86NoCfCheck: \
CALLING_CONV_ATTRS_CASELIST
@@ -1251,6 +1254,26 @@ getImageAccess(const ParsedAttributesView &Attrs) {
return OpenCLAccessAttr::Keyword_read_only;
}
+static QualType ConvertConstrainedAutoDeclSpecToType(Sema &S, DeclSpec &DS,
+ AutoTypeKeyword AutoKW) {
+ assert(DS.isConstrainedAuto());
+ TemplateIdAnnotation *TemplateId = DS.getRepAsTemplateId();
+ TemplateArgumentListInfo TemplateArgsInfo;
+ TemplateArgsInfo.setLAngleLoc(TemplateId->LAngleLoc);
+ TemplateArgsInfo.setRAngleLoc(TemplateId->RAngleLoc);
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
+ TemplateId->NumArgs);
+ S.translateTemplateArguments(TemplateArgsPtr, TemplateArgsInfo);
+ llvm::SmallVector<TemplateArgument, 8> TemplateArgs;
+ for (auto &ArgLoc : TemplateArgsInfo.arguments())
+ TemplateArgs.push_back(ArgLoc.getArgument());
+ return S.Context.getAutoType(QualType(), AutoTypeKeyword::Auto, false,
+ /*IsPack=*/false,
+ cast<ConceptDecl>(TemplateId->Template.get()
+ .getAsTemplateDecl()),
+ TemplateArgs);
+}
+
/// Convert the specified declspec to the appropriate type
/// object.
/// \param state Specifies the declarator containing the declaration specifier
@@ -1419,6 +1442,18 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
}
break;
}
+ case DeclSpec::TST_extint: {
+ if (!S.Context.getTargetInfo().hasExtIntType())
+ S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
+ << "_ExtInt";
+ Result = S.BuildExtIntType(DS.getTypeSpecSign() == TSS_unsigned,
+ DS.getRepAsExpr(), DS.getBeginLoc());
+ if (Result.isNull()) {
+ Result = Context.IntTy;
+ declarator.setInvalidType(true);
+ }
+ break;
+ }
case DeclSpec::TST_accum: {
switch (DS.getTypeSpecWidth()) {
case DeclSpec::TSW_short:
@@ -1486,6 +1521,12 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
Result = Context.Float16Ty;
break;
case DeclSpec::TST_half: Result = Context.HalfTy; break;
+ case DeclSpec::TST_BFloat16:
+ if (!S.Context.getTargetInfo().hasBFloat16Type())
+ S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
+ << "__bf16";
+ Result = Context.BFloat16Ty;
+ break;
case DeclSpec::TST_float: Result = Context.FloatTy; break;
case DeclSpec::TST_double:
if (DS.getTypeSpecWidth() == DeclSpec::TSW_long)
@@ -1495,6 +1536,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
break;
case DeclSpec::TST_float128:
if (!S.Context.getTargetInfo().hasFloat128Type() &&
+ !S.getLangOpts().SYCLIsDevice &&
!(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsDevice))
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
<< "__float128";
@@ -1595,6 +1637,11 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
break;
case DeclSpec::TST_auto:
+ if (DS.isConstrainedAuto()) {
+ Result = ConvertConstrainedAutoDeclSpecToType(S, DS,
+ AutoTypeKeyword::Auto);
+ break;
+ }
Result = Context.getAutoType(QualType(), AutoTypeKeyword::Auto, false);
break;
@@ -1603,6 +1650,12 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
break;
case DeclSpec::TST_decltype_auto:
+ if (DS.isConstrainedAuto()) {
+ Result =
+ ConvertConstrainedAutoDeclSpecToType(S, DS,
+ AutoTypeKeyword::DecltypeAuto);
+ break;
+ }
Result = Context.getAutoType(QualType(), AutoTypeKeyword::DecltypeAuto,
/*IsDependent*/ false);
break;
@@ -1645,6 +1698,12 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
break;
}
+ // FIXME: we want resulting declarations to be marked invalid, but claiming
+ // the type is invalid is too strong - e.g. it causes ActOnTypeName to return
+ // a null type.
+ if (Result->containsErrors())
+ declarator.setInvalidType();
+
if (S.getLangOpts().OpenCL &&
S.checkOpenCLDisabledTypeDeclSpec(DS, Result))
declarator.setInvalidType(true);
@@ -1700,7 +1759,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// The effect of a cv-qualifier-seq in a function declarator is not the
// same as adding cv-qualification on top of the function type. In the
// latter case, the cv-qualifiers are ignored.
- if (TypeQuals && Result->isFunctionType()) {
+ if (Result->isFunctionType()) {
diagnoseAndRemoveTypeQualifiers(
S, DS, TypeQuals, Result, DeclSpec::TQ_const | DeclSpec::TQ_volatile,
S.getLangOpts().CPlusPlus
@@ -2121,6 +2180,45 @@ QualType Sema::BuildWritePipeType(QualType T, SourceLocation Loc) {
return Context.getWritePipeType(T);
}
+/// Build an extended int type.
+///
+/// \param IsUnsigned Boolean representing the signedness of the type.
+///
+/// \param BitWidth Size of this int type in bits, or an expression representing
+/// that.
+///
+/// \param Loc Location of the keyword.
+QualType Sema::BuildExtIntType(bool IsUnsigned, Expr *BitWidth,
+ SourceLocation Loc) {
+ if (BitWidth->isInstantiationDependent())
+ return Context.getDependentExtIntType(IsUnsigned, BitWidth);
+
+ llvm::APSInt Bits(32);
+ ExprResult ICE = VerifyIntegerConstantExpression(BitWidth, &Bits);
+
+ if (ICE.isInvalid())
+ return QualType();
+
+ int64_t NumBits = Bits.getSExtValue();
+ if (!IsUnsigned && NumBits < 2) {
+ Diag(Loc, diag::err_ext_int_bad_size) << 0;
+ return QualType();
+ }
+
+ if (IsUnsigned && NumBits < 1) {
+ Diag(Loc, diag::err_ext_int_bad_size) << 1;
+ return QualType();
+ }
+
+ if (NumBits > llvm::IntegerType::MAX_INT_BITS) {
+ Diag(Loc, diag::err_ext_int_max_size) << IsUnsigned
+ << llvm::IntegerType::MAX_INT_BITS;
+ return QualType();
+ }
+
+ return Context.getExtIntType(IsUnsigned, NumBits);
+}
+
/// Check whether the specified array size makes the array type a VLA. If so,
/// return true, if not, return the size of the array in SizeVal.
static bool isArraySizeVLA(Sema &S, Expr *ArraySize, llvm::APSInt &SizeVal) {
@@ -2182,7 +2280,7 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
}
if (T->isVoidType() || T->isIncompleteArrayType()) {
- Diag(Loc, diag::err_illegal_decl_array_incomplete_type) << T;
+ Diag(Loc, diag::err_array_incomplete_or_sizeless_type) << 0 << T;
return QualType();
}
@@ -2200,11 +2298,16 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
} else {
// C99 6.7.5.2p1: If the element type is an incomplete or function type,
// reject it (e.g. void ary[7], struct foo ary[7], void ary[7]())
- if (RequireCompleteType(Loc, T,
- diag::err_illegal_decl_array_incomplete_type))
+ if (RequireCompleteSizedType(Loc, T,
+ diag::err_array_incomplete_or_sizeless_type))
return QualType();
}
+ if (T->isSizelessType()) {
+ Diag(Loc, diag::err_array_incomplete_or_sizeless_type) << 1 << T;
+ return QualType();
+ }
+
if (T->isFunctionType()) {
Diag(Loc, diag::err_illegal_decl_array_of_functions)
<< getPrintableNameForEntity(Entity) << T;
@@ -2290,13 +2393,6 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
? diag::err_typecheck_zero_array_size
: diag::ext_typecheck_zero_array_size)
<< ArraySize->getSourceRange();
-
- if (ASM == ArrayType::Static) {
- Diag(ArraySize->getBeginLoc(),
- diag::warn_typecheck_zero_static_array_size)
- << ArraySize->getSourceRange();
- ASM = ArrayType::Normal;
- }
} else if (!T->isDependentType() && !T->isVariablyModifiedType() &&
!T->isIncompleteType() && !T->isUndeducedType()) {
// Is the array too large?
@@ -2392,28 +2488,35 @@ QualType Sema::BuildVectorType(QualType CurType, Expr *SizeExpr,
return Context.getDependentVectorType(CurType, SizeExpr, AttrLoc,
VectorType::GenericVector);
- unsigned VectorSize = static_cast<unsigned>(VecSize.getZExtValue() * 8);
+ // vecSize is specified in bytes - convert to bits.
+ if (!VecSize.isIntN(61)) {
+ // Bit size will overflow uint64.
+ Diag(AttrLoc, diag::err_attribute_size_too_large)
+ << SizeExpr->getSourceRange() << "vector";
+ return QualType();
+ }
+ uint64_t VectorSizeBits = VecSize.getZExtValue() * 8;
unsigned TypeSize = static_cast<unsigned>(Context.getTypeSize(CurType));
- if (VectorSize == 0) {
- Diag(AttrLoc, diag::err_attribute_zero_size) << SizeExpr->getSourceRange();
+ if (VectorSizeBits == 0) {
+ Diag(AttrLoc, diag::err_attribute_zero_size)
+ << SizeExpr->getSourceRange() << "vector";
return QualType();
}
- // vecSize is specified in bytes - convert to bits.
- if (VectorSize % TypeSize) {
+ if (VectorSizeBits % TypeSize) {
Diag(AttrLoc, diag::err_attribute_invalid_size)
<< SizeExpr->getSourceRange();
return QualType();
}
- if (VectorType::isVectorSizeTooLarge(VectorSize / TypeSize)) {
+ if (VectorSizeBits / TypeSize > std::numeric_limits<uint32_t>::max()) {
Diag(AttrLoc, diag::err_attribute_size_too_large)
- << SizeExpr->getSourceRange();
+ << SizeExpr->getSourceRange() << "vector";
return QualType();
}
- return Context.getVectorType(CurType, VectorSize / TypeSize,
+ return Context.getVectorType(CurType, VectorSizeBits / TypeSize,
VectorType::GenericVector);
}
@@ -2445,19 +2548,18 @@ QualType Sema::BuildExtVectorType(QualType T, Expr *ArraySize,
return QualType();
}
+ if (!vecSize.isIntN(32)) {
+ Diag(AttrLoc, diag::err_attribute_size_too_large)
+ << ArraySize->getSourceRange() << "vector";
+ return QualType();
+ }
// Unlike gcc's vector_size attribute, the size is specified as the
// number of elements, not the number of bytes.
unsigned vectorSize = static_cast<unsigned>(vecSize.getZExtValue());
if (vectorSize == 0) {
Diag(AttrLoc, diag::err_attribute_zero_size)
- << ArraySize->getSourceRange();
- return QualType();
- }
-
- if (VectorType::isVectorSizeTooLarge(vectorSize)) {
- Diag(AttrLoc, diag::err_attribute_size_too_large)
- << ArraySize->getSourceRange();
+ << ArraySize->getSourceRange() << "vector";
return QualType();
}
@@ -2467,6 +2569,84 @@ QualType Sema::BuildExtVectorType(QualType T, Expr *ArraySize,
return Context.getDependentSizedExtVectorType(T, ArraySize, AttrLoc);
}
+QualType Sema::BuildMatrixType(QualType ElementTy, Expr *NumRows, Expr *NumCols,
+ SourceLocation AttrLoc) {
+ assert(Context.getLangOpts().MatrixTypes &&
+ "Should never build a matrix type when it is disabled");
+
+ // Check element type, if it is not dependent.
+ if (!ElementTy->isDependentType() &&
+ !MatrixType::isValidElementType(ElementTy)) {
+ Diag(AttrLoc, diag::err_attribute_invalid_matrix_type) << ElementTy;
+ return QualType();
+ }
+
+ if (NumRows->isTypeDependent() || NumCols->isTypeDependent() ||
+ NumRows->isValueDependent() || NumCols->isValueDependent())
+ return Context.getDependentSizedMatrixType(ElementTy, NumRows, NumCols,
+ AttrLoc);
+
+ // Both row and column values can only be 20 bit wide currently.
+ llvm::APSInt ValueRows(32), ValueColumns(32);
+
+ bool const RowsIsInteger = NumRows->isIntegerConstantExpr(ValueRows, Context);
+ bool const ColumnsIsInteger =
+ NumCols->isIntegerConstantExpr(ValueColumns, Context);
+
+ auto const RowRange = NumRows->getSourceRange();
+ auto const ColRange = NumCols->getSourceRange();
+
+ // Both row and column expressions are invalid.
+ if (!RowsIsInteger && !ColumnsIsInteger) {
+ Diag(AttrLoc, diag::err_attribute_argument_type)
+ << "matrix_type" << AANT_ArgumentIntegerConstant << RowRange
+ << ColRange;
+ return QualType();
+ }
+
+ // Only the row expression is invalid.
+ if (!RowsIsInteger) {
+ Diag(AttrLoc, diag::err_attribute_argument_type)
+ << "matrix_type" << AANT_ArgumentIntegerConstant << RowRange;
+ return QualType();
+ }
+
+ // Only the column expression is invalid.
+ if (!ColumnsIsInteger) {
+ Diag(AttrLoc, diag::err_attribute_argument_type)
+ << "matrix_type" << AANT_ArgumentIntegerConstant << ColRange;
+ return QualType();
+ }
+
+ // Check the matrix dimensions.
+ unsigned MatrixRows = static_cast<unsigned>(ValueRows.getZExtValue());
+ unsigned MatrixColumns = static_cast<unsigned>(ValueColumns.getZExtValue());
+ if (MatrixRows == 0 && MatrixColumns == 0) {
+ Diag(AttrLoc, diag::err_attribute_zero_size)
+ << "matrix" << RowRange << ColRange;
+ return QualType();
+ }
+ if (MatrixRows == 0) {
+ Diag(AttrLoc, diag::err_attribute_zero_size) << "matrix" << RowRange;
+ return QualType();
+ }
+ if (MatrixColumns == 0) {
+ Diag(AttrLoc, diag::err_attribute_zero_size) << "matrix" << ColRange;
+ return QualType();
+ }
+ if (!ConstantMatrixType::isDimensionValid(MatrixRows)) {
+ Diag(AttrLoc, diag::err_attribute_size_too_large)
+ << RowRange << "matrix row";
+ return QualType();
+ }
+ if (!ConstantMatrixType::isDimensionValid(MatrixColumns)) {
+ Diag(AttrLoc, diag::err_attribute_size_too_large)
+ << ColRange << "matrix column";
+ return QualType();
+ }
+ return Context.getConstantMatrixType(ElementTy, MatrixRows, MatrixColumns);
+}
+
bool Sema::CheckFunctionReturnType(QualType T, SourceLocation Loc) {
if (T->isArrayType() || T->isFunctionType()) {
Diag(Loc, diag::err_func_returning_array_function)
@@ -2496,7 +2676,7 @@ bool Sema::CheckFunctionReturnType(QualType T, SourceLocation Loc) {
// C++2a [dcl.fct]p12:
// A volatile-qualified return type is deprecated
- if (T.isVolatileQualified() && getLangOpts().CPlusPlus2a)
+ if (T.isVolatileQualified() && getLangOpts().CPlusPlus20)
Diag(Loc, diag::warn_deprecated_volatile_return) << T;
return false;
@@ -2581,7 +2761,7 @@ QualType Sema::BuildFunctionType(QualType T,
// C++2a [dcl.fct]p4:
// A parameter with volatile-qualified type is deprecated
- if (ParamType.isVolatileQualified() && getLangOpts().CPlusPlus2a)
+ if (ParamType.isVolatileQualified() && getLangOpts().CPlusPlus20)
Diag(Loc, diag::warn_deprecated_volatile_param) << ParamType;
ParamTypes[Idx] = ParamType;
@@ -2921,6 +3101,87 @@ static void diagnoseRedundantReturnTypeQualifiers(Sema &S, QualType RetTy,
D.getDeclSpec().getUnalignedSpecLoc());
}
+static void CopyTypeConstraintFromAutoType(Sema &SemaRef, const AutoType *Auto,
+ AutoTypeLoc AutoLoc,
+ TemplateTypeParmDecl *TP,
+ SourceLocation EllipsisLoc) {
+
+ TemplateArgumentListInfo TAL(AutoLoc.getLAngleLoc(), AutoLoc.getRAngleLoc());
+ for (unsigned Idx = 0; Idx < AutoLoc.getNumArgs(); ++Idx)
+ TAL.addArgument(AutoLoc.getArgLoc(Idx));
+
+ SemaRef.AttachTypeConstraint(
+ AutoLoc.getNestedNameSpecifierLoc(), AutoLoc.getConceptNameInfo(),
+ AutoLoc.getNamedConcept(),
+ AutoLoc.hasExplicitTemplateArgs() ? &TAL : nullptr, TP, EllipsisLoc);
+}
+
+static QualType InventTemplateParameter(
+ TypeProcessingState &state, QualType T, TypeSourceInfo *TSI, AutoType *Auto,
+ InventedTemplateParameterInfo &Info) {
+ Sema &S = state.getSema();
+ Declarator &D = state.getDeclarator();
+
+ const unsigned TemplateParameterDepth = Info.AutoTemplateParameterDepth;
+ const unsigned AutoParameterPosition = Info.TemplateParams.size();
+ const bool IsParameterPack = D.hasEllipsis();
+
+ // If auto is mentioned in a lambda parameter or abbreviated function
+ // template context, convert it to a template parameter type.
+
+ // Create the TemplateTypeParmDecl here to retrieve the corresponding
+ // template parameter type. Template parameters are temporarily added
+ // to the TU until the associated TemplateDecl is created.
+ TemplateTypeParmDecl *InventedTemplateParam =
+ TemplateTypeParmDecl::Create(
+ S.Context, S.Context.getTranslationUnitDecl(),
+ /*KeyLoc=*/D.getDeclSpec().getTypeSpecTypeLoc(),
+ /*NameLoc=*/D.getIdentifierLoc(),
+ TemplateParameterDepth, AutoParameterPosition,
+ S.InventAbbreviatedTemplateParameterTypeName(
+ D.getIdentifier(), AutoParameterPosition), false,
+ IsParameterPack, /*HasTypeConstraint=*/Auto->isConstrained());
+ InventedTemplateParam->setImplicit();
+ Info.TemplateParams.push_back(InventedTemplateParam);
+ // Attach type constraints
+ if (Auto->isConstrained()) {
+ if (TSI) {
+ CopyTypeConstraintFromAutoType(
+ S, Auto, TSI->getTypeLoc().getContainedAutoTypeLoc(),
+ InventedTemplateParam, D.getEllipsisLoc());
+ } else {
+ TemplateIdAnnotation *TemplateId = D.getDeclSpec().getRepAsTemplateId();
+ TemplateArgumentListInfo TemplateArgsInfo;
+ if (TemplateId->LAngleLoc.isValid()) {
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
+ TemplateId->NumArgs);
+ S.translateTemplateArguments(TemplateArgsPtr, TemplateArgsInfo);
+ }
+ S.AttachTypeConstraint(
+ D.getDeclSpec().getTypeSpecScope().getWithLocInContext(S.Context),
+ DeclarationNameInfo(DeclarationName(TemplateId->Name),
+ TemplateId->TemplateNameLoc),
+ cast<ConceptDecl>(TemplateId->Template.get().getAsTemplateDecl()),
+ TemplateId->LAngleLoc.isValid() ? &TemplateArgsInfo : nullptr,
+ InventedTemplateParam, D.getEllipsisLoc());
+ }
+ }
+
+ // If TSI is nullptr, this is a constrained declspec auto and the type
+ // constraint will be attached later in TypeSpecLocFiller
+
+ // Replace the 'auto' in the function parameter with this invented
+ // template type parameter.
+ // FIXME: Retain some type sugar to indicate that this was written
+ // as 'auto'?
+ return state.ReplaceAutoType(
+ T, QualType(InventedTemplateParam->getTypeForDecl(), 0));
+}
+
+static TypeSourceInfo *
+GetTypeSourceInfoForDeclarator(TypeProcessingState &State,
+ QualType T, TypeSourceInfo *ReturnTypeInfo);
+
static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
TypeSourceInfo *&ReturnTypeInfo) {
Sema &SemaRef = state.getSema();
@@ -2991,54 +3252,58 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
break;
case DeclaratorContext::ObjCParameterContext:
case DeclaratorContext::ObjCResultContext:
- case DeclaratorContext::PrototypeContext:
Error = 0;
break;
- case DeclaratorContext::LambdaExprParameterContext:
- // In C++14, generic lambdas allow 'auto' in their parameters.
- if (!SemaRef.getLangOpts().CPlusPlus14 ||
- !Auto || Auto->getKeyword() != AutoTypeKeyword::Auto)
- Error = 16;
- else {
- // If auto is mentioned in a lambda parameter context, convert it to a
- // template parameter type.
- sema::LambdaScopeInfo *LSI = SemaRef.getCurLambda();
- assert(LSI && "No LambdaScopeInfo on the stack!");
- const unsigned TemplateParameterDepth = LSI->AutoTemplateParameterDepth;
- const unsigned AutoParameterPosition = LSI->TemplateParams.size();
- const bool IsParameterPack = D.hasEllipsis();
-
- // Create the TemplateTypeParmDecl here to retrieve the corresponding
- // template parameter type. Template parameters are temporarily added
- // to the TU until the associated TemplateDecl is created.
- TemplateTypeParmDecl *CorrespondingTemplateParam =
- TemplateTypeParmDecl::Create(
- SemaRef.Context, SemaRef.Context.getTranslationUnitDecl(),
- /*KeyLoc*/ SourceLocation(), /*NameLoc*/ D.getBeginLoc(),
- TemplateParameterDepth, AutoParameterPosition,
- /*Identifier*/ nullptr, false, IsParameterPack,
- /*HasTypeConstraint=*/false);
- CorrespondingTemplateParam->setImplicit();
- LSI->TemplateParams.push_back(CorrespondingTemplateParam);
- // Replace the 'auto' in the function parameter with this invented
- // template type parameter.
- // FIXME: Retain some type sugar to indicate that this was written
- // as 'auto'.
- T = state.ReplaceAutoType(
- T, QualType(CorrespondingTemplateParam->getTypeForDecl(), 0));
+ case DeclaratorContext::RequiresExprContext:
+ Error = 22;
+ break;
+ case DeclaratorContext::PrototypeContext:
+ case DeclaratorContext::LambdaExprParameterContext: {
+ InventedTemplateParameterInfo *Info = nullptr;
+ if (D.getContext() == DeclaratorContext::PrototypeContext) {
+ // With concepts we allow 'auto' in function parameters.
+ if (!SemaRef.getLangOpts().CPlusPlus20 || !Auto ||
+ Auto->getKeyword() != AutoTypeKeyword::Auto) {
+ Error = 0;
+ break;
+ } else if (!SemaRef.getCurScope()->isFunctionDeclarationScope()) {
+ Error = 21;
+ break;
+ } else if (D.hasTrailingReturnType()) {
+ // This might be OK, but we'll need to convert the trailing return
+ // type later.
+ break;
+ }
+
+ Info = &SemaRef.InventedParameterInfos.back();
+ } else {
+ // In C++14, generic lambdas allow 'auto' in their parameters.
+ if (!SemaRef.getLangOpts().CPlusPlus14 || !Auto ||
+ Auto->getKeyword() != AutoTypeKeyword::Auto) {
+ Error = 16;
+ break;
+ }
+ Info = SemaRef.getCurLambda();
+ assert(Info && "No LambdaScopeInfo on the stack!");
}
+ T = InventTemplateParameter(state, T, nullptr, Auto, *Info);
break;
+ }
case DeclaratorContext::MemberContext: {
if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static ||
D.isFunctionDeclarator())
break;
bool Cxx = SemaRef.getLangOpts().CPlusPlus;
- switch (cast<TagDecl>(SemaRef.CurContext)->getTagKind()) {
- case TTK_Enum: llvm_unreachable("unhandled tag kind");
- case TTK_Struct: Error = Cxx ? 1 : 2; /* Struct member */ break;
- case TTK_Union: Error = Cxx ? 3 : 4; /* Union member */ break;
- case TTK_Class: Error = 5; /* Class member */ break;
- case TTK_Interface: Error = 6; /* Interface member */ break;
+ if (isa<ObjCContainerDecl>(SemaRef.CurContext)) {
+ Error = 6; // Interface member.
+ } else {
+ switch (cast<TagDecl>(SemaRef.CurContext)->getTagKind()) {
+ case TTK_Enum: llvm_unreachable("unhandled tag kind");
+ case TTK_Struct: Error = Cxx ? 1 : 2; /* Struct member */ break;
+ case TTK_Union: Error = Cxx ? 3 : 4; /* Union member */ break;
+ case TTK_Class: Error = 5; /* Class member */ break;
+ case TTK_Interface: Error = 6; /* Interface member */ break;
+ }
}
if (D.getDeclSpec().isFriendSpecified())
Error = 20; // Friend type
@@ -3221,6 +3486,7 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
case DeclaratorContext::ObjCParameterContext:
case DeclaratorContext::ObjCResultContext:
case DeclaratorContext::KNRTypeListContext:
+ case DeclaratorContext::RequiresExprContext:
// C++ [dcl.fct]p6:
// Types shall not be defined in return or parameter types.
DiagID = diag::err_type_defined_in_param_type;
@@ -4028,10 +4294,6 @@ static bool DiagnoseMultipleAddrSpaceAttributes(Sema &S, LangAS ASOld,
return false;
}
-static TypeSourceInfo *
-GetTypeSourceInfoForDeclarator(TypeProcessingState &State,
- QualType T, TypeSourceInfo *ReturnTypeInfo);
-
static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
QualType declSpecType,
TypeSourceInfo *TInfo) {
@@ -4279,6 +4541,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
case DeclaratorContext::TemplateTypeArgContext:
case DeclaratorContext::TypeNameContext:
case DeclaratorContext::FunctionalCastContext:
+ case DeclaratorContext::RequiresExprContext:
// Don't infer in these contexts.
break;
}
@@ -4606,7 +4869,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
} else if (D.getContext() != DeclaratorContext::LambdaExprContext &&
(T.hasQualifiers() || !isa<AutoType>(T) ||
cast<AutoType>(T)->getKeyword() !=
- AutoTypeKeyword::Auto)) {
+ AutoTypeKeyword::Auto ||
+ cast<AutoType>(T)->isConstrained())) {
S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
diag::err_trailing_return_without_auto)
<< T << D.getDeclSpec().getSourceRange();
@@ -4617,7 +4881,12 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// An error occurred parsing the trailing return type.
T = Context.IntTy;
D.setInvalidType(true);
- }
+ } else if (S.getLangOpts().CPlusPlus20)
+ // Handle cases like: `auto f() -> auto` or `auto f() -> C auto`.
+ if (AutoType *Auto = T->getContainedAutoType())
+ if (S.getCurScope()->isFunctionDeclarationScope())
+ T = InventTemplateParameter(state, T, TInfo, Auto,
+ S.InventedParameterInfos.back());
} else {
// This function type is not the type of the entity being declared,
// so checking the 'auto' is not the responsibility of this chunk.
@@ -4721,7 +4990,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// C++2a [dcl.fct]p12:
// A volatile-qualified return type is deprecated
- if (T.isVolatileQualified() && S.getLangOpts().CPlusPlus2a)
+ if (T.isVolatileQualified() && S.getLangOpts().CPlusPlus20)
S.Diag(DeclType.Loc, diag::warn_deprecated_volatile_return) << T;
}
@@ -4863,8 +5132,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// FIXME: This really should be in BuildFunctionType.
if (S.getLangOpts().OpenCL) {
if (!S.getOpenCLOptions().isEnabled("cl_khr_fp16")) {
- S.Diag(Param->getLocation(),
- diag::err_opencl_half_param) << ParamTy;
+ S.Diag(Param->getLocation(), diag::err_opencl_invalid_param)
+ << ParamTy << 0;
D.setInvalidType();
Param->setInvalidDecl();
}
@@ -4883,6 +5152,11 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
Param->setKNRPromoted(true);
}
}
+ } else if (S.getLangOpts().OpenCL && ParamTy->isBlockPointerType()) {
+ // OpenCL 2.0 s6.12.5: A block cannot be a parameter of a function.
+ S.Diag(Param->getLocation(), diag::err_opencl_invalid_param)
+ << ParamTy << 1 /*hint off*/;
+ D.setInvalidType();
}
if (LangOpts.ObjCAutoRefCount && Param->hasAttr<NSConsumedAttr>()) {
@@ -5212,7 +5486,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// C++2a [dcl.fct]p4:
// A parameter with volatile-qualified type is deprecated
- if (T.isVolatileQualified() && S.getLangOpts().CPlusPlus2a &&
+ if (T.isVolatileQualified() && S.getLangOpts().CPlusPlus20 &&
(D.getContext() == DeclaratorContext::PrototypeContext ||
D.getContext() == DeclaratorContext::LambdaExprParameterContext))
S.Diag(D.getIdentifierLoc(), diag::warn_deprecated_volatile_param) << T;
@@ -5227,6 +5501,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
switch (D.getContext()) {
case DeclaratorContext::PrototypeContext:
case DeclaratorContext::LambdaExprParameterContext:
+ case DeclaratorContext::RequiresExprContext:
// C++0x [dcl.fct]p13:
// [...] When it is part of a parameter-declaration-clause, the
// parameter pack is a function parameter pack (14.5.3). The type T
@@ -5236,7 +5511,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
//
// We represent function parameter packs as function parameters whose
// type is a pack expansion.
- if (!T->containsUnexpandedParameterPack()) {
+ if (!T->containsUnexpandedParameterPack() &&
+ (!LangOpts.CPlusPlus20 || !T->getContainedAutoType())) {
S.Diag(D.getEllipsisLoc(),
diag::err_function_parameter_pack_without_parameter_packs)
<< T << D.getSourceRange();
@@ -5444,14 +5720,15 @@ static void fillAttributedTypeLoc(AttributedTypeLoc TL,
namespace {
class TypeSpecLocFiller : public TypeLocVisitor<TypeSpecLocFiller> {
+ Sema &SemaRef;
ASTContext &Context;
TypeProcessingState &State;
const DeclSpec &DS;
public:
- TypeSpecLocFiller(ASTContext &Context, TypeProcessingState &State,
+ TypeSpecLocFiller(Sema &S, ASTContext &Context, TypeProcessingState &State,
const DeclSpec &DS)
- : Context(Context), State(State), DS(DS) {}
+ : SemaRef(S), Context(Context), State(State), DS(DS) {}
void VisitAttributedTypeLoc(AttributedTypeLoc TL) {
Visit(TL.getModifiedLoc());
@@ -5579,6 +5856,34 @@ namespace {
TL.copy(
TInfo->getTypeLoc().castAs<DependentTemplateSpecializationTypeLoc>());
}
+ void VisitAutoTypeLoc(AutoTypeLoc TL) {
+ assert(DS.getTypeSpecType() == TST_auto ||
+ DS.getTypeSpecType() == TST_decltype_auto ||
+ DS.getTypeSpecType() == TST_auto_type ||
+ DS.getTypeSpecType() == TST_unspecified);
+ TL.setNameLoc(DS.getTypeSpecTypeLoc());
+ if (!DS.isConstrainedAuto())
+ return;
+ TemplateIdAnnotation *TemplateId = DS.getRepAsTemplateId();
+ if (DS.getTypeSpecScope().isNotEmpty())
+ TL.setNestedNameSpecifierLoc(
+ DS.getTypeSpecScope().getWithLocInContext(Context));
+ else
+ TL.setNestedNameSpecifierLoc(NestedNameSpecifierLoc());
+ TL.setTemplateKWLoc(TemplateId->TemplateKWLoc);
+ TL.setConceptNameLoc(TemplateId->TemplateNameLoc);
+ TL.setFoundDecl(nullptr);
+ TL.setLAngleLoc(TemplateId->LAngleLoc);
+ TL.setRAngleLoc(TemplateId->RAngleLoc);
+ if (TemplateId->NumArgs == 0)
+ return;
+ TemplateArgumentListInfo TemplateArgsInfo;
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
+ TemplateId->NumArgs);
+ SemaRef.translateTemplateArguments(TemplateArgsPtr, TemplateArgsInfo);
+ for (unsigned I = 0; I < TemplateId->NumArgs; ++I)
+ TL.setArgLocInfo(I, TemplateArgsInfo.arguments()[I].getLocInfo());
+ }
void VisitTagTypeLoc(TagTypeLoc TL) {
TL.setNameLoc(DS.getTypeSpecTypeNameLoc());
}
@@ -5609,6 +5914,14 @@ namespace {
TL.getValueLoc().initializeFullCopy(TInfo->getTypeLoc());
}
+ void VisitExtIntTypeLoc(ExtIntTypeLoc TL) {
+ TL.setNameLoc(DS.getTypeSpecTypeLoc());
+ }
+
+ void VisitDependentExtIntTypeLoc(DependentExtIntTypeLoc TL) {
+ TL.setNameLoc(DS.getTypeSpecTypeLoc());
+ }
+
void VisitTypeLoc(TypeLoc TL) {
// FIXME: add other typespec types and change this to an assert.
TL.initialize(Context, DS.getTypeSpecTypeLoc());
@@ -5692,7 +6005,7 @@ namespace {
}
// Finally fill in MemberPointerLocInfo fields.
- TL.setStarLoc(Chunk.Loc);
+ TL.setStarLoc(SourceLocation::getFromRawEncoding(Chunk.Mem.StarLoc));
TL.setClassTInfo(ClsTInfo);
}
void VisitLValueReferenceTypeLoc(LValueReferenceTypeLoc TL) {
@@ -5735,6 +6048,9 @@ namespace {
assert(Chunk.Kind == DeclaratorChunk::Pipe);
TL.setKWLoc(Chunk.Loc);
}
+ void VisitExtIntTypeLoc(ExtIntTypeLoc TL) {
+ TL.setNameLoc(Chunk.Loc);
+ }
void VisitMacroQualifiedTypeLoc(MacroQualifiedTypeLoc TL) {
TL.setExpansionLoc(Chunk.Loc);
}
@@ -5785,6 +6101,21 @@ fillDependentAddressSpaceTypeLoc(DependentAddressSpaceTypeLoc DASTL,
"no address_space attribute found at the expected location!");
}
+static void fillMatrixTypeLoc(MatrixTypeLoc MTL,
+ const ParsedAttributesView &Attrs) {
+ for (const ParsedAttr &AL : Attrs) {
+ if (AL.getKind() == ParsedAttr::AT_MatrixType) {
+ MTL.setAttrNameLoc(AL.getLoc());
+ MTL.setAttrRowOperand(AL.getArgAsExpr(0));
+ MTL.setAttrColumnOperand(AL.getArgAsExpr(1));
+ MTL.setAttrOperandParensRange(SourceRange());
+ return;
+ }
+ }
+
+ llvm_unreachable("no matrix_type attribute found at the expected location!");
+}
+
/// Create and instantiate a TypeSourceInfo with type source information.
///
/// \param T QualType referring to the type as written in source code.
@@ -5833,6 +6164,9 @@ GetTypeSourceInfoForDeclarator(TypeProcessingState &State,
CurrTL = TL.getPointeeTypeLoc().getUnqualifiedLoc();
}
+ if (MatrixTypeLoc TL = CurrTL.getAs<MatrixTypeLoc>())
+ fillMatrixTypeLoc(TL, D.getTypeObject(i).getAttrs());
+
// FIXME: Ordering here?
while (AdjustedTypeLoc TL = CurrTL.getAs<AdjustedTypeLoc>())
CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc();
@@ -5848,7 +6182,7 @@ GetTypeSourceInfoForDeclarator(TypeProcessingState &State,
assert(TL.getFullDataSize() == CurrTL.getFullDataSize());
memcpy(CurrTL.getOpaqueData(), TL.getOpaqueData(), TL.getFullDataSize());
} else {
- TypeSpecLocFiller(S.Context, State, D.getDeclSpec()).Visit(CurrTL);
+ TypeSpecLocFiller(S, S.Context, State, D.getDeclSpec()).Visit(CurrTL);
}
return TInfo;
@@ -6349,6 +6683,7 @@ namespace {
Desugar,
Attributed,
Parens,
+ Array,
Pointer,
BlockPointer,
Reference,
@@ -6369,6 +6704,10 @@ namespace {
} else if (isa<ParenType>(Ty)) {
T = cast<ParenType>(Ty)->getInnerType();
Stack.push_back(Parens);
+ } else if (isa<ConstantArrayType>(Ty) || isa<VariableArrayType>(Ty) ||
+ isa<IncompleteArrayType>(Ty)) {
+ T = cast<ArrayType>(Ty)->getElementType();
+ Stack.push_back(Array);
} else if (isa<PointerType>(Ty)) {
T = cast<PointerType>(Ty)->getPointeeType();
Stack.push_back(Pointer);
@@ -6446,6 +6785,27 @@ namespace {
case MacroQualified:
return wrap(C, cast<MacroQualifiedType>(Old)->getUnderlyingType(), I);
+ case Array: {
+ if (const auto *CAT = dyn_cast<ConstantArrayType>(Old)) {
+ QualType New = wrap(C, CAT->getElementType(), I);
+ return C.getConstantArrayType(New, CAT->getSize(), CAT->getSizeExpr(),
+ CAT->getSizeModifier(),
+ CAT->getIndexTypeCVRQualifiers());
+ }
+
+ if (const auto *VAT = dyn_cast<VariableArrayType>(Old)) {
+ QualType New = wrap(C, VAT->getElementType(), I);
+ return C.getVariableArrayType(
+ New, VAT->getSizeExpr(), VAT->getSizeModifier(),
+ VAT->getIndexTypeCVRQualifiers(), VAT->getBracketsRange());
+ }
+
+ const auto *IAT = cast<IncompleteArrayType>(Old);
+ QualType New = wrap(C, IAT->getElementType(), I);
+ return C.getIncompleteArrayType(New, IAT->getSizeModifier(),
+ IAT->getIndexTypeCVRQualifiers());
+ }
+
case Pointer: {
QualType New = wrap(C, cast<PointerType>(Old)->getPointeeType(), I);
return C.getPointerType(New);
@@ -6673,15 +7033,15 @@ static bool checkNullabilityTypeSpecifier(TypeProcessingState &state,
// attributes, require that the type be a single-level pointer.
if (isContextSensitive) {
// Make sure that the pointee isn't itself a pointer type.
- const Type *pointeeType;
+ const Type *pointeeType = nullptr;
if (desugared->isArrayType())
pointeeType = desugared->getArrayElementTypeNoTypeQual();
- else
+ else if (desugared->isAnyPointerType())
pointeeType = desugared->getPointeeType().getTypePtr();
- if (pointeeType->isAnyPointerType() ||
- pointeeType->isObjCObjectPointerType() ||
- pointeeType->isMemberPointerType()) {
+ if (pointeeType && (pointeeType->isAnyPointerType() ||
+ pointeeType->isObjCObjectPointerType() ||
+ pointeeType->isMemberPointerType())) {
S.Diag(nullabilityLoc, diag::err_nullability_cs_multilevel)
<< DiagNullabilityKind(nullability, true)
<< type;
@@ -6914,6 +7274,25 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
return true;
}
+ if (attr.getKind() == ParsedAttr::AT_CmseNSCall) {
+ // Delay if this is not a function type.
+ if (!unwrapped.isFunctionType())
+ return false;
+
+ // Ignore if we don't have CMSE enabled.
+ if (!S.getLangOpts().Cmse) {
+ S.Diag(attr.getLoc(), diag::warn_attribute_ignored) << attr;
+ attr.setInvalid();
+ return true;
+ }
+
+ // Otherwise we can process right away.
+ FunctionType::ExtInfo EI =
+ unwrapped.get()->getExtInfo().withCmseNSCall(true);
+ type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
+ return true;
+ }
+
// ns_returns_retained is not always a type attribute, but if we got
// here, we're treating it as one right now.
if (attr.getKind() == ParsedAttr::AT_NSReturnsRetained) {
@@ -7273,15 +7652,16 @@ static bool isPermittedNeonBaseType(QualType &Ty,
Triple.getArch() == llvm::Triple::aarch64_be;
if (VecKind == VectorType::NeonPolyVector) {
if (IsPolyUnsigned) {
- // AArch64 polynomial vectors are unsigned and support poly64.
+ // AArch64 polynomial vectors are unsigned.
return BTy->getKind() == BuiltinType::UChar ||
BTy->getKind() == BuiltinType::UShort ||
BTy->getKind() == BuiltinType::ULong ||
BTy->getKind() == BuiltinType::ULongLong;
} else {
- // AArch32 polynomial vector are signed.
+ // AArch32 polynomial vectors are signed.
return BTy->getKind() == BuiltinType::SChar ||
- BTy->getKind() == BuiltinType::Short;
+ BTy->getKind() == BuiltinType::Short ||
+ BTy->getKind() == BuiltinType::LongLong;
}
}
@@ -7302,7 +7682,8 @@ static bool isPermittedNeonBaseType(QualType &Ty,
BTy->getKind() == BuiltinType::LongLong ||
BTy->getKind() == BuiltinType::ULongLong ||
BTy->getKind() == BuiltinType::Float ||
- BTy->getKind() == BuiltinType::Half;
+ BTy->getKind() == BuiltinType::Half ||
+ BTy->getKind() == BuiltinType::BFloat16;
}
/// HandleNeonVectorTypeAttr - The "neon_vector_type" and
@@ -7360,6 +7741,23 @@ static void HandleNeonVectorTypeAttr(QualType &CurType, const ParsedAttr &Attr,
CurType = S.Context.getVectorType(CurType, numElts, VecKind);
}
+static void HandleArmMveStrictPolymorphismAttr(TypeProcessingState &State,
+ QualType &CurType,
+ ParsedAttr &Attr) {
+ const VectorType *VT = dyn_cast<VectorType>(CurType);
+ if (!VT || VT->getVectorKind() != VectorType::NeonVector) {
+ State.getSema().Diag(Attr.getLoc(),
+ diag::err_attribute_arm_mve_polymorphism);
+ Attr.setInvalid();
+ return;
+ }
+
+ CurType =
+ State.getAttributedType(createSimpleAttr<ArmMveStrictPolymorphismAttr>(
+ State.getSema().Context, Attr),
+ CurType, CurType);
+}
+
/// Handle OpenCL Access Qualifier Attribute.
static void HandleOpenCLAccessAttr(QualType &CurType, const ParsedAttr &Attr,
Sema &S) {
@@ -7416,6 +7814,68 @@ static void HandleOpenCLAccessAttr(QualType &CurType, const ParsedAttr &Attr,
}
}
+/// HandleMatrixTypeAttr - "matrix_type" attribute, like ext_vector_type
+static void HandleMatrixTypeAttr(QualType &CurType, const ParsedAttr &Attr,
+ Sema &S) {
+ if (!S.getLangOpts().MatrixTypes) {
+ S.Diag(Attr.getLoc(), diag::err_builtin_matrix_disabled);
+ return;
+ }
+
+ if (Attr.getNumArgs() != 2) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << Attr << 2;
+ return;
+ }
+
+ Expr *RowsExpr = nullptr;
+ Expr *ColsExpr = nullptr;
+
+ // TODO: Refactor parameter extraction into separate function
+ // Get the number of rows
+ if (Attr.isArgIdent(0)) {
+ CXXScopeSpec SS;
+ SourceLocation TemplateKeywordLoc;
+ UnqualifiedId id;
+ id.setIdentifier(Attr.getArgAsIdent(0)->Ident, Attr.getLoc());
+ ExprResult Rows = S.ActOnIdExpression(S.getCurScope(), SS,
+ TemplateKeywordLoc, id, false, false);
+
+ if (Rows.isInvalid())
+ // TODO: maybe a good error message would be nice here
+ return;
+ RowsExpr = Rows.get();
+ } else {
+ assert(Attr.isArgExpr(0) &&
+ "Argument to should either be an identity or expression");
+ RowsExpr = Attr.getArgAsExpr(0);
+ }
+
+ // Get the number of columns
+ if (Attr.isArgIdent(1)) {
+ CXXScopeSpec SS;
+ SourceLocation TemplateKeywordLoc;
+ UnqualifiedId id;
+ id.setIdentifier(Attr.getArgAsIdent(1)->Ident, Attr.getLoc());
+ ExprResult Columns = S.ActOnIdExpression(
+ S.getCurScope(), SS, TemplateKeywordLoc, id, false, false);
+
+ if (Columns.isInvalid())
+ // TODO: a good error message would be nice here
+ return;
+    ColsExpr = Columns.get();
+ } else {
+ assert(Attr.isArgExpr(1) &&
+ "Argument to should either be an identity or expression");
+ ColsExpr = Attr.getArgAsExpr(1);
+ }
+
+ // Create the matrix type.
+ QualType T = S.BuildMatrixType(CurType, RowsExpr, ColsExpr, Attr.getLoc());
+ if (!T.isNull())
+ CurType = T;
+}
+
static void HandleLifetimeBoundAttr(TypeProcessingState &State,
QualType &CurType,
ParsedAttr &Attr) {
@@ -7544,6 +8004,11 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
VectorType::NeonPolyVector);
attr.setUsedAsTypeAttr();
break;
+ case ParsedAttr::AT_ArmMveStrictPolymorphism: {
+ HandleArmMveStrictPolymorphismAttr(state, type, attr);
+ attr.setUsedAsTypeAttr();
+ break;
+ }
case ParsedAttr::AT_OpenCLAccess:
HandleOpenCLAccessAttr(type, attr, state.getSema());
attr.setUsedAsTypeAttr();
@@ -7562,6 +8027,11 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
break;
}
+ case ParsedAttr::AT_MatrixType:
+ HandleMatrixTypeAttr(type, attr, state.getSema());
+ attr.setUsedAsTypeAttr();
+ break;
+
MS_TYPE_ATTRS_CASELIST:
if (!handleMSPointerTypeQualifierAttr(state, attr, type))
attr.setUsedAsTypeAttr();
@@ -7638,6 +8108,15 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
case ParsedAttr::AT_AcquireHandle: {
if (!type->isFunctionType())
return;
+
+ if (attr.getNumArgs() != 1) {
+ state.getSema().Diag(attr.getLoc(),
+ diag::err_attribute_wrong_number_arguments)
+ << attr << 1;
+ attr.setInvalid();
+ return;
+ }
+
StringRef HandleType;
if (!state.getSema().checkStringLiteralArgumentAttr(attr, 0, HandleType))
return;
@@ -7722,12 +8201,14 @@ void Sema::completeExprArrayBound(Expr *E) {
/// case of a reference type, the referred-to type).
///
/// \param E The expression whose type is required to be complete.
+/// \param Kind Selects which completeness rules should be applied.
/// \param Diagnoser The object that will emit a diagnostic if the type is
/// incomplete.
///
/// \returns \c true if the type of \p E is incomplete and diagnosed, \c false
/// otherwise.
-bool Sema::RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser) {
+bool Sema::RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
+ TypeDiagnoser &Diagnoser) {
QualType T = E->getType();
// Incomplete array types may be completed by the initializer attached to
@@ -7742,12 +8223,12 @@ bool Sema::RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser) {
// FIXME: Are there other cases which require instantiating something other
// than the type to complete the type of an expression?
- return RequireCompleteType(E->getExprLoc(), T, Diagnoser);
+ return RequireCompleteType(E->getExprLoc(), T, Kind, Diagnoser);
}
bool Sema::RequireCompleteExprType(Expr *E, unsigned DiagID) {
BoundTypeDiagnoser<> Diagnoser(DiagID);
- return RequireCompleteExprType(E, Diagnoser);
+ return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
/// Ensure that the type T is a complete type.
@@ -7765,11 +8246,14 @@ bool Sema::RequireCompleteExprType(Expr *E, unsigned DiagID) {
///
/// @param T The type that this routine is examining for completeness.
///
+/// @param Kind Selects which completeness rules should be applied.
+///
/// @returns @c true if @p T is incomplete and a diagnostic was emitted,
/// @c false otherwise.
bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
+ CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser) {
- if (RequireCompleteTypeImpl(Loc, T, &Diagnoser))
+ if (RequireCompleteTypeImpl(Loc, T, Kind, &Diagnoser))
return true;
if (const TagType *Tag = T->getAs<TagType>()) {
if (!Tag->getDecl()->isCompleteDefinitionRequired()) {
@@ -7823,10 +8307,12 @@ bool Sema::hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
} else if (auto *ED = dyn_cast<EnumDecl>(D)) {
if (auto *Pattern = ED->getTemplateInstantiationPattern())
ED = Pattern;
- if (OnlyNeedComplete && ED->isFixed()) {
- // If the enum has a fixed underlying type, and we're only looking for a
- // complete type (not a definition), any visible declaration of it will
- // do.
+ if (OnlyNeedComplete && (ED->isFixed() || getLangOpts().MSVCCompat)) {
+ // If the enum has a fixed underlying type, it may have been forward
+ // declared. In -fms-compatibility, `enum Foo;` will also forward declare
+ // the enum and assign it the underlying type of `int`. Since we're only
+ // looking for a complete type (not a definition), any visible declaration
+ // of it will do.
*Suggested = nullptr;
for (auto *Redecl : ED->redecls()) {
if (isVisible(Redecl))
@@ -7918,6 +8404,7 @@ static void assignInheritanceModel(Sema &S, CXXRecordDecl *RD) {
/// The implementation of RequireCompleteType
bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
+ CompleteTypeKind Kind,
TypeDiagnoser *Diagnoser) {
// FIXME: Add this assertion to make sure we always get instantiation points.
// assert(!Loc.isInvalid() && "Invalid location in RequireCompleteType");
@@ -7931,7 +8418,7 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
if (!MPTy->getClass()->isDependentType()) {
if (getLangOpts().CompleteMemberPointers &&
!MPTy->getClass()->getAsCXXRecordDecl()->isBeingDefined() &&
- RequireCompleteType(Loc, QualType(MPTy->getClass(), 0),
+ RequireCompleteType(Loc, QualType(MPTy->getClass(), 0), Kind,
diag::err_memptr_incomplete))
return true;
@@ -7945,7 +8432,9 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
}
NamedDecl *Def = nullptr;
- bool Incomplete = T->isIncompleteType(&Def);
+ bool AcceptSizeless = (Kind == CompleteTypeKind::AcceptSizeless);
+ bool Incomplete = (T->isIncompleteType(&Def) ||
+ (!AcceptSizeless && T->isSizelessBuiltinType()));
// Check that any necessary explicit specializations are visible. For an
// enum, we just need the declaration, so don't check this.
@@ -7999,7 +8488,7 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
// If the external source completed the type, go through the motions
// again to ensure we're allowed to use the completed type.
if (!T->isIncompleteType())
- return RequireCompleteTypeImpl(Loc, T, Diagnoser);
+ return RequireCompleteTypeImpl(Loc, T, Kind, Diagnoser);
}
}
@@ -8051,7 +8540,7 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
// instantiation produced an error, so that repeated calls to this
// function give consistent answers.
if (!T->isIncompleteType())
- return RequireCompleteTypeImpl(Loc, T, Diagnoser);
+ return RequireCompleteTypeImpl(Loc, T, Kind, Diagnoser);
}
}
@@ -8065,14 +8554,14 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
// If the type was a forward declaration of a class/struct/union
// type, produce a note.
- if (Tag && !Tag->isInvalidDecl())
+ if (Tag && !Tag->isInvalidDecl() && !Tag->getLocation().isInvalid())
Diag(Tag->getLocation(),
Tag->isBeingDefined() ? diag::note_type_being_defined
: diag::note_forward_declaration)
<< Context.getTagDeclType(Tag);
// If the Objective-C class was a forward declaration, produce a note.
- if (IFace && !IFace->isInvalidDecl())
+ if (IFace && !IFace->isInvalidDecl() && !IFace->getLocation().isInvalid())
Diag(IFace->getLocation(), diag::note_forward_class);
// If we have external information that we can use to suggest a fix,
@@ -8084,9 +8573,9 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
}
bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
- unsigned DiagID) {
+ CompleteTypeKind Kind, unsigned DiagID) {
BoundTypeDiagnoser<> Diagnoser(DiagID);
- return RequireCompleteType(Loc, T, Diagnoser);
+ return RequireCompleteType(Loc, T, Kind, Diagnoser);
}
/// Get diagnostic %select index for tag kind for
@@ -8184,7 +8673,7 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
return true;
}
}
- } else if (getLangOpts().CPlusPlus2a ? !RD->hasConstexprDestructor()
+ } else if (getLangOpts().CPlusPlus20 ? !RD->hasConstexprDestructor()
: !RD->hasTrivialDestructor()) {
// All fields and bases are of literal types, so have trivial or constexpr
// destructors. If this class's destructor is non-trivial / non-constexpr,
@@ -8194,7 +8683,7 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
if (!Dtor)
return true;
- if (getLangOpts().CPlusPlus2a) {
+ if (getLangOpts().CPlusPlus20) {
Diag(Dtor->getLocation(), diag::note_non_literal_non_constexpr_dtor)
<< RD;
} else {
@@ -8386,9 +8875,17 @@ QualType Sema::BuildAtomicType(QualType T, SourceLocation Loc) {
DisallowedKind = 4;
else if (T.hasQualifiers())
DisallowedKind = 5;
+ else if (T->isSizelessType())
+ DisallowedKind = 6;
else if (!T.isTriviallyCopyableType(Context))
// Some other non-trivially-copyable type (probably a C++ class)
- DisallowedKind = 6;
+ DisallowedKind = 7;
+ else if (auto *ExtTy = T->getAs<ExtIntType>()) {
+ if (ExtTy->getNumBits() < 8)
+ DisallowedKind = 8;
+ else if (!llvm::isPowerOf2_32(ExtTy->getNumBits()))
+ DisallowedKind = 9;
+ }
if (DisallowedKind != -1) {
Diag(Loc, diag::err_atomic_specifier_bad_type) << DisallowedKind << T;
diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h
index 3b827fbc950b..ae0e9f1119b4 100644
--- a/clang/lib/Sema/TreeTransform.h
+++ b/clang/lib/Sema/TreeTransform.h
@@ -19,6 +19,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
@@ -27,6 +28,7 @@
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtOpenMP.h"
+#include "clang/Basic/OpenMPKinds.h"
#include "clang/Sema/Designator.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
@@ -156,6 +158,13 @@ public:
/// existing lambdas.
bool ReplacingOriginal() { return false; }
+  /// Whether CXXConstructExprs can be skipped when they are implicit.
+  /// They will be reconstructed when used if needed.
+  /// This is useful when the user that causes rebuilding of the
+  /// CXXConstructExpr is outside of the expression at which the TreeTransform
+  /// started.
+ bool AllowSkippingCXXConstructExpr() { return true; }
+
/// Returns the location of the entity being transformed, if that
/// information was not available elsewhere in the AST.
///
@@ -211,6 +220,14 @@ public:
return T.isNull();
}
+ /// Transform a template parameter depth level.
+ ///
+ /// During a transformation that transforms template parameters, this maps
+ /// an old template parameter depth to a new depth.
+ unsigned TransformTemplateDepth(unsigned Depth) {
+ return Depth;
+ }
+
/// Determine whether the given call argument should be dropped, e.g.,
/// because it is a default argument.
///
@@ -509,6 +526,15 @@ public:
DeclarationNameInfo
TransformDeclarationNameInfo(const DeclarationNameInfo &NameInfo);
+ bool TransformRequiresExprRequirements(ArrayRef<concepts::Requirement *> Reqs,
+ llvm::SmallVectorImpl<concepts::Requirement *> &Transformed);
+ concepts::TypeRequirement *
+ TransformTypeRequirement(concepts::TypeRequirement *Req);
+ concepts::ExprRequirement *
+ TransformExprRequirement(concepts::ExprRequirement *Req);
+ concepts::NestedRequirement *
+ TransformNestedRequirement(concepts::NestedRequirement *Req);
+
/// Transform the given template name.
///
/// \param SS The nested-name-specifier that qualifies the template
@@ -704,10 +730,10 @@ public:
#define ABSTRACT_STMT(Stmt)
#include "clang/AST/StmtNodes.inc"
-#define OPENMP_CLAUSE(Name, Class) \
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
LLVM_ATTRIBUTE_NOINLINE \
OMPClause *Transform ## Class(Class *S);
-#include "clang/Basic/OpenMPKinds.def"
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
/// Build a new qualified type given its unqualified type and type location.
///
@@ -868,6 +894,16 @@ public:
Expr *SizeExpr,
SourceLocation AttributeLoc);
+ /// Build a new matrix type given the element type and dimensions.
+ QualType RebuildConstantMatrixType(QualType ElementType, unsigned NumRows,
+ unsigned NumColumns);
+
+ /// Build a new matrix type given the type and dependently-defined
+ /// dimensions.
+ QualType RebuildDependentSizedMatrixType(QualType ElementType, Expr *RowExpr,
+ Expr *ColumnExpr,
+ SourceLocation AttributeLoc);
+
/// Build a new DependentAddressSpaceType or return the pointee
/// type variable with the correct address space (retrieved from
/// AddrSpaceExpr) applied to it. The former will be returned in cases
@@ -941,12 +977,16 @@ public:
/// Build a new C++11 auto type.
///
/// By default, builds a new AutoType with the given deduced type.
- QualType RebuildAutoType(QualType Deduced, AutoTypeKeyword Keyword) {
+ QualType RebuildAutoType(QualType Deduced, AutoTypeKeyword Keyword,
+ ConceptDecl *TypeConstraintConcept,
+ ArrayRef<TemplateArgument> TypeConstraintArgs) {
// Note, IsDependent is always false here: we implicitly convert an 'auto'
// which has been deduced to a dependent type into an undeduced 'auto', so
// that we'll retry deduction after the transformation.
return SemaRef.Context.getAutoType(Deduced, Keyword,
- /*IsDependent*/ false);
+ /*IsDependent*/ false, /*IsPack=*/false,
+ TypeConstraintConcept,
+ TypeConstraintArgs);
}
/// By default, builds a new DeducedTemplateSpecializationType with the given
@@ -1056,23 +1096,8 @@ public:
}
if (Keyword == ETK_None || Keyword == ETK_Typename) {
- QualType T = SemaRef.CheckTypenameType(Keyword, KeywordLoc, QualifierLoc,
- *Id, IdLoc);
- // If a dependent name resolves to a deduced template specialization type,
- // check that we're in one of the syntactic contexts permitting it.
- if (!DeducedTSTContext) {
- if (auto *Deduced = dyn_cast_or_null<DeducedTemplateSpecializationType>(
- T.isNull() ? nullptr : T->getContainedDeducedType())) {
- SemaRef.Diag(IdLoc, diag::err_dependent_deduced_tst)
- << (int)SemaRef.getTemplateNameKindForDiagnostics(
- Deduced->getTemplateName())
- << QualType(QualifierLoc.getNestedNameSpecifier()->getAsType(), 0);
- if (auto *TD = Deduced->getTemplateName().getAsTemplateDecl())
- SemaRef.Diag(TD->getLocation(), diag::note_template_decl_here);
- return QualType();
- }
- }
- return T;
+ return SemaRef.CheckTypenameType(Keyword, KeywordLoc, QualifierLoc,
+ *Id, IdLoc, DeducedTSTContext);
}
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForKeyword(Keyword);
@@ -1168,6 +1193,14 @@ public:
QualType RebuildPipeType(QualType ValueType, SourceLocation KWLoc,
bool isReadPipe);
+ /// Build an extended int given its value type.
+ QualType RebuildExtIntType(bool IsUnsigned, unsigned NumBits,
+ SourceLocation Loc);
+
+ /// Build a dependent extended int given its value type.
+ QualType RebuildDependentExtIntType(bool IsUnsigned, Expr *NumBitsExpr,
+ SourceLocation Loc);
+
/// Build a new template name given a nested name specifier, a flag
/// indicating whether the "template" keyword was provided, and the template
/// that the template name refers to.
@@ -1314,9 +1347,10 @@ public:
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
- StmtResult RebuildWhileStmt(SourceLocation WhileLoc,
- Sema::ConditionResult Cond, Stmt *Body) {
- return getSema().ActOnWhileStmt(WhileLoc, Cond, Body);
+ StmtResult RebuildWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
+ Sema::ConditionResult Cond,
+ SourceLocation RParenLoc, Stmt *Body) {
+ return getSema().ActOnWhileStmt(WhileLoc, LParenLoc, Cond, RParenLoc, Body);
}
/// Build a new do-while statement.
@@ -1603,8 +1637,7 @@ public:
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
- OMPClause *RebuildOMPDefaultClause(OpenMPDefaultClauseKind Kind,
- SourceLocation KindKwLoc,
+ OMPClause *RebuildOMPDefaultClause(DefaultKind Kind, SourceLocation KindKwLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
@@ -1704,17 +1737,16 @@ public:
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
- OMPClause *RebuildOMPReductionClause(ArrayRef<Expr *> VarList,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation ColonLoc,
- SourceLocation EndLoc,
- CXXScopeSpec &ReductionIdScopeSpec,
- const DeclarationNameInfo &ReductionId,
- ArrayRef<Expr *> UnresolvedReductions) {
+ OMPClause *RebuildOMPReductionClause(
+ ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation ModifierLoc, SourceLocation ColonLoc,
+ SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
+ const DeclarationNameInfo &ReductionId,
+ ArrayRef<Expr *> UnresolvedReductions) {
return getSema().ActOnOpenMPReductionClause(
- VarList, StartLoc, LParenLoc, ColonLoc, EndLoc, ReductionIdScopeSpec,
- ReductionId, UnresolvedReductions);
+ VarList, Modifier, StartLoc, LParenLoc, ModifierLoc, ColonLoc, EndLoc,
+ ReductionIdScopeSpec, ReductionId, UnresolvedReductions);
}
/// Build a new OpenMP 'task_reduction' clause.
@@ -1813,28 +1845,42 @@ public:
EndLoc);
}
+ /// Build a new OpenMP 'depobj' pseudo clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPDepobjClause(Depobj, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
/// Build a new OpenMP 'depend' pseudo clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
OMPClause *
- RebuildOMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
- SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
- SourceLocation StartLoc, SourceLocation LParenLoc,
- SourceLocation EndLoc) {
- return getSema().ActOnOpenMPDependClause(DepKind, DepLoc, ColonLoc, VarList,
- StartLoc, LParenLoc, EndLoc);
+ RebuildOMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
+ SourceLocation DepLoc, SourceLocation ColonLoc,
+ ArrayRef<Expr *> VarList, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPDependClause(DepModifier, DepKind, DepLoc,
+ ColonLoc, VarList, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'device' clause.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
- OMPClause *RebuildOMPDeviceClause(Expr *Device, SourceLocation StartLoc,
+ OMPClause *RebuildOMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
+ Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
+ SourceLocation ModifierLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPDeviceClause(Device, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().ActOnOpenMPDeviceClause(Modifier, Device, StartLoc,
+ LParenLoc, ModifierLoc, EndLoc);
}
/// Build a new OpenMP 'map' clause.
@@ -1933,6 +1979,16 @@ public:
return getSema().ActOnOpenMPHintClause(Hint, StartLoc, LParenLoc, EndLoc);
}
+ /// Build a new OpenMP 'detach' clause.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPDetachClause(Expr *Evt, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPDetachClause(Evt, StartLoc, LParenLoc, EndLoc);
+ }
+
/// Build a new OpenMP 'dist_schedule' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
@@ -1981,6 +2037,15 @@ public:
return getSema().ActOnOpenMPUseDevicePtrClause(VarList, Locs);
}
+ /// Build a new OpenMP 'use_device_addr' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
+ const OMPVarListLocTy &Locs) {
+ return getSema().ActOnOpenMPUseDeviceAddrClause(VarList, Locs);
+ }
+
/// Build a new OpenMP 'is_device_ptr' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
@@ -2017,6 +2082,67 @@ public:
EndLoc);
}
+ /// Build a new OpenMP 'inclusive' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPInclusiveClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPInclusiveClause(VarList, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// Build a new OpenMP 'exclusive' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPExclusiveClause(ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPExclusiveClause(VarList, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// Build a new OpenMP 'uses_allocators' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPUsesAllocatorsClause(
+ ArrayRef<Sema::UsesAllocatorsData> Data, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPUsesAllocatorClause(StartLoc, LParenLoc, EndLoc,
+ Data);
+ }
+
+ /// Build a new OpenMP 'affinity' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPAffinityClause(SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation ColonLoc,
+ SourceLocation EndLoc, Expr *Modifier,
+ ArrayRef<Expr *> Locators) {
+ return getSema().ActOnOpenMPAffinityClause(StartLoc, LParenLoc, ColonLoc,
+ EndLoc, Modifier, Locators);
+ }
+
+ /// Build a new OpenMP 'order' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPOrderClause(OpenMPOrderClauseKind Kind,
+ SourceLocation KindKwLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPOrderClause(Kind, KindKwLoc, StartLoc,
+ LParenLoc, EndLoc);
+ }
+
/// Rebuild the operand to an Objective-C \@synchronized statement.
///
/// By default, performs semantic analysis to build the new statement.
@@ -2294,16 +2420,53 @@ public:
RBracketLoc);
}
+ /// Build a new matrix subscript expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
+ Expr *ColumnIdx,
+ SourceLocation RBracketLoc) {
+ return getSema().CreateBuiltinMatrixSubscriptExpr(Base, RowIdx, ColumnIdx,
+ RBracketLoc);
+ }
+
/// Build a new array section expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildOMPArraySectionExpr(Expr *Base, SourceLocation LBracketLoc,
Expr *LowerBound,
- SourceLocation ColonLoc, Expr *Length,
+ SourceLocation ColonLocFirst,
+ SourceLocation ColonLocSecond,
+ Expr *Length, Expr *Stride,
SourceLocation RBracketLoc) {
return getSema().ActOnOMPArraySectionExpr(Base, LBracketLoc, LowerBound,
- ColonLoc, Length, RBracketLoc);
+ ColonLocFirst, ColonLocSecond,
+ Length, Stride, RBracketLoc);
+ }
+
+ /// Build a new array shaping expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
+ SourceLocation RParenLoc,
+ ArrayRef<Expr *> Dims,
+ ArrayRef<SourceRange> BracketsRanges) {
+ return getSema().ActOnOMPArrayShapingExpr(Base, LParenLoc, RParenLoc, Dims,
+ BracketsRanges);
+ }
+
+ /// Build a new iterator expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildOMPIteratorExpr(
+ SourceLocation IteratorKwLoc, SourceLocation LLoc, SourceLocation RLoc,
+ ArrayRef<Sema::OMPIteratorData> Data) {
+ return getSema().ActOnOMPIteratorExpr(/*Scope=*/nullptr, IteratorKwLoc,
+ LLoc, RLoc, Data);
}
/// Build a new call expression.
@@ -2314,8 +2477,8 @@ public:
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig = nullptr) {
- return getSema().BuildCallExpr(/*Scope=*/nullptr, Callee, LParenLoc, Args,
- RParenLoc, ExecConfig);
+ return getSema().ActOnCallExpr(
+ /*Scope=*/nullptr, Callee, LParenLoc, Args, RParenLoc, ExecConfig);
}
/// Build a new member access expression.
@@ -2527,10 +2690,10 @@ public:
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
- ExprResult RebuildStmtExpr(SourceLocation LParenLoc,
- Stmt *SubStmt,
- SourceLocation RParenLoc) {
- return getSema().ActOnStmtExpr(LParenLoc, SubStmt, RParenLoc);
+ ExprResult RebuildStmtExpr(SourceLocation LParenLoc, Stmt *SubStmt,
+ SourceLocation RParenLoc, unsigned TemplateDepth) {
+ return getSema().BuildStmtExpr(LParenLoc, SubStmt, RParenLoc,
+ TemplateDepth);
}
/// Build a new __builtin_choose_expr expression.
@@ -2609,6 +2772,10 @@ public:
RAngleLoc, LParenLoc,
SubExpr, RParenLoc);
+ case Stmt::CXXAddrspaceCastExprClass:
+ return getDerived().RebuildCXXAddrspaceCastExpr(
+ OpLoc, LAngleLoc, TInfo, RAngleLoc, LParenLoc, SubExpr, RParenLoc);
+
default:
llvm_unreachable("Invalid C++ named cast");
}
@@ -2682,6 +2849,16 @@ public:
SourceRange(LParenLoc, RParenLoc));
}
+ ExprResult
+ RebuildCXXAddrspaceCastExpr(SourceLocation OpLoc, SourceLocation LAngleLoc,
+ TypeSourceInfo *TInfo, SourceLocation RAngleLoc,
+ SourceLocation LParenLoc, Expr *SubExpr,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXNamedCast(
+ OpLoc, tok::kw_addrspace_cast, TInfo, SubExpr,
+ SourceRange(LAngleLoc, RAngleLoc), SourceRange(LParenLoc, RParenLoc));
+ }
+
/// Build a new C++ functional-style cast expression.
///
/// By default, performs semantic analysis to build the new expression.
@@ -2735,24 +2912,19 @@ public:
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
- ExprResult RebuildCXXUuidofExpr(QualType TypeInfoType,
- SourceLocation TypeidLoc,
- TypeSourceInfo *Operand,
- SourceLocation RParenLoc) {
- return getSema().BuildCXXUuidof(TypeInfoType, TypeidLoc, Operand,
- RParenLoc);
+ ExprResult RebuildCXXUuidofExpr(QualType Type, SourceLocation TypeidLoc,
+ TypeSourceInfo *Operand,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXUuidof(Type, TypeidLoc, Operand, RParenLoc);
}
/// Build a new C++ __uuidof(expr) expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
- ExprResult RebuildCXXUuidofExpr(QualType TypeInfoType,
- SourceLocation TypeidLoc,
- Expr *Operand,
- SourceLocation RParenLoc) {
- return getSema().BuildCXXUuidof(TypeInfoType, TypeidLoc, Operand,
- RParenLoc);
+ ExprResult RebuildCXXUuidofExpr(QualType Type, SourceLocation TypeidLoc,
+ Expr *Operand, SourceLocation RParenLoc) {
+ return getSema().BuildCXXUuidof(Type, TypeidLoc, Operand, RParenLoc);
}
/// Build a new C++ "this" expression.
@@ -2931,9 +3103,14 @@ public:
bool RequiresZeroInit,
CXXConstructExpr::ConstructionKind ConstructKind,
SourceRange ParenRange) {
+ // Reconstruct the constructor we originally found, which might be
+ // different if this is a call to an inherited constructor.
+ CXXConstructorDecl *FoundCtor = Constructor;
+ if (Constructor->isInheritingConstructor())
+ FoundCtor = Constructor->getInheritedConstructor().getConstructor();
+
SmallVector<Expr*, 8> ConvertedArgs;
- if (getSema().CompleteConstructorCall(Constructor, Args, Loc,
- ConvertedArgs))
+ if (getSema().CompleteConstructorCall(FoundCtor, Args, Loc, ConvertedArgs))
return ExprError();
return getSema().BuildCXXConstructExpr(Loc, T, Constructor,
@@ -3078,7 +3255,56 @@ public:
return Result;
}
- /// \brief Build a new Objective-C boxed expression.
+ /// \brief Build a new requires expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildRequiresExpr(SourceLocation RequiresKWLoc,
+ RequiresExprBodyDecl *Body,
+ ArrayRef<ParmVarDecl *> LocalParameters,
+ ArrayRef<concepts::Requirement *> Requirements,
+ SourceLocation ClosingBraceLoc) {
+ return RequiresExpr::Create(SemaRef.Context, RequiresKWLoc, Body,
+ LocalParameters, Requirements, ClosingBraceLoc);
+ }
+
+ concepts::TypeRequirement *
+ RebuildTypeRequirement(
+ concepts::Requirement::SubstitutionDiagnostic *SubstDiag) {
+ return SemaRef.BuildTypeRequirement(SubstDiag);
+ }
+
+ concepts::TypeRequirement *RebuildTypeRequirement(TypeSourceInfo *T) {
+ return SemaRef.BuildTypeRequirement(T);
+ }
+
+ concepts::ExprRequirement *
+ RebuildExprRequirement(
+ concepts::Requirement::SubstitutionDiagnostic *SubstDiag, bool IsSimple,
+ SourceLocation NoexceptLoc,
+ concepts::ExprRequirement::ReturnTypeRequirement Ret) {
+ return SemaRef.BuildExprRequirement(SubstDiag, IsSimple, NoexceptLoc,
+ std::move(Ret));
+ }
+
+ concepts::ExprRequirement *
+ RebuildExprRequirement(Expr *E, bool IsSimple, SourceLocation NoexceptLoc,
+ concepts::ExprRequirement::ReturnTypeRequirement Ret) {
+ return SemaRef.BuildExprRequirement(E, IsSimple, NoexceptLoc,
+ std::move(Ret));
+ }
+
+ concepts::NestedRequirement *
+ RebuildNestedRequirement(
+ concepts::Requirement::SubstitutionDiagnostic *SubstDiag) {
+ return SemaRef.BuildNestedRequirement(SubstDiag);
+ }
+
+ concepts::NestedRequirement *RebuildNestedRequirement(Expr *Constraint) {
+ return SemaRef.BuildNestedRequirement(Constraint);
+ }
+
+ /// \brief Build a new Objective-C boxed expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -3382,6 +3608,11 @@ public:
Sema::AtomicArgumentOrder::AST);
}
+ ExprResult RebuildRecoveryExpr(SourceLocation BeginLoc, SourceLocation EndLoc,
+ ArrayRef<Expr *> SubExprs) {
+ return getSema().CreateRecoveryExpr(BeginLoc, EndLoc, SubExprs);
+ }
+
private:
TypeLoc TransformTypeInObjectScope(TypeLoc TL,
QualType ObjectType,
@@ -3446,10 +3677,10 @@ OMPClause *TreeTransform<Derived>::TransformOMPClause(OMPClause *S) {
switch (S->getClauseKind()) {
default: break;
// Transform individual clause nodes
-#define OPENMP_CLAUSE(Name, Class) \
- case OMPC_ ## Name : \
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
+ case Enum: \
return getDerived().Transform ## Class(cast<Class>(S));
-#include "clang/Basic/OpenMPKinds.def"
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
}
return S;
@@ -3966,50 +4197,8 @@ template<typename Derived>
void TreeTransform<Derived>::InventTemplateArgumentLoc(
const TemplateArgument &Arg,
TemplateArgumentLoc &Output) {
- SourceLocation Loc = getDerived().getBaseLocation();
- switch (Arg.getKind()) {
- case TemplateArgument::Null:
- llvm_unreachable("null template argument in TreeTransform");
- break;
-
- case TemplateArgument::Type:
- Output = TemplateArgumentLoc(Arg,
- SemaRef.Context.getTrivialTypeSourceInfo(Arg.getAsType(), Loc));
-
- break;
-
- case TemplateArgument::Template:
- case TemplateArgument::TemplateExpansion: {
- NestedNameSpecifierLocBuilder Builder;
- TemplateName Template = Arg.getAsTemplateOrTemplatePattern();
- if (DependentTemplateName *DTN = Template.getAsDependentTemplateName())
- Builder.MakeTrivial(SemaRef.Context, DTN->getQualifier(), Loc);
- else if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
- Builder.MakeTrivial(SemaRef.Context, QTN->getQualifier(), Loc);
-
- if (Arg.getKind() == TemplateArgument::Template)
- Output = TemplateArgumentLoc(Arg,
- Builder.getWithLocInContext(SemaRef.Context),
- Loc);
- else
- Output = TemplateArgumentLoc(Arg,
- Builder.getWithLocInContext(SemaRef.Context),
- Loc, Loc);
-
- break;
- }
-
- case TemplateArgument::Expression:
- Output = TemplateArgumentLoc(Arg, Arg.getAsExpr());
- break;
-
- case TemplateArgument::Declaration:
- case TemplateArgument::Integral:
- case TemplateArgument::Pack:
- case TemplateArgument::NullPtr:
- Output = TemplateArgumentLoc(Arg, TemplateArgumentLocInfo());
- break;
- }
+ Output = getSema().getTrivialTemplateArgumentLoc(
+ Arg, QualType(), getDerived().getBaseLocation());
}
template<typename Derived>
@@ -4019,12 +4208,45 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
const TemplateArgument &Arg = Input.getArgument();
switch (Arg.getKind()) {
case TemplateArgument::Null:
- case TemplateArgument::Integral:
case TemplateArgument::Pack:
- case TemplateArgument::Declaration:
- case TemplateArgument::NullPtr:
llvm_unreachable("Unexpected TemplateArgument");
+ case TemplateArgument::Integral:
+ case TemplateArgument::NullPtr:
+ case TemplateArgument::Declaration: {
+ // Transform a resolved template argument straight to a resolved template
+ // argument. We get here when substituting into an already-substituted
+ // template type argument during concept satisfaction checking.
+ QualType T = Arg.getNonTypeTemplateArgumentType();
+ QualType NewT = getDerived().TransformType(T);
+ if (NewT.isNull())
+ return true;
+
+ ValueDecl *D = Arg.getKind() == TemplateArgument::Declaration
+ ? Arg.getAsDecl()
+ : nullptr;
+ ValueDecl *NewD = D ? cast_or_null<ValueDecl>(getDerived().TransformDecl(
+ getDerived().getBaseLocation(), D))
+ : nullptr;
+ if (D && !NewD)
+ return true;
+
+ if (NewT == T && D == NewD)
+ Output = Input;
+ else if (Arg.getKind() == TemplateArgument::Integral)
+ Output = TemplateArgumentLoc(
+ TemplateArgument(getSema().Context, Arg.getAsIntegral(), NewT),
+ TemplateArgumentLocInfo());
+ else if (Arg.getKind() == TemplateArgument::NullPtr)
+ Output = TemplateArgumentLoc(TemplateArgument(NewT, /*IsNullPtr=*/true),
+ TemplateArgumentLocInfo());
+ else
+ Output = TemplateArgumentLoc(TemplateArgument(NewD, NewT),
+ TemplateArgumentLocInfo());
+
+ return false;
+ }
+
case TemplateArgument::Type: {
TypeSourceInfo *DI = Input.getTypeSourceInfo();
if (!DI)
@@ -4456,7 +4678,10 @@ QualType TreeTransform<Derived>::RebuildQualifiedType(QualType T,
Deduced =
SemaRef.Context.getQualifiedType(Deduced.getUnqualifiedType(), Qs);
T = SemaRef.Context.getAutoType(Deduced, AutoTy->getKeyword(),
- AutoTy->isDependentType());
+ AutoTy->isDependentType(),
+ /*isPack=*/false,
+ AutoTy->getTypeConstraintConcept(),
+ AutoTy->getTypeConstraintArguments());
} else {
// Otherwise, complain about the addition of a qualifier to an
// already-qualified type.
@@ -5016,6 +5241,86 @@ QualType TreeTransform<Derived>::TransformDependentSizedExtVectorType(
}
template <typename Derived>
+QualType
+TreeTransform<Derived>::TransformConstantMatrixType(TypeLocBuilder &TLB,
+ ConstantMatrixTypeLoc TL) {
+ const ConstantMatrixType *T = TL.getTypePtr();
+ QualType ElementType = getDerived().TransformType(T->getElementType());
+ if (ElementType.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() || ElementType != T->getElementType()) {
+ Result = getDerived().RebuildConstantMatrixType(
+ ElementType, T->getNumRows(), T->getNumColumns());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ ConstantMatrixTypeLoc NewTL = TLB.push<ConstantMatrixTypeLoc>(Result);
+ NewTL.setAttrNameLoc(TL.getAttrNameLoc());
+ NewTL.setAttrOperandParensRange(TL.getAttrOperandParensRange());
+ NewTL.setAttrRowOperand(TL.getAttrRowOperand());
+ NewTL.setAttrColumnOperand(TL.getAttrColumnOperand());
+
+ return Result;
+}
+
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformDependentSizedMatrixType(
+ TypeLocBuilder &TLB, DependentSizedMatrixTypeLoc TL) {
+ const DependentSizedMatrixType *T = TL.getTypePtr();
+
+ QualType ElementType = getDerived().TransformType(T->getElementType());
+ if (ElementType.isNull()) {
+ return QualType();
+ }
+
+ // Matrix dimensions are constant expressions.
+ EnterExpressionEvaluationContext Unevaluated(
+ SemaRef, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+
+ Expr *origRows = TL.getAttrRowOperand();
+ if (!origRows)
+ origRows = T->getRowExpr();
+ Expr *origColumns = TL.getAttrColumnOperand();
+ if (!origColumns)
+ origColumns = T->getColumnExpr();
+
+ ExprResult rowResult = getDerived().TransformExpr(origRows);
+ rowResult = SemaRef.ActOnConstantExpression(rowResult);
+ if (rowResult.isInvalid())
+ return QualType();
+
+ ExprResult columnResult = getDerived().TransformExpr(origColumns);
+ columnResult = SemaRef.ActOnConstantExpression(columnResult);
+ if (columnResult.isInvalid())
+ return QualType();
+
+ Expr *rows = rowResult.get();
+ Expr *columns = columnResult.get();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() || ElementType != T->getElementType() ||
+ rows != origRows || columns != origColumns) {
+ Result = getDerived().RebuildDependentSizedMatrixType(
+ ElementType, rows, columns, T->getAttributeLoc());
+
+ if (Result.isNull())
+ return QualType();
+ }
+
+ // We might have any sort of matrix type now, but fortunately they
+ // all have the same location layout.
+ MatrixTypeLoc NewTL = TLB.push<MatrixTypeLoc>(Result);
+ NewTL.setAttrNameLoc(TL.getAttrNameLoc());
+ NewTL.setAttrOperandParensRange(TL.getAttrOperandParensRange());
+ NewTL.setAttrRowOperand(rows);
+ NewTL.setAttrColumnOperand(columns);
+ return Result;
+}
+
+template <typename Derived>
QualType TreeTransform<Derived>::TransformDependentAddressSpaceType(
TypeLocBuilder &TLB, DependentAddressSpaceTypeLoc TL) {
const DependentAddressSpaceType *T = TL.getTypePtr();
@@ -5189,21 +5494,29 @@ bool TreeTransform<Derived>::TransformFunctionTypeParams(
PackExpansionTypeLoc ExpansionTL = TL.castAs<PackExpansionTypeLoc>();
TypeLoc Pattern = ExpansionTL.getPatternLoc();
SemaRef.collectUnexpandedParameterPacks(Pattern, Unexpanded);
- assert(Unexpanded.size() > 0 && "Could not find parameter packs!");
// Determine whether we should expand the parameter packs.
bool ShouldExpand = false;
bool RetainExpansion = false;
- Optional<unsigned> OrigNumExpansions =
- ExpansionTL.getTypePtr()->getNumExpansions();
- NumExpansions = OrigNumExpansions;
- if (getDerived().TryExpandParameterPacks(ExpansionTL.getEllipsisLoc(),
- Pattern.getSourceRange(),
- Unexpanded,
- ShouldExpand,
- RetainExpansion,
- NumExpansions)) {
- return true;
+ Optional<unsigned> OrigNumExpansions;
+ if (Unexpanded.size() > 0) {
+ OrigNumExpansions = ExpansionTL.getTypePtr()->getNumExpansions();
+ NumExpansions = OrigNumExpansions;
+ if (getDerived().TryExpandParameterPacks(ExpansionTL.getEllipsisLoc(),
+ Pattern.getSourceRange(),
+ Unexpanded,
+ ShouldExpand,
+ RetainExpansion,
+ NumExpansions)) {
+ return true;
+ }
+ } else {
+#ifndef NDEBUG
+ const AutoType *AT =
+ Pattern.getType().getTypePtr()->getContainedAutoType();
+ assert((AT && (!AT->isDeduced() || AT->getDeducedType().isNull())) &&
+ "Could not find parameter packs or undeduced auto type!");
+#endif
}
if (ShouldExpand) {
@@ -5263,6 +5576,9 @@ bool TreeTransform<Derived>::TransformFunctionTypeParams(
indexAdjustment,
NumExpansions,
/*ExpectParameterPack=*/true);
+ assert(NewParm->isParameterPack() &&
+ "Parameter pack no longer a parameter pack after "
+ "transformation.");
} else {
NewParm = getDerived().TransformFunctionTypeParam(
OldParm, indexAdjustment, None, /*ExpectParameterPack=*/ false);
@@ -5768,32 +6084,6 @@ QualType TreeTransform<Derived>::TransformUnaryTransformType(
}
template<typename Derived>
-QualType TreeTransform<Derived>::TransformAutoType(TypeLocBuilder &TLB,
- AutoTypeLoc TL) {
- const AutoType *T = TL.getTypePtr();
- QualType OldDeduced = T->getDeducedType();
- QualType NewDeduced;
- if (!OldDeduced.isNull()) {
- NewDeduced = getDerived().TransformType(OldDeduced);
- if (NewDeduced.isNull())
- return QualType();
- }
-
- QualType Result = TL.getType();
- if (getDerived().AlwaysRebuild() || NewDeduced != OldDeduced ||
- T->isDependentType()) {
- Result = getDerived().RebuildAutoType(NewDeduced, T->getKeyword());
- if (Result.isNull())
- return QualType();
- }
-
- AutoTypeLoc NewTL = TLB.push<AutoTypeLoc>(Result);
- NewTL.setNameLoc(TL.getNameLoc());
-
- return Result;
-}
-
-template<typename Derived>
QualType TreeTransform<Derived>::TransformDeducedTemplateSpecializationType(
TypeLocBuilder &TLB, DeducedTemplateSpecializationTypeLoc TL) {
const DeducedTemplateSpecializationType *T = TL.getTypePtr();
@@ -5990,6 +6280,57 @@ QualType TreeTransform<Derived>::TransformPipeType(TypeLocBuilder &TLB,
return Result;
}
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformExtIntType(TypeLocBuilder &TLB,
+ ExtIntTypeLoc TL) {
+ const ExtIntType *EIT = TL.getTypePtr();
+ QualType Result = TL.getType();
+
+ if (getDerived().AlwaysRebuild()) {
+ Result = getDerived().RebuildExtIntType(EIT->isUnsigned(),
+ EIT->getNumBits(), TL.getNameLoc());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ ExtIntTypeLoc NewTL = TLB.push<ExtIntTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return Result;
+}
+
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformDependentExtIntType(
+ TypeLocBuilder &TLB, DependentExtIntTypeLoc TL) {
+ const DependentExtIntType *EIT = TL.getTypePtr();
+
+ EnterExpressionEvaluationContext Unevaluated(
+ SemaRef, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+ ExprResult BitsExpr = getDerived().TransformExpr(EIT->getNumBitsExpr());
+ BitsExpr = SemaRef.ActOnConstantExpression(BitsExpr);
+
+ if (BitsExpr.isInvalid())
+ return QualType();
+
+ QualType Result = TL.getType();
+
+ if (getDerived().AlwaysRebuild() || BitsExpr.get() != EIT->getNumBitsExpr()) {
+ Result = getDerived().RebuildDependentExtIntType(
+ EIT->isUnsigned(), BitsExpr.get(), TL.getNameLoc());
+
+ if (Result.isNull())
+ return QualType();
+ }
+
+ if (isa<DependentExtIntType>(Result)) {
+ DependentExtIntTypeLoc NewTL = TLB.push<DependentExtIntTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ } else {
+ ExtIntTypeLoc NewTL = TLB.push<ExtIntTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ }
+ return Result;
+}
+
/// Simple iterator that traverses the template arguments in a
/// container that provides a \c getArgLoc() member function.
///
@@ -6054,6 +6395,71 @@ QualType TreeTransform<Derived>::TransformPipeType(TypeLocBuilder &TLB,
}
};
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformAutoType(TypeLocBuilder &TLB,
+ AutoTypeLoc TL) {
+ const AutoType *T = TL.getTypePtr();
+ QualType OldDeduced = T->getDeducedType();
+ QualType NewDeduced;
+ if (!OldDeduced.isNull()) {
+ NewDeduced = getDerived().TransformType(OldDeduced);
+ if (NewDeduced.isNull())
+ return QualType();
+ }
+
+ ConceptDecl *NewCD = nullptr;
+ TemplateArgumentListInfo NewTemplateArgs;
+ NestedNameSpecifierLoc NewNestedNameSpec;
+ if (TL.getTypePtr()->isConstrained()) {
+ NewCD = cast_or_null<ConceptDecl>(
+ getDerived().TransformDecl(
+ TL.getConceptNameLoc(),
+ TL.getTypePtr()->getTypeConstraintConcept()));
+
+ NewTemplateArgs.setLAngleLoc(TL.getLAngleLoc());
+ NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc());
+ typedef TemplateArgumentLocContainerIterator<AutoTypeLoc> ArgIterator;
+ if (getDerived().TransformTemplateArguments(ArgIterator(TL, 0),
+ ArgIterator(TL,
+ TL.getNumArgs()),
+ NewTemplateArgs))
+ return QualType();
+
+ if (TL.getNestedNameSpecifierLoc()) {
+ NewNestedNameSpec
+ = getDerived().TransformNestedNameSpecifierLoc(
+ TL.getNestedNameSpecifierLoc());
+ if (!NewNestedNameSpec)
+ return QualType();
+ }
+ }
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() || NewDeduced != OldDeduced ||
+ T->isDependentType()) {
+ llvm::SmallVector<TemplateArgument, 4> NewArgList;
+ NewArgList.reserve(NewArgList.size());
+ for (const auto &ArgLoc : NewTemplateArgs.arguments())
+ NewArgList.push_back(ArgLoc.getArgument());
+ Result = getDerived().RebuildAutoType(NewDeduced, T->getKeyword(), NewCD,
+ NewArgList);
+ if (Result.isNull())
+ return QualType();
+ }
+
+ AutoTypeLoc NewTL = TLB.push<AutoTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ NewTL.setNestedNameSpecifierLoc(NewNestedNameSpec);
+ NewTL.setTemplateKWLoc(TL.getTemplateKWLoc());
+ NewTL.setConceptNameLoc(TL.getConceptNameLoc());
+ NewTL.setFoundDecl(TL.getFoundDecl());
+ NewTL.setLAngleLoc(TL.getLAngleLoc());
+ NewTL.setRAngleLoc(TL.getRAngleLoc());
+ for (unsigned I = 0; I < TL.getNumArgs(); ++I)
+ NewTL.setArgLocInfo(I, NewTemplateArgs.arguments()[I].getLocInfo());
+
+ return Result;
+}
template <typename Derived>
QualType TreeTransform<Derived>::TransformTemplateSpecializationType(
@@ -6930,7 +7336,8 @@ TreeTransform<Derived>::TransformWhileStmt(WhileStmt *S) {
Body.get() == S->getBody())
return Owned(S);
- return getDerived().RebuildWhileStmt(S->getWhileLoc(), Cond, Body.get());
+ return getDerived().RebuildWhileStmt(S->getWhileLoc(), S->getLParenLoc(),
+ Cond, S->getRParenLoc(), Body.get());
}
template<typename Derived>
@@ -7228,7 +7635,8 @@ TreeTransform<Derived>::TransformCoroutineBodyStmt(CoroutineBodyStmt *S) {
return StmtError();
StmtResult FinalSuspend =
getDerived().TransformStmt(S->getFinalSuspendStmt());
- if (FinalSuspend.isInvalid())
+ if (FinalSuspend.isInvalid() ||
+ !SemaRef.checkFinalSuspendNoThrow(FinalSuspend.get()))
return StmtError();
ScopeInfo->setCoroutineSuspends(InitSuspend.get(), FinalSuspend.get());
assert(isa<Expr>(InitSuspend.get()) && isa<Expr>(FinalSuspend.get()));
@@ -7684,8 +8092,12 @@ TreeTransform<Derived>::TransformCXXForRangeStmt(CXXForRangeStmt *S) {
Cond.get(),
Inc.get(), LoopVar.get(),
S->getRParenLoc());
- if (NewStmt.isInvalid())
+ if (NewStmt.isInvalid() && LoopVar.get() != S->getLoopVarStmt()) {
+ // Might not have attached any initializer to the loop variable.
+ getSema().ActOnInitializerError(
+ cast<DeclStmt>(LoopVar.get())->getSingleDecl());
return StmtError();
+ }
}
StmtResult Body = getDerived().TransformStmt(S->getBody());
@@ -8152,6 +8564,28 @@ TreeTransform<Derived>::TransformOMPFlushDirective(OMPFlushDirective *D) {
template <typename Derived>
StmtResult
+TreeTransform<Derived>::TransformOMPDepobjDirective(OMPDepobjDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_depobj, DirName, nullptr,
+ D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPScanDirective(OMPScanDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_scan, DirName, nullptr,
+ D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
TreeTransform<Derived>::TransformOMPOrderedDirective(OMPOrderedDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_ordered, DirName, nullptr,
@@ -8638,6 +9072,19 @@ TreeTransform<Derived>::TransformOMPOrderedClause(OMPOrderedClause *C) {
template <typename Derived>
OMPClause *
+TreeTransform<Derived>::TransformOMPDetachClause(OMPDetachClause *C) {
+ ExprResult E;
+ if (Expr *Evt = C->getEventHandler()) {
+ E = getDerived().TransformExpr(Evt);
+ if (E.isInvalid())
+ return nullptr;
+ }
+ return getDerived().RebuildOMPDetachClause(E.get(), C->getBeginLoc(),
+ C->getLParenLoc(), C->getEndLoc());
+}
+
+template <typename Derived>
+OMPClause *
TreeTransform<Derived>::TransformOMPNowaitClause(OMPNowaitClause *C) {
// No need to rebuild this clause, no template-dependent parameters.
return C;
@@ -8692,6 +9139,34 @@ TreeTransform<Derived>::TransformOMPSeqCstClause(OMPSeqCstClause *C) {
template <typename Derived>
OMPClause *
+TreeTransform<Derived>::TransformOMPAcqRelClause(OMPAcqRelClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPAcquireClause(OMPAcquireClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPReleaseClause(OMPReleaseClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPRelaxedClause(OMPRelaxedClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *
TreeTransform<Derived>::TransformOMPThreadsClause(OMPThreadsClause *C) {
// No need to rebuild this clause, no template-dependent parameters.
return C;
@@ -8711,6 +9186,13 @@ TreeTransform<Derived>::TransformOMPNogroupClause(OMPNogroupClause *C) {
}
template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPDestroyClause(OMPDestroyClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
OMPClause *TreeTransform<Derived>::TransformOMPUnifiedAddressClause(
OMPUnifiedAddressClause *C) {
llvm_unreachable("unified_address clause cannot appear in dependent context");
@@ -8847,8 +9329,9 @@ TreeTransform<Derived>::TransformOMPReductionClause(OMPReductionClause *C) {
UnresolvedReductions.push_back(nullptr);
}
return getDerived().RebuildOMPReductionClause(
- Vars, C->getBeginLoc(), C->getLParenLoc(), C->getColonLoc(),
- C->getEndLoc(), ReductionIdScopeSpec, NameInfo, UnresolvedReductions);
+ Vars, C->getModifier(), C->getBeginLoc(), C->getLParenLoc(),
+ C->getModifierLoc(), C->getColonLoc(), C->getEndLoc(),
+ ReductionIdScopeSpec, NameInfo, UnresolvedReductions);
}
template <typename Derived>
@@ -9025,8 +9508,25 @@ OMPClause *TreeTransform<Derived>::TransformOMPFlushClause(OMPFlushClause *C) {
template <typename Derived>
OMPClause *
+TreeTransform<Derived>::TransformOMPDepobjClause(OMPDepobjClause *C) {
+ ExprResult E = getDerived().TransformExpr(C->getDepobj());
+ if (E.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPDepobjClause(E.get(), C->getBeginLoc(),
+ C->getLParenLoc(), C->getEndLoc());
+}
+
+template <typename Derived>
+OMPClause *
TreeTransform<Derived>::TransformOMPDependClause(OMPDependClause *C) {
llvm::SmallVector<Expr *, 16> Vars;
+ Expr *DepModifier = C->getModifier();
+ if (DepModifier) {
+ ExprResult DepModRes = getDerived().TransformExpr(DepModifier);
+ if (DepModRes.isInvalid())
+ return nullptr;
+ DepModifier = DepModRes.get();
+ }
Vars.reserve(C->varlist_size());
for (auto *VE : C->varlists()) {
ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
@@ -9035,8 +9535,9 @@ TreeTransform<Derived>::TransformOMPDependClause(OMPDependClause *C) {
Vars.push_back(EVar.get());
}
return getDerived().RebuildOMPDependClause(
- C->getDependencyKind(), C->getDependencyLoc(), C->getColonLoc(), Vars,
- C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+ DepModifier, C->getDependencyKind(), C->getDependencyLoc(),
+ C->getColonLoc(), Vars, C->getBeginLoc(), C->getLParenLoc(),
+ C->getEndLoc());
}
template <typename Derived>
@@ -9045,8 +9546,9 @@ TreeTransform<Derived>::TransformOMPDeviceClause(OMPDeviceClause *C) {
ExprResult E = getDerived().TransformExpr(C->getDevice());
if (E.isInvalid())
return nullptr;
- return getDerived().RebuildOMPDeviceClause(E.get(), C->getBeginLoc(),
- C->getLParenLoc(), C->getEndLoc());
+ return getDerived().RebuildOMPDeviceClause(
+ C->getModifier(), E.get(), C->getBeginLoc(), C->getLParenLoc(),
+ C->getModifierLoc(), C->getEndLoc());
}
template <typename Derived, class T>
@@ -9269,6 +9771,21 @@ OMPClause *TreeTransform<Derived>::TransformOMPUseDevicePtrClause(
}
template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPUseDeviceAddrClause(
+ OMPUseDeviceAddrClause *C) {
+ llvm::SmallVector<Expr *, 16> Vars;
+ Vars.reserve(C->varlist_size());
+ for (auto *VE : C->varlists()) {
+ ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
+ if (EVar.isInvalid())
+ return nullptr;
+ Vars.push_back(EVar.get());
+ }
+ OMPVarListLocTy Locs(C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+ return getDerived().RebuildOMPUseDeviceAddrClause(Vars, Locs);
+}
+
+template <typename Derived>
OMPClause *
TreeTransform<Derived>::TransformOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
llvm::SmallVector<Expr *, 16> Vars;
@@ -9298,6 +9815,91 @@ TreeTransform<Derived>::TransformOMPNontemporalClause(OMPNontemporalClause *C) {
Vars, C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
}
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPInclusiveClause(OMPInclusiveClause *C) {
+ llvm::SmallVector<Expr *, 16> Vars;
+ Vars.reserve(C->varlist_size());
+ for (auto *VE : C->varlists()) {
+ ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
+ if (EVar.isInvalid())
+ return nullptr;
+ Vars.push_back(EVar.get());
+ }
+ return getDerived().RebuildOMPInclusiveClause(
+ Vars, C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPExclusiveClause(OMPExclusiveClause *C) {
+ llvm::SmallVector<Expr *, 16> Vars;
+ Vars.reserve(C->varlist_size());
+ for (auto *VE : C->varlists()) {
+ ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
+ if (EVar.isInvalid())
+ return nullptr;
+ Vars.push_back(EVar.get());
+ }
+ return getDerived().RebuildOMPExclusiveClause(
+ Vars, C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPUsesAllocatorsClause(
+ OMPUsesAllocatorsClause *C) {
+ SmallVector<Sema::UsesAllocatorsData, 16> Data;
+ Data.reserve(C->getNumberOfAllocators());
+ for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
+ OMPUsesAllocatorsClause::Data D = C->getAllocatorData(I);
+ ExprResult Allocator = getDerived().TransformExpr(D.Allocator);
+ if (Allocator.isInvalid())
+ continue;
+ ExprResult AllocatorTraits;
+ if (Expr *AT = D.AllocatorTraits) {
+ AllocatorTraits = getDerived().TransformExpr(AT);
+ if (AllocatorTraits.isInvalid())
+ continue;
+ }
+ Sema::UsesAllocatorsData &NewD = Data.emplace_back();
+ NewD.Allocator = Allocator.get();
+ NewD.AllocatorTraits = AllocatorTraits.get();
+ NewD.LParenLoc = D.LParenLoc;
+ NewD.RParenLoc = D.RParenLoc;
+ }
+ return getDerived().RebuildOMPUsesAllocatorsClause(
+ Data, C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPAffinityClause(OMPAffinityClause *C) {
+ SmallVector<Expr *, 4> Locators;
+ Locators.reserve(C->varlist_size());
+ ExprResult ModifierRes;
+ if (Expr *Modifier = C->getModifier()) {
+ ModifierRes = getDerived().TransformExpr(Modifier);
+ if (ModifierRes.isInvalid())
+ return nullptr;
+ }
+ for (Expr *E : C->varlists()) {
+ ExprResult Locator = getDerived().TransformExpr(E);
+ if (Locator.isInvalid())
+ continue;
+ Locators.push_back(Locator.get());
+ }
+ return getDerived().RebuildOMPAffinityClause(
+ C->getBeginLoc(), C->getLParenLoc(), C->getColonLoc(), C->getEndLoc(),
+ ModifierRes.get(), Locators);
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPOrderClause(OMPOrderClause *C) {
+ return getDerived().RebuildOMPOrderClause(C->getKind(), C->getKindKwLoc(),
+ C->getBeginLoc(), C->getLParenLoc(),
+ C->getEndLoc());
+}
+
//===----------------------------------------------------------------------===//
// Expression transformation
//===----------------------------------------------------------------------===//
@@ -9581,6 +10183,24 @@ TreeTransform<Derived>::TransformTypoExpr(TypoExpr *E) {
return E;
}
+template <typename Derived>
+ExprResult TreeTransform<Derived>::TransformRecoveryExpr(RecoveryExpr *E) {
+ llvm::SmallVector<Expr *, 8> Children;
+ bool Changed = false;
+ for (Expr *C : E->subExpressions()) {
+ ExprResult NewC = getDerived().TransformExpr(C);
+ if (NewC.isInvalid())
+ return ExprError();
+ Children.push_back(NewC.get());
+
+ Changed |= NewC.get() != C;
+ }
+ if (!getDerived().AlwaysRebuild() && !Changed)
+ return E;
+ return getDerived().RebuildRecoveryExpr(E->getBeginLoc(), E->getEndLoc(),
+ Children);
+}
+
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformPseudoObjectExpr(PseudoObjectExpr *E) {
@@ -9680,6 +10300,29 @@ TreeTransform<Derived>::TransformArraySubscriptExpr(ArraySubscriptExpr *E) {
template <typename Derived>
ExprResult
+TreeTransform<Derived>::TransformMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
+ ExprResult Base = getDerived().TransformExpr(E->getBase());
+ if (Base.isInvalid())
+ return ExprError();
+
+ ExprResult RowIdx = getDerived().TransformExpr(E->getRowIdx());
+ if (RowIdx.isInvalid())
+ return ExprError();
+
+ ExprResult ColumnIdx = getDerived().TransformExpr(E->getColumnIdx());
+ if (ColumnIdx.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() && Base.get() == E->getBase() &&
+ RowIdx.get() == E->getRowIdx() && ColumnIdx.get() == E->getColumnIdx())
+ return E;
+
+ return getDerived().RebuildMatrixSubscriptExpr(
+ Base.get(), RowIdx.get(), ColumnIdx.get(), E->getRBracketLoc());
+}
+
+template <typename Derived>
+ExprResult
TreeTransform<Derived>::TransformOMPArraySectionExpr(OMPArraySectionExpr *E) {
ExprResult Base = getDerived().TransformExpr(E->getBase());
if (Base.isInvalid())
@@ -9699,13 +10342,105 @@ TreeTransform<Derived>::TransformOMPArraySectionExpr(OMPArraySectionExpr *E) {
return ExprError();
}
+ ExprResult Stride;
+ if (Expr *Str = E->getStride()) {
+ Stride = getDerived().TransformExpr(Str);
+ if (Stride.isInvalid())
+ return ExprError();
+ }
+
if (!getDerived().AlwaysRebuild() && Base.get() == E->getBase() &&
LowerBound.get() == E->getLowerBound() && Length.get() == E->getLength())
return E;
return getDerived().RebuildOMPArraySectionExpr(
- Base.get(), E->getBase()->getEndLoc(), LowerBound.get(), E->getColonLoc(),
- Length.get(), E->getRBracketLoc());
+ Base.get(), E->getBase()->getEndLoc(), LowerBound.get(),
+ E->getColonLocFirst(), E->getColonLocSecond(), Length.get(), Stride.get(),
+ E->getRBracketLoc());
+}
+
+template <typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformOMPArrayShapingExpr(OMPArrayShapingExpr *E) {
+ ExprResult Base = getDerived().TransformExpr(E->getBase());
+ if (Base.isInvalid())
+ return ExprError();
+
+ SmallVector<Expr *, 4> Dims;
+ bool ErrorFound = false;
+ for (Expr *Dim : E->getDimensions()) {
+ ExprResult DimRes = getDerived().TransformExpr(Dim);
+ if (DimRes.isInvalid()) {
+ ErrorFound = true;
+ continue;
+ }
+ Dims.push_back(DimRes.get());
+ }
+
+ if (ErrorFound)
+ return ExprError();
+ return getDerived().RebuildOMPArrayShapingExpr(Base.get(), E->getLParenLoc(),
+ E->getRParenLoc(), Dims,
+ E->getBracketsRanges());
+}
+
+template <typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformOMPIteratorExpr(OMPIteratorExpr *E) {
+ unsigned NumIterators = E->numOfIterators();
+ SmallVector<Sema::OMPIteratorData, 4> Data(NumIterators);
+
+ bool ErrorFound = false;
+ bool NeedToRebuild = getDerived().AlwaysRebuild();
+ for (unsigned I = 0; I < NumIterators; ++I) {
+ auto *D = cast<VarDecl>(E->getIteratorDecl(I));
+ Data[I].DeclIdent = D->getIdentifier();
+ Data[I].DeclIdentLoc = D->getLocation();
+ if (D->getLocation() == D->getBeginLoc()) {
+ assert(SemaRef.Context.hasSameType(D->getType(), SemaRef.Context.IntTy) &&
+ "Implicit type must be int.");
+ } else {
+ TypeSourceInfo *TSI = getDerived().TransformType(D->getTypeSourceInfo());
+ QualType DeclTy = getDerived().TransformType(D->getType());
+ Data[I].Type = SemaRef.CreateParsedType(DeclTy, TSI);
+ }
+ OMPIteratorExpr::IteratorRange Range = E->getIteratorRange(I);
+ ExprResult Begin = getDerived().TransformExpr(Range.Begin);
+ ExprResult End = getDerived().TransformExpr(Range.End);
+ ExprResult Step = getDerived().TransformExpr(Range.Step);
+ ErrorFound = ErrorFound ||
+ !(!D->getTypeSourceInfo() || (Data[I].Type.getAsOpaquePtr() &&
+ !Data[I].Type.get().isNull())) ||
+ Begin.isInvalid() || End.isInvalid() || Step.isInvalid();
+ if (ErrorFound)
+ continue;
+ Data[I].Range.Begin = Begin.get();
+ Data[I].Range.End = End.get();
+ Data[I].Range.Step = Step.get();
+ Data[I].AssignLoc = E->getAssignLoc(I);
+ Data[I].ColonLoc = E->getColonLoc(I);
+ Data[I].SecColonLoc = E->getSecondColonLoc(I);
+ NeedToRebuild =
+ NeedToRebuild ||
+ (D->getTypeSourceInfo() && Data[I].Type.get().getTypePtrOrNull() !=
+ D->getType().getTypePtrOrNull()) ||
+ Range.Begin != Data[I].Range.Begin || Range.End != Data[I].Range.End ||
+ Range.Step != Data[I].Range.Step;
+ }
+ if (ErrorFound)
+ return ExprError();
+ if (!NeedToRebuild)
+ return E;
+
+ ExprResult Res = getDerived().RebuildOMPIteratorExpr(
+ E->getIteratorKwLoc(), E->getLParenLoc(), E->getRParenLoc(), Data);
+ if (!Res.isUsable())
+ return Res;
+ auto *IE = cast<OMPIteratorExpr>(Res.get());
+ for (unsigned I = 0; I < NumIterators; ++I)
+ getDerived().transformedLocalDecl(E->getIteratorDecl(I),
+ IE->getIteratorDecl(I));
+ return Res;
}
template<typename Derived>
@@ -9837,9 +10572,15 @@ TreeTransform<Derived>::TransformBinaryOperator(BinaryOperator *E) {
RHS.get() == E->getRHS())
return E;
- Sema::FPContractStateRAII FPContractState(getSema());
- getSema().FPFeatures = E->getFPFeatures();
-
+ if (E->isCompoundAssignmentOp())
+ // FPFeatures has already been established from trailing storage
+ return getDerived().RebuildBinaryOperator(
+ E->getOperatorLoc(), E->getOpcode(), LHS.get(), RHS.get());
+ Sema::FPFeaturesStateRAII FPFeaturesState(getSema());
+ FPOptionsOverride NewOverrides(E->getFPFeatures(getSema().getLangOpts()));
+ getSema().CurFPFeatures =
+ NewOverrides.applyOverrides(getSema().getLangOpts());
+ getSema().FpPragmaStack.CurrentValue = NewOverrides.getAsOpaqueInt();
return getDerived().RebuildBinaryOperator(E->getOperatorLoc(), E->getOpcode(),
LHS.get(), RHS.get());
}
@@ -9892,6 +10633,11 @@ template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformCompoundAssignOperator(
CompoundAssignOperator *E) {
+ Sema::FPFeaturesStateRAII FPFeaturesState(getSema());
+ FPOptionsOverride NewOverrides(E->getFPFeatures(getSema().getLangOpts()));
+ getSema().CurFPFeatures =
+ NewOverrides.applyOverrides(getSema().getLangOpts());
+ getSema().FpPragmaStack.CurrentValue = NewOverrides.getAsOpaqueInt();
return getDerived().TransformBinaryOperator(E);
}
@@ -10245,16 +10991,18 @@ TreeTransform<Derived>::TransformStmtExpr(StmtExpr *E) {
return ExprError();
}
- if (!getDerived().AlwaysRebuild() &&
+ unsigned OldDepth = E->getTemplateDepth();
+ unsigned NewDepth = getDerived().TransformTemplateDepth(OldDepth);
+
+ if (!getDerived().AlwaysRebuild() && OldDepth == NewDepth &&
SubStmt.get() == E->getSubStmt()) {
// Calling this an 'error' is unintuitive, but it does the right thing.
SemaRef.ActOnStmtExprError();
return SemaRef.MaybeBindToTemporary(E);
}
- return getDerived().RebuildStmtExpr(E->getLParenLoc(),
- SubStmt.get(),
- E->getRParenLoc());
+ return getDerived().RebuildStmtExpr(E->getLParenLoc(), SubStmt.get(),
+ E->getRParenLoc(), NewDepth);
}
template<typename Derived>
@@ -10363,8 +11111,11 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
(E->getNumArgs() != 2 || Second.get() == E->getArg(1)))
return SemaRef.MaybeBindToTemporary(E);
- Sema::FPContractStateRAII FPContractState(getSema());
- getSema().FPFeatures = E->getFPFeatures();
+ Sema::FPFeaturesStateRAII FPFeaturesState(getSema());
+ FPOptionsOverride NewOverrides(E->getFPFeatures());
+ getSema().CurFPFeatures =
+ NewOverrides.applyOverrides(getSema().getLangOpts());
+ getSema().FpPragmaStack.CurrentValue = NewOverrides.getAsOpaqueInt();
return getDerived().RebuildCXXOperatorCallExpr(E->getOperator(),
E->getOperatorLoc(),
@@ -10491,6 +11242,12 @@ TreeTransform<Derived>::TransformCXXConstCastExpr(CXXConstCastExpr *E) {
template<typename Derived>
ExprResult
+TreeTransform<Derived>::TransformCXXAddrspaceCastExpr(CXXAddrspaceCastExpr *E) {
+ return getDerived().TransformCXXNamedCastExpr(E);
+}
+
+template<typename Derived>
+ExprResult
TreeTransform<Derived>::TransformCXXFunctionalCastExpr(
CXXFunctionalCastExpr *E) {
TypeSourceInfo *Type =
@@ -11179,6 +11936,146 @@ TreeTransform<Derived>::TransformConceptSpecializationExpr(
&TransArgs);
}
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformRequiresExpr(RequiresExpr *E) {
+ SmallVector<ParmVarDecl*, 4> TransParams;
+ SmallVector<QualType, 4> TransParamTypes;
+ Sema::ExtParameterInfoBuilder ExtParamInfos;
+
+ // C++2a [expr.prim.req]p2
+ // Expressions appearing within a requirement-body are unevaluated operands.
+ EnterExpressionEvaluationContext Ctx(
+ SemaRef, Sema::ExpressionEvaluationContext::Unevaluated);
+
+ RequiresExprBodyDecl *Body = RequiresExprBodyDecl::Create(
+ getSema().Context, getSema().CurContext,
+ E->getBody()->getBeginLoc());
+
+ Sema::ContextRAII SavedContext(getSema(), Body, /*NewThisContext*/false);
+
+ if (getDerived().TransformFunctionTypeParams(E->getRequiresKWLoc(),
+ E->getLocalParameters(),
+ /*ParamTypes=*/nullptr,
+ /*ParamInfos=*/nullptr,
+ TransParamTypes, &TransParams,
+ ExtParamInfos))
+ return ExprError();
+
+ for (ParmVarDecl *Param : TransParams)
+ Param->setDeclContext(Body);
+
+ SmallVector<concepts::Requirement *, 4> TransReqs;
+ if (getDerived().TransformRequiresExprRequirements(E->getRequirements(),
+ TransReqs))
+ return ExprError();
+
+ for (concepts::Requirement *Req : TransReqs) {
+ if (auto *ER = dyn_cast<concepts::ExprRequirement>(Req)) {
+ if (ER->getReturnTypeRequirement().isTypeConstraint()) {
+ ER->getReturnTypeRequirement()
+ .getTypeConstraintTemplateParameterList()->getParam(0)
+ ->setDeclContext(Body);
+ }
+ }
+ }
+
+ return getDerived().RebuildRequiresExpr(E->getRequiresKWLoc(), Body,
+ TransParams, TransReqs,
+ E->getRBraceLoc());
+}
+
+template<typename Derived>
+bool TreeTransform<Derived>::TransformRequiresExprRequirements(
+ ArrayRef<concepts::Requirement *> Reqs,
+ SmallVectorImpl<concepts::Requirement *> &Transformed) {
+ for (concepts::Requirement *Req : Reqs) {
+ concepts::Requirement *TransReq = nullptr;
+ if (auto *TypeReq = dyn_cast<concepts::TypeRequirement>(Req))
+ TransReq = getDerived().TransformTypeRequirement(TypeReq);
+ else if (auto *ExprReq = dyn_cast<concepts::ExprRequirement>(Req))
+ TransReq = getDerived().TransformExprRequirement(ExprReq);
+ else
+ TransReq = getDerived().TransformNestedRequirement(
+ cast<concepts::NestedRequirement>(Req));
+ if (!TransReq)
+ return true;
+ Transformed.push_back(TransReq);
+ }
+ return false;
+}
+
+template<typename Derived>
+concepts::TypeRequirement *
+TreeTransform<Derived>::TransformTypeRequirement(
+ concepts::TypeRequirement *Req) {
+ if (Req->isSubstitutionFailure()) {
+ if (getDerived().AlwaysRebuild())
+ return getDerived().RebuildTypeRequirement(
+ Req->getSubstitutionDiagnostic());
+ return Req;
+ }
+ TypeSourceInfo *TransType = getDerived().TransformType(Req->getType());
+ if (!TransType)
+ return nullptr;
+ return getDerived().RebuildTypeRequirement(TransType);
+}
+
+template<typename Derived>
+concepts::ExprRequirement *
+TreeTransform<Derived>::TransformExprRequirement(concepts::ExprRequirement *Req) {
+ llvm::PointerUnion<Expr *, concepts::Requirement::SubstitutionDiagnostic *> TransExpr;
+ if (Req->isExprSubstitutionFailure())
+ TransExpr = Req->getExprSubstitutionDiagnostic();
+ else {
+ ExprResult TransExprRes = getDerived().TransformExpr(Req->getExpr());
+ if (TransExprRes.isInvalid())
+ return nullptr;
+ TransExpr = TransExprRes.get();
+ }
+
+ llvm::Optional<concepts::ExprRequirement::ReturnTypeRequirement> TransRetReq;
+ const auto &RetReq = Req->getReturnTypeRequirement();
+ if (RetReq.isEmpty())
+ TransRetReq.emplace();
+ else if (RetReq.isSubstitutionFailure())
+ TransRetReq.emplace(RetReq.getSubstitutionDiagnostic());
+ else if (RetReq.isTypeConstraint()) {
+ TemplateParameterList *OrigTPL =
+ RetReq.getTypeConstraintTemplateParameterList();
+ TemplateParameterList *TPL =
+ getDerived().TransformTemplateParameterList(OrigTPL);
+ if (!TPL)
+ return nullptr;
+ TransRetReq.emplace(TPL);
+ }
+ assert(TransRetReq.hasValue() &&
+ "All code paths leading here must set TransRetReq");
+ if (Expr *E = TransExpr.dyn_cast<Expr *>())
+ return getDerived().RebuildExprRequirement(E, Req->isSimple(),
+ Req->getNoexceptLoc(),
+ std::move(*TransRetReq));
+ return getDerived().RebuildExprRequirement(
+ TransExpr.get<concepts::Requirement::SubstitutionDiagnostic *>(),
+ Req->isSimple(), Req->getNoexceptLoc(), std::move(*TransRetReq));
+}
+
+template<typename Derived>
+concepts::NestedRequirement *
+TreeTransform<Derived>::TransformNestedRequirement(
+ concepts::NestedRequirement *Req) {
+ if (Req->isSubstitutionFailure()) {
+ if (getDerived().AlwaysRebuild())
+ return getDerived().RebuildNestedRequirement(
+ Req->getSubstitutionDiagnostic());
+ return Req;
+ }
+ ExprResult TransConstraint =
+ getDerived().TransformExpr(Req->getConstraintExpr());
+ if (TransConstraint.isInvalid())
+ return nullptr;
+ return getDerived().RebuildNestedRequirement(TransConstraint.get());
+}
template<typename Derived>
ExprResult
@@ -11303,10 +12200,11 @@ TreeTransform<Derived>::TransformCXXConstructExpr(CXXConstructExpr *E) {
// CXXConstructExprs other than for list-initialization and
// CXXTemporaryObjectExpr are always implicit, so when we have
// a 1-argument construction we just transform that argument.
- if ((E->getNumArgs() == 1 ||
- (E->getNumArgs() > 1 && getDerived().DropCallArgument(E->getArg(1)))) &&
- (!getDerived().DropCallArgument(E->getArg(0))) &&
- !E->isListInitialization())
+ if (getDerived().AllowSkippingCXXConstructExpr() &&
+ ((E->getNumArgs() == 1 ||
+ (E->getNumArgs() > 1 && getDerived().DropCallArgument(E->getArg(1)))) &&
+ (!getDerived().DropCallArgument(E->getArg(0))) &&
+ !E->isListInitialization()))
return getDerived().TransformExpr(E->getArg(0));
TemporaryBase Rebase(*this, /*FIXME*/ E->getBeginLoc(), DeclarationName());
@@ -11560,6 +12458,8 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
NewTrailingRequiresClause = getDerived().TransformExpr(TRC);
// Create the local class that will describe the lambda.
+ // FIXME: KnownDependent below is wrong when substituting inside a templated
+ // context that isn't a DeclContext (such as a variable template).
CXXRecordDecl *OldClass = E->getLambdaClass();
CXXRecordDecl *Class
= getSema().createLambdaClosureType(E->getIntroducerRange(),
@@ -11584,19 +12484,6 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
LSI->CallOperator = NewCallOperator;
- for (unsigned I = 0, NumParams = NewCallOperator->getNumParams();
- I != NumParams; ++I) {
- auto *P = NewCallOperator->getParamDecl(I);
- if (P->hasUninstantiatedDefaultArg()) {
- EnterExpressionEvaluationContext Eval(
- getSema(),
- Sema::ExpressionEvaluationContext::PotentiallyEvaluatedIfUsed, P);
- ExprResult R = getDerived().TransformExpr(
- E->getCallOperator()->getParamDecl(I)->getDefaultArg());
- P->setDefaultArg(R.get());
- }
- }
-
getDerived().transformAttrs(E->getCallOperator(), NewCallOperator);
getDerived().transformedLocalDecl(E->getCallOperator(), {NewCallOperator});
@@ -13092,6 +13979,21 @@ TreeTransform<Derived>::RebuildDependentSizedExtVectorType(QualType ElementType,
return SemaRef.BuildExtVectorType(ElementType, SizeExpr, AttributeLoc);
}
+template <typename Derived>
+QualType TreeTransform<Derived>::RebuildConstantMatrixType(
+ QualType ElementType, unsigned NumRows, unsigned NumColumns) {
+ return SemaRef.Context.getConstantMatrixType(ElementType, NumRows,
+ NumColumns);
+}
+
+template <typename Derived>
+QualType TreeTransform<Derived>::RebuildDependentSizedMatrixType(
+ QualType ElementType, Expr *RowExpr, Expr *ColumnExpr,
+ SourceLocation AttributeLoc) {
+ return SemaRef.BuildMatrixType(ElementType, RowExpr, ColumnExpr,
+ AttributeLoc);
+}
+
template<typename Derived>
QualType TreeTransform<Derived>::RebuildFunctionProtoType(
QualType T,
@@ -13206,6 +14108,23 @@ QualType TreeTransform<Derived>::RebuildPipeType(QualType ValueType,
: SemaRef.BuildWritePipeType(ValueType, KWLoc);
}
+template <typename Derived>
+QualType TreeTransform<Derived>::RebuildExtIntType(bool IsUnsigned,
+ unsigned NumBits,
+ SourceLocation Loc) {
+ llvm::APInt NumBitsAP(SemaRef.Context.getIntWidth(SemaRef.Context.IntTy),
+ NumBits, true);
+ IntegerLiteral *Bits = IntegerLiteral::Create(SemaRef.Context, NumBitsAP,
+ SemaRef.Context.IntTy, Loc);
+ return SemaRef.BuildExtIntType(IsUnsigned, Bits, Loc);
+}
+
+template <typename Derived>
+QualType TreeTransform<Derived>::RebuildDependentExtIntType(
+ bool IsUnsigned, Expr *NumBitsExpr, SourceLocation Loc) {
+ return SemaRef.BuildExtIntType(IsUnsigned, NumBitsExpr, Loc);
+}
+
template<typename Derived>
TemplateName
TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
@@ -13227,11 +14146,10 @@ TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
UnqualifiedId TemplateName;
TemplateName.setIdentifier(&Name, NameLoc);
Sema::TemplateTy Template;
- getSema().ActOnDependentTemplateName(/*Scope=*/nullptr,
- SS, TemplateKWLoc, TemplateName,
- ParsedType::make(ObjectType),
- /*EnteringContext=*/false,
- Template, AllowInjectedClassName);
+ getSema().ActOnTemplateName(/*Scope=*/nullptr, SS, TemplateKWLoc,
+ TemplateName, ParsedType::make(ObjectType),
+ /*EnteringContext=*/false, Template,
+ AllowInjectedClassName);
return Template.get();
}
@@ -13248,11 +14166,9 @@ TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
SourceLocation SymbolLocations[3] = { NameLoc, NameLoc, NameLoc };
Name.setOperatorFunctionId(NameLoc, Operator, SymbolLocations);
Sema::TemplateTy Template;
- getSema().ActOnDependentTemplateName(/*Scope=*/nullptr,
- SS, TemplateKWLoc, Name,
- ParsedType::make(ObjectType),
- /*EnteringContext=*/false,
- Template, AllowInjectedClassName);
+ getSema().ActOnTemplateName(
+ /*Scope=*/nullptr, SS, TemplateKWLoc, Name, ParsedType::make(ObjectType),
+ /*EnteringContext=*/false, Template, AllowInjectedClassName);
return Template.get();
}
diff --git a/clang/lib/Sema/UsedDeclVisitor.h b/clang/lib/Sema/UsedDeclVisitor.h
new file mode 100644
index 000000000000..d207e07f451a
--- /dev/null
+++ b/clang/lib/Sema/UsedDeclVisitor.h
@@ -0,0 +1,102 @@
+//===- UsedDeclVisitor.h - ODR-used declarations visitor --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//===----------------------------------------------------------------------===//
+//
+// This file defines UsedDeclVisitor, a CRTP class which visits all the
+// declarations that are ODR-used by an expression or statement.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_SEMA_USEDDECLVISITOR_H
+#define LLVM_CLANG_LIB_SEMA_USEDDECLVISITOR_H
+
+#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/Sema/SemaInternal.h"
+
+namespace clang {
+template <class Derived>
+class UsedDeclVisitor : public EvaluatedExprVisitor<Derived> {
+protected:
+ Sema &S;
+
+public:
+ typedef EvaluatedExprVisitor<Derived> Inherited;
+
+ UsedDeclVisitor(Sema &S) : Inherited(S.Context), S(S) {}
+
+ Derived &asImpl() { return *static_cast<Derived *>(this); }
+
+ void VisitDeclRefExpr(DeclRefExpr *E) {
+ auto *D = E->getDecl();
+ if (isa<FunctionDecl>(D) || isa<VarDecl>(D)) {
+ asImpl().visitUsedDecl(E->getLocation(), D);
+ }
+ }
+
+ void VisitMemberExpr(MemberExpr *E) {
+ auto *D = E->getMemberDecl();
+ if (isa<FunctionDecl>(D) || isa<VarDecl>(D)) {
+ asImpl().visitUsedDecl(E->getMemberLoc(), D);
+ }
+ asImpl().Visit(E->getBase());
+ }
+
+ void VisitCapturedStmt(CapturedStmt *Node) {
+ asImpl().visitUsedDecl(Node->getBeginLoc(), Node->getCapturedDecl());
+ Inherited::VisitCapturedStmt(Node);
+ }
+
+ void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ asImpl().visitUsedDecl(
+ E->getBeginLoc(),
+ const_cast<CXXDestructorDecl *>(E->getTemporary()->getDestructor()));
+ asImpl().Visit(E->getSubExpr());
+ }
+
+ void VisitCXXNewExpr(CXXNewExpr *E) {
+ if (E->getOperatorNew())
+ asImpl().visitUsedDecl(E->getBeginLoc(), E->getOperatorNew());
+ if (E->getOperatorDelete())
+ asImpl().visitUsedDecl(E->getBeginLoc(), E->getOperatorDelete());
+ Inherited::VisitCXXNewExpr(E);
+ }
+
+ void VisitCXXDeleteExpr(CXXDeleteExpr *E) {
+ if (E->getOperatorDelete())
+ asImpl().visitUsedDecl(E->getBeginLoc(), E->getOperatorDelete());
+ QualType Destroyed = S.Context.getBaseElementType(E->getDestroyedType());
+ if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl());
+ asImpl().visitUsedDecl(E->getBeginLoc(), S.LookupDestructor(Record));
+ }
+
+ Inherited::VisitCXXDeleteExpr(E);
+ }
+
+ void VisitCXXConstructExpr(CXXConstructExpr *E) {
+ asImpl().visitUsedDecl(E->getBeginLoc(), E->getConstructor());
+ Inherited::VisitCXXConstructExpr(E);
+ }
+
+ void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
+ asImpl().Visit(E->getExpr());
+ }
+
+ void visitUsedDecl(SourceLocation Loc, Decl *D) {
+ if (auto *CD = dyn_cast<CapturedDecl>(D)) {
+ if (auto *S = CD->getBody()) {
+ asImpl().Visit(S);
+ }
+ } else if (auto *CD = dyn_cast<BlockDecl>(D)) {
+ if (auto *S = CD->getBody()) {
+ asImpl().Visit(S);
+ }
+ }
+ }
+};
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_SEMA_USEDDECLVISITOR_H
diff --git a/clang/lib/Serialization/ASTCommon.cpp b/clang/lib/Serialization/ASTCommon.cpp
index cdb5b17022c2..bf583b02f96b 100644
--- a/clang/lib/Serialization/ASTCommon.cpp
+++ b/clang/lib/Serialization/ASTCommon.cpp
@@ -240,9 +240,21 @@ serialization::TypeIdxFromBuiltin(const BuiltinType *BT) {
case BuiltinType::BuiltinFn:
ID = PREDEF_TYPE_BUILTIN_FN;
break;
+ case BuiltinType::IncompleteMatrixIdx:
+ ID = PREDEF_TYPE_INCOMPLETE_MATRIX_IDX;
+ break;
case BuiltinType::OMPArraySection:
ID = PREDEF_TYPE_OMP_ARRAY_SECTION;
break;
+ case BuiltinType::OMPArrayShaping:
+ ID = PREDEF_TYPE_OMP_ARRAY_SHAPING;
+ break;
+ case BuiltinType::OMPIterator:
+ ID = PREDEF_TYPE_OMP_ITERATOR;
+ break;
+ case BuiltinType::BFloat16:
+ ID = PREDEF_TYPE_BFLOAT16_ID;
+ break;
}
return TypeIdx(ID);
@@ -365,6 +377,7 @@ bool serialization::isRedeclarableDeclKind(unsigned Kind) {
case Decl::IndirectField:
case Decl::Field:
case Decl::MSProperty:
+ case Decl::MSGuid:
case Decl::ObjCIvar:
case Decl::ObjCAtDefsField:
case Decl::NonTypeTemplateParm:
@@ -402,6 +415,7 @@ bool serialization::isRedeclarableDeclKind(unsigned Kind) {
case Decl::Binding:
case Decl::Concept:
case Decl::LifetimeExtendedTemporary:
+ case Decl::RequiresExprBody:
return false;
// These indirectly derive from Redeclarable<T> but are not actually
diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp
index 19e7ebe03a1f..4a1a995204e5 100644
--- a/clang/lib/Serialization/ASTReader.cpp
+++ b/clang/lib/Serialization/ASTReader.cpp
@@ -89,6 +89,7 @@
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
@@ -139,6 +140,7 @@ using namespace clang;
using namespace clang::serialization;
using namespace clang::serialization::reader;
using llvm::BitstreamCursor;
+using llvm::RoundingMode;
//===----------------------------------------------------------------------===//
// ChainedASTReaderListener implementation
@@ -1334,6 +1336,7 @@ bool ASTReader::ReadSourceManagerBlock(ModuleFile &F) {
Error(std::move(Err));
return true;
}
+ F.SourceManagerBlockStartOffset = SLocEntryCursor.GetCurrentBitNo();
RecordData Record;
while (true) {
@@ -1411,7 +1414,7 @@ resolveFileRelativeToOriginalDir(const std::string &Filename,
path::append(currPCHPath, "..");
path::append(currPCHPath, fileDirI, fileDirE);
path::append(currPCHPath, path::filename(Filename));
- return currPCHPath.str();
+ return std::string(currPCHPath.str());
}
bool ASTReader::ReadSLocEntry(int ID) {
@@ -1468,6 +1471,7 @@ bool ASTReader::ReadSLocEntry(int ID) {
ModuleFile *F = GlobalSLocEntryMap.find(-ID)->second;
if (llvm::Error Err = F->SLocEntryCursor.JumpToBit(
+ F->SLocEntryOffsetsBase +
F->SLocEntryOffsets[ID - F->SLocEntryBaseID])) {
Error(std::move(Err));
return true;
@@ -1625,13 +1629,17 @@ SourceLocation ASTReader::getImportLocation(ModuleFile *F) {
/// Enter a subblock of the specified BlockID with the specified cursor. Read
/// the abbreviations that are at the top of the block and then leave the cursor
/// pointing into the block.
-bool ASTReader::ReadBlockAbbrevs(BitstreamCursor &Cursor, unsigned BlockID) {
+bool ASTReader::ReadBlockAbbrevs(BitstreamCursor &Cursor, unsigned BlockID,
+ uint64_t *StartOfBlockOffset) {
if (llvm::Error Err = Cursor.EnterSubBlock(BlockID)) {
// FIXME this drops errors on the floor.
consumeError(std::move(Err));
return true;
}
+ if (StartOfBlockOffset)
+ *StartOfBlockOffset = Cursor.GetCurrentBitNo();
+
while (true) {
uint64_t Offset = Cursor.GetCurrentBitNo();
Expected<unsigned> MaybeCode = Cursor.ReadCode();
@@ -1838,7 +1846,7 @@ bool HeaderFileInfoTrait::EqualKey(internal_key_ref a, internal_key_ref b) {
return nullptr;
}
- std::string Resolved = Key.Filename;
+ std::string Resolved = std::string(Key.Filename);
Reader.ResolveImportedPath(M, Resolved);
if (auto File = FileMgr.getFile(Resolved))
return *File;
@@ -1913,13 +1921,13 @@ HeaderFileInfoTrait::ReadData(internal_key_ref key, const unsigned char *d,
ModuleMap &ModMap =
Reader.getPreprocessor().getHeaderSearchInfo().getModuleMap();
- std::string Filename = key.Filename;
+ std::string Filename = std::string(key.Filename);
if (key.Imported)
Reader.ResolveImportedPath(M, Filename);
// FIXME: This is not always the right filename-as-written, but we're not
// going to use this information to rebuild the module, so it doesn't make
// a lot of difference.
- Module::Header H = { key.Filename, *FileMgr.getFile(Filename) };
+ Module::Header H = {std::string(key.Filename), *FileMgr.getFile(Filename)};
ModMap.addHeader(Mod, H, HeaderRole, /*Imported*/true);
HFI.isModuleHeader |= !(HeaderRole & ModuleMap::TextualHeader);
}
@@ -1930,9 +1938,8 @@ HeaderFileInfoTrait::ReadData(internal_key_ref key, const unsigned char *d,
return HFI;
}
-void ASTReader::addPendingMacro(IdentifierInfo *II,
- ModuleFile *M,
- uint64_t MacroDirectivesOffset) {
+void ASTReader::addPendingMacro(IdentifierInfo *II, ModuleFile *M,
+ uint32_t MacroDirectivesOffset) {
assert(NumCurrentElementsDeserializing > 0 &&"Missing deserialization guard");
PendingMacroIDs[II].push_back(PendingMacroInfo(M, MacroDirectivesOffset));
}
@@ -2097,7 +2104,8 @@ void ASTReader::resolvePendingMacro(IdentifierInfo *II,
BitstreamCursor &Cursor = M.MacroCursor;
SavedStreamPosition SavedPosition(Cursor);
- if (llvm::Error Err = Cursor.JumpToBit(PMInfo.MacroDirectivesOffset)) {
+ if (llvm::Error Err =
+ Cursor.JumpToBit(M.MacroOffsetsBase + PMInfo.MacroDirectivesOffset)) {
Error(std::move(Err));
return;
}
@@ -2248,7 +2256,7 @@ ASTReader::readInputFileInfo(ModuleFile &F, unsigned ID) {
R.Overridden = static_cast<bool>(Record[3]);
R.Transient = static_cast<bool>(Record[4]);
R.TopLevelModuleMap = static_cast<bool>(Record[5]);
- R.Filename = Blob;
+ R.Filename = std::string(Blob);
ResolveImportedPath(F, R.Filename);
Expected<llvm::BitstreamEntry> MaybeEntry = Cursor.advance();
@@ -2309,7 +2317,7 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
if (File == nullptr && !F.OriginalDir.empty() && !F.BaseDirectory.empty() &&
F.OriginalDir != F.BaseDirectory) {
std::string Resolved = resolveFileRelativeToOriginalDir(
- Filename, F.OriginalDir, F.BaseDirectory);
+ std::string(Filename), F.OriginalDir, F.BaseDirectory);
if (!Resolved.empty())
if (auto FE = FileMgr.getFile(Resolved))
File = *FE;
@@ -2788,10 +2796,10 @@ ASTReader::ReadControlBlock(ModuleFile &F,
ReadUntranslatedSourceLocation(Record[Idx++]);
off_t StoredSize = (off_t)Record[Idx++];
time_t StoredModTime = (time_t)Record[Idx++];
- ASTFileSignature StoredSignature = {
- {{(uint32_t)Record[Idx++], (uint32_t)Record[Idx++],
- (uint32_t)Record[Idx++], (uint32_t)Record[Idx++],
- (uint32_t)Record[Idx++]}}};
+ auto FirstSignatureByte = Record.begin() + Idx;
+ ASTFileSignature StoredSignature = ASTFileSignature::create(
+ FirstSignatureByte, FirstSignatureByte + ASTFileSignature::size);
+ Idx += ASTFileSignature::size;
std::string ImportedName = ReadString(Record, Idx);
std::string ImportedFile;
@@ -2844,7 +2852,7 @@ ASTReader::ReadControlBlock(ModuleFile &F,
case ORIGINAL_FILE:
F.OriginalSourceFileID = FileID::get(Record[0]);
- F.ActualOriginalSourceFileName = Blob;
+ F.ActualOriginalSourceFileName = std::string(Blob);
F.OriginalSourceFileName = F.ActualOriginalSourceFileName;
ResolveImportedPath(F, F.OriginalSourceFileName);
break;
@@ -2854,11 +2862,11 @@ ASTReader::ReadControlBlock(ModuleFile &F,
break;
case ORIGINAL_PCH_DIR:
- F.OriginalDir = Blob;
+ F.OriginalDir = std::string(Blob);
break;
case MODULE_NAME:
- F.ModuleName = Blob;
+ F.ModuleName = std::string(Blob);
Diag(diag::remark_module_import)
<< F.ModuleName << F.FileName << (ImportedBy ? true : false)
<< (ImportedBy ? StringRef(ImportedBy->ModuleName) : StringRef());
@@ -2897,9 +2905,9 @@ ASTReader::ReadControlBlock(ModuleFile &F,
return OutOfDate;
}
}
- F.BaseDirectory = M->Directory->getName();
+ F.BaseDirectory = std::string(M->Directory->getName());
} else {
- F.BaseDirectory = Blob;
+ F.BaseDirectory = std::string(Blob);
}
break;
}
@@ -2930,6 +2938,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
Error(std::move(Err));
return Failure;
}
+ F.ASTBlockStartOffset = Stream.GetCurrentBitNo();
// Read all of the records and blocks for the AST file.
RecordData Record;
@@ -2970,7 +2979,8 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
Error(std::move(Err));
return Failure;
}
- if (ReadBlockAbbrevs(F.DeclsCursor, DECLTYPES_BLOCK_ID)) {
+ if (ReadBlockAbbrevs(F.DeclsCursor, DECLTYPES_BLOCK_ID,
+ &F.DeclsBlockStartOffset)) {
Error("malformed block record in AST file");
return Failure;
}
@@ -3096,7 +3106,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
Error("duplicate TYPE_OFFSET record in AST file");
return Failure;
}
- F.TypeOffsets = (const uint32_t *)Blob.data();
+ F.TypeOffsets = reinterpret_cast<const UnderalignedInt64 *>(Blob.data());
F.LocalNumTypes = Record[0];
unsigned LocalBaseTypeIndex = Record[1];
F.BaseTypeIndex = getTotalNumTypes();
@@ -3375,6 +3385,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
F.SLocEntryOffsets = (const uint32_t *)Blob.data();
F.LocalNumSLocEntries = Record[0];
unsigned SLocSpaceSize = Record[1];
+ F.SLocEntryOffsetsBase = Record[2] + F.SourceManagerBlockStartOffset;
std::tie(F.SLocEntryBaseID, F.SLocEntryBaseOffset) =
SourceMgr.AllocateLoadedSLocEntries(F.LocalNumSLocEntries,
SLocSpaceSize);
@@ -3693,6 +3704,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
F.MacroOffsets = (const uint32_t *)Blob.data();
F.LocalNumMacros = Record[0];
unsigned LocalBaseMacroID = Record[1];
+ F.MacroOffsetsBase = Record[2] + F.ASTBlockStartOffset;
F.BaseMacroID = getTotalNumMacros();
if (F.LocalNumMacros > 0) {
@@ -3774,6 +3786,34 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
}
break;
}
+
+ case FLOAT_CONTROL_PRAGMA_OPTIONS: {
+ if (Record.size() < 3) {
+ Error("invalid pragma pack record");
+ return Failure;
+ }
+ FpPragmaCurrentValue = Record[0];
+ FpPragmaCurrentLocation = ReadSourceLocation(F, Record[1]);
+ unsigned NumStackEntries = Record[2];
+ unsigned Idx = 3;
+ // Reset the stack when importing a new module.
+ FpPragmaStack.clear();
+ for (unsigned I = 0; I < NumStackEntries; ++I) {
+ FpPragmaStackEntry Entry;
+ Entry.Value = Record[Idx++];
+ Entry.Location = ReadSourceLocation(F, Record[Idx++]);
+ Entry.PushLocation = ReadSourceLocation(F, Record[Idx++]);
+ FpPragmaStrings.push_back(ReadString(Record, Idx));
+ Entry.SlotLabel = FpPragmaStrings.back();
+ FpPragmaStack.push_back(Entry);
+ }
+ break;
+ }
+
+ case DECLS_TO_CHECK_FOR_DEFERRED_DIAGS:
+ for (unsigned I = 0, N = Record.size(); I != N; ++I)
+ DeclsToCheckForDeferredDiags.push_back(getGlobalDeclID(F, Record[I]));
+ break;
}
}
}
@@ -3805,21 +3845,22 @@ void ASTReader::ReadModuleOffsetMap(ModuleFile &F) const {
while (Data < DataEnd) {
// FIXME: Looking up dependency modules by filename is horrible. Let's
- // start fixing this with prebuilt and explicit modules and see how it
- // goes...
+ // start fixing this with prebuilt, explicit and implicit modules and see
+ // how it goes...
using namespace llvm::support;
ModuleKind Kind = static_cast<ModuleKind>(
endian::readNext<uint8_t, little, unaligned>(Data));
uint16_t Len = endian::readNext<uint16_t, little, unaligned>(Data);
StringRef Name = StringRef((const char*)Data, Len);
Data += Len;
- ModuleFile *OM = (Kind == MK_PrebuiltModule || Kind == MK_ExplicitModule
- ? ModuleMgr.lookupByModuleName(Name)
- : ModuleMgr.lookupByFileName(Name));
+ ModuleFile *OM = (Kind == MK_PrebuiltModule || Kind == MK_ExplicitModule ||
+ Kind == MK_ImplicitModule
+ ? ModuleMgr.lookupByModuleName(Name)
+ : ModuleMgr.lookupByFileName(Name));
if (!OM) {
std::string Msg =
"SourceLocation remap refers to unknown module, cannot find ";
- Msg.append(Name);
+ Msg.append(std::string(Name));
Error(Msg);
return;
}
@@ -3998,7 +4039,7 @@ static void moveMethodToBackOfGlobalList(Sema &S, ObjCMethodDecl *Method) {
void ASTReader::makeNamesVisible(const HiddenNames &Names, Module *Owner) {
assert(Owner->NameVisibility != Module::Hidden && "nothing to make visible?");
for (Decl *D : Names) {
- bool wasHidden = D->isHidden();
+ bool wasHidden = !D->isUnconditionallyVisible();
D->setVisibleDespiteOwningModule();
if (wasHidden && SemaObj) {
@@ -4024,8 +4065,8 @@ void ASTReader::makeModuleVisible(Module *Mod,
continue;
}
- if (!Mod->isAvailable()) {
- // Modules that aren't available cannot be made visible.
+ if (Mod->isUnimportable()) {
+ // Modules that aren't importable cannot be made visible.
continue;
}
@@ -4060,9 +4101,9 @@ void ASTReader::makeModuleVisible(Module *Mod,
/// visible.
void ASTReader::mergeDefinitionVisibility(NamedDecl *Def,
NamedDecl *MergedDef) {
- if (Def->isHidden()) {
+ if (!Def->isUnconditionallyVisible()) {
// If MergedDef is visible or becomes visible, make the definition visible.
- if (!MergedDef->isHidden())
+ if (MergedDef->isUnconditionallyVisible())
Def->setVisibleDespiteOwningModule();
else {
getContext().mergeDefinitionIntoModule(
@@ -4702,7 +4743,12 @@ ASTReader::ASTReadResult ASTReader::readUnhashedControlBlockImpl(
switch ((UnhashedControlBlockRecordTypes)MaybeRecordType.get()) {
case SIGNATURE:
if (F)
- std::copy(Record.begin(), Record.end(), F->Signature.data());
+ F->Signature = ASTFileSignature::create(Record.begin(), Record.end());
+ break;
+ case AST_BLOCK_HASH:
+ if (F)
+ F->ASTBlockHash =
+ ASTFileSignature::create(Record.begin(), Record.end());
break;
case DIAGNOSTIC_OPTIONS: {
bool Complain = (ClientLoadCapabilities & ARR_OutOfDate) == 0;
@@ -4991,8 +5037,8 @@ static ASTFileSignature readASTFileSignature(StringRef PCH) {
return ASTFileSignature();
}
if (SIGNATURE == MaybeRecord.get())
- return {{{(uint32_t)Record[0], (uint32_t)Record[1], (uint32_t)Record[2],
- (uint32_t)Record[3], (uint32_t)Record[4]}}};
+ return ASTFileSignature::create(Record.begin(),
+ Record.begin() + ASTFileSignature::size);
}
}
@@ -5071,13 +5117,11 @@ namespace {
SimplePCHValidator(const LangOptions &ExistingLangOpts,
const TargetOptions &ExistingTargetOpts,
const PreprocessorOptions &ExistingPPOpts,
- StringRef ExistingModuleCachePath,
- FileManager &FileMgr)
- : ExistingLangOpts(ExistingLangOpts),
- ExistingTargetOpts(ExistingTargetOpts),
- ExistingPPOpts(ExistingPPOpts),
- ExistingModuleCachePath(ExistingModuleCachePath),
- FileMgr(FileMgr) {}
+ StringRef ExistingModuleCachePath, FileManager &FileMgr)
+ : ExistingLangOpts(ExistingLangOpts),
+ ExistingTargetOpts(ExistingTargetOpts),
+ ExistingPPOpts(ExistingPPOpts),
+ ExistingModuleCachePath(ExistingModuleCachePath), FileMgr(FileMgr) {}
bool ReadLanguageOptions(const LangOptions &LangOpts, bool Complain,
bool AllowCompatibleDifferences) override {
@@ -5221,7 +5265,7 @@ bool ASTReader::readASTFileControlBlock(
Listener.ReadModuleName(Blob);
break;
case MODULE_DIRECTORY:
- ModuleDir = Blob;
+ ModuleDir = std::string(Blob);
break;
case MODULE_MAP_FILE: {
unsigned Idx = 0;
@@ -5273,7 +5317,7 @@ bool ASTReader::readASTFileControlBlock(
break;
case INPUT_FILE:
bool Overridden = static_cast<bool>(Record[3]);
- std::string Filename = Blob;
+ std::string Filename = std::string(Blob);
ResolveImportedPath(Filename, ModuleDir);
shouldContinue = Listener.visitInputFile(
Filename, isSystemFile, Overridden, /*IsExplicitModule*/false);
@@ -5292,7 +5336,9 @@ bool ASTReader::readASTFileControlBlock(
unsigned Idx = 0, N = Record.size();
while (Idx < N) {
// Read information about the AST file.
- Idx += 1+1+1+1+5; // Kind, ImportLoc, Size, ModTime, Signature
+ Idx +=
+ 1 + 1 + 1 + 1 +
+ ASTFileSignature::size; // Kind, ImportLoc, Size, ModTime, Signature
std::string ModuleName = ReadString(Record, Idx);
std::string Filename = ReadString(Record, Idx);
ResolveImportedPath(Filename, ModuleDir);
@@ -5532,14 +5578,14 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
// imported module file.
CurrentModule->Requirements.clear();
CurrentModule->MissingHeaders.clear();
- CurrentModule->IsMissingRequirement =
- ParentModule && ParentModule->IsMissingRequirement;
- CurrentModule->IsAvailable = !CurrentModule->IsMissingRequirement;
+ CurrentModule->IsUnimportable =
+ ParentModule && ParentModule->IsUnimportable;
+ CurrentModule->IsAvailable = !CurrentModule->IsUnimportable;
break;
}
case SUBMODULE_UMBRELLA_HEADER: {
- std::string Filename = Blob;
+ std::string Filename = std::string(Blob);
ResolveImportedPath(F, Filename);
if (auto Umbrella = PP.getFileManager().getFile(Filename)) {
if (!CurrentModule->getUmbrellaHeader())
@@ -5572,7 +5618,7 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
case SUBMODULE_UMBRELLA_DIR: {
- std::string Dirname = Blob;
+ std::string Dirname = std::string(Blob);
ResolveImportedPath(F, Dirname);
if (auto Umbrella = PP.getFileManager().getDirectory(Dirname)) {
if (!CurrentModule->getUmbrellaDir())
@@ -5642,7 +5688,7 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
case SUBMODULE_LINK_LIBRARY:
ModMap.resolveLinkAsDependencies(CurrentModule);
CurrentModule->LinkLibraries.push_back(
- Module::LinkLibrary(Blob, Record[0]));
+ Module::LinkLibrary(std::string(Blob), Record[0]));
break;
case SUBMODULE_CONFIG_MACRO:
@@ -5903,8 +5949,8 @@ PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) {
}
SavedStreamPosition SavedPosition(M.PreprocessorDetailCursor);
- if (llvm::Error Err =
- M.PreprocessorDetailCursor.JumpToBit(PPOffs.BitOffset)) {
+ if (llvm::Error Err = M.PreprocessorDetailCursor.JumpToBit(
+ M.MacroOffsetsBase + PPOffs.BitOffset)) {
Error(std::move(Err));
return nullptr;
}
@@ -6317,7 +6363,9 @@ ASTReader::RecordLocation ASTReader::TypeCursorForIndex(unsigned Index) {
GlobalTypeMapType::iterator I = GlobalTypeMap.find(Index);
assert(I != GlobalTypeMap.end() && "Corrupted global type map");
ModuleFile *M = I->second;
- return RecordLocation(M, M->TypeOffsets[Index - M->BaseTypeIndex]);
+ return RecordLocation(
+ M, M->TypeOffsets[Index - M->BaseTypeIndex].getBitOffset() +
+ M->DeclsBlockStartOffset);
}
static llvm::Optional<Type::TypeClass> getTypeClassForCode(TypeCode code) {
@@ -6523,6 +6571,21 @@ void TypeLocReader::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) {
TL.setNameLoc(readSourceLocation());
}
+void TypeLocReader::VisitConstantMatrixTypeLoc(ConstantMatrixTypeLoc TL) {
+ TL.setAttrNameLoc(readSourceLocation());
+ TL.setAttrOperandParensRange(Reader.readSourceRange());
+ TL.setAttrRowOperand(Reader.readExpr());
+ TL.setAttrColumnOperand(Reader.readExpr());
+}
+
+void TypeLocReader::VisitDependentSizedMatrixTypeLoc(
+ DependentSizedMatrixTypeLoc TL) {
+ TL.setAttrNameLoc(readSourceLocation());
+ TL.setAttrOperandParensRange(Reader.readSourceRange());
+ TL.setAttrRowOperand(Reader.readExpr());
+ TL.setAttrColumnOperand(Reader.readExpr());
+}
+
void TypeLocReader::VisitFunctionTypeLoc(FunctionTypeLoc TL) {
TL.setLocalRangeBegin(readSourceLocation());
TL.setLParenLoc(readSourceLocation());
@@ -6576,6 +6639,17 @@ void TypeLocReader::VisitUnaryTransformTypeLoc(UnaryTransformTypeLoc TL) {
void TypeLocReader::VisitAutoTypeLoc(AutoTypeLoc TL) {
TL.setNameLoc(readSourceLocation());
+ if (Reader.readBool()) {
+ TL.setNestedNameSpecifierLoc(ReadNestedNameSpecifierLoc());
+ TL.setTemplateKWLoc(readSourceLocation());
+ TL.setConceptNameLoc(readSourceLocation());
+ TL.setFoundDecl(Reader.readDeclAs<NamedDecl>());
+ TL.setLAngleLoc(readSourceLocation());
+ TL.setRAngleLoc(readSourceLocation());
+ for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
+ TL.setArgLocInfo(i, Reader.readTemplateArgumentLocInfo(
+ TL.getTypePtr()->getArg(i).getKind()));
+ }
}
void TypeLocReader::VisitDeducedTemplateSpecializationTypeLoc(
@@ -6700,6 +6774,15 @@ void TypeLocReader::VisitPipeTypeLoc(PipeTypeLoc TL) {
TL.setKWLoc(readSourceLocation());
}
+void TypeLocReader::VisitExtIntTypeLoc(clang::ExtIntTypeLoc TL) {
+ TL.setNameLoc(readSourceLocation());
+}
+void TypeLocReader::VisitDependentExtIntTypeLoc(
+ clang::DependentExtIntTypeLoc TL) {
+ TL.setNameLoc(readSourceLocation());
+}
+
+
void ASTRecordReader::readTypeLoc(TypeLoc TL) {
TypeLocReader TLR(*this);
for (; !TL.isNull(); TL = TL.getNextTypeLoc())
@@ -6778,6 +6861,9 @@ QualType ASTReader::GetType(TypeID ID) {
case PREDEF_TYPE_INT128_ID:
T = Context.Int128Ty;
break;
+ case PREDEF_TYPE_BFLOAT16_ID:
+ T = Context.BFloat16Ty;
+ break;
case PREDEF_TYPE_HALF_ID:
T = Context.HalfTy;
break;
@@ -6941,9 +7027,18 @@ QualType ASTReader::GetType(TypeID ID) {
case PREDEF_TYPE_BUILTIN_FN:
T = Context.BuiltinFnTy;
break;
+ case PREDEF_TYPE_INCOMPLETE_MATRIX_IDX:
+ T = Context.IncompleteMatrixIdxTy;
+ break;
case PREDEF_TYPE_OMP_ARRAY_SECTION:
T = Context.OMPArraySectionTy;
break;
+ case PREDEF_TYPE_OMP_ARRAY_SHAPING:
+ T = Context.OMPArraySectionTy;
+ break;
+ case PREDEF_TYPE_OMP_ITERATOR:
+ T = Context.OMPIteratorTy;
+ break;
#define SVE_TYPE(Name, Id, SingletonId) \
case PREDEF_TYPE_##Id##_ID: \
T = Context.SingletonId; \
@@ -7266,6 +7361,9 @@ static Decl *getPredefinedDecl(ASTContext &Context, PredefinedDeclIDs ID) {
case PREDEF_DECL_BUILTIN_MS_VA_LIST_ID:
return Context.getBuiltinMSVaListDecl();
+ case PREDEF_DECL_BUILTIN_MS_GUID_ID:
+ return Context.getMSGuidTagDecl();
+
case PREDEF_DECL_EXTERN_C_CONTEXT_ID:
return Context.getExternCContextDecl();
@@ -7746,7 +7844,9 @@ void ASTReader::InitializeSema(Sema &S) {
// FIXME: What happens if these are changed by a module import?
if (!FPPragmaOptions.empty()) {
assert(FPPragmaOptions.size() == 1 && "Wrong number of FP_PRAGMA_OPTIONS");
- SemaObj->FPFeatures = FPOptions(FPPragmaOptions[0]);
+ FPOptionsOverride NewOverrides(FPPragmaOptions[0]);
+ SemaObj->CurFPFeatures =
+ NewOverrides.applyOverrides(SemaObj->getLangOpts());
}
SemaObj->OpenCLFeatures.copy(OpenCLExtensions);
@@ -7816,6 +7916,34 @@ void ASTReader::UpdateSema() {
SemaObj->PackStack.CurrentPragmaLocation = PragmaPackCurrentLocation;
}
}
+ if (FpPragmaCurrentValue) {
+ // The bottom of the stack might have a default value. It must be adjusted
+ // to the current value to ensure that fp-pragma state is preserved after
+ // popping entries that were included/imported from a PCH/module.
+ bool DropFirst = false;
+ if (!FpPragmaStack.empty() && FpPragmaStack.front().Location.isInvalid()) {
+ assert(FpPragmaStack.front().Value ==
+ SemaObj->FpPragmaStack.DefaultValue &&
+ "Expected a default pragma float_control value");
+ SemaObj->FpPragmaStack.Stack.emplace_back(
+ FpPragmaStack.front().SlotLabel, SemaObj->FpPragmaStack.CurrentValue,
+ SemaObj->FpPragmaStack.CurrentPragmaLocation,
+ FpPragmaStack.front().PushLocation);
+ DropFirst = true;
+ }
+ for (const auto &Entry :
+ llvm::makeArrayRef(FpPragmaStack).drop_front(DropFirst ? 1 : 0))
+ SemaObj->FpPragmaStack.Stack.emplace_back(
+ Entry.SlotLabel, Entry.Value, Entry.Location, Entry.PushLocation);
+ if (FpPragmaCurrentLocation.isInvalid()) {
+ assert(*FpPragmaCurrentValue == SemaObj->FpPragmaStack.DefaultValue &&
+ "Expected a default pragma float_control value");
+ // Keep the current values.
+ } else {
+ SemaObj->FpPragmaStack.CurrentValue = *FpPragmaCurrentValue;
+ SemaObj->FpPragmaStack.CurrentPragmaLocation = FpPragmaCurrentLocation;
+ }
+ }
}
IdentifierInfo *ASTReader::get(StringRef Name) {
@@ -8172,6 +8300,19 @@ void ASTReader::ReadUnusedLocalTypedefNameCandidates(
UnusedLocalTypedefNameCandidates.clear();
}
+void ASTReader::ReadDeclsToCheckForDeferredDiags(
+ llvm::SmallVector<Decl *, 4> &Decls) {
+ for (unsigned I = 0, N = DeclsToCheckForDeferredDiags.size(); I != N;
+ ++I) {
+ auto *D = dyn_cast_or_null<Decl>(
+ GetDecl(DeclsToCheckForDeferredDiags[I]));
+ if (D)
+ Decls.push_back(D);
+ }
+ DeclsToCheckForDeferredDiags.clear();
+}
+
+
void ASTReader::ReadReferencedSelectors(
SmallVectorImpl<std::pair<Selector, SourceLocation>> &Sels) {
if (ReferencedSelectorsData.empty())
@@ -8390,7 +8531,8 @@ MacroInfo *ASTReader::getMacro(MacroID ID) {
assert(I != GlobalMacroMap.end() && "Corrupted global macro map");
ModuleFile *M = I->second;
unsigned Index = ID - M->BaseMacroID;
- MacrosLoaded[ID] = ReadMacroRecord(*M, M->MacroOffsets[Index]);
+ MacrosLoaded[ID] =
+ ReadMacroRecord(*M, M->MacroOffsetsBase + M->MacroOffsets[Index]);
if (DeserializationListener)
DeserializationListener->MacroRead(ID + NUM_PREDEF_MACRO_IDS,
@@ -8483,10 +8625,10 @@ unsigned ASTReader::getModuleFileID(ModuleFile *F) {
return (I - PCHModules.end()) << 1;
}
-llvm::Optional<ExternalASTSource::ASTSourceDescriptor>
+llvm::Optional<ASTSourceDescriptor>
ASTReader::getSourceDescriptor(unsigned ID) {
- if (const Module *M = getSubmodule(ID))
- return ExternalASTSource::ASTSourceDescriptor(*M);
+ if (Module *M = getSubmodule(ID))
+ return ASTSourceDescriptor(*M);
// If there is only a single PCH, return it instead.
// Chained PCH are not supported.
@@ -8495,8 +8637,8 @@ ASTReader::getSourceDescriptor(unsigned ID) {
ModuleFile &MF = ModuleMgr.getPrimaryModule();
StringRef ModuleName = llvm::sys::path::filename(MF.OriginalSourceFileName);
StringRef FileName = llvm::sys::path::filename(MF.FileName);
- return ASTReader::ASTSourceDescriptor(ModuleName, MF.OriginalDir, FileName,
- MF.Signature);
+ return ASTSourceDescriptor(ModuleName, MF.OriginalDir, FileName,
+ MF.Signature);
}
return None;
}
@@ -9440,6 +9582,446 @@ void ASTReader::diagnoseOdrViolations() {
return Hash.CalculateHash();
};
+ // Used with err_module_odr_violation_mismatch_decl and
+ // note_module_odr_violation_mismatch_decl
+ // This list should be the same Decl's as in ODRHash::isDeclToBeProcessed
+ enum ODRMismatchDecl {
+ EndOfClass,
+ PublicSpecifer,
+ PrivateSpecifer,
+ ProtectedSpecifer,
+ StaticAssert,
+ Field,
+ CXXMethod,
+ TypeAlias,
+ TypeDef,
+ Var,
+ Friend,
+ FunctionTemplate,
+ Other
+ };
+
+ // Used with err_module_odr_violation_mismatch_decl_diff and
+ // note_module_odr_violation_mismatch_decl_diff
+ enum ODRMismatchDeclDifference {
+ StaticAssertCondition,
+ StaticAssertMessage,
+ StaticAssertOnlyMessage,
+ FieldName,
+ FieldTypeName,
+ FieldSingleBitField,
+ FieldDifferentWidthBitField,
+ FieldSingleMutable,
+ FieldSingleInitializer,
+ FieldDifferentInitializers,
+ MethodName,
+ MethodDeleted,
+ MethodDefaulted,
+ MethodVirtual,
+ MethodStatic,
+ MethodVolatile,
+ MethodConst,
+ MethodInline,
+ MethodNumberParameters,
+ MethodParameterType,
+ MethodParameterName,
+ MethodParameterSingleDefaultArgument,
+ MethodParameterDifferentDefaultArgument,
+ MethodNoTemplateArguments,
+ MethodDifferentNumberTemplateArguments,
+ MethodDifferentTemplateArgument,
+ MethodSingleBody,
+ MethodDifferentBody,
+ TypedefName,
+ TypedefType,
+ VarName,
+ VarType,
+ VarSingleInitializer,
+ VarDifferentInitializer,
+ VarConstexpr,
+ FriendTypeFunction,
+ FriendType,
+ FriendFunction,
+ FunctionTemplateDifferentNumberParameters,
+ FunctionTemplateParameterDifferentKind,
+ FunctionTemplateParameterName,
+ FunctionTemplateParameterSingleDefaultArgument,
+ FunctionTemplateParameterDifferentDefaultArgument,
+ FunctionTemplateParameterDifferentType,
+ FunctionTemplatePackParameter,
+ };
+
+ // These lambdas have the common portions of the ODR diagnostics. This
+ // has the same return as Diag(), so addition parameters can be passed
+ // in with operator<<
+ auto ODRDiagDeclError = [this](NamedDecl *FirstRecord, StringRef FirstModule,
+ SourceLocation Loc, SourceRange Range,
+ ODRMismatchDeclDifference DiffType) {
+ return Diag(Loc, diag::err_module_odr_violation_mismatch_decl_diff)
+ << FirstRecord << FirstModule.empty() << FirstModule << Range
+ << DiffType;
+ };
+ auto ODRDiagDeclNote = [this](StringRef SecondModule, SourceLocation Loc,
+ SourceRange Range, ODRMismatchDeclDifference DiffType) {
+ return Diag(Loc, diag::note_module_odr_violation_mismatch_decl_diff)
+ << SecondModule << Range << DiffType;
+ };
+
+ auto ODRDiagField = [this, &ODRDiagDeclError, &ODRDiagDeclNote,
+ &ComputeQualTypeODRHash, &ComputeODRHash](
+ NamedDecl *FirstRecord, StringRef FirstModule,
+ StringRef SecondModule, FieldDecl *FirstField,
+ FieldDecl *SecondField) {
+ IdentifierInfo *FirstII = FirstField->getIdentifier();
+ IdentifierInfo *SecondII = SecondField->getIdentifier();
+ if (FirstII->getName() != SecondII->getName()) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
+ FirstField->getSourceRange(), FieldName)
+ << FirstII;
+ ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
+ SecondField->getSourceRange(), FieldName)
+ << SecondII;
+
+ return true;
+ }
+
+ assert(getContext().hasSameType(FirstField->getType(),
+ SecondField->getType()));
+
+ QualType FirstType = FirstField->getType();
+ QualType SecondType = SecondField->getType();
+ if (ComputeQualTypeODRHash(FirstType) !=
+ ComputeQualTypeODRHash(SecondType)) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
+ FirstField->getSourceRange(), FieldTypeName)
+ << FirstII << FirstType;
+ ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
+ SecondField->getSourceRange(), FieldTypeName)
+ << SecondII << SecondType;
+
+ return true;
+ }
+
+ const bool IsFirstBitField = FirstField->isBitField();
+ const bool IsSecondBitField = SecondField->isBitField();
+ if (IsFirstBitField != IsSecondBitField) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
+ FirstField->getSourceRange(), FieldSingleBitField)
+ << FirstII << IsFirstBitField;
+ ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
+ SecondField->getSourceRange(), FieldSingleBitField)
+ << SecondII << IsSecondBitField;
+ return true;
+ }
+
+ if (IsFirstBitField && IsSecondBitField) {
+ unsigned FirstBitWidthHash =
+ ComputeODRHash(FirstField->getBitWidth());
+ unsigned SecondBitWidthHash =
+ ComputeODRHash(SecondField->getBitWidth());
+ if (FirstBitWidthHash != SecondBitWidthHash) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
+ FirstField->getSourceRange(),
+ FieldDifferentWidthBitField)
+ << FirstII << FirstField->getBitWidth()->getSourceRange();
+ ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
+ SecondField->getSourceRange(),
+ FieldDifferentWidthBitField)
+ << SecondII << SecondField->getBitWidth()->getSourceRange();
+ return true;
+ }
+ }
+
+ if (!PP.getLangOpts().CPlusPlus)
+ return false;
+
+ const bool IsFirstMutable = FirstField->isMutable();
+ const bool IsSecondMutable = SecondField->isMutable();
+ if (IsFirstMutable != IsSecondMutable) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
+ FirstField->getSourceRange(), FieldSingleMutable)
+ << FirstII << IsFirstMutable;
+ ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
+ SecondField->getSourceRange(), FieldSingleMutable)
+ << SecondII << IsSecondMutable;
+ return true;
+ }
+
+ const Expr *FirstInitializer = FirstField->getInClassInitializer();
+ const Expr *SecondInitializer = SecondField->getInClassInitializer();
+ if ((!FirstInitializer && SecondInitializer) ||
+ (FirstInitializer && !SecondInitializer)) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
+ FirstField->getSourceRange(), FieldSingleInitializer)
+ << FirstII << (FirstInitializer != nullptr);
+ ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
+ SecondField->getSourceRange(), FieldSingleInitializer)
+ << SecondII << (SecondInitializer != nullptr);
+ return true;
+ }
+
+ if (FirstInitializer && SecondInitializer) {
+ unsigned FirstInitHash = ComputeODRHash(FirstInitializer);
+ unsigned SecondInitHash = ComputeODRHash(SecondInitializer);
+ if (FirstInitHash != SecondInitHash) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
+ FirstField->getSourceRange(),
+ FieldDifferentInitializers)
+ << FirstII << FirstInitializer->getSourceRange();
+ ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
+ SecondField->getSourceRange(),
+ FieldDifferentInitializers)
+ << SecondII << SecondInitializer->getSourceRange();
+ return true;
+ }
+ }
+
+ return false;
+ };
+
+ auto ODRDiagTypeDefOrAlias =
+ [&ODRDiagDeclError, &ODRDiagDeclNote, &ComputeQualTypeODRHash](
+ NamedDecl *FirstRecord, StringRef FirstModule, StringRef SecondModule,
+ TypedefNameDecl *FirstTD, TypedefNameDecl *SecondTD,
+ bool IsTypeAlias) {
+ auto FirstName = FirstTD->getDeclName();
+ auto SecondName = SecondTD->getDeclName();
+ if (FirstName != SecondName) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstTD->getLocation(),
+ FirstTD->getSourceRange(), TypedefName)
+ << IsTypeAlias << FirstName;
+ ODRDiagDeclNote(SecondModule, SecondTD->getLocation(),
+ SecondTD->getSourceRange(), TypedefName)
+ << IsTypeAlias << SecondName;
+ return true;
+ }
+
+ QualType FirstType = FirstTD->getUnderlyingType();
+ QualType SecondType = SecondTD->getUnderlyingType();
+ if (ComputeQualTypeODRHash(FirstType) !=
+ ComputeQualTypeODRHash(SecondType)) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstTD->getLocation(),
+ FirstTD->getSourceRange(), TypedefType)
+ << IsTypeAlias << FirstName << FirstType;
+ ODRDiagDeclNote(SecondModule, SecondTD->getLocation(),
+ SecondTD->getSourceRange(), TypedefType)
+ << IsTypeAlias << SecondName << SecondType;
+ return true;
+ }
+
+ return false;
+ };
+
+ auto ODRDiagVar = [&ODRDiagDeclError, &ODRDiagDeclNote,
+ &ComputeQualTypeODRHash, &ComputeODRHash,
+ this](NamedDecl *FirstRecord, StringRef FirstModule,
+ StringRef SecondModule, VarDecl *FirstVD,
+ VarDecl *SecondVD) {
+ auto FirstName = FirstVD->getDeclName();
+ auto SecondName = SecondVD->getDeclName();
+ if (FirstName != SecondName) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstVD->getLocation(),
+ FirstVD->getSourceRange(), VarName)
+ << FirstName;
+ ODRDiagDeclNote(SecondModule, SecondVD->getLocation(),
+ SecondVD->getSourceRange(), VarName)
+ << SecondName;
+ return true;
+ }
+
+ QualType FirstType = FirstVD->getType();
+ QualType SecondType = SecondVD->getType();
+ if (ComputeQualTypeODRHash(FirstType) !=
+ ComputeQualTypeODRHash(SecondType)) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstVD->getLocation(),
+ FirstVD->getSourceRange(), VarType)
+ << FirstName << FirstType;
+ ODRDiagDeclNote(SecondModule, SecondVD->getLocation(),
+ SecondVD->getSourceRange(), VarType)
+ << SecondName << SecondType;
+ return true;
+ }
+
+ if (!PP.getLangOpts().CPlusPlus)
+ return false;
+
+ const Expr *FirstInit = FirstVD->getInit();
+ const Expr *SecondInit = SecondVD->getInit();
+ if ((FirstInit == nullptr) != (SecondInit == nullptr)) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstVD->getLocation(),
+ FirstVD->getSourceRange(), VarSingleInitializer)
+ << FirstName << (FirstInit == nullptr)
+ << (FirstInit ? FirstInit->getSourceRange() : SourceRange());
+ ODRDiagDeclNote(SecondModule, SecondVD->getLocation(),
+ SecondVD->getSourceRange(), VarSingleInitializer)
+ << SecondName << (SecondInit == nullptr)
+ << (SecondInit ? SecondInit->getSourceRange() : SourceRange());
+ return true;
+ }
+
+ if (FirstInit && SecondInit &&
+ ComputeODRHash(FirstInit) != ComputeODRHash(SecondInit)) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstVD->getLocation(),
+ FirstVD->getSourceRange(), VarDifferentInitializer)
+ << FirstName << FirstInit->getSourceRange();
+ ODRDiagDeclNote(SecondModule, SecondVD->getLocation(),
+ SecondVD->getSourceRange(), VarDifferentInitializer)
+ << SecondName << SecondInit->getSourceRange();
+ return true;
+ }
+
+ const bool FirstIsConstexpr = FirstVD->isConstexpr();
+ const bool SecondIsConstexpr = SecondVD->isConstexpr();
+ if (FirstIsConstexpr != SecondIsConstexpr) {
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstVD->getLocation(),
+ FirstVD->getSourceRange(), VarConstexpr)
+ << FirstName << FirstIsConstexpr;
+ ODRDiagDeclNote(SecondModule, SecondVD->getLocation(),
+ SecondVD->getSourceRange(), VarConstexpr)
+ << SecondName << SecondIsConstexpr;
+ return true;
+ }
+ return false;
+ };
+
+ auto DifferenceSelector = [](Decl *D) {
+ assert(D && "valid Decl required");
+ switch (D->getKind()) {
+ default:
+ return Other;
+ case Decl::AccessSpec:
+ switch (D->getAccess()) {
+ case AS_public:
+ return PublicSpecifer;
+ case AS_private:
+ return PrivateSpecifer;
+ case AS_protected:
+ return ProtectedSpecifer;
+ case AS_none:
+ break;
+ }
+ llvm_unreachable("Invalid access specifier");
+ case Decl::StaticAssert:
+ return StaticAssert;
+ case Decl::Field:
+ return Field;
+ case Decl::CXXMethod:
+ case Decl::CXXConstructor:
+ case Decl::CXXDestructor:
+ return CXXMethod;
+ case Decl::TypeAlias:
+ return TypeAlias;
+ case Decl::Typedef:
+ return TypeDef;
+ case Decl::Var:
+ return Var;
+ case Decl::Friend:
+ return Friend;
+ case Decl::FunctionTemplate:
+ return FunctionTemplate;
+ }
+ };
+
+ using DeclHashes = llvm::SmallVector<std::pair<Decl *, unsigned>, 4>;
+ auto PopulateHashes = [&ComputeSubDeclODRHash](DeclHashes &Hashes,
+ RecordDecl *Record,
+ const DeclContext *DC) {
+ for (auto *D : Record->decls()) {
+ if (!ODRHash::isDeclToBeProcessed(D, DC))
+ continue;
+ Hashes.emplace_back(D, ComputeSubDeclODRHash(D));
+ }
+ };
+
+ struct DiffResult {
+ Decl *FirstDecl = nullptr, *SecondDecl = nullptr;
+ ODRMismatchDecl FirstDiffType = Other, SecondDiffType = Other;
+ };
+
+ // If there is a diagnoseable difference, FirstDiffType and
+ // SecondDiffType will not be Other and FirstDecl and SecondDecl will be
+ // filled in if not EndOfClass.
+ auto FindTypeDiffs = [&DifferenceSelector](DeclHashes &FirstHashes,
+ DeclHashes &SecondHashes) {
+ DiffResult DR;
+ auto FirstIt = FirstHashes.begin();
+ auto SecondIt = SecondHashes.begin();
+ while (FirstIt != FirstHashes.end() || SecondIt != SecondHashes.end()) {
+ if (FirstIt != FirstHashes.end() && SecondIt != SecondHashes.end() &&
+ FirstIt->second == SecondIt->second) {
+ ++FirstIt;
+ ++SecondIt;
+ continue;
+ }
+
+ DR.FirstDecl = FirstIt == FirstHashes.end() ? nullptr : FirstIt->first;
+ DR.SecondDecl =
+ SecondIt == SecondHashes.end() ? nullptr : SecondIt->first;
+
+ DR.FirstDiffType =
+ DR.FirstDecl ? DifferenceSelector(DR.FirstDecl) : EndOfClass;
+ DR.SecondDiffType =
+ DR.SecondDecl ? DifferenceSelector(DR.SecondDecl) : EndOfClass;
+ return DR;
+ }
+ return DR;
+ };
+
+ // Use this to diagnose that an unexpected Decl was encountered
+ // or no difference was detected. This causes a generic error
+ // message to be emitted.
+ auto DiagnoseODRUnexpected = [this](DiffResult &DR, NamedDecl *FirstRecord,
+ StringRef FirstModule,
+ NamedDecl *SecondRecord,
+ StringRef SecondModule) {
+ Diag(FirstRecord->getLocation(),
+ diag::err_module_odr_violation_different_definitions)
+ << FirstRecord << FirstModule.empty() << FirstModule;
+
+ if (DR.FirstDecl) {
+ Diag(DR.FirstDecl->getLocation(), diag::note_first_module_difference)
+ << FirstRecord << DR.FirstDecl->getSourceRange();
+ }
+
+ Diag(SecondRecord->getLocation(),
+ diag::note_module_odr_violation_different_definitions)
+ << SecondModule;
+
+ if (DR.SecondDecl) {
+ Diag(DR.SecondDecl->getLocation(), diag::note_second_module_difference)
+ << DR.SecondDecl->getSourceRange();
+ }
+ };
+
+ auto DiagnoseODRMismatch =
+ [this](DiffResult &DR, NamedDecl *FirstRecord, StringRef FirstModule,
+ NamedDecl *SecondRecord, StringRef SecondModule) {
+ SourceLocation FirstLoc;
+ SourceRange FirstRange;
+ auto *FirstTag = dyn_cast<TagDecl>(FirstRecord);
+ if (DR.FirstDiffType == EndOfClass && FirstTag) {
+ FirstLoc = FirstTag->getBraceRange().getEnd();
+ } else {
+ FirstLoc = DR.FirstDecl->getLocation();
+ FirstRange = DR.FirstDecl->getSourceRange();
+ }
+ Diag(FirstLoc, diag::err_module_odr_violation_mismatch_decl)
+ << FirstRecord << FirstModule.empty() << FirstModule << FirstRange
+ << DR.FirstDiffType;
+
+ SourceLocation SecondLoc;
+ SourceRange SecondRange;
+ auto *SecondTag = dyn_cast<TagDecl>(SecondRecord);
+ if (DR.SecondDiffType == EndOfClass && SecondTag) {
+ SecondLoc = SecondTag->getBraceRange().getEnd();
+ } else {
+ SecondLoc = DR.SecondDecl->getLocation();
+ SecondRange = DR.SecondDecl->getSourceRange();
+ }
+ Diag(SecondLoc, diag::note_module_odr_violation_mismatch_decl)
+ << SecondModule << SecondRange << DR.SecondDiffType;
+ };
+
// Issue any pending ODR-failure diagnostics.
for (auto &Merge : OdrMergeFailures) {
// If we've already pointed out a specific problem with this class, don't
@@ -9473,16 +10055,16 @@ void ASTReader::diagnoseOdrViolations() {
BaseVirtual,
BaseAccess,
};
- auto ODRDiagError = [FirstRecord, &FirstModule,
- this](SourceLocation Loc, SourceRange Range,
- ODRDefinitionDataDifference DiffType) {
+ auto ODRDiagBaseError = [FirstRecord, &FirstModule,
+ this](SourceLocation Loc, SourceRange Range,
+ ODRDefinitionDataDifference DiffType) {
return Diag(Loc, diag::err_module_odr_violation_definition_data)
<< FirstRecord << FirstModule.empty() << FirstModule << Range
<< DiffType;
};
- auto ODRDiagNote = [&SecondModule,
- this](SourceLocation Loc, SourceRange Range,
- ODRDefinitionDataDifference DiffType) {
+ auto ODRDiagBaseNote = [&SecondModule,
+ this](SourceLocation Loc, SourceRange Range,
+ ODRDefinitionDataDifference DiffType) {
return Diag(Loc, diag::note_module_odr_violation_definition_data)
<< SecondModule << Range << DiffType;
};
@@ -9501,22 +10083,22 @@ void ASTReader::diagnoseOdrViolations() {
};
if (FirstNumBases != SecondNumBases) {
- ODRDiagError(FirstRecord->getLocation(), GetSourceRange(FirstDD),
- NumBases)
+ ODRDiagBaseError(FirstRecord->getLocation(), GetSourceRange(FirstDD),
+ NumBases)
<< FirstNumBases;
- ODRDiagNote(SecondRecord->getLocation(), GetSourceRange(SecondDD),
- NumBases)
+ ODRDiagBaseNote(SecondRecord->getLocation(), GetSourceRange(SecondDD),
+ NumBases)
<< SecondNumBases;
Diagnosed = true;
break;
}
if (FirstNumVBases != SecondNumVBases) {
- ODRDiagError(FirstRecord->getLocation(), GetSourceRange(FirstDD),
- NumVBases)
+ ODRDiagBaseError(FirstRecord->getLocation(), GetSourceRange(FirstDD),
+ NumVBases)
<< FirstNumVBases;
- ODRDiagNote(SecondRecord->getLocation(), GetSourceRange(SecondDD),
- NumVBases)
+ ODRDiagBaseNote(SecondRecord->getLocation(), GetSourceRange(SecondDD),
+ NumVBases)
<< SecondNumVBases;
Diagnosed = true;
break;
@@ -9530,33 +10112,33 @@ void ASTReader::diagnoseOdrViolations() {
auto SecondBase = SecondBases[i];
if (ComputeQualTypeODRHash(FirstBase.getType()) !=
ComputeQualTypeODRHash(SecondBase.getType())) {
- ODRDiagError(FirstRecord->getLocation(), FirstBase.getSourceRange(),
- BaseType)
+ ODRDiagBaseError(FirstRecord->getLocation(),
+ FirstBase.getSourceRange(), BaseType)
<< (i + 1) << FirstBase.getType();
- ODRDiagNote(SecondRecord->getLocation(),
- SecondBase.getSourceRange(), BaseType)
+ ODRDiagBaseNote(SecondRecord->getLocation(),
+ SecondBase.getSourceRange(), BaseType)
<< (i + 1) << SecondBase.getType();
break;
}
if (FirstBase.isVirtual() != SecondBase.isVirtual()) {
- ODRDiagError(FirstRecord->getLocation(), FirstBase.getSourceRange(),
- BaseVirtual)
+ ODRDiagBaseError(FirstRecord->getLocation(),
+ FirstBase.getSourceRange(), BaseVirtual)
<< (i + 1) << FirstBase.isVirtual() << FirstBase.getType();
- ODRDiagNote(SecondRecord->getLocation(),
- SecondBase.getSourceRange(), BaseVirtual)
+ ODRDiagBaseNote(SecondRecord->getLocation(),
+ SecondBase.getSourceRange(), BaseVirtual)
<< (i + 1) << SecondBase.isVirtual() << SecondBase.getType();
break;
}
if (FirstBase.getAccessSpecifierAsWritten() !=
SecondBase.getAccessSpecifierAsWritten()) {
- ODRDiagError(FirstRecord->getLocation(), FirstBase.getSourceRange(),
- BaseAccess)
+ ODRDiagBaseError(FirstRecord->getLocation(),
+ FirstBase.getSourceRange(), BaseAccess)
<< (i + 1) << FirstBase.getType()
<< (int)FirstBase.getAccessSpecifierAsWritten();
- ODRDiagNote(SecondRecord->getLocation(),
- SecondBase.getSourceRange(), BaseAccess)
+ ODRDiagBaseNote(SecondRecord->getLocation(),
+ SecondBase.getSourceRange(), BaseAccess)
<< (i + 1) << SecondBase.getType()
<< (int)SecondBase.getAccessSpecifierAsWritten();
break;
@@ -9569,8 +10151,6 @@ void ASTReader::diagnoseOdrViolations() {
}
}
- using DeclHashes = llvm::SmallVector<std::pair<Decl *, unsigned>, 4>;
-
const ClassTemplateDecl *FirstTemplate =
FirstRecord->getDescribedClassTemplate();
const ClassTemplateDecl *SecondTemplate =
@@ -9611,16 +10191,16 @@ void ASTReader::diagnoseOdrViolations() {
if (FirstIt->second == SecondIt->second)
continue;
- auto ODRDiagError = [FirstRecord, &FirstModule,
- this](SourceLocation Loc, SourceRange Range,
- ODRTemplateDifference DiffType) {
+ auto ODRDiagTemplateError = [FirstRecord, &FirstModule, this](
+ SourceLocation Loc, SourceRange Range,
+ ODRTemplateDifference DiffType) {
return Diag(Loc, diag::err_module_odr_violation_template_parameter)
<< FirstRecord << FirstModule.empty() << FirstModule << Range
<< DiffType;
};
- auto ODRDiagNote = [&SecondModule,
- this](SourceLocation Loc, SourceRange Range,
- ODRTemplateDifference DiffType) {
+ auto ODRDiagTemplateNote = [&SecondModule, this](
+ SourceLocation Loc, SourceRange Range,
+ ODRTemplateDifference DiffType) {
return Diag(Loc, diag::note_module_odr_violation_template_parameter)
<< SecondModule << Range << DiffType;
};
@@ -9641,11 +10221,13 @@ void ASTReader::diagnoseOdrViolations() {
SecondName.isIdentifier() && !SecondName.getAsIdentifierInfo();
assert((!FirstNameEmpty || !SecondNameEmpty) &&
"Both template parameters cannot be unnamed.");
- ODRDiagError(FirstDecl->getLocation(), FirstDecl->getSourceRange(),
- FirstNameEmpty ? ParamEmptyName : ParamName)
+ ODRDiagTemplateError(FirstDecl->getLocation(),
+ FirstDecl->getSourceRange(),
+ FirstNameEmpty ? ParamEmptyName : ParamName)
<< FirstName;
- ODRDiagNote(SecondDecl->getLocation(), SecondDecl->getSourceRange(),
- SecondNameEmpty ? ParamEmptyName : ParamName)
+ ODRDiagTemplateNote(SecondDecl->getLocation(),
+ SecondDecl->getSourceRange(),
+ SecondNameEmpty ? ParamEmptyName : ParamName)
<< SecondName;
break;
}
@@ -9664,13 +10246,13 @@ void ASTReader::diagnoseOdrViolations() {
!SecondParam->defaultArgumentWasInherited();
if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
- ODRDiagError(FirstDecl->getLocation(),
- FirstDecl->getSourceRange(),
- ParamSingleDefaultArgument)
+ ODRDiagTemplateError(FirstDecl->getLocation(),
+ FirstDecl->getSourceRange(),
+ ParamSingleDefaultArgument)
<< HasFirstDefaultArgument;
- ODRDiagNote(SecondDecl->getLocation(),
- SecondDecl->getSourceRange(),
- ParamSingleDefaultArgument)
+ ODRDiagTemplateNote(SecondDecl->getLocation(),
+ SecondDecl->getSourceRange(),
+ ParamSingleDefaultArgument)
<< HasSecondDefaultArgument;
break;
}
@@ -9678,10 +10260,12 @@ void ASTReader::diagnoseOdrViolations() {
assert(HasFirstDefaultArgument && HasSecondDefaultArgument &&
"Expecting default arguments.");
- ODRDiagError(FirstDecl->getLocation(), FirstDecl->getSourceRange(),
- ParamDifferentDefaultArgument);
- ODRDiagNote(SecondDecl->getLocation(), SecondDecl->getSourceRange(),
- ParamDifferentDefaultArgument);
+ ODRDiagTemplateError(FirstDecl->getLocation(),
+ FirstDecl->getSourceRange(),
+ ParamDifferentDefaultArgument);
+ ODRDiagTemplateNote(SecondDecl->getLocation(),
+ SecondDecl->getSourceRange(),
+ ParamDifferentDefaultArgument);
break;
}
@@ -9696,13 +10280,13 @@ void ASTReader::diagnoseOdrViolations() {
!SecondParam->defaultArgumentWasInherited();
if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
- ODRDiagError(FirstDecl->getLocation(),
- FirstDecl->getSourceRange(),
- ParamSingleDefaultArgument)
+ ODRDiagTemplateError(FirstDecl->getLocation(),
+ FirstDecl->getSourceRange(),
+ ParamSingleDefaultArgument)
<< HasFirstDefaultArgument;
- ODRDiagNote(SecondDecl->getLocation(),
- SecondDecl->getSourceRange(),
- ParamSingleDefaultArgument)
+ ODRDiagTemplateNote(SecondDecl->getLocation(),
+ SecondDecl->getSourceRange(),
+ ParamSingleDefaultArgument)
<< HasSecondDefaultArgument;
break;
}
@@ -9710,10 +10294,12 @@ void ASTReader::diagnoseOdrViolations() {
assert(HasFirstDefaultArgument && HasSecondDefaultArgument &&
"Expecting default arguments.");
- ODRDiagError(FirstDecl->getLocation(), FirstDecl->getSourceRange(),
- ParamDifferentDefaultArgument);
- ODRDiagNote(SecondDecl->getLocation(), SecondDecl->getSourceRange(),
- ParamDifferentDefaultArgument);
+ ODRDiagTemplateError(FirstDecl->getLocation(),
+ FirstDecl->getSourceRange(),
+ ParamDifferentDefaultArgument);
+ ODRDiagTemplateNote(SecondDecl->getLocation(),
+ SecondDecl->getSourceRange(),
+ ParamDifferentDefaultArgument);
break;
}
@@ -9729,13 +10315,13 @@ void ASTReader::diagnoseOdrViolations() {
!SecondParam->defaultArgumentWasInherited();
if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
- ODRDiagError(FirstDecl->getLocation(),
- FirstDecl->getSourceRange(),
- ParamSingleDefaultArgument)
+ ODRDiagTemplateError(FirstDecl->getLocation(),
+ FirstDecl->getSourceRange(),
+ ParamSingleDefaultArgument)
<< HasFirstDefaultArgument;
- ODRDiagNote(SecondDecl->getLocation(),
- SecondDecl->getSourceRange(),
- ParamSingleDefaultArgument)
+ ODRDiagTemplateNote(SecondDecl->getLocation(),
+ SecondDecl->getSourceRange(),
+ ParamSingleDefaultArgument)
<< HasSecondDefaultArgument;
break;
}
@@ -9743,10 +10329,12 @@ void ASTReader::diagnoseOdrViolations() {
assert(HasFirstDefaultArgument && HasSecondDefaultArgument &&
"Expecting default arguments.");
- ODRDiagError(FirstDecl->getLocation(), FirstDecl->getSourceRange(),
- ParamDifferentDefaultArgument);
- ODRDiagNote(SecondDecl->getLocation(), SecondDecl->getSourceRange(),
- ParamDifferentDefaultArgument);
+ ODRDiagTemplateError(FirstDecl->getLocation(),
+ FirstDecl->getSourceRange(),
+ ParamDifferentDefaultArgument);
+ ODRDiagTemplateNote(SecondDecl->getLocation(),
+ SecondDecl->getSourceRange(),
+ ParamDifferentDefaultArgument);
break;
}
@@ -9763,224 +10351,32 @@ void ASTReader::diagnoseOdrViolations() {
DeclHashes FirstHashes;
DeclHashes SecondHashes;
+ const DeclContext *DC = FirstRecord;
+ PopulateHashes(FirstHashes, FirstRecord, DC);
+ PopulateHashes(SecondHashes, SecondRecord, DC);
- auto PopulateHashes = [&ComputeSubDeclODRHash, FirstRecord](
- DeclHashes &Hashes, CXXRecordDecl *Record) {
- for (auto *D : Record->decls()) {
- // Due to decl merging, the first CXXRecordDecl is the parent of
- // Decls in both records.
- if (!ODRHash::isWhitelistedDecl(D, FirstRecord))
- continue;
- Hashes.emplace_back(D, ComputeSubDeclODRHash(D));
- }
- };
- PopulateHashes(FirstHashes, FirstRecord);
- PopulateHashes(SecondHashes, SecondRecord);
-
- // Used with err_module_odr_violation_mismatch_decl and
- // note_module_odr_violation_mismatch_decl
- // This list should be the same Decl's as in ODRHash::isWhiteListedDecl
- enum {
- EndOfClass,
- PublicSpecifer,
- PrivateSpecifer,
- ProtectedSpecifer,
- StaticAssert,
- Field,
- CXXMethod,
- TypeAlias,
- TypeDef,
- Var,
- Friend,
- FunctionTemplate,
- Other
- } FirstDiffType = Other,
- SecondDiffType = Other;
-
- auto DifferenceSelector = [](Decl *D) {
- assert(D && "valid Decl required");
- switch (D->getKind()) {
- default:
- return Other;
- case Decl::AccessSpec:
- switch (D->getAccess()) {
- case AS_public:
- return PublicSpecifer;
- case AS_private:
- return PrivateSpecifer;
- case AS_protected:
- return ProtectedSpecifer;
- case AS_none:
- break;
- }
- llvm_unreachable("Invalid access specifier");
- case Decl::StaticAssert:
- return StaticAssert;
- case Decl::Field:
- return Field;
- case Decl::CXXMethod:
- case Decl::CXXConstructor:
- case Decl::CXXDestructor:
- return CXXMethod;
- case Decl::TypeAlias:
- return TypeAlias;
- case Decl::Typedef:
- return TypeDef;
- case Decl::Var:
- return Var;
- case Decl::Friend:
- return Friend;
- case Decl::FunctionTemplate:
- return FunctionTemplate;
- }
- };
-
- Decl *FirstDecl = nullptr;
- Decl *SecondDecl = nullptr;
- auto FirstIt = FirstHashes.begin();
- auto SecondIt = SecondHashes.begin();
-
- // If there is a diagnoseable difference, FirstDiffType and
- // SecondDiffType will not be Other and FirstDecl and SecondDecl will be
- // filled in if not EndOfClass.
- while (FirstIt != FirstHashes.end() || SecondIt != SecondHashes.end()) {
- if (FirstIt != FirstHashes.end() && SecondIt != SecondHashes.end() &&
- FirstIt->second == SecondIt->second) {
- ++FirstIt;
- ++SecondIt;
- continue;
- }
-
- FirstDecl = FirstIt == FirstHashes.end() ? nullptr : FirstIt->first;
- SecondDecl = SecondIt == SecondHashes.end() ? nullptr : SecondIt->first;
-
- FirstDiffType = FirstDecl ? DifferenceSelector(FirstDecl) : EndOfClass;
- SecondDiffType =
- SecondDecl ? DifferenceSelector(SecondDecl) : EndOfClass;
-
- break;
- }
+ auto DR = FindTypeDiffs(FirstHashes, SecondHashes);
+ ODRMismatchDecl FirstDiffType = DR.FirstDiffType;
+ ODRMismatchDecl SecondDiffType = DR.SecondDiffType;
+ Decl *FirstDecl = DR.FirstDecl;
+ Decl *SecondDecl = DR.SecondDecl;
if (FirstDiffType == Other || SecondDiffType == Other) {
- // Reaching this point means an unexpected Decl was encountered
- // or no difference was detected. This causes a generic error
- // message to be emitted.
- Diag(FirstRecord->getLocation(),
- diag::err_module_odr_violation_different_definitions)
- << FirstRecord << FirstModule.empty() << FirstModule;
-
- if (FirstDecl) {
- Diag(FirstDecl->getLocation(), diag::note_first_module_difference)
- << FirstRecord << FirstDecl->getSourceRange();
- }
-
- Diag(SecondRecord->getLocation(),
- diag::note_module_odr_violation_different_definitions)
- << SecondModule;
-
- if (SecondDecl) {
- Diag(SecondDecl->getLocation(), diag::note_second_module_difference)
- << SecondDecl->getSourceRange();
- }
-
+ DiagnoseODRUnexpected(DR, FirstRecord, FirstModule, SecondRecord,
+ SecondModule);
Diagnosed = true;
break;
}
if (FirstDiffType != SecondDiffType) {
- SourceLocation FirstLoc;
- SourceRange FirstRange;
- if (FirstDiffType == EndOfClass) {
- FirstLoc = FirstRecord->getBraceRange().getEnd();
- } else {
- FirstLoc = FirstIt->first->getLocation();
- FirstRange = FirstIt->first->getSourceRange();
- }
- Diag(FirstLoc, diag::err_module_odr_violation_mismatch_decl)
- << FirstRecord << FirstModule.empty() << FirstModule << FirstRange
- << FirstDiffType;
-
- SourceLocation SecondLoc;
- SourceRange SecondRange;
- if (SecondDiffType == EndOfClass) {
- SecondLoc = SecondRecord->getBraceRange().getEnd();
- } else {
- SecondLoc = SecondDecl->getLocation();
- SecondRange = SecondDecl->getSourceRange();
- }
- Diag(SecondLoc, diag::note_module_odr_violation_mismatch_decl)
- << SecondModule << SecondRange << SecondDiffType;
+ DiagnoseODRMismatch(DR, FirstRecord, FirstModule, SecondRecord,
+ SecondModule);
Diagnosed = true;
break;
}
assert(FirstDiffType == SecondDiffType);
- // Used with err_module_odr_violation_mismatch_decl_diff and
- // note_module_odr_violation_mismatch_decl_diff
- enum ODRDeclDifference {
- StaticAssertCondition,
- StaticAssertMessage,
- StaticAssertOnlyMessage,
- FieldName,
- FieldTypeName,
- FieldSingleBitField,
- FieldDifferentWidthBitField,
- FieldSingleMutable,
- FieldSingleInitializer,
- FieldDifferentInitializers,
- MethodName,
- MethodDeleted,
- MethodDefaulted,
- MethodVirtual,
- MethodStatic,
- MethodVolatile,
- MethodConst,
- MethodInline,
- MethodNumberParameters,
- MethodParameterType,
- MethodParameterName,
- MethodParameterSingleDefaultArgument,
- MethodParameterDifferentDefaultArgument,
- MethodNoTemplateArguments,
- MethodDifferentNumberTemplateArguments,
- MethodDifferentTemplateArgument,
- MethodSingleBody,
- MethodDifferentBody,
- TypedefName,
- TypedefType,
- VarName,
- VarType,
- VarSingleInitializer,
- VarDifferentInitializer,
- VarConstexpr,
- FriendTypeFunction,
- FriendType,
- FriendFunction,
- FunctionTemplateDifferentNumberParameters,
- FunctionTemplateParameterDifferentKind,
- FunctionTemplateParameterName,
- FunctionTemplateParameterSingleDefaultArgument,
- FunctionTemplateParameterDifferentDefaultArgument,
- FunctionTemplateParameterDifferentType,
- FunctionTemplatePackParameter,
- };
-
- // These lambdas have the common portions of the ODR diagnostics. This
- // has the same return as Diag(), so addition parameters can be passed
- // in with operator<<
- auto ODRDiagError = [FirstRecord, &FirstModule, this](
- SourceLocation Loc, SourceRange Range, ODRDeclDifference DiffType) {
- return Diag(Loc, diag::err_module_odr_violation_mismatch_decl_diff)
- << FirstRecord << FirstModule.empty() << FirstModule << Range
- << DiffType;
- };
- auto ODRDiagNote = [&SecondModule, this](
- SourceLocation Loc, SourceRange Range, ODRDeclDifference DiffType) {
- return Diag(Loc, diag::note_module_odr_violation_mismatch_decl_diff)
- << SecondModule << Range << DiffType;
- };
-
switch (FirstDiffType) {
case Other:
case EndOfClass:
@@ -9998,10 +10394,10 @@ void ASTReader::diagnoseOdrViolations() {
unsigned FirstODRHash = ComputeODRHash(FirstExpr);
unsigned SecondODRHash = ComputeODRHash(SecondExpr);
if (FirstODRHash != SecondODRHash) {
- ODRDiagError(FirstExpr->getBeginLoc(), FirstExpr->getSourceRange(),
- StaticAssertCondition);
- ODRDiagNote(SecondExpr->getBeginLoc(), SecondExpr->getSourceRange(),
- StaticAssertCondition);
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstExpr->getBeginLoc(),
+ FirstExpr->getSourceRange(), StaticAssertCondition);
+ ODRDiagDeclNote(SecondModule, SecondExpr->getBeginLoc(),
+ SecondExpr->getSourceRange(), StaticAssertCondition);
Diagnosed = true;
break;
}
@@ -10026,9 +10422,11 @@ void ASTReader::diagnoseOdrViolations() {
SecondLoc = SecondSA->getBeginLoc();
SecondRange = SecondSA->getSourceRange();
}
- ODRDiagError(FirstLoc, FirstRange, StaticAssertOnlyMessage)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstLoc, FirstRange,
+ StaticAssertOnlyMessage)
<< (FirstStr == nullptr);
- ODRDiagNote(SecondLoc, SecondRange, StaticAssertOnlyMessage)
+ ODRDiagDeclNote(SecondModule, SecondLoc, SecondRange,
+ StaticAssertOnlyMessage)
<< (SecondStr == nullptr);
Diagnosed = true;
break;
@@ -10036,126 +10434,19 @@ void ASTReader::diagnoseOdrViolations() {
if (FirstStr && SecondStr &&
FirstStr->getString() != SecondStr->getString()) {
- ODRDiagError(FirstStr->getBeginLoc(), FirstStr->getSourceRange(),
- StaticAssertMessage);
- ODRDiagNote(SecondStr->getBeginLoc(), SecondStr->getSourceRange(),
- StaticAssertMessage);
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstStr->getBeginLoc(),
+ FirstStr->getSourceRange(), StaticAssertMessage);
+ ODRDiagDeclNote(SecondModule, SecondStr->getBeginLoc(),
+ SecondStr->getSourceRange(), StaticAssertMessage);
Diagnosed = true;
break;
}
break;
}
case Field: {
- FieldDecl *FirstField = cast<FieldDecl>(FirstDecl);
- FieldDecl *SecondField = cast<FieldDecl>(SecondDecl);
- IdentifierInfo *FirstII = FirstField->getIdentifier();
- IdentifierInfo *SecondII = SecondField->getIdentifier();
- if (FirstII->getName() != SecondII->getName()) {
- ODRDiagError(FirstField->getLocation(), FirstField->getSourceRange(),
- FieldName)
- << FirstII;
- ODRDiagNote(SecondField->getLocation(), SecondField->getSourceRange(),
- FieldName)
- << SecondII;
-
- Diagnosed = true;
- break;
- }
-
- assert(getContext().hasSameType(FirstField->getType(),
- SecondField->getType()));
-
- QualType FirstType = FirstField->getType();
- QualType SecondType = SecondField->getType();
- if (ComputeQualTypeODRHash(FirstType) !=
- ComputeQualTypeODRHash(SecondType)) {
- ODRDiagError(FirstField->getLocation(), FirstField->getSourceRange(),
- FieldTypeName)
- << FirstII << FirstType;
- ODRDiagNote(SecondField->getLocation(), SecondField->getSourceRange(),
- FieldTypeName)
- << SecondII << SecondType;
-
- Diagnosed = true;
- break;
- }
-
- const bool IsFirstBitField = FirstField->isBitField();
- const bool IsSecondBitField = SecondField->isBitField();
- if (IsFirstBitField != IsSecondBitField) {
- ODRDiagError(FirstField->getLocation(), FirstField->getSourceRange(),
- FieldSingleBitField)
- << FirstII << IsFirstBitField;
- ODRDiagNote(SecondField->getLocation(), SecondField->getSourceRange(),
- FieldSingleBitField)
- << SecondII << IsSecondBitField;
- Diagnosed = true;
- break;
- }
-
- if (IsFirstBitField && IsSecondBitField) {
- unsigned FirstBitWidthHash =
- ComputeODRHash(FirstField->getBitWidth());
- unsigned SecondBitWidthHash =
- ComputeODRHash(SecondField->getBitWidth());
- if (FirstBitWidthHash != SecondBitWidthHash) {
- ODRDiagError(FirstField->getLocation(),
- FirstField->getSourceRange(),
- FieldDifferentWidthBitField)
- << FirstII << FirstField->getBitWidth()->getSourceRange();
- ODRDiagNote(SecondField->getLocation(),
- SecondField->getSourceRange(),
- FieldDifferentWidthBitField)
- << SecondII << SecondField->getBitWidth()->getSourceRange();
- Diagnosed = true;
- break;
- }
- }
-
- const bool IsFirstMutable = FirstField->isMutable();
- const bool IsSecondMutable = SecondField->isMutable();
- if (IsFirstMutable != IsSecondMutable) {
- ODRDiagError(FirstField->getLocation(), FirstField->getSourceRange(),
- FieldSingleMutable)
- << FirstII << IsFirstMutable;
- ODRDiagNote(SecondField->getLocation(), SecondField->getSourceRange(),
- FieldSingleMutable)
- << SecondII << IsSecondMutable;
- Diagnosed = true;
- break;
- }
-
- const Expr *FirstInitializer = FirstField->getInClassInitializer();
- const Expr *SecondInitializer = SecondField->getInClassInitializer();
- if ((!FirstInitializer && SecondInitializer) ||
- (FirstInitializer && !SecondInitializer)) {
- ODRDiagError(FirstField->getLocation(), FirstField->getSourceRange(),
- FieldSingleInitializer)
- << FirstII << (FirstInitializer != nullptr);
- ODRDiagNote(SecondField->getLocation(), SecondField->getSourceRange(),
- FieldSingleInitializer)
- << SecondII << (SecondInitializer != nullptr);
- Diagnosed = true;
- break;
- }
-
- if (FirstInitializer && SecondInitializer) {
- unsigned FirstInitHash = ComputeODRHash(FirstInitializer);
- unsigned SecondInitHash = ComputeODRHash(SecondInitializer);
- if (FirstInitHash != SecondInitHash) {
- ODRDiagError(FirstField->getLocation(),
- FirstField->getSourceRange(),
- FieldDifferentInitializers)
- << FirstII << FirstInitializer->getSourceRange();
- ODRDiagNote(SecondField->getLocation(),
- SecondField->getSourceRange(),
- FieldDifferentInitializers)
- << SecondII << SecondInitializer->getSourceRange();
- Diagnosed = true;
- break;
- }
- }
-
+ Diagnosed = ODRDiagField(FirstRecord, FirstModule, SecondModule,
+ cast<FieldDecl>(FirstDecl),
+ cast<FieldDecl>(SecondDecl));
break;
}
case CXXMethod: {
@@ -10177,11 +10468,11 @@ void ASTReader::diagnoseOdrViolations() {
auto FirstName = FirstMethod->getDeclName();
auto SecondName = SecondMethod->getDeclName();
if (FirstMethodType != SecondMethodType || FirstName != SecondName) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodName)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodName)
<< FirstMethodType << FirstName;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodName)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodName)
<< SecondMethodType << SecondName;
Diagnosed = true;
@@ -10191,12 +10482,12 @@ void ASTReader::diagnoseOdrViolations() {
const bool FirstDeleted = FirstMethod->isDeletedAsWritten();
const bool SecondDeleted = SecondMethod->isDeletedAsWritten();
if (FirstDeleted != SecondDeleted) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodDeleted)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodDeleted)
<< FirstMethodType << FirstName << FirstDeleted;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodDeleted)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodDeleted)
<< SecondMethodType << SecondName << SecondDeleted;
Diagnosed = true;
break;
@@ -10205,12 +10496,12 @@ void ASTReader::diagnoseOdrViolations() {
const bool FirstDefaulted = FirstMethod->isExplicitlyDefaulted();
const bool SecondDefaulted = SecondMethod->isExplicitlyDefaulted();
if (FirstDefaulted != SecondDefaulted) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodDefaulted)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodDefaulted)
<< FirstMethodType << FirstName << FirstDefaulted;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodDefaulted)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodDefaulted)
<< SecondMethodType << SecondName << SecondDefaulted;
Diagnosed = true;
break;
@@ -10222,11 +10513,11 @@ void ASTReader::diagnoseOdrViolations() {
const bool SecondPure = SecondMethod->isPure();
if ((FirstVirtual || SecondVirtual) &&
(FirstVirtual != SecondVirtual || FirstPure != SecondPure)) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodVirtual)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodVirtual)
<< FirstMethodType << FirstName << FirstPure << FirstVirtual;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodVirtual)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodVirtual)
<< SecondMethodType << SecondName << SecondPure << SecondVirtual;
Diagnosed = true;
break;
@@ -10240,11 +10531,11 @@ void ASTReader::diagnoseOdrViolations() {
const bool FirstStatic = FirstStorage == SC_Static;
const bool SecondStatic = SecondStorage == SC_Static;
if (FirstStatic != SecondStatic) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodStatic)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodStatic)
<< FirstMethodType << FirstName << FirstStatic;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodStatic)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodStatic)
<< SecondMethodType << SecondName << SecondStatic;
Diagnosed = true;
break;
@@ -10253,11 +10544,11 @@ void ASTReader::diagnoseOdrViolations() {
const bool FirstVolatile = FirstMethod->isVolatile();
const bool SecondVolatile = SecondMethod->isVolatile();
if (FirstVolatile != SecondVolatile) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodVolatile)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodVolatile)
<< FirstMethodType << FirstName << FirstVolatile;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodVolatile)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodVolatile)
<< SecondMethodType << SecondName << SecondVolatile;
Diagnosed = true;
break;
@@ -10266,11 +10557,11 @@ void ASTReader::diagnoseOdrViolations() {
const bool FirstConst = FirstMethod->isConst();
const bool SecondConst = SecondMethod->isConst();
if (FirstConst != SecondConst) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodConst)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodConst)
<< FirstMethodType << FirstName << FirstConst;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodConst)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodConst)
<< SecondMethodType << SecondName << SecondConst;
Diagnosed = true;
break;
@@ -10279,11 +10570,11 @@ void ASTReader::diagnoseOdrViolations() {
const bool FirstInline = FirstMethod->isInlineSpecified();
const bool SecondInline = SecondMethod->isInlineSpecified();
if (FirstInline != SecondInline) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodInline)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodInline)
<< FirstMethodType << FirstName << FirstInline;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodInline)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodInline)
<< SecondMethodType << SecondName << SecondInline;
Diagnosed = true;
break;
@@ -10292,11 +10583,13 @@ void ASTReader::diagnoseOdrViolations() {
const unsigned FirstNumParameters = FirstMethod->param_size();
const unsigned SecondNumParameters = SecondMethod->param_size();
if (FirstNumParameters != SecondNumParameters) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodNumberParameters)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(),
+ MethodNumberParameters)
<< FirstMethodType << FirstName << FirstNumParameters;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodNumberParameters)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(),
+ MethodNumberParameters)
<< SecondMethodType << SecondName << SecondNumParameters;
Diagnosed = true;
break;
@@ -10315,27 +10608,31 @@ void ASTReader::diagnoseOdrViolations() {
ComputeQualTypeODRHash(SecondParamType)) {
if (const DecayedType *ParamDecayedType =
FirstParamType->getAs<DecayedType>()) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodParameterType)
+ ODRDiagDeclError(
+ FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodParameterType)
<< FirstMethodType << FirstName << (I + 1) << FirstParamType
<< true << ParamDecayedType->getOriginalType();
} else {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodParameterType)
+ ODRDiagDeclError(
+ FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodParameterType)
<< FirstMethodType << FirstName << (I + 1) << FirstParamType
<< false;
}
if (const DecayedType *ParamDecayedType =
SecondParamType->getAs<DecayedType>()) {
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodParameterType)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(),
+ MethodParameterType)
<< SecondMethodType << SecondName << (I + 1)
<< SecondParamType << true
<< ParamDecayedType->getOriginalType();
} else {
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodParameterType)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(),
+ MethodParameterType)
<< SecondMethodType << SecondName << (I + 1)
<< SecondParamType << false;
}
@@ -10346,11 +10643,12 @@ void ASTReader::diagnoseOdrViolations() {
DeclarationName FirstParamName = FirstParam->getDeclName();
DeclarationName SecondParamName = SecondParam->getDeclName();
if (FirstParamName != SecondParamName) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodParameterName)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodParameterName)
<< FirstMethodType << FirstName << (I + 1) << FirstParamName;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodParameterName)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodParameterName)
<< SecondMethodType << SecondName << (I + 1) << SecondParamName;
ParameterMismatch = true;
break;
@@ -10359,15 +10657,16 @@ void ASTReader::diagnoseOdrViolations() {
const Expr *FirstInit = FirstParam->getInit();
const Expr *SecondInit = SecondParam->getInit();
if ((FirstInit == nullptr) != (SecondInit == nullptr)) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(),
- MethodParameterSingleDefaultArgument)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(),
+ MethodParameterSingleDefaultArgument)
<< FirstMethodType << FirstName << (I + 1)
<< (FirstInit == nullptr)
<< (FirstInit ? FirstInit->getSourceRange() : SourceRange());
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(),
- MethodParameterSingleDefaultArgument)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(),
+ MethodParameterSingleDefaultArgument)
<< SecondMethodType << SecondName << (I + 1)
<< (SecondInit == nullptr)
<< (SecondInit ? SecondInit->getSourceRange() : SourceRange());
@@ -10377,14 +10676,15 @@ void ASTReader::diagnoseOdrViolations() {
if (FirstInit && SecondInit &&
ComputeODRHash(FirstInit) != ComputeODRHash(SecondInit)) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(),
- MethodParameterDifferentDefaultArgument)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(),
+ MethodParameterDifferentDefaultArgument)
<< FirstMethodType << FirstName << (I + 1)
<< FirstInit->getSourceRange();
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(),
- MethodParameterDifferentDefaultArgument)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(),
+ MethodParameterDifferentDefaultArgument)
<< SecondMethodType << SecondName << (I + 1)
<< SecondInit->getSourceRange();
ParameterMismatch = true;
@@ -10405,11 +10705,13 @@ void ASTReader::diagnoseOdrViolations() {
if ((FirstTemplateArgs && !SecondTemplateArgs) ||
(!FirstTemplateArgs && SecondTemplateArgs)) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodNoTemplateArguments)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(),
+ MethodNoTemplateArguments)
<< FirstMethodType << FirstName << (FirstTemplateArgs != nullptr);
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodNoTemplateArguments)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(),
+ MethodNoTemplateArguments)
<< SecondMethodType << SecondName
<< (SecondTemplateArgs != nullptr);
@@ -10439,14 +10741,15 @@ void ASTReader::diagnoseOdrViolations() {
ExpandTemplateArgumentList(SecondTemplateArgs);
if (FirstExpandedList.size() != SecondExpandedList.size()) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(),
- MethodDifferentNumberTemplateArguments)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(),
+ MethodDifferentNumberTemplateArguments)
<< FirstMethodType << FirstName
<< (unsigned)FirstExpandedList.size();
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(),
- MethodDifferentNumberTemplateArguments)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(),
+ MethodDifferentNumberTemplateArguments)
<< SecondMethodType << SecondName
<< (unsigned)SecondExpandedList.size();
@@ -10463,13 +10766,13 @@ void ASTReader::diagnoseOdrViolations() {
continue;
}
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(),
- MethodDifferentTemplateArgument)
+ ODRDiagDeclError(
+ FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodDifferentTemplateArgument)
<< FirstMethodType << FirstName << FirstTA << i + 1;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(),
- MethodDifferentTemplateArgument)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(),
+ MethodDifferentTemplateArgument)
<< SecondMethodType << SecondName << SecondTA << i + 1;
TemplateArgumentMismatch = true;
@@ -10498,22 +10801,22 @@ void ASTReader::diagnoseOdrViolations() {
ComputeCXXMethodODRHash(SecondMethod) != SecondMethod->getODRHash();
if (HasFirstBody != HasSecondBody) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodSingleBody)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodSingleBody)
<< FirstMethodType << FirstName << HasFirstBody;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodSingleBody)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodSingleBody)
<< SecondMethodType << SecondName << HasSecondBody;
Diagnosed = true;
break;
}
if (HasFirstBody && HasSecondBody) {
- ODRDiagError(FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodDifferentBody)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodDifferentBody)
<< FirstMethodType << FirstName;
- ODRDiagNote(SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodDifferentBody)
+ ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodDifferentBody)
<< SecondMethodType << SecondName;
Diagnosed = true;
break;
@@ -10523,105 +10826,16 @@ void ASTReader::diagnoseOdrViolations() {
}
case TypeAlias:
case TypeDef: {
- TypedefNameDecl *FirstTD = cast<TypedefNameDecl>(FirstDecl);
- TypedefNameDecl *SecondTD = cast<TypedefNameDecl>(SecondDecl);
- auto FirstName = FirstTD->getDeclName();
- auto SecondName = SecondTD->getDeclName();
- if (FirstName != SecondName) {
- ODRDiagError(FirstTD->getLocation(), FirstTD->getSourceRange(),
- TypedefName)
- << (FirstDiffType == TypeAlias) << FirstName;
- ODRDiagNote(SecondTD->getLocation(), SecondTD->getSourceRange(),
- TypedefName)
- << (FirstDiffType == TypeAlias) << SecondName;
- Diagnosed = true;
- break;
- }
-
- QualType FirstType = FirstTD->getUnderlyingType();
- QualType SecondType = SecondTD->getUnderlyingType();
- if (ComputeQualTypeODRHash(FirstType) !=
- ComputeQualTypeODRHash(SecondType)) {
- ODRDiagError(FirstTD->getLocation(), FirstTD->getSourceRange(),
- TypedefType)
- << (FirstDiffType == TypeAlias) << FirstName << FirstType;
- ODRDiagNote(SecondTD->getLocation(), SecondTD->getSourceRange(),
- TypedefType)
- << (FirstDiffType == TypeAlias) << SecondName << SecondType;
- Diagnosed = true;
- break;
- }
+ Diagnosed = ODRDiagTypeDefOrAlias(
+ FirstRecord, FirstModule, SecondModule,
+ cast<TypedefNameDecl>(FirstDecl), cast<TypedefNameDecl>(SecondDecl),
+ FirstDiffType == TypeAlias);
break;
}
case Var: {
- VarDecl *FirstVD = cast<VarDecl>(FirstDecl);
- VarDecl *SecondVD = cast<VarDecl>(SecondDecl);
- auto FirstName = FirstVD->getDeclName();
- auto SecondName = SecondVD->getDeclName();
- if (FirstName != SecondName) {
- ODRDiagError(FirstVD->getLocation(), FirstVD->getSourceRange(),
- VarName)
- << FirstName;
- ODRDiagNote(SecondVD->getLocation(), SecondVD->getSourceRange(),
- VarName)
- << SecondName;
- Diagnosed = true;
- break;
- }
-
- QualType FirstType = FirstVD->getType();
- QualType SecondType = SecondVD->getType();
- if (ComputeQualTypeODRHash(FirstType) !=
- ComputeQualTypeODRHash(SecondType)) {
- ODRDiagError(FirstVD->getLocation(), FirstVD->getSourceRange(),
- VarType)
- << FirstName << FirstType;
- ODRDiagNote(SecondVD->getLocation(), SecondVD->getSourceRange(),
- VarType)
- << SecondName << SecondType;
- Diagnosed = true;
- break;
- }
-
- const Expr *FirstInit = FirstVD->getInit();
- const Expr *SecondInit = SecondVD->getInit();
- if ((FirstInit == nullptr) != (SecondInit == nullptr)) {
- ODRDiagError(FirstVD->getLocation(), FirstVD->getSourceRange(),
- VarSingleInitializer)
- << FirstName << (FirstInit == nullptr)
- << (FirstInit ? FirstInit->getSourceRange(): SourceRange());
- ODRDiagNote(SecondVD->getLocation(), SecondVD->getSourceRange(),
- VarSingleInitializer)
- << SecondName << (SecondInit == nullptr)
- << (SecondInit ? SecondInit->getSourceRange() : SourceRange());
- Diagnosed = true;
- break;
- }
-
- if (FirstInit && SecondInit &&
- ComputeODRHash(FirstInit) != ComputeODRHash(SecondInit)) {
- ODRDiagError(FirstVD->getLocation(), FirstVD->getSourceRange(),
- VarDifferentInitializer)
- << FirstName << FirstInit->getSourceRange();
- ODRDiagNote(SecondVD->getLocation(), SecondVD->getSourceRange(),
- VarDifferentInitializer)
- << SecondName << SecondInit->getSourceRange();
- Diagnosed = true;
- break;
- }
-
- const bool FirstIsConstexpr = FirstVD->isConstexpr();
- const bool SecondIsConstexpr = SecondVD->isConstexpr();
- if (FirstIsConstexpr != SecondIsConstexpr) {
- ODRDiagError(FirstVD->getLocation(), FirstVD->getSourceRange(),
- VarConstexpr)
- << FirstName << FirstIsConstexpr;
- ODRDiagNote(SecondVD->getLocation(), SecondVD->getSourceRange(),
- VarConstexpr)
- << SecondName << SecondIsConstexpr;
- Diagnosed = true;
- break;
- }
+ Diagnosed =
+ ODRDiagVar(FirstRecord, FirstModule, SecondModule,
+ cast<VarDecl>(FirstDecl), cast<VarDecl>(SecondDecl));
break;
}
case Friend: {
@@ -10635,11 +10849,12 @@ void ASTReader::diagnoseOdrViolations() {
TypeSourceInfo *SecondTSI = SecondFriend->getFriendType();
if (FirstND && SecondND) {
- ODRDiagError(FirstFriend->getFriendLoc(),
- FirstFriend->getSourceRange(), FriendFunction)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstFriend->getFriendLoc(),
+ FirstFriend->getSourceRange(), FriendFunction)
<< FirstND;
- ODRDiagNote(SecondFriend->getFriendLoc(),
- SecondFriend->getSourceRange(), FriendFunction)
+ ODRDiagDeclNote(SecondModule, SecondFriend->getFriendLoc(),
+ SecondFriend->getSourceRange(), FriendFunction)
<< SecondND;
Diagnosed = true;
@@ -10651,21 +10866,22 @@ void ASTReader::diagnoseOdrViolations() {
QualType SecondFriendType = SecondTSI->getType();
assert(ComputeQualTypeODRHash(FirstFriendType) !=
ComputeQualTypeODRHash(SecondFriendType));
- ODRDiagError(FirstFriend->getFriendLoc(),
- FirstFriend->getSourceRange(), FriendType)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstFriend->getFriendLoc(),
+ FirstFriend->getSourceRange(), FriendType)
<< FirstFriendType;
- ODRDiagNote(SecondFriend->getFriendLoc(),
- SecondFriend->getSourceRange(), FriendType)
+ ODRDiagDeclNote(SecondModule, SecondFriend->getFriendLoc(),
+ SecondFriend->getSourceRange(), FriendType)
<< SecondFriendType;
Diagnosed = true;
break;
}
- ODRDiagError(FirstFriend->getFriendLoc(), FirstFriend->getSourceRange(),
- FriendTypeFunction)
+ ODRDiagDeclError(FirstRecord, FirstModule, FirstFriend->getFriendLoc(),
+ FirstFriend->getSourceRange(), FriendTypeFunction)
<< (FirstTSI == nullptr);
- ODRDiagNote(SecondFriend->getFriendLoc(),
- SecondFriend->getSourceRange(), FriendTypeFunction)
+ ODRDiagDeclNote(SecondModule, SecondFriend->getFriendLoc(),
+ SecondFriend->getSourceRange(), FriendTypeFunction)
<< (SecondTSI == nullptr);
Diagnosed = true;
@@ -10683,14 +10899,15 @@ void ASTReader::diagnoseOdrViolations() {
SecondTemplate->getTemplateParameters();
if (FirstTPL->size() != SecondTPL->size()) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateDifferentNumberParameters)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateDifferentNumberParameters)
<< FirstTemplate << FirstTPL->size();
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateDifferentNumberParameters)
- << SecondTemplate << SecondTPL->size();
+ ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateDifferentNumberParameters)
+ << SecondTemplate << SecondTPL->size();
Diagnosed = true;
break;
@@ -10720,13 +10937,14 @@ void ASTReader::diagnoseOdrViolations() {
}
};
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentKind)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentKind)
<< FirstTemplate << (i + 1) << GetParamType(FirstParam);
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentKind)
+ ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentKind)
<< SecondTemplate << (i + 1) << GetParamType(SecondParam);
ParameterMismatch = true;
@@ -10734,14 +10952,14 @@ void ASTReader::diagnoseOdrViolations() {
}
if (FirstParam->getName() != SecondParam->getName()) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterName)
+ ODRDiagDeclError(
+ FirstRecord, FirstModule, FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(), FunctionTemplateParameterName)
<< FirstTemplate << (i + 1) << (bool)FirstParam->getIdentifier()
<< FirstParam;
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterName)
+ ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterName)
<< SecondTemplate << (i + 1)
<< (bool)SecondParam->getIdentifier() << SecondParam;
ParameterMismatch = true;
@@ -10761,13 +10979,14 @@ void ASTReader::diagnoseOdrViolations() {
SecondTTPD->hasDefaultArgument() &&
!SecondTTPD->defaultArgumentWasInherited();
if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterSingleDefaultArgument)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterSingleDefaultArgument)
<< FirstTemplate << (i + 1) << HasFirstDefaultArgument;
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterSingleDefaultArgument)
+ ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterSingleDefaultArgument)
<< SecondTemplate << (i + 1) << HasSecondDefaultArgument;
ParameterMismatch = true;
break;
@@ -10778,13 +10997,15 @@ void ASTReader::diagnoseOdrViolations() {
QualType SecondType = SecondTTPD->getDefaultArgument();
if (ComputeQualTypeODRHash(FirstType) !=
ComputeQualTypeODRHash(SecondType)) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentDefaultArgument)
+ ODRDiagDeclError(
+ FirstRecord, FirstModule, FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentDefaultArgument)
<< FirstTemplate << (i + 1) << FirstType;
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentDefaultArgument)
+ ODRDiagDeclNote(
+ SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentDefaultArgument)
<< SecondTemplate << (i + 1) << SecondType;
ParameterMismatch = true;
break;
@@ -10793,13 +11014,14 @@ void ASTReader::diagnoseOdrViolations() {
if (FirstTTPD->isParameterPack() !=
SecondTTPD->isParameterPack()) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplatePackParameter)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplatePackParameter)
<< FirstTemplate << (i + 1) << FirstTTPD->isParameterPack();
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplatePackParameter)
+ ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplatePackParameter)
<< SecondTemplate << (i + 1) << SecondTTPD->isParameterPack();
ParameterMismatch = true;
break;
@@ -10820,13 +11042,14 @@ void ASTReader::diagnoseOdrViolations() {
if (ComputeTemplateParameterListODRHash(FirstTPL) !=
ComputeTemplateParameterListODRHash(SecondTPL)) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentType)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentType)
<< FirstTemplate << (i + 1);
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentType)
+ ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentType)
<< SecondTemplate << (i + 1);
ParameterMismatch = true;
break;
@@ -10839,13 +11062,14 @@ void ASTReader::diagnoseOdrViolations() {
SecondTTPD->hasDefaultArgument() &&
!SecondTTPD->defaultArgumentWasInherited();
if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterSingleDefaultArgument)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterSingleDefaultArgument)
<< FirstTemplate << (i + 1) << HasFirstDefaultArgument;
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterSingleDefaultArgument)
+ ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterSingleDefaultArgument)
<< SecondTemplate << (i + 1) << HasSecondDefaultArgument;
ParameterMismatch = true;
break;
@@ -10858,13 +11082,15 @@ void ASTReader::diagnoseOdrViolations() {
SecondTTPD->getDefaultArgument().getArgument();
if (ComputeTemplateArgumentODRHash(FirstTA) !=
ComputeTemplateArgumentODRHash(SecondTA)) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentDefaultArgument)
+ ODRDiagDeclError(
+ FirstRecord, FirstModule, FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentDefaultArgument)
<< FirstTemplate << (i + 1) << FirstTA;
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentDefaultArgument)
+ ODRDiagDeclNote(
+ SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentDefaultArgument)
<< SecondTemplate << (i + 1) << SecondTA;
ParameterMismatch = true;
break;
@@ -10873,13 +11099,14 @@ void ASTReader::diagnoseOdrViolations() {
if (FirstTTPD->isParameterPack() !=
SecondTTPD->isParameterPack()) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplatePackParameter)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplatePackParameter)
<< FirstTemplate << (i + 1) << FirstTTPD->isParameterPack();
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplatePackParameter)
+ ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplatePackParameter)
<< SecondTemplate << (i + 1) << SecondTTPD->isParameterPack();
ParameterMismatch = true;
break;
@@ -10897,13 +11124,14 @@ void ASTReader::diagnoseOdrViolations() {
QualType SecondType = SecondNTTPD->getType();
if (ComputeQualTypeODRHash(FirstType) !=
ComputeQualTypeODRHash(SecondType)) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentType)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentType)
<< FirstTemplate << (i + 1);
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentType)
+ ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentType)
<< SecondTemplate << (i + 1);
ParameterMismatch = true;
break;
@@ -10916,13 +11144,14 @@ void ASTReader::diagnoseOdrViolations() {
SecondNTTPD->hasDefaultArgument() &&
!SecondNTTPD->defaultArgumentWasInherited();
if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterSingleDefaultArgument)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterSingleDefaultArgument)
<< FirstTemplate << (i + 1) << HasFirstDefaultArgument;
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterSingleDefaultArgument)
+ ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterSingleDefaultArgument)
<< SecondTemplate << (i + 1) << HasSecondDefaultArgument;
ParameterMismatch = true;
break;
@@ -10933,13 +11162,15 @@ void ASTReader::diagnoseOdrViolations() {
Expr *SecondDefaultArgument = SecondNTTPD->getDefaultArgument();
if (ComputeODRHash(FirstDefaultArgument) !=
ComputeODRHash(SecondDefaultArgument)) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentDefaultArgument)
+ ODRDiagDeclError(
+ FirstRecord, FirstModule, FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentDefaultArgument)
<< FirstTemplate << (i + 1) << FirstDefaultArgument;
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentDefaultArgument)
+ ODRDiagDeclNote(
+ SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentDefaultArgument)
<< SecondTemplate << (i + 1) << SecondDefaultArgument;
ParameterMismatch = true;
break;
@@ -10948,13 +11179,14 @@ void ASTReader::diagnoseOdrViolations() {
if (FirstNTTPD->isParameterPack() !=
SecondNTTPD->isParameterPack()) {
- ODRDiagError(FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplatePackParameter)
+ ODRDiagDeclError(FirstRecord, FirstModule,
+ FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplatePackParameter)
<< FirstTemplate << (i + 1) << FirstNTTPD->isParameterPack();
- ODRDiagNote(SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplatePackParameter)
+ ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplatePackParameter)
<< SecondTemplate << (i + 1)
<< SecondNTTPD->isParameterPack();
ParameterMismatch = true;
@@ -11181,7 +11413,7 @@ void ASTReader::diagnoseOdrViolations() {
for (auto *D : Enum->decls()) {
// Due to decl merging, the first EnumDecl is the parent of
// Decls in both records.
- if (!ODRHash::isWhitelistedDecl(D, FirstEnum))
+ if (!ODRHash::isDeclToBeProcessed(D, FirstEnum))
continue;
assert(isa<EnumConstantDecl>(D) && "Unexpected Decl kind");
Hashes.emplace_back(cast<EnumConstantDecl>(D),
@@ -11494,8 +11726,8 @@ public:
OMPClauseReader(ASTRecordReader &Record)
: Record(Record), Context(Record.getContext()) {}
-#define OPENMP_CLAUSE(Name, Class) void Visit##Class(Class *C);
-#include "clang/Basic/OpenMPKinds.def"
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) void Visit##Class(Class *C);
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
OMPClause *readClause();
void VisitOMPClauseWithPreInit(OMPClauseWithPreInit *C);
void VisitOMPClauseWithPostUpdate(OMPClauseWithPostUpdate *C);
@@ -11509,134 +11741,152 @@ OMPClause *ASTRecordReader::readOMPClause() {
OMPClause *OMPClauseReader::readClause() {
OMPClause *C = nullptr;
- switch (Record.readInt()) {
- case OMPC_if:
+ switch (llvm::omp::Clause(Record.readInt())) {
+ case llvm::omp::OMPC_if:
C = new (Context) OMPIfClause();
break;
- case OMPC_final:
+ case llvm::omp::OMPC_final:
C = new (Context) OMPFinalClause();
break;
- case OMPC_num_threads:
+ case llvm::omp::OMPC_num_threads:
C = new (Context) OMPNumThreadsClause();
break;
- case OMPC_safelen:
+ case llvm::omp::OMPC_safelen:
C = new (Context) OMPSafelenClause();
break;
- case OMPC_simdlen:
+ case llvm::omp::OMPC_simdlen:
C = new (Context) OMPSimdlenClause();
break;
- case OMPC_allocator:
+ case llvm::omp::OMPC_allocator:
C = new (Context) OMPAllocatorClause();
break;
- case OMPC_collapse:
+ case llvm::omp::OMPC_collapse:
C = new (Context) OMPCollapseClause();
break;
- case OMPC_default:
+ case llvm::omp::OMPC_default:
C = new (Context) OMPDefaultClause();
break;
- case OMPC_proc_bind:
+ case llvm::omp::OMPC_proc_bind:
C = new (Context) OMPProcBindClause();
break;
- case OMPC_schedule:
+ case llvm::omp::OMPC_schedule:
C = new (Context) OMPScheduleClause();
break;
- case OMPC_ordered:
+ case llvm::omp::OMPC_ordered:
C = OMPOrderedClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_nowait:
+ case llvm::omp::OMPC_nowait:
C = new (Context) OMPNowaitClause();
break;
- case OMPC_untied:
+ case llvm::omp::OMPC_untied:
C = new (Context) OMPUntiedClause();
break;
- case OMPC_mergeable:
+ case llvm::omp::OMPC_mergeable:
C = new (Context) OMPMergeableClause();
break;
- case OMPC_read:
+ case llvm::omp::OMPC_read:
C = new (Context) OMPReadClause();
break;
- case OMPC_write:
+ case llvm::omp::OMPC_write:
C = new (Context) OMPWriteClause();
break;
- case OMPC_update:
- C = new (Context) OMPUpdateClause();
+ case llvm::omp::OMPC_update:
+ C = OMPUpdateClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_capture:
+ case llvm::omp::OMPC_capture:
C = new (Context) OMPCaptureClause();
break;
- case OMPC_seq_cst:
+ case llvm::omp::OMPC_seq_cst:
C = new (Context) OMPSeqCstClause();
break;
- case OMPC_threads:
+ case llvm::omp::OMPC_acq_rel:
+ C = new (Context) OMPAcqRelClause();
+ break;
+ case llvm::omp::OMPC_acquire:
+ C = new (Context) OMPAcquireClause();
+ break;
+ case llvm::omp::OMPC_release:
+ C = new (Context) OMPReleaseClause();
+ break;
+ case llvm::omp::OMPC_relaxed:
+ C = new (Context) OMPRelaxedClause();
+ break;
+ case llvm::omp::OMPC_threads:
C = new (Context) OMPThreadsClause();
break;
- case OMPC_simd:
+ case llvm::omp::OMPC_simd:
C = new (Context) OMPSIMDClause();
break;
- case OMPC_nogroup:
+ case llvm::omp::OMPC_nogroup:
C = new (Context) OMPNogroupClause();
break;
- case OMPC_unified_address:
+ case llvm::omp::OMPC_unified_address:
C = new (Context) OMPUnifiedAddressClause();
break;
- case OMPC_unified_shared_memory:
+ case llvm::omp::OMPC_unified_shared_memory:
C = new (Context) OMPUnifiedSharedMemoryClause();
break;
- case OMPC_reverse_offload:
+ case llvm::omp::OMPC_reverse_offload:
C = new (Context) OMPReverseOffloadClause();
break;
- case OMPC_dynamic_allocators:
+ case llvm::omp::OMPC_dynamic_allocators:
C = new (Context) OMPDynamicAllocatorsClause();
break;
- case OMPC_atomic_default_mem_order:
+ case llvm::omp::OMPC_atomic_default_mem_order:
C = new (Context) OMPAtomicDefaultMemOrderClause();
break;
- case OMPC_private:
+ case llvm::omp::OMPC_private:
C = OMPPrivateClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_firstprivate:
+ case llvm::omp::OMPC_firstprivate:
C = OMPFirstprivateClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_lastprivate:
+ case llvm::omp::OMPC_lastprivate:
C = OMPLastprivateClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_shared:
+ case llvm::omp::OMPC_shared:
C = OMPSharedClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_reduction:
- C = OMPReductionClause::CreateEmpty(Context, Record.readInt());
+ case llvm::omp::OMPC_reduction: {
+ unsigned N = Record.readInt();
+ auto Modifier = Record.readEnum<OpenMPReductionClauseModifier>();
+ C = OMPReductionClause::CreateEmpty(Context, N, Modifier);
break;
- case OMPC_task_reduction:
+ }
+ case llvm::omp::OMPC_task_reduction:
C = OMPTaskReductionClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_in_reduction:
+ case llvm::omp::OMPC_in_reduction:
C = OMPInReductionClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_linear:
+ case llvm::omp::OMPC_linear:
C = OMPLinearClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_aligned:
+ case llvm::omp::OMPC_aligned:
C = OMPAlignedClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_copyin:
+ case llvm::omp::OMPC_copyin:
C = OMPCopyinClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_copyprivate:
+ case llvm::omp::OMPC_copyprivate:
C = OMPCopyprivateClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_flush:
+ case llvm::omp::OMPC_flush:
C = OMPFlushClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_depend: {
+ case llvm::omp::OMPC_depobj:
+ C = OMPDepobjClause::CreateEmpty(Context);
+ break;
+ case llvm::omp::OMPC_depend: {
unsigned NumVars = Record.readInt();
unsigned NumLoops = Record.readInt();
C = OMPDependClause::CreateEmpty(Context, NumVars, NumLoops);
break;
}
- case OMPC_device:
+ case llvm::omp::OMPC_device:
C = new (Context) OMPDeviceClause();
break;
- case OMPC_map: {
+ case llvm::omp::OMPC_map: {
OMPMappableExprListSizeTy Sizes;
Sizes.NumVars = Record.readInt();
Sizes.NumUniqueDeclarations = Record.readInt();
@@ -11645,31 +11895,31 @@ OMPClause *OMPClauseReader::readClause() {
C = OMPMapClause::CreateEmpty(Context, Sizes);
break;
}
- case OMPC_num_teams:
+ case llvm::omp::OMPC_num_teams:
C = new (Context) OMPNumTeamsClause();
break;
- case OMPC_thread_limit:
+ case llvm::omp::OMPC_thread_limit:
C = new (Context) OMPThreadLimitClause();
break;
- case OMPC_priority:
+ case llvm::omp::OMPC_priority:
C = new (Context) OMPPriorityClause();
break;
- case OMPC_grainsize:
+ case llvm::omp::OMPC_grainsize:
C = new (Context) OMPGrainsizeClause();
break;
- case OMPC_num_tasks:
+ case llvm::omp::OMPC_num_tasks:
C = new (Context) OMPNumTasksClause();
break;
- case OMPC_hint:
+ case llvm::omp::OMPC_hint:
C = new (Context) OMPHintClause();
break;
- case OMPC_dist_schedule:
+ case llvm::omp::OMPC_dist_schedule:
C = new (Context) OMPDistScheduleClause();
break;
- case OMPC_defaultmap:
+ case llvm::omp::OMPC_defaultmap:
C = new (Context) OMPDefaultmapClause();
break;
- case OMPC_to: {
+ case llvm::omp::OMPC_to: {
OMPMappableExprListSizeTy Sizes;
Sizes.NumVars = Record.readInt();
Sizes.NumUniqueDeclarations = Record.readInt();
@@ -11678,7 +11928,7 @@ OMPClause *OMPClauseReader::readClause() {
C = OMPToClause::CreateEmpty(Context, Sizes);
break;
}
- case OMPC_from: {
+ case llvm::omp::OMPC_from: {
OMPMappableExprListSizeTy Sizes;
Sizes.NumVars = Record.readInt();
Sizes.NumUniqueDeclarations = Record.readInt();
@@ -11687,7 +11937,7 @@ OMPClause *OMPClauseReader::readClause() {
C = OMPFromClause::CreateEmpty(Context, Sizes);
break;
}
- case OMPC_use_device_ptr: {
+ case llvm::omp::OMPC_use_device_ptr: {
OMPMappableExprListSizeTy Sizes;
Sizes.NumVars = Record.readInt();
Sizes.NumUniqueDeclarations = Record.readInt();
@@ -11696,7 +11946,16 @@ OMPClause *OMPClauseReader::readClause() {
C = OMPUseDevicePtrClause::CreateEmpty(Context, Sizes);
break;
}
- case OMPC_is_device_ptr: {
+ case llvm::omp::OMPC_use_device_addr: {
+ OMPMappableExprListSizeTy Sizes;
+ Sizes.NumVars = Record.readInt();
+ Sizes.NumUniqueDeclarations = Record.readInt();
+ Sizes.NumComponentLists = Record.readInt();
+ Sizes.NumComponents = Record.readInt();
+ C = OMPUseDeviceAddrClause::CreateEmpty(Context, Sizes);
+ break;
+ }
+ case llvm::omp::OMPC_is_device_ptr: {
OMPMappableExprListSizeTy Sizes;
Sizes.NumVars = Record.readInt();
Sizes.NumUniqueDeclarations = Record.readInt();
@@ -11705,12 +11964,39 @@ OMPClause *OMPClauseReader::readClause() {
C = OMPIsDevicePtrClause::CreateEmpty(Context, Sizes);
break;
}
- case OMPC_allocate:
+ case llvm::omp::OMPC_allocate:
C = OMPAllocateClause::CreateEmpty(Context, Record.readInt());
break;
- case OMPC_nontemporal:
+ case llvm::omp::OMPC_nontemporal:
C = OMPNontemporalClause::CreateEmpty(Context, Record.readInt());
break;
+ case llvm::omp::OMPC_inclusive:
+ C = OMPInclusiveClause::CreateEmpty(Context, Record.readInt());
+ break;
+ case llvm::omp::OMPC_exclusive:
+ C = OMPExclusiveClause::CreateEmpty(Context, Record.readInt());
+ break;
+ case llvm::omp::OMPC_order:
+ C = new (Context) OMPOrderClause();
+ break;
+ case llvm::omp::OMPC_destroy:
+ C = new (Context) OMPDestroyClause();
+ break;
+ case llvm::omp::OMPC_detach:
+ C = new (Context) OMPDetachClause();
+ break;
+ case llvm::omp::OMPC_uses_allocators:
+ C = OMPUsesAllocatorsClause::CreateEmpty(Context, Record.readInt());
+ break;
+ case llvm::omp::OMPC_affinity:
+ C = OMPAffinityClause::CreateEmpty(Context, Record.readInt());
+ break;
+#define OMP_CLAUSE_NO_CLASS(Enum, Str) \
+ case llvm::omp::Enum: \
+ break;
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
+ default:
+ break;
}
assert(C && "Unknown OMPClause type");
@@ -11773,8 +12059,7 @@ void OMPClauseReader::VisitOMPCollapseClause(OMPCollapseClause *C) {
}
void OMPClauseReader::VisitOMPDefaultClause(OMPDefaultClause *C) {
- C->setDefaultKind(
- static_cast<OpenMPDefaultClauseKind>(Record.readInt()));
+ C->setDefaultKind(static_cast<llvm::omp::DefaultKind>(Record.readInt()));
C->setLParenLoc(Record.readSourceLocation());
C->setDefaultKindKwLoc(Record.readSourceLocation());
}
@@ -11810,6 +12095,11 @@ void OMPClauseReader::VisitOMPOrderedClause(OMPOrderedClause *C) {
C->setLParenLoc(Record.readSourceLocation());
}
+void OMPClauseReader::VisitOMPDetachClause(OMPDetachClause *C) {
+ C->setEventHandler(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
void OMPClauseReader::VisitOMPNowaitClause(OMPNowaitClause *) {}
void OMPClauseReader::VisitOMPUntiedClause(OMPUntiedClause *) {}
@@ -11820,18 +12110,34 @@ void OMPClauseReader::VisitOMPReadClause(OMPReadClause *) {}
void OMPClauseReader::VisitOMPWriteClause(OMPWriteClause *) {}
-void OMPClauseReader::VisitOMPUpdateClause(OMPUpdateClause *) {}
+void OMPClauseReader::VisitOMPUpdateClause(OMPUpdateClause *C) {
+ if (C->isExtended()) {
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setArgumentLoc(Record.readSourceLocation());
+ C->setDependencyKind(Record.readEnum<OpenMPDependClauseKind>());
+ }
+}
void OMPClauseReader::VisitOMPCaptureClause(OMPCaptureClause *) {}
void OMPClauseReader::VisitOMPSeqCstClause(OMPSeqCstClause *) {}
+void OMPClauseReader::VisitOMPAcqRelClause(OMPAcqRelClause *) {}
+
+void OMPClauseReader::VisitOMPAcquireClause(OMPAcquireClause *) {}
+
+void OMPClauseReader::VisitOMPReleaseClause(OMPReleaseClause *) {}
+
+void OMPClauseReader::VisitOMPRelaxedClause(OMPRelaxedClause *) {}
+
void OMPClauseReader::VisitOMPThreadsClause(OMPThreadsClause *) {}
void OMPClauseReader::VisitOMPSIMDClause(OMPSIMDClause *) {}
void OMPClauseReader::VisitOMPNogroupClause(OMPNogroupClause *) {}
+void OMPClauseReader::VisitOMPDestroyClause(OMPDestroyClause *) {}
+
void OMPClauseReader::VisitOMPUnifiedAddressClause(OMPUnifiedAddressClause *) {}
void OMPClauseReader::VisitOMPUnifiedSharedMemoryClause(
@@ -11927,6 +12233,7 @@ void OMPClauseReader::VisitOMPSharedClause(OMPSharedClause *C) {
void OMPClauseReader::VisitOMPReductionClause(OMPReductionClause *C) {
VisitOMPClauseWithPostUpdate(C);
C->setLParenLoc(Record.readSourceLocation());
+ C->setModifierLoc(Record.readSourceLocation());
C->setColonLoc(Record.readSourceLocation());
NestedNameSpecifierLoc NNSL = Record.readNestedNameSpecifierLoc();
DeclarationNameInfo DNI = Record.readDeclarationNameInfo();
@@ -11955,6 +12262,20 @@ void OMPClauseReader::VisitOMPReductionClause(OMPReductionClause *C) {
for (unsigned i = 0; i != NumVars; ++i)
Vars.push_back(Record.readSubExpr());
C->setReductionOps(Vars);
+ if (C->getModifier() == OMPC_REDUCTION_inscan) {
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setInscanCopyOps(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setInscanCopyArrayTemps(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setInscanCopyArrayElems(Vars);
+ }
}
void OMPClauseReader::VisitOMPTaskReductionClause(OMPTaskReductionClause *C) {
@@ -12129,8 +12450,14 @@ void OMPClauseReader::VisitOMPFlushClause(OMPFlushClause *C) {
C->setVarRefs(Vars);
}
+void OMPClauseReader::VisitOMPDepobjClause(OMPDepobjClause *C) {
+ C->setDepobj(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
void OMPClauseReader::VisitOMPDependClause(OMPDependClause *C) {
C->setLParenLoc(Record.readSourceLocation());
+ C->setModifier(Record.readSubExpr());
C->setDependencyKind(
static_cast<OpenMPDependClauseKind>(Record.readInt()));
C->setDependencyLoc(Record.readSourceLocation());
@@ -12147,13 +12474,15 @@ void OMPClauseReader::VisitOMPDependClause(OMPDependClause *C) {
void OMPClauseReader::VisitOMPDeviceClause(OMPDeviceClause *C) {
VisitOMPClauseWithPreInit(C);
+ C->setModifier(Record.readEnum<OpenMPDeviceClauseModifier>());
C->setDevice(Record.readSubExpr());
+ C->setModifierLoc(Record.readSourceLocation());
C->setLParenLoc(Record.readSourceLocation());
}
void OMPClauseReader::VisitOMPMapClause(OMPMapClause *C) {
C->setLParenLoc(Record.readSourceLocation());
- for (unsigned I = 0; I < OMPMapClause::NumberOfModifiers; ++I) {
+ for (unsigned I = 0; I < NumberOfOMPMapClauseModifiers; ++I) {
C->setMapTypeModifier(
I, static_cast<OpenMPMapModifierKind>(Record.readInt()));
C->setMapTypeModifierLoc(I, Record.readSourceLocation());
@@ -12427,6 +12756,48 @@ void OMPClauseReader::VisitOMPUseDevicePtrClause(OMPUseDevicePtrClause *C) {
C->setComponents(Components, ListSizes);
}
+void OMPClauseReader::VisitOMPUseDeviceAddrClause(OMPUseDeviceAddrClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ auto NumVars = C->varlist_size();
+ auto UniqueDecls = C->getUniqueDeclarationsNum();
+ auto TotalLists = C->getTotalComponentListNum();
+ auto TotalComponents = C->getTotalComponentsNum();
+
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+
+ SmallVector<ValueDecl *, 16> Decls;
+ Decls.reserve(UniqueDecls);
+ for (unsigned i = 0; i < UniqueDecls; ++i)
+ Decls.push_back(Record.readDeclAs<ValueDecl>());
+ C->setUniqueDecls(Decls);
+
+ SmallVector<unsigned, 16> ListsPerDecl;
+ ListsPerDecl.reserve(UniqueDecls);
+ for (unsigned i = 0; i < UniqueDecls; ++i)
+ ListsPerDecl.push_back(Record.readInt());
+ C->setDeclNumLists(ListsPerDecl);
+
+ SmallVector<unsigned, 32> ListSizes;
+ ListSizes.reserve(TotalLists);
+ for (unsigned i = 0; i < TotalLists; ++i)
+ ListSizes.push_back(Record.readInt());
+ C->setComponentListSizes(ListSizes);
+
+ SmallVector<OMPClauseMappableExprCommon::MappableComponent, 32> Components;
+ Components.reserve(TotalComponents);
+ for (unsigned i = 0; i < TotalComponents; ++i) {
+ Expr *AssociatedExpr = Record.readSubExpr();
+ auto *AssociatedDecl = Record.readDeclAs<ValueDecl>();
+ Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
+ AssociatedExpr, AssociatedDecl));
+ }
+ C->setComponents(Components, ListSizes);
+}
+
void OMPClauseReader::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
C->setLParenLoc(Record.readSourceLocation());
auto NumVars = C->varlist_size();
@@ -12484,3 +12855,75 @@ void OMPClauseReader::VisitOMPNontemporalClause(OMPNontemporalClause *C) {
Vars.push_back(Record.readSubExpr());
C->setPrivateRefs(Vars);
}
+
+void OMPClauseReader::VisitOMPInclusiveClause(OMPInclusiveClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+}
+
+void OMPClauseReader::VisitOMPExclusiveClause(OMPExclusiveClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+}
+
+void OMPClauseReader::VisitOMPUsesAllocatorsClause(OMPUsesAllocatorsClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ unsigned NumOfAllocators = C->getNumberOfAllocators();
+ SmallVector<OMPUsesAllocatorsClause::Data, 4> Data;
+ Data.reserve(NumOfAllocators);
+ for (unsigned I = 0; I != NumOfAllocators; ++I) {
+ OMPUsesAllocatorsClause::Data &D = Data.emplace_back();
+ D.Allocator = Record.readSubExpr();
+ D.AllocatorTraits = Record.readSubExpr();
+ D.LParenLoc = Record.readSourceLocation();
+ D.RParenLoc = Record.readSourceLocation();
+ }
+ C->setAllocatorsData(Data);
+}
+
+void OMPClauseReader::VisitOMPAffinityClause(OMPAffinityClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setModifier(Record.readSubExpr());
+ C->setColonLoc(Record.readSourceLocation());
+ unsigned NumOfLocators = C->varlist_size();
+ SmallVector<Expr *, 4> Locators;
+ Locators.reserve(NumOfLocators);
+ for (unsigned I = 0; I != NumOfLocators; ++I)
+ Locators.push_back(Record.readSubExpr());
+ C->setVarRefs(Locators);
+}
+
+void OMPClauseReader::VisitOMPOrderClause(OMPOrderClause *C) {
+ C->setKind(Record.readEnum<OpenMPOrderClauseKind>());
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setKindKwLoc(Record.readSourceLocation());
+}
+
+OMPTraitInfo *ASTRecordReader::readOMPTraitInfo() {
+ OMPTraitInfo &TI = getContext().getNewOMPTraitInfo();
+ TI.Sets.resize(readUInt32());
+ for (auto &Set : TI.Sets) {
+ Set.Kind = readEnum<llvm::omp::TraitSet>();
+ Set.Selectors.resize(readUInt32());
+ for (auto &Selector : Set.Selectors) {
+ Selector.Kind = readEnum<llvm::omp::TraitSelector>();
+ Selector.ScoreOrCondition = nullptr;
+ if (readBool())
+ Selector.ScoreOrCondition = readExprRef();
+ Selector.Properties.resize(readUInt32());
+ for (auto &Property : Selector.Properties)
+ Property.Kind = readEnum<llvm::omp::TraitProperty>();
+ }
+ }
+ return &TI;
+}
diff --git a/clang/lib/Serialization/ASTReaderDecl.cpp b/clang/lib/Serialization/ASTReaderDecl.cpp
index 96a7d5ae0a31..eef4ab16ec15 100644
--- a/clang/lib/Serialization/ASTReaderDecl.cpp
+++ b/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -365,6 +365,7 @@ namespace clang {
void VisitCXXConversionDecl(CXXConversionDecl *D);
void VisitFieldDecl(FieldDecl *FD);
void VisitMSPropertyDecl(MSPropertyDecl *FD);
+ void VisitMSGuidDecl(MSGuidDecl *D);
void VisitIndirectFieldDecl(IndirectFieldDecl *FD);
RedeclarableResult VisitVarDeclImpl(VarDecl *D);
void VisitVarDecl(VarDecl *VD) { VisitVarDeclImpl(VD); }
@@ -375,6 +376,7 @@ namespace clang {
void VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
DeclID VisitTemplateDecl(TemplateDecl *D);
void VisitConceptDecl(ConceptDecl *D);
+ void VisitRequiresExprBodyDecl(RequiresExprBodyDecl *D);
RedeclarableResult VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D);
void VisitClassTemplateDecl(ClassTemplateDecl *D);
void VisitBuiltinTemplateDecl(BuiltinTemplateDecl *D);
@@ -554,7 +556,7 @@ void ASTDeclReader::Visit(Decl *D) {
void ASTDeclReader::VisitDecl(Decl *D) {
if (D->isTemplateParameter() || D->isTemplateParameterPack() ||
- isa<ParmVarDecl>(D)) {
+ isa<ParmVarDecl>(D) || isa<ObjCTypeParamDecl>(D)) {
// We don't want to deserialize the DeclContext of a template
// parameter or of a parameter of a function template immediately. These
// entities might be used in the formulation of its DeclContext (for
@@ -1282,10 +1284,9 @@ void ASTDeclReader::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
QualType T = Record.readType();
TypeSourceInfo *TSI = readTypeSourceInfo();
D->setType(T, TSI);
- D->setPropertyAttributes(
- (ObjCPropertyDecl::PropertyAttributeKind)Record.readInt());
+ D->setPropertyAttributes((ObjCPropertyAttribute::Kind)Record.readInt());
D->setPropertyAttributesAsWritten(
- (ObjCPropertyDecl::PropertyAttributeKind)Record.readInt());
+ (ObjCPropertyAttribute::Kind)Record.readInt());
D->setPropertyImplementation(
(ObjCPropertyDecl::PropertyControl)Record.readInt());
DeclarationName GetterName = Record.readDeclarationName();
@@ -1361,6 +1362,19 @@ void ASTDeclReader::VisitMSPropertyDecl(MSPropertyDecl *PD) {
PD->SetterId = Record.readIdentifier();
}
+void ASTDeclReader::VisitMSGuidDecl(MSGuidDecl *D) {
+ VisitValueDecl(D);
+ D->PartVal.Part1 = Record.readInt();
+ D->PartVal.Part2 = Record.readInt();
+ D->PartVal.Part3 = Record.readInt();
+ for (auto &C : D->PartVal.Part4And5)
+ C = Record.readInt();
+
+ // Add this GUID to the AST context's lookup structure, and merge if needed.
+ if (MSGuidDecl *Existing = Reader.getContext().MSGuidDecls.GetOrInsertNode(D))
+ Reader.getContext().setPrimaryMergedDecl(D, Existing->getCanonicalDecl());
+}
+
void ASTDeclReader::VisitIndirectFieldDecl(IndirectFieldDecl *FD) {
VisitValueDecl(FD);
@@ -1979,8 +1993,8 @@ void ASTDeclReader::VisitCXXConversionDecl(CXXConversionDecl *D) {
void ASTDeclReader::VisitImportDecl(ImportDecl *D) {
VisitDecl(D);
- D->ImportedAndComplete.setPointer(readModule());
- D->ImportedAndComplete.setInt(Record.readInt());
+ D->ImportedModule = readModule();
+ D->setImportComplete(Record.readInt());
auto *StoredLocs = D->getTrailingObjects<SourceLocation>();
for (unsigned I = 0, N = Record.back(); I != N; ++I)
StoredLocs[I] = readSourceLocation();
@@ -2037,6 +2051,9 @@ void ASTDeclReader::VisitConceptDecl(ConceptDecl *D) {
mergeMergeable(D);
}
+void ASTDeclReader::VisitRequiresExprBodyDecl(RequiresExprBodyDecl *D) {
+}
+
ASTDeclReader::RedeclarableResult
ASTDeclReader::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) {
RedeclarableResult Redecl = VisitRedeclarable(D);
@@ -2313,12 +2330,12 @@ void ASTDeclReader::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
D->setDeclaredWithTypename(Record.readInt());
- if (Record.readInt()) {
+ if (Record.readBool()) {
NestedNameSpecifierLoc NNS = Record.readNestedNameSpecifierLoc();
DeclarationNameInfo DN = Record.readDeclarationNameInfo();
- ConceptDecl *NamedConcept = cast<ConceptDecl>(Record.readDecl());
+ ConceptDecl *NamedConcept = Record.readDeclAs<ConceptDecl>();
const ASTTemplateArgumentListInfo *ArgsAsWritten = nullptr;
- if (Record.readInt())
+ if (Record.readBool())
ArgsAsWritten = Record.readASTTemplateArgumentListInfo();
Expr *ImmediatelyDeclaredConstraint = Record.readExpr();
D->setTypeConstraint(NNS, DN, /*FoundDecl=*/nullptr, NamedConcept,
@@ -2336,6 +2353,8 @@ void ASTDeclReader::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
// TemplateParmPosition.
D->setDepth(Record.readInt());
D->setPosition(Record.readInt());
+ if (D->hasPlaceholderTypeConstraint())
+ D->setPlaceholderTypeConstraint(Record.readExpr());
if (D->isExpandedParameterPack()) {
auto TypesAndInfos =
D->getTrailingObjects<std::pair<QualType, TypeSourceInfo *>>();
@@ -2750,6 +2769,8 @@ public:
return Reader.readVersionTuple();
}
+ OMPTraitInfo *readOMPTraitInfo() { return Reader.readOMPTraitInfo(); }
+
template <typename T> T *GetLocalDeclAs(uint32_t LocalID) {
return Reader.GetLocalDeclAs<T>(LocalID);
}
@@ -2834,7 +2855,8 @@ static bool isConsumerInterestedIn(ASTContext &Ctx, Decl *D, bool HasBody) {
isa<PragmaDetectMismatchDecl>(D))
return true;
if (isa<OMPThreadPrivateDecl>(D) || isa<OMPDeclareReductionDecl>(D) ||
- isa<OMPDeclareMapperDecl>(D) || isa<OMPAllocateDecl>(D))
+ isa<OMPDeclareMapperDecl>(D) || isa<OMPAllocateDecl>(D) ||
+ isa<OMPRequiresDecl>(D))
return !D->getDeclContext()->isFunctionOrMethod();
if (const auto *Var = dyn_cast<VarDecl>(D))
return Var->isFileVarDecl() &&
@@ -2859,7 +2881,7 @@ ASTReader::DeclCursorForID(DeclID ID, SourceLocation &Loc) {
const DeclOffset &DOffs =
M->DeclOffsets[ID - M->BaseDeclID - NUM_PREDEF_DECL_IDS];
Loc = TranslateSourceLocation(*M, DOffs.getLocation());
- return RecordLocation(M, DOffs.BitOffset);
+ return RecordLocation(M, DOffs.getBitOffset(M->DeclsBlockStartOffset));
}
ASTReader::RecordLocation ASTReader::getLocalBitOffset(uint64_t GlobalOffset) {
@@ -2869,11 +2891,12 @@ ASTReader::RecordLocation ASTReader::getLocalBitOffset(uint64_t GlobalOffset) {
return RecordLocation(I->second, GlobalOffset - I->second->GlobalBitOffset);
}
-uint64_t ASTReader::getGlobalBitOffset(ModuleFile &M, uint32_t LocalOffset) {
+uint64_t ASTReader::getGlobalBitOffset(ModuleFile &M, uint64_t LocalOffset) {
return LocalOffset + M.GlobalBitOffset;
}
-static bool isSameTemplateParameterList(const TemplateParameterList *X,
+static bool isSameTemplateParameterList(const ASTContext &C,
+ const TemplateParameterList *X,
const TemplateParameterList *Y);
/// Determine whether two template parameters are similar enough
@@ -2885,7 +2908,32 @@ static bool isSameTemplateParameter(const NamedDecl *X,
if (const auto *TX = dyn_cast<TemplateTypeParmDecl>(X)) {
const auto *TY = cast<TemplateTypeParmDecl>(Y);
- return TX->isParameterPack() == TY->isParameterPack();
+ if (TX->isParameterPack() != TY->isParameterPack())
+ return false;
+ if (TX->hasTypeConstraint() != TY->hasTypeConstraint())
+ return false;
+ if (TX->hasTypeConstraint()) {
+ const TypeConstraint *TXTC = TX->getTypeConstraint();
+ const TypeConstraint *TYTC = TY->getTypeConstraint();
+ if (TXTC->getNamedConcept() != TYTC->getNamedConcept())
+ return false;
+ if (TXTC->hasExplicitTemplateArgs() != TYTC->hasExplicitTemplateArgs())
+ return false;
+ if (TXTC->hasExplicitTemplateArgs()) {
+ const auto *TXTCArgs = TXTC->getTemplateArgsAsWritten();
+ const auto *TYTCArgs = TYTC->getTemplateArgsAsWritten();
+ if (TXTCArgs->NumTemplateArgs != TYTCArgs->NumTemplateArgs)
+ return false;
+ llvm::FoldingSetNodeID XID, YID;
+ for (const auto &ArgLoc : TXTCArgs->arguments())
+ ArgLoc.getArgument().Profile(XID, X->getASTContext());
+ for (const auto &ArgLoc : TYTCArgs->arguments())
+ ArgLoc.getArgument().Profile(YID, Y->getASTContext());
+ if (XID != YID)
+ return false;
+ }
+ }
+ return true;
}
if (const auto *TX = dyn_cast<NonTypeTemplateParmDecl>(X)) {
@@ -2897,7 +2945,8 @@ static bool isSameTemplateParameter(const NamedDecl *X,
const auto *TX = cast<TemplateTemplateParmDecl>(X);
const auto *TY = cast<TemplateTemplateParmDecl>(Y);
return TX->isParameterPack() == TY->isParameterPack() &&
- isSameTemplateParameterList(TX->getTemplateParameters(),
+ isSameTemplateParameterList(TX->getASTContext(),
+ TX->getTemplateParameters(),
TY->getTemplateParameters());
}
@@ -2950,7 +2999,8 @@ static bool isSameQualifier(const NestedNameSpecifier *X,
/// Determine whether two template parameter lists are similar enough
/// that they may be used in declarations of the same template.
-static bool isSameTemplateParameterList(const TemplateParameterList *X,
+static bool isSameTemplateParameterList(const ASTContext &C,
+ const TemplateParameterList *X,
const TemplateParameterList *Y) {
if (X->size() != Y->size())
return false;
@@ -2959,6 +3009,18 @@ static bool isSameTemplateParameterList(const TemplateParameterList *X,
if (!isSameTemplateParameter(X->getParam(I), Y->getParam(I)))
return false;
+ const Expr *XRC = X->getRequiresClause();
+ const Expr *YRC = Y->getRequiresClause();
+ if (!XRC != !YRC)
+ return false;
+ if (XRC) {
+ llvm::FoldingSetNodeID XRCID, YRCID;
+ XRC->Profile(XRCID, C, /*Canonical=*/true);
+ YRC->Profile(YRCID, C, /*Canonical=*/true);
+ if (XRCID != YRCID)
+ return false;
+ }
+
return true;
}
@@ -2995,7 +3057,7 @@ static bool hasSameOverloadableAttrs(const FunctionDecl *A,
return true;
}
-/// Determine whether the two declarations refer to the same entity.
+/// Determine whether the two declarations refer to the same entity.
static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
assert(X->getDeclName() == Y->getDeclName() && "Declaration name mismatch!");
@@ -3070,6 +3132,19 @@ static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
}
ASTContext &C = FuncX->getASTContext();
+
+ const Expr *XRC = FuncX->getTrailingRequiresClause();
+ const Expr *YRC = FuncY->getTrailingRequiresClause();
+ if (!XRC != !YRC)
+ return false;
+ if (XRC) {
+ llvm::FoldingSetNodeID XRCID, YRCID;
+ XRC->Profile(XRCID, C, /*Canonical=*/true);
+ YRC->Profile(YRCID, C, /*Canonical=*/true);
+ if (XRCID != YRCID)
+ return false;
+ }
+
auto GetTypeAsWritten = [](const FunctionDecl *FD) {
// Map to the first declaration that we've already merged into this one.
// The TSI of redeclarations might not match (due to calling conventions
@@ -3093,6 +3168,7 @@ static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
return true;
return false;
}
+
return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
hasSameOverloadableAttrs(FuncX, FuncY);
}
@@ -3132,7 +3208,8 @@ static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
const auto *TemplateY = cast<TemplateDecl>(Y);
return isSameEntity(TemplateX->getTemplatedDecl(),
TemplateY->getTemplatedDecl()) &&
- isSameTemplateParameterList(TemplateX->getTemplateParameters(),
+ isSameTemplateParameterList(TemplateX->getASTContext(),
+ TemplateX->getTemplateParameters(),
TemplateY->getTemplateParameters());
}
@@ -3819,13 +3896,19 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
HasTypeConstraint);
break;
}
- case DECL_NON_TYPE_TEMPLATE_PARM:
- D = NonTypeTemplateParmDecl::CreateDeserialized(Context, ID);
+ case DECL_NON_TYPE_TEMPLATE_PARM: {
+ bool HasTypeConstraint = Record.readInt();
+ D = NonTypeTemplateParmDecl::CreateDeserialized(Context, ID,
+ HasTypeConstraint);
break;
- case DECL_EXPANDED_NON_TYPE_TEMPLATE_PARM_PACK:
+ }
+ case DECL_EXPANDED_NON_TYPE_TEMPLATE_PARM_PACK: {
+ bool HasTypeConstraint = Record.readInt();
D = NonTypeTemplateParmDecl::CreateDeserialized(Context, ID,
- Record.readInt());
+ Record.readInt(),
+ HasTypeConstraint);
break;
+ }
case DECL_TEMPLATE_TEMPLATE_PARM:
D = TemplateTemplateParmDecl::CreateDeserialized(Context, ID);
break;
@@ -3839,6 +3922,9 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
case DECL_CONCEPT:
D = ConceptDecl::CreateDeserialized(Context, ID);
break;
+ case DECL_REQUIRES_EXPR_BODY:
+ D = RequiresExprBodyDecl::CreateDeserialized(Context, ID);
+ break;
case DECL_STATIC_ASSERT:
D = StaticAssertDecl::CreateDeserialized(Context, ID);
break;
@@ -3905,6 +3991,9 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
case DECL_MS_PROPERTY:
D = MSPropertyDecl::CreateDeserialized(Context, ID);
break;
+ case DECL_MS_GUID:
+ D = MSGuidDecl::CreateDeserialized(Context, ID);
+ break;
case DECL_CAPTURED:
D = CapturedDecl::CreateDeserialized(Context, ID, Record.readInt());
break;
diff --git a/clang/lib/Serialization/ASTReaderStmt.cpp b/clang/lib/Serialization/ASTReaderStmt.cpp
index f558c26b5f1e..a40c5499a6d7 100644
--- a/clang/lib/Serialization/ASTReaderStmt.cpp
+++ b/clang/lib/Serialization/ASTReaderStmt.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Serialization/ASTRecordReader.h"
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/AttrIterator.h"
@@ -22,6 +21,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
+#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
@@ -49,6 +49,8 @@
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Token.h"
#include "clang/Serialization/ASTBitCodes.h"
+#include "clang/Serialization/ASTRecordReader.h"
+#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
@@ -101,11 +103,12 @@ namespace clang {
/// The number of record fields required for the Stmt class
/// itself.
- static const unsigned NumStmtFields = 1;
+ static const unsigned NumStmtFields = 0;
/// The number of record fields required for the Expr class
/// itself.
- static const unsigned NumExprFields = NumStmtFields + 7;
+ static const unsigned NumExprFields =
+ NumStmtFields + llvm::BitWidth<ExprDependence> + 3;
/// Read and initialize a ExplicitTemplateArgumentList structure.
void ReadTemplateKWAndArgsInfo(ASTTemplateKWAndArgsInfo &Args,
@@ -137,7 +140,6 @@ void ASTStmtReader::ReadTemplateKWAndArgsInfo(ASTTemplateKWAndArgsInfo &Args,
}
void ASTStmtReader::VisitStmt(Stmt *S) {
- S->setIsOMPStructuredBlock(Record.readInt());
assert(Record.getIdx() == NumStmtFields && "Incorrect statement field count");
}
@@ -269,6 +271,8 @@ void ASTStmtReader::VisitWhileStmt(WhileStmt *S) {
S->setConditionVariable(Record.getContext(), readDeclAs<VarDecl>());
S->setWhileLoc(readSourceLocation());
+ S->setLParenLoc(readSourceLocation());
+ S->setRParenLoc(readSourceLocation());
}
void ASTStmtReader::VisitDoStmt(DoStmt *S) {
@@ -511,10 +515,26 @@ void ASTStmtReader::VisitCapturedStmt(CapturedStmt *S) {
void ASTStmtReader::VisitExpr(Expr *E) {
VisitStmt(E);
E->setType(Record.readType());
- E->setTypeDependent(Record.readInt());
- E->setValueDependent(Record.readInt());
- E->setInstantiationDependent(Record.readInt());
- E->ExprBits.ContainsUnexpandedParameterPack = Record.readInt();
+
+ // FIXME: write and read all DependentFlags with a single call.
+ bool TypeDependent = Record.readInt();
+ bool ValueDependent = Record.readInt();
+ bool InstantiationDependent = Record.readInt();
+ bool ContainsUnexpandedTemplateParameters = Record.readInt();
+ bool ContainsErrors = Record.readInt();
+ auto Deps = ExprDependence::None;
+ if (TypeDependent)
+ Deps |= ExprDependence::Type;
+ if (ValueDependent)
+ Deps |= ExprDependence::Value;
+ if (InstantiationDependent)
+ Deps |= ExprDependence::Instantiation;
+ if (ContainsUnexpandedTemplateParameters)
+ Deps |= ExprDependence::UnexpandedPack;
+ if (ContainsErrors)
+ Deps |= ExprDependence::Error;
+ E->setDependence(Deps);
+
E->setValueKind(static_cast<ExprValueKind>(Record.readInt()));
E->setObjectKind(static_cast<ExprObjectKind>(Record.readInt()));
assert(Record.getIdx() == NumExprFields &&
@@ -523,18 +543,35 @@ void ASTStmtReader::VisitExpr(Expr *E) {
void ASTStmtReader::VisitConstantExpr(ConstantExpr *E) {
VisitExpr(E);
- E->ConstantExprBits.ResultKind = Record.readInt();
- switch (E->ConstantExprBits.ResultKind) {
- case ConstantExpr::RSK_Int64: {
+
+ auto StorageKind = Record.readInt();
+ assert(E->ConstantExprBits.ResultKind == StorageKind && "Wrong ResultKind!");
+
+ E->ConstantExprBits.APValueKind = Record.readInt();
+ E->ConstantExprBits.IsUnsigned = Record.readInt();
+ E->ConstantExprBits.BitWidth = Record.readInt();
+ E->ConstantExprBits.HasCleanup = false; // Not serialized, see below.
+ E->ConstantExprBits.IsImmediateInvocation = Record.readInt();
+
+ switch (StorageKind) {
+ case ConstantExpr::RSK_None:
+ break;
+
+ case ConstantExpr::RSK_Int64:
E->Int64Result() = Record.readInt();
- uint64_t tmp = Record.readInt();
- E->ConstantExprBits.IsUnsigned = tmp & 0x1;
- E->ConstantExprBits.BitWidth = tmp >> 1;
break;
- }
+
case ConstantExpr::RSK_APValue:
E->APValueResult() = Record.readAPValue();
+ if (E->APValueResult().needsCleanup()) {
+ E->ConstantExprBits.HasCleanup = true;
+ Record.getContext().addDestruction(&E->APValueResult());
+ }
+ break;
+ default:
+ llvm_unreachable("unexpected ResultKind!");
}
+
E->setSubExpr(Record.readSubExpr());
}
@@ -587,6 +624,7 @@ void ASTStmtReader::VisitIntegerLiteral(IntegerLiteral *E) {
void ASTStmtReader::VisitFixedPointLiteral(FixedPointLiteral *E) {
VisitExpr(E);
E->setLocation(readSourceLocation());
+ E->setScale(Record.readInt());
E->setValue(Record.getContext(), Record.readAPInt());
}
@@ -663,10 +701,14 @@ void ASTStmtReader::VisitParenListExpr(ParenListExpr *E) {
void ASTStmtReader::VisitUnaryOperator(UnaryOperator *E) {
VisitExpr(E);
+ bool hasFP_Features = Record.readInt();
+ assert(hasFP_Features == E->hasStoredFPFeatures());
E->setSubExpr(Record.readSubExpr());
E->setOpcode((UnaryOperator::Opcode)Record.readInt());
E->setOperatorLoc(readSourceLocation());
E->setCanOverflow(Record.readInt());
+ if (hasFP_Features)
+ E->setStoredFPFeatures(FPOptionsOverride(Record.readInt()));
}
void ASTStmtReader::VisitOffsetOfExpr(OffsetOfExpr *E) {
@@ -724,27 +766,15 @@ void ASTStmtReader::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E) {
E->setRParenLoc(readSourceLocation());
}
-void ASTStmtReader::VisitConceptSpecializationExpr(
- ConceptSpecializationExpr *E) {
- VisitExpr(E);
- unsigned NumTemplateArgs = Record.readInt();
- E->NestedNameSpec = Record.readNestedNameSpecifierLoc();
- E->TemplateKWLoc = Record.readSourceLocation();
- E->ConceptName = Record.readDeclarationNameInfo();
- E->NamedConcept = readDeclAs<ConceptDecl>();
- E->ArgsAsWritten = Record.readASTTemplateArgumentListInfo();
- llvm::SmallVector<TemplateArgument, 4> Args;
- for (unsigned I = 0; I < NumTemplateArgs; ++I)
- Args.push_back(Record.readTemplateArgument());
- E->setTemplateArguments(Args);
+static ConstraintSatisfaction
+readConstraintSatisfaction(ASTRecordReader &Record) {
ConstraintSatisfaction Satisfaction;
Satisfaction.IsSatisfied = Record.readInt();
if (!Satisfaction.IsSatisfied) {
unsigned NumDetailRecords = Record.readInt();
for (unsigned i = 0; i != NumDetailRecords; ++i) {
Expr *ConstraintExpr = Record.readExpr();
- bool IsDiagnostic = Record.readInt();
- if (IsDiagnostic) {
+ if (/* IsDiagnostic */Record.readInt()) {
SourceLocation DiagLocation = Record.readSourceLocation();
std::string DiagMessage = Record.readString();
Satisfaction.Details.emplace_back(
@@ -755,8 +785,138 @@ void ASTStmtReader::VisitConceptSpecializationExpr(
Satisfaction.Details.emplace_back(ConstraintExpr, Record.readExpr());
}
}
- E->Satisfaction = ASTConstraintSatisfaction::Create(Record.getContext(),
- Satisfaction);
+ return Satisfaction;
+}
+
+void ASTStmtReader::VisitConceptSpecializationExpr(
+ ConceptSpecializationExpr *E) {
+ VisitExpr(E);
+ unsigned NumTemplateArgs = Record.readInt();
+ E->NestedNameSpec = Record.readNestedNameSpecifierLoc();
+ E->TemplateKWLoc = Record.readSourceLocation();
+ E->ConceptName = Record.readDeclarationNameInfo();
+ E->NamedConcept = readDeclAs<ConceptDecl>();
+ E->FoundDecl = Record.readDeclAs<NamedDecl>();
+ E->ArgsAsWritten = Record.readASTTemplateArgumentListInfo();
+ llvm::SmallVector<TemplateArgument, 4> Args;
+ for (unsigned I = 0; I < NumTemplateArgs; ++I)
+ Args.push_back(Record.readTemplateArgument());
+ E->setTemplateArguments(Args);
+ E->Satisfaction = E->isValueDependent() ? nullptr :
+ ASTConstraintSatisfaction::Create(Record.getContext(),
+ readConstraintSatisfaction(Record));
+}
+
+static concepts::Requirement::SubstitutionDiagnostic *
+readSubstitutionDiagnostic(ASTRecordReader &Record) {
+ std::string SubstitutedEntity = Record.readString();
+ SourceLocation DiagLoc = Record.readSourceLocation();
+ std::string DiagMessage = Record.readString();
+ return new (Record.getContext())
+ concepts::Requirement::SubstitutionDiagnostic{SubstitutedEntity, DiagLoc,
+ DiagMessage};
+}
+
+void ASTStmtReader::VisitRequiresExpr(RequiresExpr *E) {
+ VisitExpr(E);
+ unsigned NumLocalParameters = Record.readInt();
+ unsigned NumRequirements = Record.readInt();
+ E->RequiresExprBits.RequiresKWLoc = Record.readSourceLocation();
+ E->RequiresExprBits.IsSatisfied = Record.readInt();
+ E->Body = Record.readDeclAs<RequiresExprBodyDecl>();
+ llvm::SmallVector<ParmVarDecl *, 4> LocalParameters;
+ for (unsigned i = 0; i < NumLocalParameters; ++i)
+ LocalParameters.push_back(cast<ParmVarDecl>(Record.readDecl()));
+ std::copy(LocalParameters.begin(), LocalParameters.end(),
+ E->getTrailingObjects<ParmVarDecl *>());
+ llvm::SmallVector<concepts::Requirement *, 4> Requirements;
+ for (unsigned i = 0; i < NumRequirements; ++i) {
+ auto RK =
+ static_cast<concepts::Requirement::RequirementKind>(Record.readInt());
+ concepts::Requirement *R = nullptr;
+ switch (RK) {
+ case concepts::Requirement::RK_Type: {
+ auto Status =
+ static_cast<concepts::TypeRequirement::SatisfactionStatus>(
+ Record.readInt());
+ if (Status == concepts::TypeRequirement::SS_SubstitutionFailure)
+ R = new (Record.getContext())
+ concepts::TypeRequirement(readSubstitutionDiagnostic(Record));
+ else
+ R = new (Record.getContext())
+ concepts::TypeRequirement(Record.readTypeSourceInfo());
+ } break;
+ case concepts::Requirement::RK_Simple:
+ case concepts::Requirement::RK_Compound: {
+ auto Status =
+ static_cast<concepts::ExprRequirement::SatisfactionStatus>(
+ Record.readInt());
+ llvm::PointerUnion<concepts::Requirement::SubstitutionDiagnostic *,
+ Expr *> E;
+ if (Status == concepts::ExprRequirement::SS_ExprSubstitutionFailure) {
+ E = readSubstitutionDiagnostic(Record);
+ } else
+ E = Record.readExpr();
+
+ llvm::Optional<concepts::ExprRequirement::ReturnTypeRequirement> Req;
+ ConceptSpecializationExpr *SubstitutedConstraintExpr = nullptr;
+ SourceLocation NoexceptLoc;
+ if (RK == concepts::Requirement::RK_Simple) {
+ Req.emplace();
+ } else {
+ NoexceptLoc = Record.readSourceLocation();
+ switch (/* returnTypeRequirementKind */Record.readInt()) {
+ case 0:
+ // No return type requirement.
+ Req.emplace();
+ break;
+ case 1: {
+ // type-constraint
+ TemplateParameterList *TPL = Record.readTemplateParameterList();
+ if (Status >=
+ concepts::ExprRequirement::SS_ConstraintsNotSatisfied)
+ SubstitutedConstraintExpr =
+ cast<ConceptSpecializationExpr>(Record.readExpr());
+ Req.emplace(TPL);
+ } break;
+ case 2:
+ // Substitution failure
+ Req.emplace(readSubstitutionDiagnostic(Record));
+ break;
+ }
+ }
+ if (Expr *Ex = E.dyn_cast<Expr *>())
+ R = new (Record.getContext()) concepts::ExprRequirement(
+ Ex, RK == concepts::Requirement::RK_Simple, NoexceptLoc,
+ std::move(*Req), Status, SubstitutedConstraintExpr);
+ else
+ R = new (Record.getContext()) concepts::ExprRequirement(
+ E.get<concepts::Requirement::SubstitutionDiagnostic *>(),
+ RK == concepts::Requirement::RK_Simple, NoexceptLoc,
+ std::move(*Req));
+ } break;
+ case concepts::Requirement::RK_Nested: {
+ if (/* IsSubstitutionDiagnostic */Record.readInt()) {
+ R = new (Record.getContext()) concepts::NestedRequirement(
+ readSubstitutionDiagnostic(Record));
+ break;
+ }
+ Expr *E = Record.readExpr();
+ if (E->isInstantiationDependent())
+ R = new (Record.getContext()) concepts::NestedRequirement(E);
+ else
+ R = new (Record.getContext())
+ concepts::NestedRequirement(Record.getContext(), E,
+ readConstraintSatisfaction(Record));
+ } break;
+ }
+ if (!R)
+ continue;
+ Requirements.push_back(R);
+ }
+ std::copy(Requirements.begin(), Requirements.end(),
+ E->getTrailingObjects<concepts::Requirement *>());
+ E->RBraceLoc = Record.readSourceLocation();
}
void ASTStmtReader::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
@@ -766,15 +926,68 @@ void ASTStmtReader::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
E->setRBracketLoc(readSourceLocation());
}
+void ASTStmtReader::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
+ VisitExpr(E);
+ E->setBase(Record.readSubExpr());
+ E->setRowIdx(Record.readSubExpr());
+ E->setColumnIdx(Record.readSubExpr());
+ E->setRBracketLoc(readSourceLocation());
+}
+
void ASTStmtReader::VisitOMPArraySectionExpr(OMPArraySectionExpr *E) {
VisitExpr(E);
E->setBase(Record.readSubExpr());
E->setLowerBound(Record.readSubExpr());
E->setLength(Record.readSubExpr());
- E->setColonLoc(readSourceLocation());
+ E->setStride(Record.readSubExpr());
+ E->setColonLocFirst(readSourceLocation());
+ E->setColonLocSecond(readSourceLocation());
E->setRBracketLoc(readSourceLocation());
}
+void ASTStmtReader::VisitOMPArrayShapingExpr(OMPArrayShapingExpr *E) {
+ VisitExpr(E);
+ unsigned NumDims = Record.readInt();
+ E->setBase(Record.readSubExpr());
+ SmallVector<Expr *, 4> Dims(NumDims);
+ for (unsigned I = 0; I < NumDims; ++I)
+ Dims[I] = Record.readSubExpr();
+ E->setDimensions(Dims);
+ SmallVector<SourceRange, 4> SRs(NumDims);
+ for (unsigned I = 0; I < NumDims; ++I)
+ SRs[I] = readSourceRange();
+ E->setBracketsRanges(SRs);
+ E->setLParenLoc(readSourceLocation());
+ E->setRParenLoc(readSourceLocation());
+}
+
+void ASTStmtReader::VisitOMPIteratorExpr(OMPIteratorExpr *E) {
+ VisitExpr(E);
+ unsigned NumIters = Record.readInt();
+ E->setIteratorKwLoc(readSourceLocation());
+ E->setLParenLoc(readSourceLocation());
+ E->setRParenLoc(readSourceLocation());
+ for (unsigned I = 0; I < NumIters; ++I) {
+ E->setIteratorDeclaration(I, Record.readDeclRef());
+ E->setAssignmentLoc(I, readSourceLocation());
+ Expr *Begin = Record.readSubExpr();
+ Expr *End = Record.readSubExpr();
+ Expr *Step = Record.readSubExpr();
+ SourceLocation ColonLoc = readSourceLocation();
+ SourceLocation SecColonLoc;
+ if (Step)
+ SecColonLoc = readSourceLocation();
+ E->setIteratorRange(I, Begin, ColonLoc, End, SecColonLoc, Step);
+ // Deserialize helpers
+ OMPIteratorHelperData HD;
+ HD.CounterVD = cast_or_null<VarDecl>(Record.readDeclRef());
+ HD.Upper = Record.readSubExpr();
+ HD.Update = Record.readSubExpr();
+ HD.CounterUpdate = Record.readSubExpr();
+ E->setHelper(I, HD);
+ }
+}
+
void ASTStmtReader::VisitCallExpr(CallExpr *E) {
VisitExpr(E);
unsigned NumArgs = Record.readInt();
@@ -871,12 +1084,16 @@ void ASTStmtReader::VisitCastExpr(CastExpr *E) {
}
void ASTStmtReader::VisitBinaryOperator(BinaryOperator *E) {
+ bool hasFP_Features;
+ BinaryOperator::Opcode opc;
VisitExpr(E);
+ E->setHasStoredFPFeatures(hasFP_Features = Record.readInt());
+ E->setOpcode(opc = (BinaryOperator::Opcode)Record.readInt());
E->setLHS(Record.readSubExpr());
E->setRHS(Record.readSubExpr());
- E->setOpcode((BinaryOperator::Opcode)Record.readInt());
E->setOperatorLoc(readSourceLocation());
- E->setFPFeatures(FPOptions(Record.readInt()));
+ if (hasFP_Features)
+ E->setStoredFPFeatures(FPOptionsOverride(Record.readInt()));
}
void ASTStmtReader::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
@@ -1073,6 +1290,7 @@ void ASTStmtReader::VisitStmtExpr(StmtExpr *E) {
E->setLParenLoc(readSourceLocation());
E->setRParenLoc(readSourceLocation());
E->setSubStmt(cast_or_null<CompoundStmt>(Record.readSubStmt()));
+ E->StmtExprBits.TemplateDepth = Record.readInt();
}
void ASTStmtReader::VisitChooseExpr(ChooseExpr *E) {
@@ -1443,8 +1661,8 @@ void ASTStmtReader::VisitMSDependentExistsStmt(MSDependentExistsStmt *S) {
void ASTStmtReader::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
VisitCallExpr(E);
E->CXXOperatorCallExprBits.OperatorKind = Record.readInt();
- E->CXXOperatorCallExprBits.FPFeatures = Record.readInt();
E->Range = Record.readSourceRange();
+ E->setFPFeatures(FPOptionsOverride(Record.readInt()));
}
void ASTStmtReader::VisitCXXRewrittenBinaryOperator(
@@ -1490,19 +1708,23 @@ void ASTStmtReader::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E) {
void ASTStmtReader::VisitLambdaExpr(LambdaExpr *E) {
VisitExpr(E);
unsigned NumCaptures = Record.readInt();
- assert(NumCaptures == E->NumCaptures);(void)NumCaptures;
+ (void)NumCaptures;
+ assert(NumCaptures == E->LambdaExprBits.NumCaptures);
E->IntroducerRange = readSourceRange();
- E->CaptureDefault = static_cast<LambdaCaptureDefault>(Record.readInt());
+ E->LambdaExprBits.CaptureDefault = Record.readInt();
E->CaptureDefaultLoc = readSourceLocation();
- E->ExplicitParams = Record.readInt();
- E->ExplicitResultType = Record.readInt();
+ E->LambdaExprBits.ExplicitParams = Record.readInt();
+ E->LambdaExprBits.ExplicitResultType = Record.readInt();
E->ClosingBrace = readSourceLocation();
// Read capture initializers.
for (LambdaExpr::capture_init_iterator C = E->capture_init_begin(),
- CEnd = E->capture_init_end();
+ CEnd = E->capture_init_end();
C != CEnd; ++C)
*C = Record.readSubExpr();
+
+ // The body will be lazily deserialized when needed from the call operator
+ // declaration.
}
void
@@ -1532,6 +1754,10 @@ void ASTStmtReader::VisitCXXReinterpretCastExpr(CXXReinterpretCastExpr *E) {
return VisitCXXNamedCastExpr(E);
}
+void ASTStmtReader::VisitCXXAddrspaceCastExpr(CXXAddrspaceCastExpr *E) {
+ return VisitCXXNamedCastExpr(E);
+}
+
void ASTStmtReader::VisitCXXConstCastExpr(CXXConstCastExpr *E) {
return VisitCXXNamedCastExpr(E);
}
@@ -1567,14 +1793,10 @@ void ASTStmtReader::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E) {
void ASTStmtReader::VisitCXXTypeidExpr(CXXTypeidExpr *E) {
VisitExpr(E);
E->setSourceRange(readSourceRange());
- if (E->isTypeOperand()) { // typeid(int)
- E->setTypeOperandSourceInfo(
- readTypeSourceInfo());
- return;
- }
-
- // typeid(42+2)
- E->setExprOperand(Record.readSubExpr());
+ if (E->isTypeOperand())
+ E->Operand = readTypeSourceInfo();
+ else
+ E->Operand = Record.readSubExpr();
}
void ASTStmtReader::VisitCXXThisExpr(CXXThisExpr *E) {
@@ -1687,9 +1909,17 @@ void ASTStmtReader::VisitExprWithCleanups(ExprWithCleanups *E) {
unsigned NumObjects = Record.readInt();
assert(NumObjects == E->getNumObjects());
- for (unsigned i = 0; i != NumObjects; ++i)
- E->getTrailingObjects<BlockDecl *>()[i] =
- readDeclAs<BlockDecl>();
+ for (unsigned i = 0; i != NumObjects; ++i) {
+ unsigned CleanupKind = Record.readInt();
+ ExprWithCleanups::CleanupObject Obj;
+ if (CleanupKind == COK_Block)
+ Obj = readDeclAs<BlockDecl>();
+ else if (CleanupKind == COK_CompoundLiteral)
+ Obj = cast<CompoundLiteralExpr>(Record.readSubExpr());
+ else
+ llvm_unreachable("unexpected cleanup object type");
+ E->getTrailingObjects<ExprWithCleanups::CleanupObject>()[i] = Obj;
+ }
E->ExprWithCleanupsBits.CleanupsHaveSideEffects = Record.readInt();
E->SubExpr = Record.readSubExpr();
@@ -1936,6 +2166,19 @@ void ASTStmtReader::VisitTypoExpr(TypoExpr *E) {
llvm_unreachable("Cannot read TypoExpr nodes");
}
+void ASTStmtReader::VisitRecoveryExpr(RecoveryExpr *E) {
+ VisitExpr(E);
+ unsigned NumArgs = Record.readInt();
+ E->BeginLoc = readSourceLocation();
+ E->EndLoc = readSourceLocation();
+ assert(
+ (NumArgs == std::distance(E->children().begin(), E->children().end())) &&
+ "Wrong NumArgs!");
+ (void)NumArgs;
+ for (Stmt *&Child : E->children())
+ Child = Record.readSubStmt();
+}
+
//===----------------------------------------------------------------------===//
// Microsoft Expressions and Statements
//===----------------------------------------------------------------------===//
@@ -1958,16 +2201,11 @@ void ASTStmtReader::VisitMSPropertySubscriptExpr(MSPropertySubscriptExpr *E) {
void ASTStmtReader::VisitCXXUuidofExpr(CXXUuidofExpr *E) {
VisitExpr(E);
E->setSourceRange(readSourceRange());
- std::string UuidStr = readString();
- E->setUuidStr(StringRef(UuidStr).copy(Record.getContext()));
- if (E->isTypeOperand()) { // __uuidof(ComType)
- E->setTypeOperandSourceInfo(
- readTypeSourceInfo());
- return;
- }
-
- // __uuidof(expr)
- E->setExprOperand(Record.readSubExpr());
+ E->Guid = readDeclAs<MSGuidDecl>();
+ if (E->isTypeOperand())
+ E->Operand = readTypeSourceInfo();
+ else
+ E->Operand = Record.readSubExpr();
}
void ASTStmtReader::VisitSEHLeaveStmt(SEHLeaveStmt *S) {
@@ -2111,6 +2349,7 @@ void ASTStmtReader::VisitOMPParallelDirective(OMPParallelDirective *D) {
// The NumClauses field was read in ReadStmtFromStream.
Record.skipInts(1);
VisitOMPExecutableDirective(D);
+ D->setTaskReductionRefExpr(Record.readSubExpr());
D->setHasCancel(Record.readInt());
}
@@ -2120,6 +2359,7 @@ void ASTStmtReader::VisitOMPSimdDirective(OMPSimdDirective *D) {
void ASTStmtReader::VisitOMPForDirective(OMPForDirective *D) {
VisitOMPLoopDirective(D);
+ D->setTaskReductionRefExpr(Record.readSubExpr());
D->setHasCancel(Record.readInt());
}
@@ -2132,6 +2372,7 @@ void ASTStmtReader::VisitOMPSectionsDirective(OMPSectionsDirective *D) {
// The NumClauses field was read in ReadStmtFromStream.
Record.skipInts(1);
VisitOMPExecutableDirective(D);
+ D->setTaskReductionRefExpr(Record.readSubExpr());
D->setHasCancel(Record.readInt());
}
@@ -2163,6 +2404,7 @@ void ASTStmtReader::VisitOMPCriticalDirective(OMPCriticalDirective *D) {
void ASTStmtReader::VisitOMPParallelForDirective(OMPParallelForDirective *D) {
VisitOMPLoopDirective(D);
+ D->setTaskReductionRefExpr(Record.readSubExpr());
D->setHasCancel(Record.readInt());
}
@@ -2177,6 +2419,7 @@ void ASTStmtReader::VisitOMPParallelMasterDirective(
// The NumClauses field was read in ReadStmtFromStream.
Record.skipInts(1);
VisitOMPExecutableDirective(D);
+ D->setTaskReductionRefExpr(Record.readSubExpr());
}
void ASTStmtReader::VisitOMPParallelSectionsDirective(
@@ -2185,6 +2428,7 @@ void ASTStmtReader::VisitOMPParallelSectionsDirective(
// The NumClauses field was read in ReadStmtFromStream.
Record.skipInts(1);
VisitOMPExecutableDirective(D);
+ D->setTaskReductionRefExpr(Record.readSubExpr());
D->setHasCancel(Record.readInt());
}
@@ -2226,6 +2470,20 @@ void ASTStmtReader::VisitOMPFlushDirective(OMPFlushDirective *D) {
VisitOMPExecutableDirective(D);
}
+void ASTStmtReader::VisitOMPDepobjDirective(OMPDepobjDirective *D) {
+ VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ Record.skipInts(1);
+ VisitOMPExecutableDirective(D);
+}
+
+void ASTStmtReader::VisitOMPScanDirective(OMPScanDirective *D) {
+ VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ Record.skipInts(1);
+ VisitOMPExecutableDirective(D);
+}
+
void ASTStmtReader::VisitOMPOrderedDirective(OMPOrderedDirective *D) {
VisitStmt(D);
// The NumClauses field was read in ReadStmtFromStream.
@@ -2278,11 +2536,14 @@ void ASTStmtReader::VisitOMPTargetParallelDirective(
VisitStmt(D);
Record.skipInts(1);
VisitOMPExecutableDirective(D);
+ D->setTaskReductionRefExpr(Record.readSubExpr());
+ D->setHasCancel(Record.readBool());
}
void ASTStmtReader::VisitOMPTargetParallelForDirective(
OMPTargetParallelForDirective *D) {
VisitOMPLoopDirective(D);
+ D->setTaskReductionRefExpr(Record.readSubExpr());
D->setHasCancel(Record.readInt());
}
@@ -2310,6 +2571,7 @@ void ASTStmtReader::VisitOMPCancelDirective(OMPCancelDirective *D) {
void ASTStmtReader::VisitOMPTaskLoopDirective(OMPTaskLoopDirective *D) {
VisitOMPLoopDirective(D);
+ D->setHasCancel(Record.readInt());
}
void ASTStmtReader::VisitOMPTaskLoopSimdDirective(OMPTaskLoopSimdDirective *D) {
@@ -2319,6 +2581,7 @@ void ASTStmtReader::VisitOMPTaskLoopSimdDirective(OMPTaskLoopSimdDirective *D) {
void ASTStmtReader::VisitOMPMasterTaskLoopDirective(
OMPMasterTaskLoopDirective *D) {
VisitOMPLoopDirective(D);
+ D->setHasCancel(Record.readInt());
}
void ASTStmtReader::VisitOMPMasterTaskLoopSimdDirective(
@@ -2329,6 +2592,7 @@ void ASTStmtReader::VisitOMPMasterTaskLoopSimdDirective(
void ASTStmtReader::VisitOMPParallelMasterTaskLoopDirective(
OMPParallelMasterTaskLoopDirective *D) {
VisitOMPLoopDirective(D);
+ D->setHasCancel(Record.readInt());
}
void ASTStmtReader::VisitOMPParallelMasterTaskLoopSimdDirective(
@@ -2349,6 +2613,7 @@ void ASTStmtReader::VisitOMPTargetUpdateDirective(OMPTargetUpdateDirective *D) {
void ASTStmtReader::VisitOMPDistributeParallelForDirective(
OMPDistributeParallelForDirective *D) {
VisitOMPLoopDirective(D);
+ D->setTaskReductionRefExpr(Record.readSubExpr());
D->setHasCancel(Record.readInt());
}
@@ -2389,6 +2654,7 @@ void ASTStmtReader::VisitOMPTeamsDistributeParallelForSimdDirective(
void ASTStmtReader::VisitOMPTeamsDistributeParallelForDirective(
OMPTeamsDistributeParallelForDirective *D) {
VisitOMPLoopDirective(D);
+ D->setTaskReductionRefExpr(Record.readSubExpr());
D->setHasCancel(Record.readInt());
}
@@ -2407,6 +2673,7 @@ void ASTStmtReader::VisitOMPTargetTeamsDistributeDirective(
void ASTStmtReader::VisitOMPTargetTeamsDistributeParallelForDirective(
OMPTargetTeamsDistributeParallelForDirective *D) {
VisitOMPLoopDirective(D);
+ D->setTaskReductionRefExpr(Record.readSubExpr());
D->setHasCancel(Record.readInt());
}
@@ -2613,10 +2880,8 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case EXPR_CONSTANT:
S = ConstantExpr::CreateEmpty(
- Context,
- static_cast<ConstantExpr::ResultStorageKind>(
- Record[ASTStmtReader::NumExprFields]),
- Empty);
+ Context, static_cast<ConstantExpr::ResultStorageKind>(
+ /*StorageKind=*/Record[ASTStmtReader::NumExprFields]));
break;
case EXPR_PREDEFINED:
@@ -2639,6 +2904,10 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
S = IntegerLiteral::Create(Context, Empty);
break;
+ case EXPR_FIXEDPOINT_LITERAL:
+ S = FixedPointLiteral::Create(Context, Empty);
+ break;
+
case EXPR_FLOATING_LITERAL:
S = FloatingLiteral::Create(Context, Empty);
break;
@@ -2670,7 +2939,8 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_UNARY_OPERATOR:
- S = new (Context) UnaryOperator(Empty);
+ S = UnaryOperator::CreateEmpty(Context,
+ Record[ASTStmtReader::NumExprFields]);
break;
case EXPR_OFFSETOF:
@@ -2687,15 +2957,34 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
S = new (Context) ArraySubscriptExpr(Empty);
break;
+ case EXPR_MATRIX_SUBSCRIPT:
+ S = new (Context) MatrixSubscriptExpr(Empty);
+ break;
+
case EXPR_OMP_ARRAY_SECTION:
S = new (Context) OMPArraySectionExpr(Empty);
break;
+ case EXPR_OMP_ARRAY_SHAPING:
+ S = OMPArrayShapingExpr::CreateEmpty(
+ Context, Record[ASTStmtReader::NumExprFields]);
+ break;
+
+ case EXPR_OMP_ITERATOR:
+ S = OMPIteratorExpr::CreateEmpty(Context,
+ Record[ASTStmtReader::NumExprFields]);
+ break;
+
case EXPR_CALL:
S = CallExpr::CreateEmpty(
Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields], Empty);
break;
+ case EXPR_RECOVERY:
+ S = RecoveryExpr::CreateEmpty(
+ Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields]);
+ break;
+
case EXPR_MEMBER:
S = MemberExpr::CreateEmpty(Context, Record[ASTStmtReader::NumExprFields],
Record[ASTStmtReader::NumExprFields + 1],
@@ -2704,11 +2993,13 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_BINARY_OPERATOR:
- S = new (Context) BinaryOperator(Empty);
+ S = BinaryOperator::CreateEmpty(Context,
+ Record[ASTStmtReader::NumExprFields]);
break;
case EXPR_COMPOUND_ASSIGN_OPERATOR:
- S = new (Context) CompoundAssignOperator(Empty);
+ S = CompoundAssignOperator::CreateEmpty(
+ Context, Record[ASTStmtReader::NumExprFields]);
break;
case EXPR_CONDITIONAL_OPERATOR:
@@ -3054,6 +3345,16 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
Context, Record[ASTStmtReader::NumStmtFields], Empty);
break;
+ case STMT_OMP_DEPOBJ_DIRECTIVE:
+ S = OMPDepobjDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
+ case STMT_OMP_SCAN_DIRECTIVE:
+ S = OMPScanDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
case STMT_OMP_ORDERED_DIRECTIVE:
S = OMPOrderedDirective::CreateEmpty(
Context, Record[ASTStmtReader::NumStmtFields], Empty);
@@ -3331,11 +3632,20 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
S = CXXConstCastExpr::CreateEmpty(Context);
break;
+ case EXPR_CXX_ADDRSPACE_CAST:
+ S = CXXAddrspaceCastExpr::CreateEmpty(Context);
+ break;
+
case EXPR_CXX_FUNCTIONAL_CAST:
S = CXXFunctionalCastExpr::CreateEmpty(Context,
/*PathSize*/ Record[ASTStmtReader::NumExprFields]);
break;
+ case EXPR_BUILTIN_BIT_CAST:
+ assert(Record[ASTStmtReader::NumExprFields] == 0 && "Wrong PathSize!");
+ S = new (Context) BuiltinBitCastExpr(Empty);
+ break;
+
case EXPR_USER_DEFINED_LITERAL:
S = UserDefinedLiteral::CreateEmpty(
Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields], Empty);
@@ -3566,11 +3876,18 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
S = new (Context) DependentCoawaitExpr(Empty);
break;
- case EXPR_CONCEPT_SPECIALIZATION:
+ case EXPR_CONCEPT_SPECIALIZATION: {
unsigned numTemplateArgs = Record[ASTStmtReader::NumExprFields];
S = ConceptSpecializationExpr::Create(Context, Empty, numTemplateArgs);
break;
-
+ }
+
+ case EXPR_REQUIRES:
+ unsigned numLocalParameters = Record[ASTStmtReader::NumExprFields];
+ unsigned numRequirement = Record[ASTStmtReader::NumExprFields + 1];
+ S = RequiresExpr::Create(Context, Empty, numLocalParameters,
+ numRequirement);
+ break;
}
// We hit a STMT_STOP, so we're done with this expression.
diff --git a/clang/lib/Serialization/ASTWriter.cpp b/clang/lib/Serialization/ASTWriter.cpp
index 6eba48a1abe9..2345a12caeb2 100644
--- a/clang/lib/Serialization/ASTWriter.cpp
+++ b/clang/lib/Serialization/ASTWriter.cpp
@@ -10,14 +10,12 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/OpenMPClause.h"
-#include "clang/Serialization/ASTRecordWriter.h"
#include "ASTCommon.h"
#include "ASTReaderInternals.h"
#include "MultiOnDiskHashTable.h"
-#include "clang/AST/AbstractTypeWriter.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTUnresolvedSet.h"
+#include "clang/AST/AbstractTypeWriter.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
@@ -31,6 +29,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/LambdaCapture.h"
#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/OpenMPClause.h"
#include "clang/AST/RawCommentList.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
@@ -65,7 +64,9 @@
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/Weak.h"
+#include "clang/Serialization/ASTBitCodes.h"
#include "clang/Serialization/ASTReader.h"
+#include "clang/Serialization/ASTRecordWriter.h"
#include "clang/Serialization/InMemoryModuleCache.h"
#include "clang/Serialization/ModuleFile.h"
#include "clang/Serialization/ModuleFileExtension.h"
@@ -288,6 +289,25 @@ void TypeLocWriter::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) {
Record.AddSourceLocation(TL.getNameLoc());
}
+void TypeLocWriter::VisitConstantMatrixTypeLoc(ConstantMatrixTypeLoc TL) {
+ Record.AddSourceLocation(TL.getAttrNameLoc());
+ SourceRange range = TL.getAttrOperandParensRange();
+ Record.AddSourceLocation(range.getBegin());
+ Record.AddSourceLocation(range.getEnd());
+ Record.AddStmt(TL.getAttrRowOperand());
+ Record.AddStmt(TL.getAttrColumnOperand());
+}
+
+void TypeLocWriter::VisitDependentSizedMatrixTypeLoc(
+ DependentSizedMatrixTypeLoc TL) {
+ Record.AddSourceLocation(TL.getAttrNameLoc());
+ SourceRange range = TL.getAttrOperandParensRange();
+ Record.AddSourceLocation(range.getBegin());
+ Record.AddSourceLocation(range.getEnd());
+ Record.AddStmt(TL.getAttrRowOperand());
+ Record.AddStmt(TL.getAttrColumnOperand());
+}
+
void TypeLocWriter::VisitFunctionTypeLoc(FunctionTypeLoc TL) {
Record.AddSourceLocation(TL.getLocalRangeBegin());
Record.AddSourceLocation(TL.getLParenLoc());
@@ -349,6 +369,18 @@ void TypeLocWriter::VisitUnaryTransformTypeLoc(UnaryTransformTypeLoc TL) {
void TypeLocWriter::VisitAutoTypeLoc(AutoTypeLoc TL) {
Record.AddSourceLocation(TL.getNameLoc());
+ Record.push_back(TL.isConstrained());
+ if (TL.isConstrained()) {
+ Record.AddNestedNameSpecifierLoc(TL.getNestedNameSpecifierLoc());
+ Record.AddSourceLocation(TL.getTemplateKWLoc());
+ Record.AddSourceLocation(TL.getConceptNameLoc());
+ Record.AddDeclRef(TL.getFoundDecl());
+ Record.AddSourceLocation(TL.getLAngleLoc());
+ Record.AddSourceLocation(TL.getRAngleLoc());
+ for (unsigned I = 0; I < TL.getNumArgs(); ++I)
+ Record.AddTemplateArgumentLocInfo(TL.getTypePtr()->getArg(I).getKind(),
+ TL.getArgLocInfo(I));
+ }
}
void TypeLocWriter::VisitDeducedTemplateSpecializationTypeLoc(
@@ -464,6 +496,14 @@ void TypeLocWriter::VisitPipeTypeLoc(PipeTypeLoc TL) {
Record.AddSourceLocation(TL.getKWLoc());
}
+void TypeLocWriter::VisitExtIntTypeLoc(clang::ExtIntTypeLoc TL) {
+ Record.AddSourceLocation(TL.getNameLoc());
+}
+void TypeLocWriter::VisitDependentExtIntTypeLoc(
+ clang::DependentExtIntTypeLoc TL) {
+ Record.AddSourceLocation(TL.getNameLoc());
+}
+
void ASTWriter::WriteTypeAbbrevs() {
using namespace llvm;
@@ -488,6 +528,7 @@ void ASTWriter::WriteTypeAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // ProducesResult
Abv->Add(BitCodeAbbrevOp(0)); // NoCallerSavedRegs
Abv->Add(BitCodeAbbrevOp(0)); // NoCfCheck
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // CmseNSCall
// FunctionProtoType
Abv->Add(BitCodeAbbrevOp(0)); // IsVariadic
Abv->Add(BitCodeAbbrevOp(0)); // HasTrailingReturn
@@ -558,6 +599,7 @@ static void AddStmtsExprs(llvm::BitstreamWriter &Stream,
RECORD(EXPR_PREDEFINED);
RECORD(EXPR_DECL_REF);
RECORD(EXPR_INTEGER_LITERAL);
+ RECORD(EXPR_FIXEDPOINT_LITERAL);
RECORD(EXPR_FLOATING_LITERAL);
RECORD(EXPR_IMAGINARY_LITERAL);
RECORD(EXPR_STRING_LITERAL);
@@ -619,6 +661,7 @@ static void AddStmtsExprs(llvm::BitstreamWriter &Stream,
RECORD(EXPR_CXX_DYNAMIC_CAST);
RECORD(EXPR_CXX_REINTERPRET_CAST);
RECORD(EXPR_CXX_CONST_CAST);
+ RECORD(EXPR_CXX_ADDRSPACE_CAST);
RECORD(EXPR_CXX_FUNCTIONAL_CAST);
RECORD(EXPR_USER_DEFINED_LITERAL);
RECORD(EXPR_CXX_STD_INITIALIZER_LIST);
@@ -744,6 +787,7 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(DELETE_EXPRS_TO_ANALYZE);
RECORD(CUDA_PRAGMA_FORCE_HOST_DEVICE_DEPTH);
RECORD(PP_CONDITIONAL_STACK);
+ RECORD(DECLS_TO_CHECK_FOR_DEFERRED_DIAGS);
// SourceManager Block.
BLOCK(SOURCE_MANAGER_BLOCK);
@@ -885,6 +929,7 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(DECL_NON_TYPE_TEMPLATE_PARM);
RECORD(DECL_TEMPLATE_TEMPLATE_PARM);
RECORD(DECL_CONCEPT);
+ RECORD(DECL_REQUIRES_EXPR_BODY);
RECORD(DECL_TYPE_ALIAS_TEMPLATE);
RECORD(DECL_STATIC_ASSERT);
RECORD(DECL_CXX_BASE_SPECIFIERS);
@@ -917,6 +962,7 @@ void ASTWriter::WriteBlockInfoBlock() {
BLOCK(UNHASHED_CONTROL_BLOCK);
RECORD(SIGNATURE);
+ RECORD(AST_BLOCK_HASH);
RECORD(DIAGNOSTIC_OPTIONS);
RECORD(DIAG_PRAGMA_MAPPINGS);
@@ -982,22 +1028,23 @@ adjustFilenameForRelocatableAST(const char *Filename, StringRef BaseDir) {
return Filename + Pos;
}
-ASTFileSignature ASTWriter::createSignature(StringRef Bytes) {
- // Calculate the hash till start of UNHASHED_CONTROL_BLOCK.
+std::pair<ASTFileSignature, ASTFileSignature>
+ASTWriter::createSignature(StringRef AllBytes, StringRef ASTBlockBytes) {
llvm::SHA1 Hasher;
- Hasher.update(ArrayRef<uint8_t>(Bytes.bytes_begin(), Bytes.size()));
+ Hasher.update(ASTBlockBytes);
auto Hash = Hasher.result();
+ ASTFileSignature ASTBlockHash = ASTFileSignature::create(Hash);
- // Convert to an array [5*i32].
- ASTFileSignature Signature;
- auto LShift = [&](unsigned char Val, unsigned Shift) {
- return (uint32_t)Val << Shift;
- };
- for (int I = 0; I != 5; ++I)
- Signature[I] = LShift(Hash[I * 4 + 0], 24) | LShift(Hash[I * 4 + 1], 16) |
- LShift(Hash[I * 4 + 2], 8) | LShift(Hash[I * 4 + 3], 0);
+ // Add the remaining bytes (i.e. bytes before the unhashed control block that
+ // are not part of the AST block).
+ Hasher.update(
+ AllBytes.take_front(ASTBlockBytes.bytes_end() - AllBytes.bytes_begin()));
+ Hasher.update(
+ AllBytes.take_back(AllBytes.bytes_end() - ASTBlockBytes.bytes_end()));
+ Hash = Hasher.result();
+ ASTFileSignature Signature = ASTFileSignature::create(Hash);
- return Signature;
+ return std::make_pair(ASTBlockHash, Signature);
}
ASTFileSignature ASTWriter::writeUnhashedControlBlock(Preprocessor &PP,
@@ -1014,7 +1061,16 @@ ASTFileSignature ASTWriter::writeUnhashedControlBlock(Preprocessor &PP,
ASTFileSignature Signature;
if (WritingModule &&
PP.getHeaderSearchInfo().getHeaderSearchOpts().ModulesHashContent) {
- Signature = createSignature(StringRef(Buffer.begin(), StartOfUnhashedControl));
+ ASTFileSignature ASTBlockHash;
+ auto ASTBlockStartByte = ASTBlockRange.first >> 3;
+ auto ASTBlockByteLength = (ASTBlockRange.second >> 3) - ASTBlockStartByte;
+ std::tie(ASTBlockHash, Signature) = createSignature(
+ StringRef(Buffer.begin(), StartOfUnhashedControl),
+ StringRef(Buffer.begin() + ASTBlockStartByte, ASTBlockByteLength));
+
+ Record.append(ASTBlockHash.begin(), ASTBlockHash.end());
+ Stream.EmitRecord(AST_BLOCK_HASH, Record);
+ Record.clear();
Record.append(Signature.begin(), Signature.end());
Stream.EmitRecord(SIGNATURE, Record);
Record.clear();
@@ -1119,7 +1175,7 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
BaseDirectory.assign(BaseDir.begin(), BaseDir.end());
} else if (!isysroot.empty()) {
// Write out paths relative to the sysroot if possible.
- BaseDirectory = isysroot;
+ BaseDirectory = std::string(isysroot);
}
// Module map file
@@ -1705,7 +1761,8 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
llvm::SmallVector<Module *, 16> Worklist(1, WritingModule);
while (!Worklist.empty()) {
Module *M = Worklist.pop_back_val();
- if (!M->isAvailable())
+ // We don't care about headers in unimportable submodules.
+ if (M->isUnimportable())
continue;
// Map to disk files where possible, to pick up any missing stat
@@ -1787,7 +1844,7 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
Filename, File->getSize(), getTimestampForOutput(File)
};
HeaderFileInfoTrait::data_type Data = {
- *HFI, HS.getModuleMap().findAllModulesForHeader(File), {}
+ *HFI, HS.getModuleMap().findResolvedModulesForHeader(File), {}
};
Generator.insert(Key, Data, GeneratorTrait);
++NumHeaderSearchEntries;
@@ -1865,6 +1922,7 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
// Enter the source manager block.
Stream.EnterSubblock(SOURCE_MANAGER_BLOCK_ID, 4);
+ const uint64_t SourceManagerBlockOffset = Stream.GetCurrentBitNo();
// Abbreviations for the various kinds of source-location entries.
unsigned SLocFileAbbrv = CreateSLocFileAbbrev(Stream);
@@ -1877,6 +1935,7 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
// Write out the source location entry table. We skip the first
// entry, which is always the same dummy entry.
std::vector<uint32_t> SLocEntryOffsets;
+ uint64_t SLocEntryOffsetsBase = Stream.GetCurrentBitNo();
RecordData PreloadSLocs;
SLocEntryOffsets.reserve(SourceMgr.local_sloc_entry_size() - 1);
for (unsigned I = 1, N = SourceMgr.local_sloc_entry_size();
@@ -1887,7 +1946,9 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
assert(&SourceMgr.getSLocEntry(FID) == SLoc);
// Record the offset of this source-location entry.
- SLocEntryOffsets.push_back(Stream.GetCurrentBitNo());
+ uint64_t Offset = Stream.GetCurrentBitNo() - SLocEntryOffsetsBase;
+ assert((Offset >> 32) == 0 && "SLocEntry offset too large");
+ SLocEntryOffsets.push_back(Offset);
// Figure out which record code to use.
unsigned Code;
@@ -1995,12 +2056,14 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
Abbrev->Add(BitCodeAbbrevOp(SOURCE_LOCATION_OFFSETS));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 16)); // # of slocs
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 16)); // total size
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 32)); // base offset
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // offsets
unsigned SLocOffsetsAbbrev = Stream.EmitAbbrev(std::move(Abbrev));
{
RecordData::value_type Record[] = {
SOURCE_LOCATION_OFFSETS, SLocEntryOffsets.size(),
- SourceMgr.getNextLocalOffset() - 1 /* skip dummy */};
+ SourceMgr.getNextLocalOffset() - 1 /* skip dummy */,
+ SLocEntryOffsetsBase - SourceManagerBlockOffset};
Stream.EmitRecordWithBlob(SLocOffsetsAbbrev, Record,
bytes(SLocEntryOffsets));
}
@@ -2077,9 +2140,11 @@ static bool shouldIgnoreMacro(MacroDirective *MD, bool IsModule,
/// Writes the block containing the serialized form of the
/// preprocessor.
void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
+ uint64_t MacroOffsetsBase = Stream.GetCurrentBitNo();
+
PreprocessingRecord *PPRec = PP.getPreprocessingRecord();
if (PPRec)
- WritePreprocessorDetail(*PPRec);
+ WritePreprocessorDetail(*PPRec, MacroOffsetsBase);
RecordData Record;
RecordData ModuleMacroRecord;
@@ -2140,7 +2205,8 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
// identifier they belong to.
for (const IdentifierInfo *Name : MacroIdentifiers) {
MacroDirective *MD = PP.getLocalMacroDirectiveHistory(Name);
- auto StartOffset = Stream.GetCurrentBitNo();
+ uint64_t StartOffset = Stream.GetCurrentBitNo() - MacroOffsetsBase;
+ assert((StartOffset >> 32) == 0 && "Macro identifiers offset too large");
// Emit the macro directives in reverse source order.
for (; MD; MD = MD->getPrevious()) {
@@ -2213,14 +2279,12 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
// Record the local offset of this macro.
unsigned Index = ID - FirstMacroID;
- if (Index == MacroOffsets.size())
- MacroOffsets.push_back(Stream.GetCurrentBitNo());
- else {
- if (Index > MacroOffsets.size())
- MacroOffsets.resize(Index + 1);
+ if (Index >= MacroOffsets.size())
+ MacroOffsets.resize(Index + 1);
- MacroOffsets[Index] = Stream.GetCurrentBitNo();
- }
+ uint64_t Offset = Stream.GetCurrentBitNo() - MacroOffsetsBase;
+ assert((Offset >> 32) == 0 && "Macro offset too large");
+ MacroOffsets[Index] = Offset;
AddIdentifierRef(Name, Record);
AddSourceLocation(MI->getDefinitionLoc(), Record);
@@ -2271,17 +2335,20 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
Abbrev->Add(BitCodeAbbrevOp(MACRO_OFFSET));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // # of macros
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // first ID
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 32)); // base offset
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
unsigned MacroOffsetAbbrev = Stream.EmitAbbrev(std::move(Abbrev));
{
RecordData::value_type Record[] = {MACRO_OFFSET, MacroOffsets.size(),
- FirstMacroID - NUM_PREDEF_MACRO_IDS};
+ FirstMacroID - NUM_PREDEF_MACRO_IDS,
+ MacroOffsetsBase - ASTBlockStartOffset};
Stream.EmitRecordWithBlob(MacroOffsetAbbrev, Record, bytes(MacroOffsets));
}
}
-void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) {
+void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec,
+ uint64_t MacroOffsetsBase) {
if (PPRec.local_begin() == PPRec.local_end())
return;
@@ -2318,8 +2385,10 @@ void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) {
(void)++E, ++NumPreprocessingRecords, ++NextPreprocessorEntityID) {
Record.clear();
+ uint64_t Offset = Stream.GetCurrentBitNo() - MacroOffsetsBase;
+ assert((Offset >> 32) == 0 && "Preprocessed entity offset too large");
PreprocessedEntityOffsets.push_back(
- PPEntityOffset((*E)->getSourceRange(), Stream.GetCurrentBitNo()));
+ PPEntityOffset((*E)->getSourceRange(), Offset));
if (auto *MD = dyn_cast<MacroDefinitionRecord>(*E)) {
// Record this macro definition's ID.
@@ -2787,15 +2856,15 @@ void ASTWriter::WriteType(QualType T) {
assert(Idx.getIndex() >= FirstTypeID && "Re-writing a type from a prior AST");
// Emit the type's representation.
- uint64_t Offset = ASTTypeWriter(*this).write(T);
+ uint64_t Offset = ASTTypeWriter(*this).write(T) - DeclTypesBlockStartOffset;
// Record the offset for this type.
unsigned Index = Idx.getIndex() - FirstTypeID;
if (TypeOffsets.size() == Index)
- TypeOffsets.push_back(Offset);
+ TypeOffsets.emplace_back(Offset);
else if (TypeOffsets.size() < Index) {
TypeOffsets.resize(Index + 1);
- TypeOffsets[Index] = Offset;
+ TypeOffsets[Index].setBitOffset(Offset);
} else {
llvm_unreachable("Types emitted in wrong order");
}
@@ -2862,8 +2931,10 @@ void ASTWriter::WriteTypeDeclOffsets() {
void ASTWriter::WriteFileDeclIDsMap() {
using namespace llvm;
- SmallVector<std::pair<FileID, DeclIDInFileInfo *>, 64> SortedFileDeclIDs(
- FileDeclIDs.begin(), FileDeclIDs.end());
+ SmallVector<std::pair<FileID, DeclIDInFileInfo *>, 64> SortedFileDeclIDs;
+ SortedFileDeclIDs.reserve(FileDeclIDs.size());
+ for (const auto &P : FileDeclIDs)
+ SortedFileDeclIDs.push_back(std::make_pair(P.first, P.second.get()));
llvm::sort(SortedFileDeclIDs, llvm::less_first());
// Join the vectors of DeclIDs from all files.
@@ -3889,8 +3960,8 @@ void ASTWriter::WriteDeclContextVisibleUpdate(const DeclContext *DC) {
}
/// Write an FP_PRAGMA_OPTIONS block for the given FPOptions.
-void ASTWriter::WriteFPPragmaOptions(const FPOptions &Opts) {
- RecordData::value_type Record[] = {Opts.getInt()};
+void ASTWriter::WriteFPPragmaOptions(const FPOptionsOverride &Opts) {
+ RecordData::value_type Record[] = {Opts.getAsOpaqueInt()};
Stream.EmitRecord(FP_PRAGMA_OPTIONS, Record);
}
@@ -4101,6 +4172,26 @@ void ASTWriter::WritePackPragmaOptions(Sema &SemaRef) {
Stream.EmitRecord(PACK_PRAGMA_OPTIONS, Record);
}
+/// Write the state of 'pragma float_control' at the end of the module.
+void ASTWriter::WriteFloatControlPragmaOptions(Sema &SemaRef) {
+ // Don't serialize pragma float_control state for modules,
+ // since it should only take effect on a per-submodule basis.
+ if (WritingModule)
+ return;
+
+ RecordData Record;
+ Record.push_back(SemaRef.FpPragmaStack.CurrentValue);
+ AddSourceLocation(SemaRef.FpPragmaStack.CurrentPragmaLocation, Record);
+ Record.push_back(SemaRef.FpPragmaStack.Stack.size());
+ for (const auto &StackEntry : SemaRef.FpPragmaStack.Stack) {
+ Record.push_back(StackEntry.Value);
+ AddSourceLocation(StackEntry.PragmaLocation, Record);
+ AddSourceLocation(StackEntry.PragmaPushLocation, Record);
+ AddString(StackEntry.StackSlotLabel, Record);
+ }
+ Stream.EmitRecord(FLOAT_CONTROL_PRAGMA_OPTIONS, Record);
+}
+
void ASTWriter::WriteModuleFileExtension(Sema &SemaRef,
ModuleFileExtensionWriter &Writer) {
// Enter the extension block.
@@ -4261,9 +4352,7 @@ ASTWriter::ASTWriter(llvm::BitstreamWriter &Stream,
}
}
-ASTWriter::~ASTWriter() {
- llvm::DeleteContainerSeconds(FileDeclIDs);
-}
+ASTWriter::~ASTWriter() = default;
const LangOptions &ASTWriter::getLangOpts() const {
assert(WritingAST && "can't determine lang opts when not writing AST");
@@ -4356,6 +4445,8 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
RegisterPredefDecl(Context.VaListTagDecl, PREDEF_DECL_VA_LIST_TAG);
RegisterPredefDecl(Context.BuiltinMSVaListDecl,
PREDEF_DECL_BUILTIN_MS_VA_LIST_ID);
+ RegisterPredefDecl(Context.MSGuidTagDecl,
+ PREDEF_DECL_BUILTIN_MS_GUID_ID);
RegisterPredefDecl(Context.ExternCContext, PREDEF_DECL_EXTERN_C_CONTEXT_ID);
RegisterPredefDecl(Context.MakeIntegerSeqDecl,
PREDEF_DECL_MAKE_INTEGER_SEQ_ID);
@@ -4475,7 +4566,10 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
WriteControlBlock(PP, Context, isysroot, OutputFile);
// Write the remaining AST contents.
+ Stream.FlushToWord();
+ ASTBlockRange.first = Stream.GetCurrentBitNo();
Stream.EnterSubblock(AST_BLOCK_ID, 5);
+ ASTBlockStartOffset = Stream.GetCurrentBitNo();
// This is so that older clang versions, before the introduction
// of the control block, can read and reject the newer PCH format.
@@ -4606,9 +4700,9 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
// c++-base-specifiers-id:i32
// type-id:i32)
//
- // module-kind is the ModuleKind enum value. If it is MK_PrebuiltModule or
- // MK_ExplicitModule, then the module-name is the module name. Otherwise,
- // it is the module file name.
+ // module-kind is the ModuleKind enum value. If it is MK_PrebuiltModule,
+ // MK_ExplicitModule or MK_ImplicitModule, then the module-name is the
+ // module name. Otherwise, it is the module file name.
auto Abbrev = std::make_shared<BitCodeAbbrev>();
Abbrev->Add(BitCodeAbbrevOp(MODULE_OFFSET_MAP));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
@@ -4621,10 +4715,7 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
endian::Writer LE(Out, little);
LE.write<uint8_t>(static_cast<uint8_t>(M.Kind));
- StringRef Name =
- M.Kind == MK_PrebuiltModule || M.Kind == MK_ExplicitModule
- ? M.ModuleName
- : M.FileName;
+ StringRef Name = M.isModule() ? M.ModuleName : M.FileName;
LE.write<uint16_t>(Name.size());
Out.write(Name.data(), Name.size());
@@ -4658,11 +4749,17 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
Buffer.data(), Buffer.size());
}
+ // Build a record containing all of the DeclsToCheckForDeferredDiags.
+ RecordData DeclsToCheckForDeferredDiags;
+ for (auto *D : SemaRef.DeclsToCheckForDeferredDiags)
+ AddDeclRef(D, DeclsToCheckForDeferredDiags);
+
RecordData DeclUpdatesOffsetsRecord;
// Keep writing types, declarations, and declaration update records
// until we've emitted all of them.
Stream.EnterSubblock(DECLTYPES_BLOCK_ID, /*bits for abbreviations*/5);
+ DeclTypesBlockStartOffset = Stream.GetCurrentBitNo();
WriteTypeAbbrevs();
WriteDeclAbbrevs();
do {
@@ -4693,7 +4790,7 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
WriteReferencedSelectorsPool(SemaRef);
WriteLateParsedTemplates(SemaRef);
WriteIdentifierTable(PP, SemaRef.IdResolver, isModule);
- WriteFPPragmaOptions(SemaRef.getFPOptions());
+ WriteFPPragmaOptions(SemaRef.CurFPFeatureOverrides());
WriteOpenCLExtensions(SemaRef);
WriteOpenCLExtensionTypes(SemaRef);
WriteCUDAPragmas(SemaRef);
@@ -4749,6 +4846,11 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
if (!SemaDeclRefs.empty())
Stream.EmitRecord(SEMA_DECL_REFS, SemaDeclRefs);
+ // Write the record containing decls to be checked for deferred diags.
+ if (!DeclsToCheckForDeferredDiags.empty())
+ Stream.EmitRecord(DECLS_TO_CHECK_FOR_DEFERRED_DIAGS,
+ DeclsToCheckForDeferredDiags);
+
// Write the record containing CUDA-specific declaration references.
if (!CUDASpecialDeclRefs.empty())
Stream.EmitRecord(CUDA_SPECIAL_DECL_REFS, CUDASpecialDeclRefs);
@@ -4819,12 +4921,15 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
WriteMSPointersToMembersPragmaOptions(SemaRef);
}
WritePackPragmaOptions(SemaRef);
+ WriteFloatControlPragmaOptions(SemaRef);
// Some simple statistics
RecordData::value_type Record[] = {
NumStatements, NumMacros, NumLexicalDeclContexts, NumVisibleDeclContexts};
Stream.EmitRecord(STATISTICS, Record);
Stream.ExitBlock();
+ Stream.FlushToWord();
+ ASTBlockRange.second = Stream.GetCurrentBitNo();
// Write the module file extension blocks.
for (const auto &ExtWriter : ModuleFileExtensionWriters)
@@ -5116,7 +5221,7 @@ MacroID ASTWriter::getMacroID(MacroInfo *MI) {
return MacroIDs[MI];
}
-uint64_t ASTWriter::getMacroDirectivesOffset(const IdentifierInfo *Name) {
+uint32_t ASTWriter::getMacroDirectivesOffset(const IdentifierInfo *Name) {
return IdentMacroDirectivesOffsetMap.lookup(Name);
}
@@ -5318,9 +5423,9 @@ void ASTWriter::associateDeclWithFile(const Decl *D, DeclID ID) {
return;
assert(SM.getSLocEntry(FID).isFile());
- DeclIDInFileInfo *&Info = FileDeclIDs[FID];
+ std::unique_ptr<DeclIDInFileInfo> &Info = FileDeclIDs[FID];
if (!Info)
- Info = new DeclIDInFileInfo();
+ Info = std::make_unique<DeclIDInFileInfo>();
std::pair<unsigned, serialization::DeclID> LocDecl(Offset, ID);
LocDeclIDsTy &Decls = Info->DeclIDs;
@@ -5952,7 +6057,7 @@ void ASTWriter::DeclarationMarkedOpenMPDeclareTarget(const Decl *D,
void ASTWriter::RedefinedHiddenDefinition(const NamedDecl *D, Module *M) {
if (Chain && Chain->isProcessingUpdateRecords()) return;
assert(!WritingAST && "Already writing the AST!");
- assert(D->isHidden() && "expected a hidden declaration");
+ assert(!D->isUnconditionallyVisible() && "expected a hidden declaration");
DeclUpdates[D].push_back(DeclUpdate(UPD_DECL_EXPORTED, M));
}
@@ -6012,8 +6117,8 @@ class OMPClauseWriter : public OMPClauseVisitor<OMPClauseWriter> {
public:
OMPClauseWriter(ASTRecordWriter &Record) : Record(Record) {}
-#define OPENMP_CLAUSE(Name, Class) void Visit##Class(Class *S);
-#include "clang/Basic/OpenMPKinds.def"
+#define OMP_CLAUSE_CLASS(Enum, Str, Class) void Visit##Class(Class *S);
+#include "llvm/Frontend/OpenMP/OMPKinds.def"
void writeClause(OMPClause *C);
void VisitOMPClauseWithPreInit(OMPClauseWithPreInit *C);
void VisitOMPClauseWithPostUpdate(OMPClauseWithPostUpdate *C);
@@ -6026,7 +6131,7 @@ void ASTRecordWriter::writeOMPClause(OMPClause *C) {
}
void OMPClauseWriter::writeClause(OMPClause *C) {
- Record.push_back(C->getClauseKind());
+ Record.push_back(unsigned(C->getClauseKind()));
Visit(C);
Record.AddSourceLocation(C->getBeginLoc());
Record.AddSourceLocation(C->getEndLoc());
@@ -6083,8 +6188,13 @@ void OMPClauseWriter::VisitOMPCollapseClause(OMPCollapseClause *C) {
Record.AddSourceLocation(C->getLParenLoc());
}
+void OMPClauseWriter::VisitOMPDetachClause(OMPDetachClause *C) {
+ Record.AddStmt(C->getEventHandler());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
void OMPClauseWriter::VisitOMPDefaultClause(OMPDefaultClause *C) {
- Record.push_back(C->getDefaultKind());
+ Record.push_back(unsigned(C->getDefaultKind()));
Record.AddSourceLocation(C->getLParenLoc());
Record.AddSourceLocation(C->getDefaultKindKwLoc());
}
@@ -6128,18 +6238,35 @@ void OMPClauseWriter::VisitOMPReadClause(OMPReadClause *) {}
void OMPClauseWriter::VisitOMPWriteClause(OMPWriteClause *) {}
-void OMPClauseWriter::VisitOMPUpdateClause(OMPUpdateClause *) {}
+void OMPClauseWriter::VisitOMPUpdateClause(OMPUpdateClause *C) {
+ Record.push_back(C->isExtended() ? 1 : 0);
+ if (C->isExtended()) {
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getArgumentLoc());
+ Record.writeEnum(C->getDependencyKind());
+ }
+}
void OMPClauseWriter::VisitOMPCaptureClause(OMPCaptureClause *) {}
void OMPClauseWriter::VisitOMPSeqCstClause(OMPSeqCstClause *) {}
+void OMPClauseWriter::VisitOMPAcqRelClause(OMPAcqRelClause *) {}
+
+void OMPClauseWriter::VisitOMPAcquireClause(OMPAcquireClause *) {}
+
+void OMPClauseWriter::VisitOMPReleaseClause(OMPReleaseClause *) {}
+
+void OMPClauseWriter::VisitOMPRelaxedClause(OMPRelaxedClause *) {}
+
void OMPClauseWriter::VisitOMPThreadsClause(OMPThreadsClause *) {}
void OMPClauseWriter::VisitOMPSIMDClause(OMPSIMDClause *) {}
void OMPClauseWriter::VisitOMPNogroupClause(OMPNogroupClause *) {}
+void OMPClauseWriter::VisitOMPDestroyClause(OMPDestroyClause *) {}
+
void OMPClauseWriter::VisitOMPPrivateClause(OMPPrivateClause *C) {
Record.push_back(C->varlist_size());
Record.AddSourceLocation(C->getLParenLoc());
@@ -6194,8 +6321,10 @@ void OMPClauseWriter::VisitOMPSharedClause(OMPSharedClause *C) {
void OMPClauseWriter::VisitOMPReductionClause(OMPReductionClause *C) {
Record.push_back(C->varlist_size());
+ Record.writeEnum(C->getModifier());
VisitOMPClauseWithPostUpdate(C);
Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getModifierLoc());
Record.AddSourceLocation(C->getColonLoc());
Record.AddNestedNameSpecifierLoc(C->getQualifierLoc());
Record.AddDeclarationNameInfo(C->getNameInfo());
@@ -6209,6 +6338,14 @@ void OMPClauseWriter::VisitOMPReductionClause(OMPReductionClause *C) {
Record.AddStmt(E);
for (auto *E : C->reduction_ops())
Record.AddStmt(E);
+ if (C->getModifier() == clang::OMPC_REDUCTION_inscan) {
+ for (auto *E : C->copy_ops())
+ Record.AddStmt(E);
+ for (auto *E : C->copy_array_temps())
+ Record.AddStmt(E);
+ for (auto *E : C->copy_array_elems())
+ Record.AddStmt(E);
+ }
}
void OMPClauseWriter::VisitOMPTaskReductionClause(OMPTaskReductionClause *C) {
@@ -6321,10 +6458,16 @@ void OMPClauseWriter::VisitOMPFlushClause(OMPFlushClause *C) {
Record.AddStmt(VE);
}
+void OMPClauseWriter::VisitOMPDepobjClause(OMPDepobjClause *C) {
+ Record.AddStmt(C->getDepobj());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
void OMPClauseWriter::VisitOMPDependClause(OMPDependClause *C) {
Record.push_back(C->varlist_size());
Record.push_back(C->getNumLoops());
Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddStmt(C->getModifier());
Record.push_back(C->getDependencyKind());
Record.AddSourceLocation(C->getDependencyLoc());
Record.AddSourceLocation(C->getColonLoc());
@@ -6336,7 +6479,9 @@ void OMPClauseWriter::VisitOMPDependClause(OMPDependClause *C) {
void OMPClauseWriter::VisitOMPDeviceClause(OMPDeviceClause *C) {
VisitOMPClauseWithPreInit(C);
+ Record.writeEnum(C->getModifier());
Record.AddStmt(C->getDevice());
+ Record.AddSourceLocation(C->getModifierLoc());
Record.AddSourceLocation(C->getLParenLoc());
}
@@ -6346,7 +6491,7 @@ void OMPClauseWriter::VisitOMPMapClause(OMPMapClause *C) {
Record.push_back(C->getTotalComponentListNum());
Record.push_back(C->getTotalComponentsNum());
Record.AddSourceLocation(C->getLParenLoc());
- for (unsigned I = 0; I < OMPMapClause::NumberOfModifiers; ++I) {
+ for (unsigned I = 0; I < NumberOfOMPMapClauseModifiers; ++I) {
Record.push_back(C->getMapTypeModifier(I));
Record.AddSourceLocation(C->getMapTypeModifierLoc(I));
}
@@ -6504,6 +6649,26 @@ void OMPClauseWriter::VisitOMPUseDevicePtrClause(OMPUseDevicePtrClause *C) {
}
}
+void OMPClauseWriter::VisitOMPUseDeviceAddrClause(OMPUseDeviceAddrClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.push_back(C->getUniqueDeclarationsNum());
+ Record.push_back(C->getTotalComponentListNum());
+ Record.push_back(C->getTotalComponentsNum());
+ Record.AddSourceLocation(C->getLParenLoc());
+ for (auto *E : C->varlists())
+ Record.AddStmt(E);
+ for (auto *D : C->all_decls())
+ Record.AddDeclRef(D);
+ for (auto N : C->all_num_lists())
+ Record.push_back(N);
+ for (auto N : C->all_lists_sizes())
+ Record.push_back(N);
+ for (auto &M : C->all_components()) {
+ Record.AddStmt(M.getAssociatedExpression());
+ Record.AddDeclRef(M.getAssociatedDeclaration());
+ }
+}
+
void OMPClauseWriter::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
Record.push_back(C->varlist_size());
Record.push_back(C->getUniqueDeclarationsNum());
@@ -6550,3 +6715,61 @@ void OMPClauseWriter::VisitOMPNontemporalClause(OMPNontemporalClause *C) {
for (auto *E : C->private_refs())
Record.AddStmt(E);
}
+
+void OMPClauseWriter::VisitOMPInclusiveClause(OMPInclusiveClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.AddSourceLocation(C->getLParenLoc());
+ for (auto *VE : C->varlists())
+ Record.AddStmt(VE);
+}
+
+void OMPClauseWriter::VisitOMPExclusiveClause(OMPExclusiveClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.AddSourceLocation(C->getLParenLoc());
+ for (auto *VE : C->varlists())
+ Record.AddStmt(VE);
+}
+
+void OMPClauseWriter::VisitOMPOrderClause(OMPOrderClause *C) {
+ Record.writeEnum(C->getKind());
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getKindKwLoc());
+}
+
+void OMPClauseWriter::VisitOMPUsesAllocatorsClause(OMPUsesAllocatorsClause *C) {
+ Record.push_back(C->getNumberOfAllocators());
+ Record.AddSourceLocation(C->getLParenLoc());
+ for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
+ OMPUsesAllocatorsClause::Data Data = C->getAllocatorData(I);
+ Record.AddStmt(Data.Allocator);
+ Record.AddStmt(Data.AllocatorTraits);
+ Record.AddSourceLocation(Data.LParenLoc);
+ Record.AddSourceLocation(Data.RParenLoc);
+ }
+}
+
+void OMPClauseWriter::VisitOMPAffinityClause(OMPAffinityClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddStmt(C->getModifier());
+ Record.AddSourceLocation(C->getColonLoc());
+ for (Expr *E : C->varlists())
+ Record.AddStmt(E);
+}
+
+void ASTRecordWriter::writeOMPTraitInfo(const OMPTraitInfo *TI) {
+ writeUInt32(TI->Sets.size());
+ for (const auto &Set : TI->Sets) {
+ writeEnum(Set.Kind);
+ writeUInt32(Set.Selectors.size());
+ for (const auto &Selector : Set.Selectors) {
+ writeEnum(Selector.Kind);
+ writeBool(Selector.ScoreOrCondition);
+ if (Selector.ScoreOrCondition)
+ writeExprRef(Selector.ScoreOrCondition);
+ writeUInt32(Selector.Properties.size());
+ for (const auto &Property : Selector.Properties)
+ writeEnum(Property.Kind);
+ }
+ }
+}
diff --git a/clang/lib/Serialization/ASTWriterDecl.cpp b/clang/lib/Serialization/ASTWriterDecl.cpp
index b2a8c118d401..eecdf89c791a 100644
--- a/clang/lib/Serialization/ASTWriterDecl.cpp
+++ b/clang/lib/Serialization/ASTWriterDecl.cpp
@@ -95,6 +95,7 @@ namespace clang {
void VisitCXXConversionDecl(CXXConversionDecl *D);
void VisitFieldDecl(FieldDecl *D);
void VisitMSPropertyDecl(MSPropertyDecl *D);
+ void VisitMSGuidDecl(MSGuidDecl *D);
void VisitIndirectFieldDecl(IndirectFieldDecl *D);
void VisitVarDecl(VarDecl *D);
void VisitImplicitParamDecl(ImplicitParamDecl *D);
@@ -104,6 +105,7 @@ namespace clang {
void VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
void VisitTemplateDecl(TemplateDecl *D);
void VisitConceptDecl(ConceptDecl *D);
+ void VisitRequiresExprBodyDecl(RequiresExprBodyDecl *D);
void VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D);
void VisitClassTemplateDecl(ClassTemplateDecl *D);
void VisitVarTemplateDecl(VarTemplateDecl *D);
@@ -952,6 +954,17 @@ void ASTDeclWriter::VisitMSPropertyDecl(MSPropertyDecl *D) {
Code = serialization::DECL_MS_PROPERTY;
}
+void ASTDeclWriter::VisitMSGuidDecl(MSGuidDecl *D) {
+ VisitValueDecl(D);
+ MSGuidDecl::Parts Parts = D->getParts();
+ Record.push_back(Parts.Part1);
+ Record.push_back(Parts.Part2);
+ Record.push_back(Parts.Part3);
+ for (auto C : Parts.Part4And5)
+ Record.push_back(C);
+ Code = serialization::DECL_MS_GUID;
+}
+
void ASTDeclWriter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
VisitValueDecl(D);
Record.push_back(D->getChainingSize());
@@ -1010,16 +1023,15 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
if (D->getStorageDuration() == SD_Static) {
bool ModulesCodegen = false;
- if (!D->getDescribedVarTemplate() && !D->getMemberSpecializationInfo() &&
+ if (Writer.WritingModule &&
+ !D->getDescribedVarTemplate() && !D->getMemberSpecializationInfo() &&
!isa<VarTemplateSpecializationDecl>(D)) {
// When building a C++ Modules TS module interface unit, a strong
// definition in the module interface is provided by the compilation of
// that module interface unit, not by its users. (Inline variables are
// still emitted in module users.)
ModulesCodegen =
- (((Writer.WritingModule &&
- Writer.WritingModule->Kind == Module::ModuleInterfaceUnit) ||
- Writer.Context->getLangOpts().BuildingPCHWithObjectFile) &&
+ (Writer.WritingModule->Kind == Module::ModuleInterfaceUnit &&
Writer.Context->GetGVALinkageForVariable(D) == GVA_StrongExternal);
}
Record.push_back(ModulesCodegen);
@@ -1088,8 +1100,6 @@ void ASTDeclWriter::VisitParmVarDecl(ParmVarDecl *D) {
Record.AddStmt(D->getUninstantiatedDefaultArg());
Code = serialization::DECL_PARM_VAR;
- assert(!D->isARCPseudoStrong()); // can be true of ImplicitParamDecl
-
// If the assumptions about the DECL_PARM_VAR abbrev are true, use it. Here
// we dynamically check for the properties that we optimize for, but don't
// know are true of all PARM_VAR_DECLs.
@@ -1481,6 +1491,10 @@ void ASTDeclWriter::VisitConceptDecl(ConceptDecl *D) {
Code = serialization::DECL_CONCEPT;
}
+void ASTDeclWriter::VisitRequiresExprBodyDecl(RequiresExprBodyDecl *D) {
+ Code = serialization::DECL_REQUIRES_EXPR_BODY;
+}
+
void ASTDeclWriter::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) {
VisitRedeclarable(D);
@@ -1670,6 +1684,8 @@ void ASTDeclWriter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
// For an expanded parameter pack, record the number of expansion types here
// so that it's easier for deserialization to allocate the right amount of
// memory.
+ Expr *TypeConstraint = D->getPlaceholderTypeConstraint();
+ Record.push_back(!!TypeConstraint);
if (D->isExpandedParameterPack())
Record.push_back(D->getNumExpansionTypes());
@@ -1677,6 +1693,8 @@ void ASTDeclWriter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
// TemplateParmPosition.
Record.push_back(D->getDepth());
Record.push_back(D->getPosition());
+ if (TypeConstraint)
+ Record.AddStmt(TypeConstraint);
if (D->isExpandedParameterPack()) {
for (unsigned I = 0, N = D->getNumExpansionTypes(); I != N; ++I) {
@@ -2101,7 +2119,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // SClass
Abv->Add(BitCodeAbbrevOp(0)); // TSCSpec
Abv->Add(BitCodeAbbrevOp(0)); // InitStyle
- Abv->Add(BitCodeAbbrevOp(0)); // ARCPseudoStrong
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isARCPseudoStrong
Abv->Add(BitCodeAbbrevOp(0)); // Linkage
Abv->Add(BitCodeAbbrevOp(0)); // HasInit
Abv->Add(BitCodeAbbrevOp(0)); // HasMemberSpecializationInfo
@@ -2266,13 +2284,13 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv = std::make_shared<BitCodeAbbrev>();
Abv->Add(BitCodeAbbrevOp(serialization::EXPR_DECL_REF));
//Stmt
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsOMPStructuredBlock
// Expr
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //InstantiationDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //UnexpandedParamPack
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ContainsErrors
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetValueKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetObjectKind
//DeclRefExpr
@@ -2290,13 +2308,13 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv = std::make_shared<BitCodeAbbrev>();
Abv->Add(BitCodeAbbrevOp(serialization::EXPR_INTEGER_LITERAL));
//Stmt
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsOMPStructuredBlock
// Expr
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //InstantiationDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //UnexpandedParamPack
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ContainsErrors
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetValueKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetObjectKind
//Integer Literal
@@ -2309,13 +2327,13 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv = std::make_shared<BitCodeAbbrev>();
Abv->Add(BitCodeAbbrevOp(serialization::EXPR_CHARACTER_LITERAL));
//Stmt
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsOMPStructuredBlock
// Expr
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //InstantiationDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //UnexpandedParamPack
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ContainsErrors
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetValueKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetObjectKind
//Character Literal
@@ -2328,13 +2346,13 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv = std::make_shared<BitCodeAbbrev>();
Abv->Add(BitCodeAbbrevOp(serialization::EXPR_IMPLICIT_CAST));
// Stmt
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsOMPStructuredBlock
// Expr
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //InstantiationDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //UnexpandedParamPack
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ContainsErrors
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetValueKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetObjectKind
// CastExpr
@@ -2414,12 +2432,12 @@ void ASTWriter::WriteDecl(ASTContext &Context, Decl *D) {
SourceLocation Loc = D->getLocation();
unsigned Index = ID - FirstDeclID;
if (DeclOffsets.size() == Index)
- DeclOffsets.push_back(DeclOffset(Loc, Offset));
+ DeclOffsets.emplace_back(Loc, Offset, DeclTypesBlockStartOffset);
else if (DeclOffsets.size() < Index) {
// FIXME: Can/should this happen?
DeclOffsets.resize(Index+1);
DeclOffsets[Index].setLocation(Loc);
- DeclOffsets[Index].BitOffset = Offset;
+ DeclOffsets[Index].setBitOffset(Offset, DeclTypesBlockStartOffset);
} else {
llvm_unreachable("declarations should be emitted in ID order");
}
@@ -2442,9 +2460,8 @@ void ASTRecordWriter::AddFunctionDefinition(const FunctionDecl *FD) {
bool ModulesCodegen = false;
if (!FD->isDependentContext()) {
Optional<GVALinkage> Linkage;
- if ((Writer->WritingModule &&
- Writer->WritingModule->Kind == Module::ModuleInterfaceUnit) ||
- Writer->Context->getLangOpts().BuildingPCHWithObjectFile) {
+ if (Writer->WritingModule &&
+ Writer->WritingModule->Kind == Module::ModuleInterfaceUnit) {
// When building a C++ Modules TS module interface unit, a strong
// definition in the module interface is provided by the compilation of
// that module interface unit, not by its users. (Inline functions are
diff --git a/clang/lib/Serialization/ASTWriterStmt.cpp b/clang/lib/Serialization/ASTWriterStmt.cpp
index 9231f3b2b9ba..0767b3a24bf2 100644
--- a/clang/lib/Serialization/ASTWriterStmt.cpp
+++ b/clang/lib/Serialization/ASTWriterStmt.cpp
@@ -11,7 +11,9 @@
///
//===----------------------------------------------------------------------===//
+#include "clang/AST/ExprOpenMP.h"
#include "clang/Serialization/ASTRecordWriter.h"
+#include "clang/Sema/DeclSpec.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
@@ -67,7 +69,6 @@ void ASTStmtWriter::AddTemplateKWAndArgsInfo(
}
void ASTStmtWriter::VisitStmt(Stmt *S) {
- Record.push_back(S->StmtBits.IsOMPStructuredBlock);
}
void ASTStmtWriter::VisitNullStmt(NullStmt *S) {
@@ -193,6 +194,8 @@ void ASTStmtWriter::VisitWhileStmt(WhileStmt *S) {
Record.AddDeclRef(S->getConditionVariable());
Record.AddSourceLocation(S->getWhileLoc());
+ Record.AddSourceLocation(S->getLParenLoc());
+ Record.AddSourceLocation(S->getRParenLoc());
Code = serialization::STMT_WHILE;
}
@@ -388,19 +391,9 @@ void ASTStmtWriter::VisitDependentCoawaitExpr(DependentCoawaitExpr *E) {
Code = serialization::EXPR_DEPENDENT_COAWAIT;
}
-void ASTStmtWriter::VisitConceptSpecializationExpr(
- ConceptSpecializationExpr *E) {
- VisitExpr(E);
- ArrayRef<TemplateArgument> TemplateArgs = E->getTemplateArguments();
- Record.push_back(TemplateArgs.size());
- Record.AddNestedNameSpecifierLoc(E->getNestedNameSpecifierLoc());
- Record.AddSourceLocation(E->getTemplateKWLoc());
- Record.AddDeclarationNameInfo(E->getConceptNameInfo());
- Record.AddDeclRef(E->getNamedConcept());
- Record.AddASTTemplateArgumentListInfo(E->getTemplateArgsAsWritten());
- for (const TemplateArgument &Arg : TemplateArgs)
- Record.AddTemplateArgument(Arg);
- const ASTConstraintSatisfaction &Satisfaction = E->getSatisfaction();
+static void
+addConstraintSatisfaction(ASTRecordWriter &Record,
+ const ASTConstraintSatisfaction &Satisfaction) {
Record.push_back(Satisfaction.IsSatisfied);
if (!Satisfaction.IsSatisfied) {
Record.push_back(Satisfaction.NumRecords);
@@ -418,10 +411,99 @@ void ASTStmtWriter::VisitConceptSpecializationExpr(
}
}
}
+}
+
+static void
+addSubstitutionDiagnostic(
+ ASTRecordWriter &Record,
+ const concepts::Requirement::SubstitutionDiagnostic *D) {
+ Record.AddString(D->SubstitutedEntity);
+ Record.AddSourceLocation(D->DiagLoc);
+ Record.AddString(D->DiagMessage);
+}
+
+void ASTStmtWriter::VisitConceptSpecializationExpr(
+ ConceptSpecializationExpr *E) {
+ VisitExpr(E);
+ ArrayRef<TemplateArgument> TemplateArgs = E->getTemplateArguments();
+ Record.push_back(TemplateArgs.size());
+ Record.AddNestedNameSpecifierLoc(E->getNestedNameSpecifierLoc());
+ Record.AddSourceLocation(E->getTemplateKWLoc());
+ Record.AddDeclarationNameInfo(E->getConceptNameInfo());
+ Record.AddDeclRef(E->getNamedConcept());
+ Record.AddDeclRef(E->getFoundDecl());
+ Record.AddASTTemplateArgumentListInfo(E->getTemplateArgsAsWritten());
+ for (const TemplateArgument &Arg : TemplateArgs)
+ Record.AddTemplateArgument(Arg);
+ if (!E->isValueDependent())
+ addConstraintSatisfaction(Record, E->getSatisfaction());
Code = serialization::EXPR_CONCEPT_SPECIALIZATION;
}
+void ASTStmtWriter::VisitRequiresExpr(RequiresExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getLocalParameters().size());
+ Record.push_back(E->getRequirements().size());
+ Record.AddSourceLocation(E->RequiresExprBits.RequiresKWLoc);
+ Record.push_back(E->RequiresExprBits.IsSatisfied);
+ Record.AddDeclRef(E->getBody());
+ for (ParmVarDecl *P : E->getLocalParameters())
+ Record.AddDeclRef(P);
+ for (concepts::Requirement *R : E->getRequirements()) {
+ if (auto *TypeReq = dyn_cast<concepts::TypeRequirement>(R)) {
+ Record.push_back(concepts::Requirement::RK_Type);
+ Record.push_back(TypeReq->Status);
+ if (TypeReq->Status == concepts::TypeRequirement::SS_SubstitutionFailure)
+ addSubstitutionDiagnostic(Record, TypeReq->getSubstitutionDiagnostic());
+ else
+ Record.AddTypeSourceInfo(TypeReq->getType());
+ } else if (auto *ExprReq = dyn_cast<concepts::ExprRequirement>(R)) {
+ Record.push_back(ExprReq->getKind());
+ Record.push_back(ExprReq->Status);
+ if (ExprReq->isExprSubstitutionFailure()) {
+ addSubstitutionDiagnostic(Record,
+ ExprReq->Value.get<concepts::Requirement::SubstitutionDiagnostic *>());
+ } else
+ Record.AddStmt(ExprReq->Value.get<Expr *>());
+ if (ExprReq->getKind() == concepts::Requirement::RK_Compound) {
+ Record.AddSourceLocation(ExprReq->NoexceptLoc);
+ const auto &RetReq = ExprReq->getReturnTypeRequirement();
+ if (RetReq.isSubstitutionFailure()) {
+ Record.push_back(2);
+ addSubstitutionDiagnostic(Record, RetReq.getSubstitutionDiagnostic());
+ } else if (RetReq.isTypeConstraint()) {
+ Record.push_back(1);
+ Record.AddTemplateParameterList(
+ RetReq.getTypeConstraintTemplateParameterList());
+ if (ExprReq->Status >=
+ concepts::ExprRequirement::SS_ConstraintsNotSatisfied)
+ Record.AddStmt(
+ ExprReq->getReturnTypeRequirementSubstitutedConstraintExpr());
+ } else {
+ assert(RetReq.isEmpty());
+ Record.push_back(0);
+ }
+ }
+ } else {
+ auto *NestedReq = cast<concepts::NestedRequirement>(R);
+ Record.push_back(concepts::Requirement::RK_Nested);
+ Record.push_back(NestedReq->isSubstitutionFailure());
+ if (NestedReq->isSubstitutionFailure()){
+ addSubstitutionDiagnostic(Record,
+ NestedReq->getSubstitutionDiagnostic());
+ } else {
+ Record.AddStmt(NestedReq->Value.get<Expr *>());
+ if (!NestedReq->isDependent())
+ addConstraintSatisfaction(Record, *NestedReq->Satisfaction);
+ }
+ }
+ }
+ Record.AddSourceLocation(E->getEndLoc());
+
+ Code = serialization::EXPR_REQUIRES;
+}
+
void ASTStmtWriter::VisitCapturedStmt(CapturedStmt *S) {
VisitStmt(S);
@@ -461,22 +543,34 @@ void ASTStmtWriter::VisitExpr(Expr *E) {
Record.push_back(E->isValueDependent());
Record.push_back(E->isInstantiationDependent());
Record.push_back(E->containsUnexpandedParameterPack());
+ Record.push_back(E->containsErrors());
Record.push_back(E->getValueKind());
Record.push_back(E->getObjectKind());
}
void ASTStmtWriter::VisitConstantExpr(ConstantExpr *E) {
VisitExpr(E);
- Record.push_back(static_cast<uint64_t>(E->ConstantExprBits.ResultKind));
+ Record.push_back(E->ConstantExprBits.ResultKind);
+
+ Record.push_back(E->ConstantExprBits.APValueKind);
+ Record.push_back(E->ConstantExprBits.IsUnsigned);
+ Record.push_back(E->ConstantExprBits.BitWidth);
+ // HasCleanup not serialized since we can just query the APValue.
+ Record.push_back(E->ConstantExprBits.IsImmediateInvocation);
+
switch (E->ConstantExprBits.ResultKind) {
+ case ConstantExpr::RSK_None:
+ break;
case ConstantExpr::RSK_Int64:
Record.push_back(E->Int64Result());
- Record.push_back(E->ConstantExprBits.IsUnsigned |
- E->ConstantExprBits.BitWidth << 1);
break;
case ConstantExpr::RSK_APValue:
Record.AddAPValue(E->APValueResult());
+ break;
+ default:
+ llvm_unreachable("unexpected ResultKind!");
}
+
Record.AddStmt(E->getSubExpr());
Code = serialization::EXPR_CONSTANT;
}
@@ -548,8 +642,9 @@ void ASTStmtWriter::VisitIntegerLiteral(IntegerLiteral *E) {
void ASTStmtWriter::VisitFixedPointLiteral(FixedPointLiteral *E) {
VisitExpr(E);
Record.AddSourceLocation(E->getLocation());
+ Record.push_back(E->getScale());
Record.AddAPInt(E->getValue());
- Code = serialization::EXPR_INTEGER_LITERAL;
+ Code = serialization::EXPR_FIXEDPOINT_LITERAL;
}
void ASTStmtWriter::VisitFloatingLiteral(FloatingLiteral *E) {
@@ -620,10 +715,16 @@ void ASTStmtWriter::VisitParenListExpr(ParenListExpr *E) {
void ASTStmtWriter::VisitUnaryOperator(UnaryOperator *E) {
VisitExpr(E);
+ bool HasFPFeatures = E->hasStoredFPFeatures();
+ // Write this first for easy access when deserializing, as they affect the
+ // size of the UnaryOperator.
+ Record.push_back(HasFPFeatures);
Record.AddStmt(E->getSubExpr());
Record.push_back(E->getOpcode()); // FIXME: stable encoding
Record.AddSourceLocation(E->getOperatorLoc());
Record.push_back(E->canOverflow());
+ if (HasFPFeatures)
+ Record.push_back(E->getStoredFPFeatures().getAsOpaqueInt());
Code = serialization::EXPR_UNARY_OPERATOR;
}
@@ -684,16 +785,66 @@ void ASTStmtWriter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
Code = serialization::EXPR_ARRAY_SUBSCRIPT;
}
+void ASTStmtWriter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
+ VisitExpr(E);
+ Record.AddStmt(E->getBase());
+ Record.AddStmt(E->getRowIdx());
+ Record.AddStmt(E->getColumnIdx());
+ Record.AddSourceLocation(E->getRBracketLoc());
+ Code = serialization::EXPR_ARRAY_SUBSCRIPT;
+}
+
void ASTStmtWriter::VisitOMPArraySectionExpr(OMPArraySectionExpr *E) {
VisitExpr(E);
Record.AddStmt(E->getBase());
Record.AddStmt(E->getLowerBound());
Record.AddStmt(E->getLength());
- Record.AddSourceLocation(E->getColonLoc());
+ Record.AddStmt(E->getStride());
+ Record.AddSourceLocation(E->getColonLocFirst());
+ Record.AddSourceLocation(E->getColonLocSecond());
Record.AddSourceLocation(E->getRBracketLoc());
Code = serialization::EXPR_OMP_ARRAY_SECTION;
}
+void ASTStmtWriter::VisitOMPArrayShapingExpr(OMPArrayShapingExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getDimensions().size());
+ Record.AddStmt(E->getBase());
+ for (Expr *Dim : E->getDimensions())
+ Record.AddStmt(Dim);
+ for (SourceRange SR : E->getBracketsRanges())
+ Record.AddSourceRange(SR);
+ Record.AddSourceLocation(E->getLParenLoc());
+ Record.AddSourceLocation(E->getRParenLoc());
+ Code = serialization::EXPR_OMP_ARRAY_SHAPING;
+}
+
+void ASTStmtWriter::VisitOMPIteratorExpr(OMPIteratorExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->numOfIterators());
+ Record.AddSourceLocation(E->getIteratorKwLoc());
+ Record.AddSourceLocation(E->getLParenLoc());
+ Record.AddSourceLocation(E->getRParenLoc());
+ for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
+ Record.AddDeclRef(E->getIteratorDecl(I));
+ Record.AddSourceLocation(E->getAssignLoc(I));
+ OMPIteratorExpr::IteratorRange Range = E->getIteratorRange(I);
+ Record.AddStmt(Range.Begin);
+ Record.AddStmt(Range.End);
+ Record.AddStmt(Range.Step);
+ Record.AddSourceLocation(E->getColonLoc(I));
+ if (Range.Step)
+ Record.AddSourceLocation(E->getSecondColonLoc(I));
+ // Serialize helpers
+ OMPIteratorHelperData &HD = E->getHelper(I);
+ Record.AddDeclRef(HD.CounterVD);
+ Record.AddStmt(HD.Upper);
+ Record.AddStmt(HD.Update);
+ Record.AddStmt(HD.CounterUpdate);
+ }
+ Code = serialization::EXPR_OMP_ITERATOR;
+}
+
void ASTStmtWriter::VisitCallExpr(CallExpr *E) {
VisitExpr(E);
Record.push_back(E->getNumArgs());
@@ -706,6 +857,16 @@ void ASTStmtWriter::VisitCallExpr(CallExpr *E) {
Code = serialization::EXPR_CALL;
}
+void ASTStmtWriter::VisitRecoveryExpr(RecoveryExpr *E) {
+ VisitExpr(E);
+ Record.push_back(std::distance(E->children().begin(), E->children().end()));
+ Record.AddSourceLocation(E->getBeginLoc());
+ Record.AddSourceLocation(E->getEndLoc());
+ for (Stmt *Child : E->children())
+ Record.AddStmt(Child);
+ Code = serialization::EXPR_RECOVERY;
+}
+
void ASTStmtWriter::VisitMemberExpr(MemberExpr *E) {
VisitExpr(E);
@@ -788,11 +949,16 @@ void ASTStmtWriter::VisitCastExpr(CastExpr *E) {
void ASTStmtWriter::VisitBinaryOperator(BinaryOperator *E) {
VisitExpr(E);
+ bool HasFPFeatures = E->hasStoredFPFeatures();
+ // Write this first for easy access when deserializing, as they affect the
+ // size of the UnaryOperator.
+ Record.push_back(HasFPFeatures);
+ Record.push_back(E->getOpcode()); // FIXME: stable encoding
Record.AddStmt(E->getLHS());
Record.AddStmt(E->getRHS());
- Record.push_back(E->getOpcode()); // FIXME: stable encoding
Record.AddSourceLocation(E->getOperatorLoc());
- Record.push_back(E->getFPFeatures().getInt());
+ if (HasFPFeatures)
+ Record.push_back(E->getStoredFPFeatures().getAsOpaqueInt());
Code = serialization::EXPR_BINARY_OPERATOR;
}
@@ -989,6 +1155,7 @@ void ASTStmtWriter::VisitStmtExpr(StmtExpr *E) {
Record.AddStmt(E->getSubStmt());
Record.AddSourceLocation(E->getLParenLoc());
Record.AddSourceLocation(E->getRParenLoc());
+ Record.push_back(E->getTemplateDepth());
Code = serialization::EXPR_STMT;
}
@@ -1382,8 +1549,8 @@ void ASTStmtWriter::VisitMSDependentExistsStmt(MSDependentExistsStmt *S) {
void ASTStmtWriter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
VisitCallExpr(E);
Record.push_back(E->getOperator());
- Record.push_back(E->getFPFeatures().getInt());
Record.AddSourceRange(E->Range);
+ Record.push_back(E->getFPFeatures().getAsOpaqueInt());
Code = serialization::EXPR_CXX_OPERATOR_CALL;
}
@@ -1437,12 +1604,12 @@ void ASTStmtWriter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E) {
void ASTStmtWriter::VisitLambdaExpr(LambdaExpr *E) {
VisitExpr(E);
- Record.push_back(E->NumCaptures);
+ Record.push_back(E->LambdaExprBits.NumCaptures);
Record.AddSourceRange(E->IntroducerRange);
- Record.push_back(E->CaptureDefault); // FIXME: stable encoding
+ Record.push_back(E->LambdaExprBits.CaptureDefault); // FIXME: stable encoding
Record.AddSourceLocation(E->CaptureDefaultLoc);
- Record.push_back(E->ExplicitParams);
- Record.push_back(E->ExplicitResultType);
+ Record.push_back(E->LambdaExprBits.ExplicitParams);
+ Record.push_back(E->LambdaExprBits.ExplicitResultType);
Record.AddSourceLocation(E->ClosingBrace);
// Add capture initializers.
@@ -1452,6 +1619,9 @@ void ASTStmtWriter::VisitLambdaExpr(LambdaExpr *E) {
Record.AddStmt(*C);
}
+ // Don't serialize the body. It belongs to the call operator declaration.
+ // LambdaExpr only stores a copy of the Stmt *.
+
Code = serialization::EXPR_LAMBDA;
}
@@ -1487,6 +1657,11 @@ void ASTStmtWriter::VisitCXXConstCastExpr(CXXConstCastExpr *E) {
Code = serialization::EXPR_CXX_CONST_CAST;
}
+void ASTStmtWriter::VisitCXXAddrspaceCastExpr(CXXAddrspaceCastExpr *E) {
+ VisitCXXNamedCastExpr(E);
+ Code = serialization::EXPR_CXX_ADDRSPACE_CAST;
+}
+
void ASTStmtWriter::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E) {
VisitExplicitCastExpr(E);
Record.AddSourceLocation(E->getLParenLoc());
@@ -1498,6 +1673,7 @@ void ASTStmtWriter::VisitBuiltinBitCastExpr(BuiltinBitCastExpr *E) {
VisitExplicitCastExpr(E);
Record.AddSourceLocation(E->getBeginLoc());
Record.AddSourceLocation(E->getEndLoc());
+ Code = serialization::EXPR_BUILTIN_BIT_CAST;
}
void ASTStmtWriter::VisitUserDefinedLiteral(UserDefinedLiteral *E) {
@@ -1641,8 +1817,15 @@ void ASTStmtWriter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
void ASTStmtWriter::VisitExprWithCleanups(ExprWithCleanups *E) {
VisitExpr(E);
Record.push_back(E->getNumObjects());
- for (unsigned i = 0, e = E->getNumObjects(); i != e; ++i)
- Record.AddDeclRef(E->getObject(i));
+ for (auto &Obj : E->getObjects()) {
+ if (auto *BD = Obj.dyn_cast<BlockDecl *>()) {
+ Record.push_back(serialization::COK_Block);
+ Record.AddDeclRef(BD);
+ } else if (auto *CLE = Obj.dyn_cast<CompoundLiteralExpr *>()) {
+ Record.push_back(serialization::COK_CompoundLiteral);
+ Record.AddStmt(CLE);
+ }
+ }
Record.push_back(E->cleanupsHaveSideEffects());
Record.AddStmt(E->getSubExpr());
@@ -1931,7 +2114,7 @@ void ASTStmtWriter::VisitMSPropertySubscriptExpr(MSPropertySubscriptExpr *E) {
void ASTStmtWriter::VisitCXXUuidofExpr(CXXUuidofExpr *E) {
VisitExpr(E);
Record.AddSourceRange(E->getSourceRange());
- Record.AddString(E->getUuidStr());
+ Record.AddDeclRef(E->getGuidDecl());
if (E->isTypeOperand()) {
Record.AddTypeSourceInfo(E->getTypeOperandSourceInfo());
Code = serialization::EXPR_CXX_UUIDOF_TYPE;
@@ -2051,6 +2234,7 @@ void ASTStmtWriter::VisitOMPParallelDirective(OMPParallelDirective *D) {
VisitStmt(D);
Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
+ Record.AddStmt(D->getTaskReductionRefExpr());
Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_PARALLEL_DIRECTIVE;
}
@@ -2062,6 +2246,7 @@ void ASTStmtWriter::VisitOMPSimdDirective(OMPSimdDirective *D) {
void ASTStmtWriter::VisitOMPForDirective(OMPForDirective *D) {
VisitOMPLoopDirective(D);
+ Record.AddStmt(D->getTaskReductionRefExpr());
Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_FOR_DIRECTIVE;
}
@@ -2075,6 +2260,7 @@ void ASTStmtWriter::VisitOMPSectionsDirective(OMPSectionsDirective *D) {
VisitStmt(D);
Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
+ Record.AddStmt(D->getTaskReductionRefExpr());
Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_SECTIONS_DIRECTIVE;
}
@@ -2109,6 +2295,7 @@ void ASTStmtWriter::VisitOMPCriticalDirective(OMPCriticalDirective *D) {
void ASTStmtWriter::VisitOMPParallelForDirective(OMPParallelForDirective *D) {
VisitOMPLoopDirective(D);
+ Record.AddStmt(D->getTaskReductionRefExpr());
Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_PARALLEL_FOR_DIRECTIVE;
}
@@ -2124,6 +2311,7 @@ void ASTStmtWriter::VisitOMPParallelMasterDirective(
VisitStmt(D);
Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
+ Record.AddStmt(D->getTaskReductionRefExpr());
Code = serialization::STMT_OMP_PARALLEL_MASTER_DIRECTIVE;
}
@@ -2132,6 +2320,7 @@ void ASTStmtWriter::VisitOMPParallelSectionsDirective(
VisitStmt(D);
Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
+ Record.AddStmt(D->getTaskReductionRefExpr());
Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_PARALLEL_SECTIONS_DIRECTIVE;
}
@@ -2192,12 +2381,15 @@ void ASTStmtWriter::VisitOMPTargetParallelDirective(
VisitStmt(D);
Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
+ Record.AddStmt(D->getTaskReductionRefExpr());
+ Record.writeBool(D->hasCancel());
Code = serialization::STMT_OMP_TARGET_PARALLEL_DIRECTIVE;
}
void ASTStmtWriter::VisitOMPTargetParallelForDirective(
OMPTargetParallelForDirective *D) {
VisitOMPLoopDirective(D);
+ Record.AddStmt(D->getTaskReductionRefExpr());
Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_TARGET_PARALLEL_FOR_DIRECTIVE;
}
@@ -2235,6 +2427,20 @@ void ASTStmtWriter::VisitOMPFlushDirective(OMPFlushDirective *D) {
Code = serialization::STMT_OMP_FLUSH_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPDepobjDirective(OMPDepobjDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_DEPOBJ_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPScanDirective(OMPScanDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_SCAN_DIRECTIVE;
+}
+
void ASTStmtWriter::VisitOMPOrderedDirective(OMPOrderedDirective *D) {
VisitStmt(D);
Record.push_back(D->getNumClauses());
@@ -2267,6 +2473,7 @@ void ASTStmtWriter::VisitOMPCancelDirective(OMPCancelDirective *D) {
void ASTStmtWriter::VisitOMPTaskLoopDirective(OMPTaskLoopDirective *D) {
VisitOMPLoopDirective(D);
+ Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_TASKLOOP_DIRECTIVE;
}
@@ -2278,6 +2485,7 @@ void ASTStmtWriter::VisitOMPTaskLoopSimdDirective(OMPTaskLoopSimdDirective *D) {
void ASTStmtWriter::VisitOMPMasterTaskLoopDirective(
OMPMasterTaskLoopDirective *D) {
VisitOMPLoopDirective(D);
+ Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_MASTER_TASKLOOP_DIRECTIVE;
}
@@ -2290,6 +2498,7 @@ void ASTStmtWriter::VisitOMPMasterTaskLoopSimdDirective(
void ASTStmtWriter::VisitOMPParallelMasterTaskLoopDirective(
OMPParallelMasterTaskLoopDirective *D) {
VisitOMPLoopDirective(D);
+ Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_PARALLEL_MASTER_TASKLOOP_DIRECTIVE;
}
@@ -2314,6 +2523,7 @@ void ASTStmtWriter::VisitOMPTargetUpdateDirective(OMPTargetUpdateDirective *D) {
void ASTStmtWriter::VisitOMPDistributeParallelForDirective(
OMPDistributeParallelForDirective *D) {
VisitOMPLoopDirective(D);
+ Record.AddStmt(D->getTaskReductionRefExpr());
Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE;
}
@@ -2362,6 +2572,7 @@ void ASTStmtWriter::VisitOMPTeamsDistributeParallelForSimdDirective(
void ASTStmtWriter::VisitOMPTeamsDistributeParallelForDirective(
OMPTeamsDistributeParallelForDirective *D) {
VisitOMPLoopDirective(D);
+ Record.AddStmt(D->getTaskReductionRefExpr());
Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE;
}
@@ -2382,6 +2593,7 @@ void ASTStmtWriter::VisitOMPTargetTeamsDistributeDirective(
void ASTStmtWriter::VisitOMPTargetTeamsDistributeParallelForDirective(
OMPTargetTeamsDistributeParallelForDirective *D) {
VisitOMPLoopDirective(D);
+ Record.AddStmt(D->getTaskReductionRefExpr());
Record.push_back(D->hasCancel() ? 1 : 0);
Code = serialization::STMT_OMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE;
}
diff --git a/clang/lib/Serialization/GeneratePCH.cpp b/clang/lib/Serialization/GeneratePCH.cpp
index 002233e49bb0..d869796b82c1 100644
--- a/clang/lib/Serialization/GeneratePCH.cpp
+++ b/clang/lib/Serialization/GeneratePCH.cpp
@@ -57,6 +57,11 @@ void PCHGenerator::HandleTranslationUnit(ASTContext &Ctx) {
}
}
+ // Errors that do not prevent the PCH from being written should not cause the
+ // overall compilation to fail either.
+ if (AllowASTWithErrors)
+ PP.getDiagnostics().getClient()->clear();
+
// Emit the PCH file to the Buffer.
assert(SemaPtr && "No Sema?");
Buffer->Signature =
diff --git a/clang/lib/Serialization/GlobalModuleIndex.cpp b/clang/lib/Serialization/GlobalModuleIndex.cpp
index 462d29c2a0f1..9192b3b476bb 100644
--- a/clang/lib/Serialization/GlobalModuleIndex.cpp
+++ b/clang/lib/Serialization/GlobalModuleIndex.cpp
@@ -321,7 +321,7 @@ bool GlobalModuleIndex::lookupIdentifier(StringRef Name, HitSet &Hits) {
= *static_cast<IdentifierIndexTable *>(IdentifierIndex);
IdentifierIndexTable::iterator Known = Table.find(Name);
if (Known == Table.end()) {
- return true;
+ return false;
}
SmallVector<unsigned, 2> ModuleIDs = *Known;
@@ -643,10 +643,10 @@ llvm::Error GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
// Skip the stored signature.
// FIXME: we could read the signature out of the import and validate it.
- ASTFileSignature StoredSignature = {
- {{(uint32_t)Record[Idx++], (uint32_t)Record[Idx++],
- (uint32_t)Record[Idx++], (uint32_t)Record[Idx++],
- (uint32_t)Record[Idx++]}}};
+ auto FirstSignatureByte = Record.begin() + Idx;
+ ASTFileSignature StoredSignature = ASTFileSignature::create(
+ FirstSignatureByte, FirstSignatureByte + ASTFileSignature::size);
+ Idx += ASTFileSignature::size;
// Skip the module name (currently this is only used for prebuilt
// modules while here we are only dealing with cached).
@@ -704,9 +704,8 @@ llvm::Error GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
// Get Signature.
if (State == DiagnosticOptionsBlock && Code == SIGNATURE)
- getModuleFileInfo(File).Signature = {
- {{(uint32_t)Record[0], (uint32_t)Record[1], (uint32_t)Record[2],
- (uint32_t)Record[3], (uint32_t)Record[4]}}};
+ getModuleFileInfo(File).Signature = ASTFileSignature::create(
+ Record.begin(), Record.begin() + ASTFileSignature::size);
// We don't care about this record.
}
diff --git a/clang/lib/Serialization/ModuleManager.cpp b/clang/lib/Serialization/ModuleManager.cpp
index daef502cdcb5..a42ed2f3c179 100644
--- a/clang/lib/Serialization/ModuleManager.cpp
+++ b/clang/lib/Serialization/ModuleManager.cpp
@@ -185,7 +185,14 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
Buf = llvm::MemoryBuffer::getSTDIN();
} else {
// Get a buffer of the file and close the file descriptor when done.
- Buf = FileMgr.getBufferForFile(NewModule->File, /*isVolatile=*/false);
+ // The file is volatile because in a parallel build we expect multiple
+ // compiler processes to use the same module file rebuilding it if needed.
+ //
+ // RequiresNullTerminator is false because module files don't need it, and
+ // this allows the file to still be mmapped.
+ Buf = FileMgr.getBufferForFile(NewModule->File,
+ /*IsVolatile=*/true,
+ /*RequiresNullTerminator=*/false);
}
if (!Buf) {
@@ -439,7 +446,7 @@ bool ModuleManager::lookupModuleFile(StringRef FileName,
// Open the file immediately to ensure there is no race between stat'ing and
// opening the file.
- auto FileOrErr = FileMgr.getFile(FileName, /*OpenFile=*/true,
+ auto FileOrErr = FileMgr.getFile(FileName, /*OpenFile=*/true,
/*CacheFailure=*/false);
if (!FileOrErr) {
File = nullptr;
diff --git a/clang/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
index 2ef50a727ece..0e8cbc60689a 100644
--- a/clang/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
@@ -13,13 +13,14 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/ExprCXX.h"
#include "clang/Analysis/CFGStmtMap.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/Support/ErrorHandling.h"
using namespace clang;
using namespace ento;
@@ -27,24 +28,20 @@ using namespace ento;
namespace {
class AnalysisOrderChecker
- : public Checker<check::PreStmt<CastExpr>,
- check::PostStmt<CastExpr>,
- check::PreStmt<ArraySubscriptExpr>,
- check::PostStmt<ArraySubscriptExpr>,
- check::PreStmt<CXXNewExpr>,
- check::PostStmt<CXXNewExpr>,
- check::PreStmt<OffsetOfExpr>,
- check::PostStmt<OffsetOfExpr>,
- check::PreCall,
- check::PostCall,
- check::EndFunction,
- check::NewAllocator,
- check::Bind,
- check::PointerEscape,
- check::RegionChanges,
- check::LiveSymbols> {
-
- bool isCallbackEnabled(AnalyzerOptions &Opts, StringRef CallbackName) const {
+ : public Checker<
+ check::PreStmt<CastExpr>, check::PostStmt<CastExpr>,
+ check::PreStmt<ArraySubscriptExpr>,
+ check::PostStmt<ArraySubscriptExpr>, check::PreStmt<CXXNewExpr>,
+ check::PostStmt<CXXNewExpr>, check::PreStmt<CXXDeleteExpr>,
+ check::PostStmt<CXXDeleteExpr>, check::PreStmt<CXXConstructExpr>,
+ check::PostStmt<CXXConstructExpr>, check::PreStmt<OffsetOfExpr>,
+ check::PostStmt<OffsetOfExpr>, check::PreCall, check::PostCall,
+ check::EndFunction, check::EndAnalysis, check::NewAllocator,
+ check::Bind, check::PointerEscape, check::RegionChanges,
+ check::LiveSymbols, eval::Call> {
+
+ bool isCallbackEnabled(const AnalyzerOptions &Opts,
+ StringRef CallbackName) const {
return Opts.getCheckerBooleanOption(this, "*") ||
Opts.getCheckerBooleanOption(this, CallbackName);
}
@@ -95,6 +92,26 @@ public:
llvm::errs() << "PostStmt<CXXNewExpr>\n";
}
+ void checkPreStmt(const CXXDeleteExpr *NE, CheckerContext &C) const {
+ if (isCallbackEnabled(C, "PreStmtCXXDeleteExpr"))
+ llvm::errs() << "PreStmt<CXXDeleteExpr>\n";
+ }
+
+ void checkPostStmt(const CXXDeleteExpr *NE, CheckerContext &C) const {
+ if (isCallbackEnabled(C, "PostStmtCXXDeleteExpr"))
+ llvm::errs() << "PostStmt<CXXDeleteExpr>\n";
+ }
+
+ void checkPreStmt(const CXXConstructExpr *NE, CheckerContext &C) const {
+ if (isCallbackEnabled(C, "PreStmtCXXConstructExpr"))
+ llvm::errs() << "PreStmt<CXXConstructExpr>\n";
+ }
+
+ void checkPostStmt(const CXXConstructExpr *NE, CheckerContext &C) const {
+ if (isCallbackEnabled(C, "PostStmtCXXConstructExpr"))
+ llvm::errs() << "PostStmt<CXXConstructExpr>\n";
+ }
+
void checkPreStmt(const OffsetOfExpr *OOE, CheckerContext &C) const {
if (isCallbackEnabled(C, "PreStmtOffsetOfExpr"))
llvm::errs() << "PreStmt<OffsetOfExpr>\n";
@@ -105,11 +122,25 @@ public:
llvm::errs() << "PostStmt<OffsetOfExpr>\n";
}
+ bool evalCall(const CallEvent &Call, CheckerContext &C) const {
+ if (isCallbackEnabled(C, "EvalCall")) {
+ llvm::errs() << "EvalCall";
+ if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(Call.getDecl()))
+ llvm::errs() << " (" << ND->getQualifiedNameAsString() << ')';
+ llvm::errs() << " {argno: " << Call.getNumArgs() << '}';
+ llvm::errs() << " [" << Call.getKindAsString() << ']';
+ llvm::errs() << '\n';
+ return true;
+ }
+ return false;
+ }
+
void checkPreCall(const CallEvent &Call, CheckerContext &C) const {
if (isCallbackEnabled(C, "PreCall")) {
llvm::errs() << "PreCall";
if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(Call.getDecl()))
llvm::errs() << " (" << ND->getQualifiedNameAsString() << ')';
+ llvm::errs() << " [" << Call.getKindAsString() << ']';
llvm::errs() << '\n';
}
}
@@ -119,6 +150,7 @@ public:
llvm::errs() << "PostCall";
if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(Call.getDecl()))
llvm::errs() << " (" << ND->getQualifiedNameAsString() << ')';
+ llvm::errs() << " [" << Call.getKindAsString() << ']';
llvm::errs() << '\n';
}
}
@@ -140,7 +172,13 @@ public:
}
}
- void checkNewAllocator(const CXXNewExpr *CNE, SVal Target,
+ void checkEndAnalysis(ExplodedGraph &G, BugReporter &BR,
+ ExprEngine &Eng) const {
+ if (isCallbackEnabled(BR.getAnalyzerOptions(), "EndAnalysis"))
+ llvm::errs() << "EndAnalysis\n";
+ }
+
+ void checkNewAllocator(const CXXAllocatorCall &Call,
CheckerContext &C) const {
if (isCallbackEnabled(C, "NewAllocator"))
llvm::errs() << "NewAllocator\n";
@@ -186,6 +224,6 @@ void ento::registerAnalysisOrderChecker(CheckerManager &mgr) {
mgr.registerChecker<AnalysisOrderChecker>();
}
-bool ento::shouldRegisterAnalysisOrderChecker(const LangOptions &LO) {
+bool ento::shouldRegisterAnalysisOrderChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
index 20f3008b4a4b..c06604b6cffe 100644
--- a/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
@@ -103,7 +103,7 @@ void AnalyzerStatsChecker::checkEndAnalysis(ExplodedGraph &G,
NumBlocksUnreachable += unreachable;
NumBlocks += total;
- std::string NameOfRootFunction = output.str();
+ std::string NameOfRootFunction = std::string(output.str());
output << " -> Total CFGBlocks: " << total << " | Unreachable CFGBlocks: "
<< unreachable << " | Exhausted Block: "
@@ -140,6 +140,6 @@ void ento::registerAnalyzerStatsChecker(CheckerManager &mgr) {
mgr.registerChecker<AnalyzerStatsChecker>();
}
-bool ento::shouldRegisterAnalyzerStatsChecker(const LangOptions &LO) {
+bool ento::shouldRegisterAnalyzerStatsChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
index 8d4793e0802f..59163c1f31fa 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
@@ -16,6 +16,7 @@
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
using namespace clang;
@@ -54,12 +55,11 @@ void ArrayBoundChecker::checkLocation(SVal l, bool isLoad, const Stmt* LoadS,
ProgramStateRef state = C.getState();
// Get the size of the array.
- DefinedOrUnknownSVal NumElements
- = C.getStoreManager().getSizeInElements(state, ER->getSuperRegion(),
- ER->getValueType());
+ DefinedOrUnknownSVal ElementCount = getDynamicElementCount(
+ state, ER->getSuperRegion(), C.getSValBuilder(), ER->getValueType());
- ProgramStateRef StInBound = state->assumeInBound(Idx, NumElements, true);
- ProgramStateRef StOutBound = state->assumeInBound(Idx, NumElements, false);
+ ProgramStateRef StInBound = state->assumeInBound(Idx, ElementCount, true);
+ ProgramStateRef StOutBound = state->assumeInBound(Idx, ElementCount, false);
if (StOutBound && !StInBound) {
ExplodedNode *N = C.generateErrorNode(StOutBound);
if (!N)
@@ -92,6 +92,6 @@ void ento::registerArrayBoundChecker(CheckerManager &mgr) {
mgr.registerChecker<ArrayBoundChecker>();
}
-bool ento::shouldRegisterArrayBoundChecker(const LangOptions &LO) {
+bool ento::shouldRegisterArrayBoundChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp b/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
index 8f3bf138cae4..7c264bba4b6a 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
@@ -12,13 +12,14 @@
//===----------------------------------------------------------------------===//
#include "Taint.h"
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/CharUnits.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
@@ -175,24 +176,23 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
}
do {
- // CHECK UPPER BOUND: Is byteOffset >= extent(baseRegion)? If so,
+ // CHECK UPPER BOUND: Is byteOffset >= size(baseRegion)? If so,
// we are doing a load/store after the last valid offset.
- DefinedOrUnknownSVal extentVal =
- rawOffset.getRegion()->getExtent(svalBuilder);
- if (!extentVal.getAs<NonLoc>())
+ const MemRegion *MR = rawOffset.getRegion();
+ DefinedOrUnknownSVal Size = getDynamicSize(state, MR, svalBuilder);
+ if (!Size.getAs<NonLoc>())
break;
- if (extentVal.getAs<nonloc::ConcreteInt>()) {
+ if (Size.getAs<nonloc::ConcreteInt>()) {
std::pair<NonLoc, nonloc::ConcreteInt> simplifiedOffsets =
getSimplifiedOffsets(rawOffset.getByteOffset(),
- extentVal.castAs<nonloc::ConcreteInt>(),
- svalBuilder);
+ Size.castAs<nonloc::ConcreteInt>(), svalBuilder);
rawOffsetVal = simplifiedOffsets.first;
- extentVal = simplifiedOffsets.second;
+ Size = simplifiedOffsets.second;
}
SVal upperbound = svalBuilder.evalBinOpNN(state, BO_GE, rawOffsetVal,
- extentVal.castAs<NonLoc>(),
+ Size.castAs<NonLoc>(),
svalBuilder.getConditionType());
Optional<NonLoc> upperboundToCheck = upperbound.getAs<NonLoc>();
@@ -356,6 +356,6 @@ void ento::registerArrayBoundCheckerV2(CheckerManager &mgr) {
mgr.registerChecker<ArrayBoundCheckerV2>();
}
-bool ento::shouldRegisterArrayBoundCheckerV2(const LangOptions &LO) {
+bool ento::shouldRegisterArrayBoundCheckerV2(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
index 325952fe4ed4..918c6e361381 100644
--- a/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
@@ -1243,7 +1243,7 @@ void ento::registerNilArgChecker(CheckerManager &mgr) {
mgr.registerChecker<NilArgChecker>();
}
-bool ento::shouldRegisterNilArgChecker(const LangOptions &LO) {
+bool ento::shouldRegisterNilArgChecker(const CheckerManager &mgr) {
return true;
}
@@ -1251,7 +1251,7 @@ void ento::registerCFNumberChecker(CheckerManager &mgr) {
mgr.registerChecker<CFNumberChecker>();
}
-bool ento::shouldRegisterCFNumberChecker(const LangOptions &LO) {
+bool ento::shouldRegisterCFNumberChecker(const CheckerManager &mgr) {
return true;
}
@@ -1259,7 +1259,7 @@ void ento::registerCFRetainReleaseChecker(CheckerManager &mgr) {
mgr.registerChecker<CFRetainReleaseChecker>();
}
-bool ento::shouldRegisterCFRetainReleaseChecker(const LangOptions &LO) {
+bool ento::shouldRegisterCFRetainReleaseChecker(const CheckerManager &mgr) {
return true;
}
@@ -1267,7 +1267,7 @@ void ento::registerClassReleaseChecker(CheckerManager &mgr) {
mgr.registerChecker<ClassReleaseChecker>();
}
-bool ento::shouldRegisterClassReleaseChecker(const LangOptions &LO) {
+bool ento::shouldRegisterClassReleaseChecker(const CheckerManager &mgr) {
return true;
}
@@ -1275,7 +1275,7 @@ void ento::registerVariadicMethodTypeChecker(CheckerManager &mgr) {
mgr.registerChecker<VariadicMethodTypeChecker>();
}
-bool ento::shouldRegisterVariadicMethodTypeChecker(const LangOptions &LO) {
+bool ento::shouldRegisterVariadicMethodTypeChecker(const CheckerManager &mgr) {
return true;
}
@@ -1283,7 +1283,7 @@ void ento::registerObjCLoopChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCLoopChecker>();
}
-bool ento::shouldRegisterObjCLoopChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCLoopChecker(const CheckerManager &mgr) {
return true;
}
@@ -1291,6 +1291,6 @@ void ento::registerObjCNonNilReturnValueChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCNonNilReturnValueChecker>();
}
-bool ento::shouldRegisterObjCNonNilReturnValueChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCNonNilReturnValueChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
index 0eb3c3d1d0e6..2752b37f9b3f 100644
--- a/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
@@ -184,6 +184,6 @@ void ento::registerBlockInCriticalSectionChecker(CheckerManager &mgr) {
mgr.registerChecker<BlockInCriticalSectionChecker>();
}
-bool ento::shouldRegisterBlockInCriticalSectionChecker(const LangOptions &LO) {
+bool ento::shouldRegisterBlockInCriticalSectionChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
index 1423b9c39b26..6c0caf3c4e78 100644
--- a/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
@@ -70,8 +70,8 @@ void BoolAssignmentChecker::checkBind(SVal loc, SVal val, const Stmt *S,
// Get the value of the right-hand side. We only care about values
// that are defined (UnknownVals and UndefinedVals are handled by other
// checkers).
- Optional<DefinedSVal> DV = val.getAs<DefinedSVal>();
- if (!DV)
+ Optional<NonLoc> NV = val.getAs<NonLoc>();
+ if (!NV)
return;
// Check if the assigned value meets our criteria for correctness. It must
@@ -79,84 +79,23 @@ void BoolAssignmentChecker::checkBind(SVal loc, SVal val, const Stmt *S,
// the value is possibly < 0 (for a negative value) or greater than 1.
ProgramStateRef state = C.getState();
SValBuilder &svalBuilder = C.getSValBuilder();
+ BasicValueFactory &BVF = svalBuilder.getBasicValueFactory();
ConstraintManager &CM = C.getConstraintManager();
- // First, ensure that the value is >= 0.
- DefinedSVal zeroVal = svalBuilder.makeIntVal(0, valTy);
- SVal greaterThanOrEqualToZeroVal =
- svalBuilder.evalBinOp(state, BO_GE, *DV, zeroVal,
- svalBuilder.getConditionType());
+ llvm::APSInt Zero = BVF.getValue(0, valTy);
+ llvm::APSInt One = BVF.getValue(1, valTy);
- Optional<DefinedSVal> greaterThanEqualToZero =
- greaterThanOrEqualToZeroVal.getAs<DefinedSVal>();
+ ProgramStateRef StIn, StOut;
+ std::tie(StIn, StOut) = CM.assumeInclusiveRangeDual(state, *NV, Zero, One);
- if (!greaterThanEqualToZero) {
- // The SValBuilder cannot construct a valid SVal for this condition.
- // This means we cannot properly reason about it.
- return;
- }
-
- ProgramStateRef stateLT, stateGE;
- std::tie(stateGE, stateLT) = CM.assumeDual(state, *greaterThanEqualToZero);
-
- // Is it possible for the value to be less than zero?
- if (stateLT) {
- // It is possible for the value to be less than zero. We only
- // want to emit a warning, however, if that value is fully constrained.
- // If it it possible for the value to be >= 0, then essentially the
- // value is underconstrained and there is nothing left to be done.
- if (!stateGE)
- emitReport(stateLT, C);
-
- // In either case, we are done.
- return;
- }
-
- // If we reach here, it must be the case that the value is constrained
- // to only be >= 0.
- assert(stateGE == state);
-
- // At this point we know that the value is >= 0.
- // Now check to ensure that the value is <= 1.
- DefinedSVal OneVal = svalBuilder.makeIntVal(1, valTy);
- SVal lessThanEqToOneVal =
- svalBuilder.evalBinOp(state, BO_LE, *DV, OneVal,
- svalBuilder.getConditionType());
-
- Optional<DefinedSVal> lessThanEqToOne =
- lessThanEqToOneVal.getAs<DefinedSVal>();
-
- if (!lessThanEqToOne) {
- // The SValBuilder cannot construct a valid SVal for this condition.
- // This means we cannot properly reason about it.
- return;
- }
-
- ProgramStateRef stateGT, stateLE;
- std::tie(stateLE, stateGT) = CM.assumeDual(state, *lessThanEqToOne);
-
- // Is it possible for the value to be greater than one?
- if (stateGT) {
- // It is possible for the value to be greater than one. We only
- // want to emit a warning, however, if that value is fully constrained.
- // If it is possible for the value to be <= 1, then essentially the
- // value is underconstrained and there is nothing left to be done.
- if (!stateLE)
- emitReport(stateGT, C);
-
- // In either case, we are done.
- return;
- }
-
- // If we reach here, it must be the case that the value is constrained
- // to only be <= 1.
- assert(stateLE == state);
+ if (!StIn)
+ emitReport(StOut, C);
}
void ento::registerBoolAssignmentChecker(CheckerManager &mgr) {
mgr.registerChecker<BoolAssignmentChecker>();
}
-bool ento::shouldRegisterBoolAssignmentChecker(const LangOptions &LO) {
+bool ento::shouldRegisterBoolAssignmentChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
index 10594e331cbe..233ce57c3ac9 100644
--- a/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
@@ -10,12 +10,13 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/Basic/Builtins.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
using namespace clang;
using namespace ento;
@@ -63,10 +64,12 @@ bool BuiltinFunctionChecker::evalCall(const CallEvent &Call,
case Builtin::BI__builtin_unpredictable:
case Builtin::BI__builtin_expect:
+ case Builtin::BI__builtin_expect_with_probability:
case Builtin::BI__builtin_assume_aligned:
case Builtin::BI__builtin_addressof: {
- // For __builtin_unpredictable, __builtin_expect, and
- // __builtin_assume_aligned, just return the value of the subexpression.
+ // For __builtin_unpredictable, __builtin_expect,
+ // __builtin_expect_with_probability and __builtin_assume_aligned,
+ // just return the value of the subexpression.
// __builtin_addressof is going from a reference to a pointer, but those
// are represented the same way in the analyzer.
assert (Call.getNumArgs() > 0);
@@ -90,10 +93,10 @@ bool BuiltinFunctionChecker::evalCall(const CallEvent &Call,
return true; // Return true to model purity.
SValBuilder& svalBuilder = C.getSValBuilder();
- DefinedOrUnknownSVal Extent = R->getExtent(svalBuilder);
- DefinedOrUnknownSVal extentMatchesSizeArg =
- svalBuilder.evalEQ(state, Extent, Size.castAs<DefinedOrUnknownSVal>());
- state = state->assume(extentMatchesSizeArg, true);
+ DefinedOrUnknownSVal DynSize = getDynamicSize(state, R, svalBuilder);
+ DefinedOrUnknownSVal DynSizeMatchesSizeArg =
+ svalBuilder.evalEQ(state, DynSize, Size.castAs<DefinedOrUnknownSVal>());
+ state = state->assume(DynSizeMatchesSizeArg, true);
assert(state && "The region should not have any previous constraints");
C.addTransition(state->BindExpr(CE, LCtx, loc::MemRegionVal(R)));
@@ -134,6 +137,6 @@ void ento::registerBuiltinFunctionChecker(CheckerManager &mgr) {
mgr.registerChecker<BuiltinFunctionChecker>();
}
-bool ento::shouldRegisterBuiltinFunctionChecker(const LangOptions &LO) {
+bool ento::shouldRegisterBuiltinFunctionChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index 21c4bbc60264..30fd62f887c4 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -11,23 +11,66 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "InterCheckerAPI.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace ento;
namespace {
+struct AnyArgExpr {
+ // FIXME: Remove constructor in C++17 to turn it into an aggregate.
+ AnyArgExpr(const Expr *Expression, unsigned ArgumentIndex)
+ : Expression{Expression}, ArgumentIndex{ArgumentIndex} {}
+ const Expr *Expression;
+ unsigned ArgumentIndex;
+};
+
+struct SourceArgExpr : AnyArgExpr {
+ using AnyArgExpr::AnyArgExpr; // FIXME: Remove using in C++17.
+};
+
+struct DestinationArgExpr : AnyArgExpr {
+ using AnyArgExpr::AnyArgExpr; // FIXME: Same.
+};
+
+struct SizeArgExpr : AnyArgExpr {
+ using AnyArgExpr::AnyArgExpr; // FIXME: Same.
+};
+
+using ErrorMessage = SmallString<128>;
+enum class AccessKind { write, read };
+
+static ErrorMessage createOutOfBoundErrorMsg(StringRef FunctionDescription,
+ AccessKind Access) {
+ ErrorMessage Message;
+ llvm::raw_svector_ostream Os(Message);
+
+ // Function classification like: Memory copy function
+ Os << toUppercase(FunctionDescription.front())
+ << &FunctionDescription.data()[1];
+
+ if (Access == AccessKind::write) {
+ Os << " overflows the destination buffer";
+ } else { // read access
+ Os << " accesses out-of-bound array element";
+ }
+
+ return Message;
+}
+
enum class ConcatFnKind { none = 0, strcat = 1, strlcat = 2 };
class CStringChecker : public Checker< eval::Call,
check::PreStmt<DeclStmt>,
@@ -111,12 +154,9 @@ public:
void evalMemmove(CheckerContext &C, const CallExpr *CE) const;
void evalBcopy(CheckerContext &C, const CallExpr *CE) const;
void evalCopyCommon(CheckerContext &C, const CallExpr *CE,
- ProgramStateRef state,
- const Expr *Size,
- const Expr *Source,
- const Expr *Dest,
- bool Restricted = false,
- bool IsMempcpy = false) const;
+ ProgramStateRef state, SizeArgExpr Size,
+ DestinationArgExpr Dest, SourceArgExpr Source,
+ bool Restricted, bool IsMempcpy) const;
void evalMemcmp(CheckerContext &C, const CallExpr *CE) const;
@@ -193,40 +233,17 @@ public:
ProgramStateRef &State);
// Re-usable checks
- ProgramStateRef checkNonNull(CheckerContext &C,
- ProgramStateRef state,
- const Expr *S,
- SVal l,
- unsigned IdxOfArg) const;
- ProgramStateRef CheckLocation(CheckerContext &C,
- ProgramStateRef state,
- const Expr *S,
- SVal l,
- const char *message = nullptr) const;
- ProgramStateRef CheckBufferAccess(CheckerContext &C,
- ProgramStateRef state,
- const Expr *Size,
- const Expr *FirstBuf,
- const Expr *SecondBuf,
- const char *firstMessage = nullptr,
- const char *secondMessage = nullptr,
- bool WarnAboutSize = false) const;
-
- ProgramStateRef CheckBufferAccess(CheckerContext &C,
- ProgramStateRef state,
- const Expr *Size,
- const Expr *Buf,
- const char *message = nullptr,
- bool WarnAboutSize = false) const {
- // This is a convenience overload.
- return CheckBufferAccess(C, state, Size, Buf, nullptr, message, nullptr,
- WarnAboutSize);
- }
- ProgramStateRef CheckOverlap(CheckerContext &C,
- ProgramStateRef state,
- const Expr *Size,
- const Expr *First,
- const Expr *Second) const;
+ ProgramStateRef checkNonNull(CheckerContext &C, ProgramStateRef State,
+ AnyArgExpr Arg, SVal l) const;
+ ProgramStateRef CheckLocation(CheckerContext &C, ProgramStateRef state,
+ AnyArgExpr Buffer, SVal Element,
+ AccessKind Access) const;
+ ProgramStateRef CheckBufferAccess(CheckerContext &C, ProgramStateRef State,
+ AnyArgExpr Buffer, SizeArgExpr Size,
+ AccessKind Access) const;
+ ProgramStateRef CheckOverlap(CheckerContext &C, ProgramStateRef state,
+ SizeArgExpr Size, AnyArgExpr First,
+ AnyArgExpr Second) const;
void emitOverlapBug(CheckerContext &C,
ProgramStateRef state,
const Stmt *First,
@@ -275,26 +292,26 @@ CStringChecker::assumeZero(CheckerContext &C, ProgramStateRef state, SVal V,
}
ProgramStateRef CStringChecker::checkNonNull(CheckerContext &C,
- ProgramStateRef state,
- const Expr *S, SVal l,
- unsigned IdxOfArg) const {
+ ProgramStateRef State,
+ AnyArgExpr Arg, SVal l) const {
// If a previous check has failed, propagate the failure.
- if (!state)
+ if (!State)
return nullptr;
ProgramStateRef stateNull, stateNonNull;
- std::tie(stateNull, stateNonNull) = assumeZero(C, state, l, S->getType());
+ std::tie(stateNull, stateNonNull) =
+ assumeZero(C, State, l, Arg.Expression->getType());
if (stateNull && !stateNonNull) {
if (Filter.CheckCStringNullArg) {
SmallString<80> buf;
llvm::raw_svector_ostream OS(buf);
assert(CurrentFunctionDescription);
- OS << "Null pointer passed as " << IdxOfArg
- << llvm::getOrdinalSuffix(IdxOfArg) << " argument to "
+ OS << "Null pointer passed as " << (Arg.ArgumentIndex + 1)
+ << llvm::getOrdinalSuffix(Arg.ArgumentIndex + 1) << " argument to "
<< CurrentFunctionDescription;
- emitNullArgBug(C, stateNull, S, OS.str());
+ emitNullArgBug(C, stateNull, Arg.Expression, OS.str());
}
return nullptr;
}
@@ -306,19 +323,20 @@ ProgramStateRef CStringChecker::checkNonNull(CheckerContext &C,
// FIXME: This was originally copied from ArrayBoundChecker.cpp. Refactor?
ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
- ProgramStateRef state,
- const Expr *S, SVal l,
- const char *warningMsg) const {
+ ProgramStateRef state,
+ AnyArgExpr Buffer, SVal Element,
+ AccessKind Access) const {
+
// If a previous check has failed, propagate the failure.
if (!state)
return nullptr;
// Check for out of bound array element access.
- const MemRegion *R = l.getAsRegion();
+ const MemRegion *R = Element.getAsRegion();
if (!R)
return state;
- const ElementRegion *ER = dyn_cast<ElementRegion>(R);
+ const auto *ER = dyn_cast<ElementRegion>(R);
if (!ER)
return state;
@@ -326,11 +344,9 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
return state;
// Get the size of the array.
- const SubRegion *superReg = cast<SubRegion>(ER->getSuperRegion());
- SValBuilder &svalBuilder = C.getSValBuilder();
- SVal Extent =
- svalBuilder.convertToArrayIndex(superReg->getExtent(svalBuilder));
- DefinedOrUnknownSVal Size = Extent.castAs<DefinedOrUnknownSVal>();
+ const auto *superReg = cast<SubRegion>(ER->getSuperRegion());
+ DefinedOrUnknownSVal Size =
+ getDynamicSize(state, superReg, C.getSValBuilder());
// Get the index of the accessed element.
DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
@@ -343,20 +359,11 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
// In the latter case we only do modeling but do not emit warning.
if (!Filter.CheckCStringOutOfBounds)
return nullptr;
- // Emit a bug report.
- if (warningMsg) {
- emitOutOfBoundsBug(C, StOutBound, S, warningMsg);
- } else {
- assert(CurrentFunctionDescription);
- assert(CurrentFunctionDescription[0] != '\0');
- SmallString<80> buf;
- llvm::raw_svector_ostream os(buf);
- os << toUppercase(CurrentFunctionDescription[0])
- << &CurrentFunctionDescription[1]
- << " accesses out-of-bound array element";
- emitOutOfBoundsBug(C, StOutBound, S, os.str());
- }
+ // Emit a bug report.
+ ErrorMessage Message =
+ createOutOfBoundErrorMsg(CurrentFunctionDescription, Access);
+ emitOutOfBoundsBug(C, StOutBound, Buffer.Expression, Message);
return nullptr;
}
@@ -366,89 +373,68 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
}
ProgramStateRef CStringChecker::CheckBufferAccess(CheckerContext &C,
- ProgramStateRef state,
- const Expr *Size,
- const Expr *FirstBuf,
- const Expr *SecondBuf,
- const char *firstMessage,
- const char *secondMessage,
- bool WarnAboutSize) const {
+ ProgramStateRef State,
+ AnyArgExpr Buffer,
+ SizeArgExpr Size,
+ AccessKind Access) const {
// If a previous check has failed, propagate the failure.
- if (!state)
+ if (!State)
return nullptr;
SValBuilder &svalBuilder = C.getSValBuilder();
ASTContext &Ctx = svalBuilder.getContext();
- const LocationContext *LCtx = C.getLocationContext();
- QualType sizeTy = Size->getType();
+ QualType SizeTy = Size.Expression->getType();
QualType PtrTy = Ctx.getPointerType(Ctx.CharTy);
// Check that the first buffer is non-null.
- SVal BufVal = C.getSVal(FirstBuf);
- state = checkNonNull(C, state, FirstBuf, BufVal, 1);
- if (!state)
+ SVal BufVal = C.getSVal(Buffer.Expression);
+ State = checkNonNull(C, State, Buffer, BufVal);
+ if (!State)
return nullptr;
// If out-of-bounds checking is turned off, skip the rest.
if (!Filter.CheckCStringOutOfBounds)
- return state;
+ return State;
// Get the access length and make sure it is known.
// FIXME: This assumes the caller has already checked that the access length
// is positive. And that it's unsigned.
- SVal LengthVal = C.getSVal(Size);
+ SVal LengthVal = C.getSVal(Size.Expression);
Optional<NonLoc> Length = LengthVal.getAs<NonLoc>();
if (!Length)
- return state;
+ return State;
// Compute the offset of the last element to be accessed: size-1.
- NonLoc One = svalBuilder.makeIntVal(1, sizeTy).castAs<NonLoc>();
- SVal Offset = svalBuilder.evalBinOpNN(state, BO_Sub, *Length, One, sizeTy);
+ NonLoc One = svalBuilder.makeIntVal(1, SizeTy).castAs<NonLoc>();
+ SVal Offset = svalBuilder.evalBinOpNN(State, BO_Sub, *Length, One, SizeTy);
if (Offset.isUnknown())
return nullptr;
NonLoc LastOffset = Offset.castAs<NonLoc>();
// Check that the first buffer is sufficiently long.
- SVal BufStart = svalBuilder.evalCast(BufVal, PtrTy, FirstBuf->getType());
+ SVal BufStart =
+ svalBuilder.evalCast(BufVal, PtrTy, Buffer.Expression->getType());
if (Optional<Loc> BufLoc = BufStart.getAs<Loc>()) {
- const Expr *warningExpr = (WarnAboutSize ? Size : FirstBuf);
- SVal BufEnd = svalBuilder.evalBinOpLN(state, BO_Add, *BufLoc,
- LastOffset, PtrTy);
- state = CheckLocation(C, state, warningExpr, BufEnd, firstMessage);
+ SVal BufEnd =
+ svalBuilder.evalBinOpLN(State, BO_Add, *BufLoc, LastOffset, PtrTy);
- // If the buffer isn't large enough, abort.
- if (!state)
- return nullptr;
- }
+ State = CheckLocation(C, State, Buffer, BufEnd, Access);
- // If there's a second buffer, check it as well.
- if (SecondBuf) {
- BufVal = state->getSVal(SecondBuf, LCtx);
- state = checkNonNull(C, state, SecondBuf, BufVal, 2);
- if (!state)
+ // If the buffer isn't large enough, abort.
+ if (!State)
return nullptr;
-
- BufStart = svalBuilder.evalCast(BufVal, PtrTy, SecondBuf->getType());
- if (Optional<Loc> BufLoc = BufStart.getAs<Loc>()) {
- const Expr *warningExpr = (WarnAboutSize ? Size : SecondBuf);
-
- SVal BufEnd = svalBuilder.evalBinOpLN(state, BO_Add, *BufLoc,
- LastOffset, PtrTy);
- state = CheckLocation(C, state, warningExpr, BufEnd, secondMessage);
- }
}
// Large enough or not, return this state!
- return state;
+ return State;
}
ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
- ProgramStateRef state,
- const Expr *Size,
- const Expr *First,
- const Expr *Second) const {
+ ProgramStateRef state,
+ SizeArgExpr Size, AnyArgExpr First,
+ AnyArgExpr Second) const {
if (!Filter.CheckCStringBufferOverlap)
return state;
@@ -464,8 +450,8 @@ ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
// Get the buffer values and make sure they're known locations.
const LocationContext *LCtx = C.getLocationContext();
- SVal firstVal = state->getSVal(First, LCtx);
- SVal secondVal = state->getSVal(Second, LCtx);
+ SVal firstVal = state->getSVal(First.Expression, LCtx);
+ SVal secondVal = state->getSVal(Second.Expression, LCtx);
Optional<Loc> firstLoc = firstVal.getAs<Loc>();
if (!firstLoc)
@@ -478,11 +464,11 @@ ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
// Are the two values the same?
SValBuilder &svalBuilder = C.getSValBuilder();
std::tie(stateTrue, stateFalse) =
- state->assume(svalBuilder.evalEQ(state, *firstLoc, *secondLoc));
+ state->assume(svalBuilder.evalEQ(state, *firstLoc, *secondLoc));
if (stateTrue && !stateFalse) {
// If the values are known to be equal, that's automatically an overlap.
- emitOverlapBug(C, stateTrue, First, Second);
+ emitOverlapBug(C, stateTrue, First.Expression, Second.Expression);
return nullptr;
}
@@ -492,8 +478,8 @@ ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
// Which value comes first?
QualType cmpTy = svalBuilder.getConditionType();
- SVal reverse = svalBuilder.evalBinOpLL(state, BO_GT,
- *firstLoc, *secondLoc, cmpTy);
+ SVal reverse =
+ svalBuilder.evalBinOpLL(state, BO_GT, *firstLoc, *secondLoc, cmpTy);
Optional<DefinedOrUnknownSVal> reverseTest =
reverse.getAs<DefinedOrUnknownSVal>();
if (!reverseTest)
@@ -514,7 +500,7 @@ ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
}
// Get the length, and make sure it too is known.
- SVal LengthVal = state->getSVal(Size, LCtx);
+ SVal LengthVal = state->getSVal(Size.Expression, LCtx);
Optional<NonLoc> Length = LengthVal.getAs<NonLoc>();
if (!Length)
return state;
@@ -523,22 +509,22 @@ ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
// Bail out if the cast fails.
ASTContext &Ctx = svalBuilder.getContext();
QualType CharPtrTy = Ctx.getPointerType(Ctx.CharTy);
- SVal FirstStart = svalBuilder.evalCast(*firstLoc, CharPtrTy,
- First->getType());
+ SVal FirstStart =
+ svalBuilder.evalCast(*firstLoc, CharPtrTy, First.Expression->getType());
Optional<Loc> FirstStartLoc = FirstStart.getAs<Loc>();
if (!FirstStartLoc)
return state;
// Compute the end of the first buffer. Bail out if THAT fails.
- SVal FirstEnd = svalBuilder.evalBinOpLN(state, BO_Add,
- *FirstStartLoc, *Length, CharPtrTy);
+ SVal FirstEnd = svalBuilder.evalBinOpLN(state, BO_Add, *FirstStartLoc,
+ *Length, CharPtrTy);
Optional<Loc> FirstEndLoc = FirstEnd.getAs<Loc>();
if (!FirstEndLoc)
return state;
// Is the end of the first buffer past the start of the second buffer?
- SVal Overlap = svalBuilder.evalBinOpLL(state, BO_GT,
- *FirstEndLoc, *secondLoc, cmpTy);
+ SVal Overlap =
+ svalBuilder.evalBinOpLL(state, BO_GT, *FirstEndLoc, *secondLoc, cmpTy);
Optional<DefinedOrUnknownSVal> OverlapTest =
Overlap.getAs<DefinedOrUnknownSVal>();
if (!OverlapTest)
@@ -548,7 +534,7 @@ ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
if (stateTrue && !stateFalse) {
// Overlap!
- emitOverlapBug(C, stateTrue, First, Second);
+ emitOverlapBug(C, stateTrue, First.Expression, Second.Expression);
return nullptr;
}
@@ -723,7 +709,8 @@ ProgramStateRef CStringChecker::setCStringLength(ProgramStateRef state,
case MemRegion::SymbolicRegionKind:
case MemRegion::AllocaRegionKind:
- case MemRegion::VarRegionKind:
+ case MemRegion::NonParamVarRegionKind:
+ case MemRegion::ParamVarRegionKind:
case MemRegion::FieldRegionKind:
case MemRegion::ObjCIvarRegionKind:
// These are the types we can currently track string lengths for.
@@ -828,7 +815,8 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
}
case MemRegion::SymbolicRegionKind:
case MemRegion::AllocaRegionKind:
- case MemRegion::VarRegionKind:
+ case MemRegion::NonParamVarRegionKind:
+ case MemRegion::ParamVarRegionKind:
case MemRegion::FieldRegionKind:
case MemRegion::ObjCIvarRegionKind:
return getCStringLengthForRegion(C, state, Ex, MR, hypothetical);
@@ -935,14 +923,12 @@ bool CStringChecker::IsFirstBufInBound(CheckerContext &C,
// Get the size of the array.
const SubRegion *superReg = cast<SubRegion>(ER->getSuperRegion());
- SVal Extent =
- svalBuilder.convertToArrayIndex(superReg->getExtent(svalBuilder));
- DefinedOrUnknownSVal ExtentSize = Extent.castAs<DefinedOrUnknownSVal>();
+ DefinedOrUnknownSVal SizeDV = getDynamicSize(state, superReg, svalBuilder);
// Get the index of the accessed element.
DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
- ProgramStateRef StInBound = state->assumeInBound(Idx, ExtentSize, true);
+ ProgramStateRef StInBound = state->assumeInBound(Idx, SizeDV, true);
return static_cast<bool>(StInBound);
}
@@ -1025,10 +1011,14 @@ bool CStringChecker::SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
os << "a C++ temp object of type "
<< cast<TypedValueRegion>(MR)->getValueType().getAsString();
return true;
- case MemRegion::VarRegionKind:
+ case MemRegion::NonParamVarRegionKind:
os << "a variable of type"
<< cast<TypedValueRegion>(MR)->getValueType().getAsString();
return true;
+ case MemRegion::ParamVarRegionKind:
+ os << "a parameter of type"
+ << cast<TypedValueRegion>(MR)->getValueType().getAsString();
+ return true;
case MemRegion::FieldRegionKind:
os << "a field of type "
<< cast<TypedValueRegion>(MR)->getValueType().getAsString();
@@ -1069,13 +1059,12 @@ bool CStringChecker::memsetAux(const Expr *DstBuffer, SVal CharVal,
// For now we can only handle the case of offset is 0 and concrete char value.
if (Offset.isValid() && !Offset.hasSymbolicOffset() &&
Offset.getOffset() == 0) {
- // Get the base region's extent.
- auto *SubReg = cast<SubRegion>(BR);
- DefinedOrUnknownSVal Extent = SubReg->getExtent(svalBuilder);
+ // Get the base region's size.
+ DefinedOrUnknownSVal SizeDV = getDynamicSize(State, BR, svalBuilder);
ProgramStateRef StateWholeReg, StateNotWholeReg;
std::tie(StateWholeReg, StateNotWholeReg) =
- State->assume(svalBuilder.evalEQ(State, Extent, *SizeNL));
+ State->assume(svalBuilder.evalEQ(State, SizeDV, *SizeNL));
// With the semantic of 'memset()', we should convert the CharVal to
// unsigned char.
@@ -1134,25 +1123,24 @@ bool CStringChecker::memsetAux(const Expr *DstBuffer, SVal CharVal,
// evaluation of individual function calls.
//===----------------------------------------------------------------------===//
-void CStringChecker::evalCopyCommon(CheckerContext &C,
- const CallExpr *CE,
- ProgramStateRef state,
- const Expr *Size, const Expr *Dest,
- const Expr *Source, bool Restricted,
+void CStringChecker::evalCopyCommon(CheckerContext &C, const CallExpr *CE,
+ ProgramStateRef state, SizeArgExpr Size,
+ DestinationArgExpr Dest,
+ SourceArgExpr Source, bool Restricted,
bool IsMempcpy) const {
CurrentFunctionDescription = "memory copy function";
// See if the size argument is zero.
const LocationContext *LCtx = C.getLocationContext();
- SVal sizeVal = state->getSVal(Size, LCtx);
- QualType sizeTy = Size->getType();
+ SVal sizeVal = state->getSVal(Size.Expression, LCtx);
+ QualType sizeTy = Size.Expression->getType();
ProgramStateRef stateZeroSize, stateNonZeroSize;
std::tie(stateZeroSize, stateNonZeroSize) =
- assumeZero(C, state, sizeVal, sizeTy);
+ assumeZero(C, state, sizeVal, sizeTy);
// Get the value of the Dest.
- SVal destVal = state->getSVal(Dest, LCtx);
+ SVal destVal = state->getSVal(Dest.Expression, LCtx);
// If the size is zero, there won't be any actual memory access, so
// just bind the return value to the destination buffer and return.
@@ -1168,24 +1156,23 @@ void CStringChecker::evalCopyCommon(CheckerContext &C,
// Ensure the destination is not null. If it is NULL there will be a
// NULL pointer dereference.
- state = checkNonNull(C, state, Dest, destVal, 1);
+ state = checkNonNull(C, state, Dest, destVal);
if (!state)
return;
// Get the value of the Src.
- SVal srcVal = state->getSVal(Source, LCtx);
+ SVal srcVal = state->getSVal(Source.Expression, LCtx);
// Ensure the source is not null. If it is NULL there will be a
// NULL pointer dereference.
- state = checkNonNull(C, state, Source, srcVal, 2);
+ state = checkNonNull(C, state, Source, srcVal);
if (!state)
return;
// Ensure the accesses are valid and that the buffers do not overlap.
- const char * const writeWarning =
- "Memory copy function overflows destination buffer";
- state = CheckBufferAccess(C, state, Size, Dest, Source,
- writeWarning, /* sourceWarning = */ nullptr);
+ state = CheckBufferAccess(C, state, Dest, Size, AccessKind::write);
+ state = CheckBufferAccess(C, state, Source, Size, AccessKind::read);
+
if (Restricted)
state = CheckOverlap(C, state, Size, Dest, Source);
@@ -1200,9 +1187,9 @@ void CStringChecker::evalCopyCommon(CheckerContext &C,
ASTContext &Ctx = SvalBuilder.getContext();
QualType CharPtrTy = Ctx.getPointerType(Ctx.CharTy);
SVal DestRegCharVal =
- SvalBuilder.evalCast(destVal, CharPtrTy, Dest->getType());
+ SvalBuilder.evalCast(destVal, CharPtrTy, Dest.Expression->getType());
SVal lastElement = C.getSValBuilder().evalBinOp(
- state, BO_Add, DestRegCharVal, sizeVal, Dest->getType());
+ state, BO_Add, DestRegCharVal, sizeVal, Dest.Expression->getType());
// If we don't know how much we copied, we can at least
// conjure a return value for later.
if (lastElement.isUnknown())
@@ -1223,120 +1210,136 @@ void CStringChecker::evalCopyCommon(CheckerContext &C,
// can use LazyCompoundVals to copy the source values into the destination.
// This would probably remove any existing bindings past the end of the
// copied region, but that's still an improvement over blank invalidation.
- state = InvalidateBuffer(C, state, Dest, C.getSVal(Dest),
- /*IsSourceBuffer*/false, Size);
+ state =
+ InvalidateBuffer(C, state, Dest.Expression, C.getSVal(Dest.Expression),
+ /*IsSourceBuffer*/ false, Size.Expression);
// Invalidate the source (const-invalidation without const-pointer-escaping
// the address of the top-level region).
- state = InvalidateBuffer(C, state, Source, C.getSVal(Source),
- /*IsSourceBuffer*/true, nullptr);
+ state = InvalidateBuffer(C, state, Source.Expression,
+ C.getSVal(Source.Expression),
+ /*IsSourceBuffer*/ true, nullptr);
C.addTransition(state);
}
}
-
void CStringChecker::evalMemcpy(CheckerContext &C, const CallExpr *CE) const {
// void *memcpy(void *restrict dst, const void *restrict src, size_t n);
// The return value is the address of the destination buffer.
- const Expr *Dest = CE->getArg(0);
- ProgramStateRef state = C.getState();
+ DestinationArgExpr Dest = {CE->getArg(0), 0};
+ SourceArgExpr Src = {CE->getArg(1), 1};
+ SizeArgExpr Size = {CE->getArg(2), 2};
+
+ ProgramStateRef State = C.getState();
- evalCopyCommon(C, CE, state, CE->getArg(2), Dest, CE->getArg(1), true);
+ constexpr bool IsRestricted = true;
+ constexpr bool IsMempcpy = false;
+ evalCopyCommon(C, CE, State, Size, Dest, Src, IsRestricted, IsMempcpy);
}
void CStringChecker::evalMempcpy(CheckerContext &C, const CallExpr *CE) const {
// void *mempcpy(void *restrict dst, const void *restrict src, size_t n);
// The return value is a pointer to the byte following the last written byte.
- const Expr *Dest = CE->getArg(0);
- ProgramStateRef state = C.getState();
+ DestinationArgExpr Dest = {CE->getArg(0), 0};
+ SourceArgExpr Src = {CE->getArg(1), 1};
+ SizeArgExpr Size = {CE->getArg(2), 2};
- evalCopyCommon(C, CE, state, CE->getArg(2), Dest, CE->getArg(1), true, true);
+ constexpr bool IsRestricted = true;
+ constexpr bool IsMempcpy = true;
+ evalCopyCommon(C, CE, C.getState(), Size, Dest, Src, IsRestricted, IsMempcpy);
}
void CStringChecker::evalMemmove(CheckerContext &C, const CallExpr *CE) const {
// void *memmove(void *dst, const void *src, size_t n);
// The return value is the address of the destination buffer.
- const Expr *Dest = CE->getArg(0);
- ProgramStateRef state = C.getState();
+ DestinationArgExpr Dest = {CE->getArg(0), 0};
+ SourceArgExpr Src = {CE->getArg(1), 1};
+ SizeArgExpr Size = {CE->getArg(2), 2};
- evalCopyCommon(C, CE, state, CE->getArg(2), Dest, CE->getArg(1));
+ constexpr bool IsRestricted = false;
+ constexpr bool IsMempcpy = false;
+ evalCopyCommon(C, CE, C.getState(), Size, Dest, Src, IsRestricted, IsMempcpy);
}
void CStringChecker::evalBcopy(CheckerContext &C, const CallExpr *CE) const {
// void bcopy(const void *src, void *dst, size_t n);
- evalCopyCommon(C, CE, C.getState(),
- CE->getArg(2), CE->getArg(1), CE->getArg(0));
+ SourceArgExpr Src(CE->getArg(0), 0);
+ DestinationArgExpr Dest = {CE->getArg(1), 1};
+ SizeArgExpr Size = {CE->getArg(2), 2};
+
+ constexpr bool IsRestricted = false;
+ constexpr bool IsMempcpy = false;
+ evalCopyCommon(C, CE, C.getState(), Size, Dest, Src, IsRestricted, IsMempcpy);
}
void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) const {
// int memcmp(const void *s1, const void *s2, size_t n);
CurrentFunctionDescription = "memory comparison function";
- const Expr *Left = CE->getArg(0);
- const Expr *Right = CE->getArg(1);
- const Expr *Size = CE->getArg(2);
+ AnyArgExpr Left = {CE->getArg(0), 0};
+ AnyArgExpr Right = {CE->getArg(1), 1};
+ SizeArgExpr Size = {CE->getArg(2), 2};
- ProgramStateRef state = C.getState();
- SValBuilder &svalBuilder = C.getSValBuilder();
+ ProgramStateRef State = C.getState();
+ SValBuilder &Builder = C.getSValBuilder();
+ const LocationContext *LCtx = C.getLocationContext();
// See if the size argument is zero.
- const LocationContext *LCtx = C.getLocationContext();
- SVal sizeVal = state->getSVal(Size, LCtx);
- QualType sizeTy = Size->getType();
+ SVal sizeVal = State->getSVal(Size.Expression, LCtx);
+ QualType sizeTy = Size.Expression->getType();
ProgramStateRef stateZeroSize, stateNonZeroSize;
std::tie(stateZeroSize, stateNonZeroSize) =
- assumeZero(C, state, sizeVal, sizeTy);
+ assumeZero(C, State, sizeVal, sizeTy);
// If the size can be zero, the result will be 0 in that case, and we don't
// have to check either of the buffers.
if (stateZeroSize) {
- state = stateZeroSize;
- state = state->BindExpr(CE, LCtx,
- svalBuilder.makeZeroVal(CE->getType()));
- C.addTransition(state);
+ State = stateZeroSize;
+ State = State->BindExpr(CE, LCtx, Builder.makeZeroVal(CE->getType()));
+ C.addTransition(State);
}
// If the size can be nonzero, we have to check the other arguments.
if (stateNonZeroSize) {
- state = stateNonZeroSize;
+ State = stateNonZeroSize;
// If we know the two buffers are the same, we know the result is 0.
// First, get the two buffers' addresses. Another checker will have already
// made sure they're not undefined.
DefinedOrUnknownSVal LV =
- state->getSVal(Left, LCtx).castAs<DefinedOrUnknownSVal>();
+ State->getSVal(Left.Expression, LCtx).castAs<DefinedOrUnknownSVal>();
DefinedOrUnknownSVal RV =
- state->getSVal(Right, LCtx).castAs<DefinedOrUnknownSVal>();
+ State->getSVal(Right.Expression, LCtx).castAs<DefinedOrUnknownSVal>();
// See if they are the same.
- DefinedOrUnknownSVal SameBuf = svalBuilder.evalEQ(state, LV, RV);
- ProgramStateRef StSameBuf, StNotSameBuf;
- std::tie(StSameBuf, StNotSameBuf) = state->assume(SameBuf);
+ ProgramStateRef SameBuffer, NotSameBuffer;
+ std::tie(SameBuffer, NotSameBuffer) =
+ State->assume(Builder.evalEQ(State, LV, RV));
// If the two arguments are the same buffer, we know the result is 0,
// and we only need to check one size.
- if (StSameBuf && !StNotSameBuf) {
- state = StSameBuf;
- state = CheckBufferAccess(C, state, Size, Left);
- if (state) {
- state = StSameBuf->BindExpr(CE, LCtx,
- svalBuilder.makeZeroVal(CE->getType()));
- C.addTransition(state);
+ if (SameBuffer && !NotSameBuffer) {
+ State = SameBuffer;
+ State = CheckBufferAccess(C, State, Left, Size, AccessKind::read);
+ if (State) {
+ State =
+ SameBuffer->BindExpr(CE, LCtx, Builder.makeZeroVal(CE->getType()));
+ C.addTransition(State);
}
return;
}
// If the two arguments might be different buffers, we have to check
// the size of both of them.
- assert(StNotSameBuf);
- state = CheckBufferAccess(C, state, Size, Left, Right);
- if (state) {
+ assert(NotSameBuffer);
+ State = CheckBufferAccess(C, State, Right, Size, AccessKind::read);
+ State = CheckBufferAccess(C, State, Left, Size, AccessKind::read);
+ if (State) {
// The return value is the comparison result, which we don't know.
- SVal CmpV =
- svalBuilder.conjureSymbolVal(nullptr, CE, LCtx, C.blockCount());
- state = state->BindExpr(CE, LCtx, CmpV);
- C.addTransition(state);
+ SVal CmpV = Builder.conjureSymbolVal(nullptr, CE, LCtx, C.blockCount());
+ State = State->BindExpr(CE, LCtx, CmpV);
+ C.addTransition(State);
}
}
}
@@ -1384,15 +1387,14 @@ void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE,
}
// Check that the string argument is non-null.
- const Expr *Arg = CE->getArg(0);
- SVal ArgVal = state->getSVal(Arg, LCtx);
-
- state = checkNonNull(C, state, Arg, ArgVal, 1);
+ AnyArgExpr Arg = {CE->getArg(0), 0};
+ SVal ArgVal = state->getSVal(Arg.Expression, LCtx);
+ state = checkNonNull(C, state, Arg, ArgVal);
if (!state)
return;
- SVal strLength = getCStringLength(C, state, Arg, ArgVal);
+ SVal strLength = getCStringLength(C, state, Arg.Expression, ArgVal);
// If the argument isn't a valid C string, there's no valid state to
// transition to.
@@ -1540,30 +1542,30 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
CurrentFunctionDescription = "string copy function";
else
CurrentFunctionDescription = "string concatenation function";
+
ProgramStateRef state = C.getState();
const LocationContext *LCtx = C.getLocationContext();
// Check that the destination is non-null.
- const Expr *Dst = CE->getArg(0);
- SVal DstVal = state->getSVal(Dst, LCtx);
-
- state = checkNonNull(C, state, Dst, DstVal, 1);
+ DestinationArgExpr Dst = {CE->getArg(0), 0};
+ SVal DstVal = state->getSVal(Dst.Expression, LCtx);
+ state = checkNonNull(C, state, Dst, DstVal);
if (!state)
return;
// Check that the source is non-null.
- const Expr *srcExpr = CE->getArg(1);
- SVal srcVal = state->getSVal(srcExpr, LCtx);
- state = checkNonNull(C, state, srcExpr, srcVal, 2);
+ SourceArgExpr srcExpr = {CE->getArg(1), 1};
+ SVal srcVal = state->getSVal(srcExpr.Expression, LCtx);
+ state = checkNonNull(C, state, srcExpr, srcVal);
if (!state)
return;
// Get the string length of the source.
- SVal strLength = getCStringLength(C, state, srcExpr, srcVal);
+ SVal strLength = getCStringLength(C, state, srcExpr.Expression, srcVal);
Optional<NonLoc> strLengthNL = strLength.getAs<NonLoc>();
// Get the string length of the destination buffer.
- SVal dstStrLength = getCStringLength(C, state, Dst, DstVal);
+ SVal dstStrLength = getCStringLength(C, state, Dst.Expression, DstVal);
Optional<NonLoc> dstStrLengthNL = dstStrLength.getAs<NonLoc>();
// If the source isn't a valid C string, give up.
@@ -1581,8 +1583,13 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
SVal maxLastElementIndex = UnknownVal();
const char *boundWarning = nullptr;
- state = CheckOverlap(C, state, IsBounded ? CE->getArg(2) : CE->getArg(1), Dst,
- srcExpr);
+ // FIXME: Why do we choose the srcExpr if the access has no size?
+ // Note that the 3rd argument of the call would be the size parameter.
+ SizeArgExpr SrcExprAsSizeDummy = {srcExpr.Expression, srcExpr.ArgumentIndex};
+ state = CheckOverlap(
+ C, state,
+ (IsBounded ? SizeArgExpr{CE->getArg(2), 2} : SrcExprAsSizeDummy), Dst,
+ srcExpr);
if (!state)
return;
@@ -1590,11 +1597,12 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// If the function is strncpy, strncat, etc... it is bounded.
if (IsBounded) {
// Get the max number of characters to copy.
- const Expr *lenExpr = CE->getArg(2);
- SVal lenVal = state->getSVal(lenExpr, LCtx);
+ SizeArgExpr lenExpr = {CE->getArg(2), 2};
+ SVal lenVal = state->getSVal(lenExpr.Expression, LCtx);
// Protect against misdeclared strncpy().
- lenVal = svalBuilder.evalCast(lenVal, sizeTy, lenExpr->getType());
+ lenVal =
+ svalBuilder.evalCast(lenVal, sizeTy, lenExpr.Expression->getType());
Optional<NonLoc> lenValNL = lenVal.getAs<NonLoc>();
@@ -1837,19 +1845,17 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// record the new string length.
if (Optional<loc::MemRegionVal> dstRegVal =
DstVal.getAs<loc::MemRegionVal>()) {
- QualType ptrTy = Dst->getType();
+ QualType ptrTy = Dst.Expression->getType();
// If we have an exact value on a bounded copy, use that to check for
// overflows, rather than our estimate about how much is actually copied.
- if (boundWarning) {
- if (Optional<NonLoc> maxLastNL = maxLastElementIndex.getAs<NonLoc>()) {
- SVal maxLastElement = svalBuilder.evalBinOpLN(state, BO_Add, *dstRegVal,
- *maxLastNL, ptrTy);
- state = CheckLocation(C, state, CE->getArg(2), maxLastElement,
- boundWarning);
- if (!state)
- return;
- }
+ if (Optional<NonLoc> maxLastNL = maxLastElementIndex.getAs<NonLoc>()) {
+ SVal maxLastElement =
+ svalBuilder.evalBinOpLN(state, BO_Add, *dstRegVal, *maxLastNL, ptrTy);
+
+ state = CheckLocation(C, state, Dst, maxLastElement, AccessKind::write);
+ if (!state)
+ return;
}
// Then, if the final length is known...
@@ -1859,9 +1865,7 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// ...and we haven't checked the bound, we'll check the actual copy.
if (!boundWarning) {
- const char * const warningMsg =
- "String copy function overflows destination buffer";
- state = CheckLocation(C, state, Dst, lastElement, warningMsg);
+ state = CheckLocation(C, state, Dst, lastElement, AccessKind::write);
if (!state)
return;
}
@@ -1878,13 +1882,13 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// can use LazyCompoundVals to copy the source values into the destination.
// This would probably remove any existing bindings past the end of the
// string, but that's still an improvement over blank invalidation.
- state = InvalidateBuffer(C, state, Dst, *dstRegVal,
- /*IsSourceBuffer*/false, nullptr);
+ state = InvalidateBuffer(C, state, Dst.Expression, *dstRegVal,
+ /*IsSourceBuffer*/ false, nullptr);
// Invalidate the source (const-invalidation without const-pointer-escaping
// the address of the top-level region).
- state = InvalidateBuffer(C, state, srcExpr, srcVal, /*IsSourceBuffer*/true,
- nullptr);
+ state = InvalidateBuffer(C, state, srcExpr.Expression, srcVal,
+ /*IsSourceBuffer*/ true, nullptr);
// Set the C string length of the destination, if we know it.
if (IsBounded && (appendK == ConcatFnKind::none)) {
@@ -1941,34 +1945,34 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
const LocationContext *LCtx = C.getLocationContext();
// Check that the first string is non-null
- const Expr *s1 = CE->getArg(0);
- SVal s1Val = state->getSVal(s1, LCtx);
- state = checkNonNull(C, state, s1, s1Val, 1);
+ AnyArgExpr Left = {CE->getArg(0), 0};
+ SVal LeftVal = state->getSVal(Left.Expression, LCtx);
+ state = checkNonNull(C, state, Left, LeftVal);
if (!state)
return;
// Check that the second string is non-null.
- const Expr *s2 = CE->getArg(1);
- SVal s2Val = state->getSVal(s2, LCtx);
- state = checkNonNull(C, state, s2, s2Val, 2);
+ AnyArgExpr Right = {CE->getArg(1), 1};
+ SVal RightVal = state->getSVal(Right.Expression, LCtx);
+ state = checkNonNull(C, state, Right, RightVal);
if (!state)
return;
// Get the string length of the first string or give up.
- SVal s1Length = getCStringLength(C, state, s1, s1Val);
- if (s1Length.isUndef())
+ SVal LeftLength = getCStringLength(C, state, Left.Expression, LeftVal);
+ if (LeftLength.isUndef())
return;
// Get the string length of the second string or give up.
- SVal s2Length = getCStringLength(C, state, s2, s2Val);
- if (s2Length.isUndef())
+ SVal RightLength = getCStringLength(C, state, Right.Expression, RightVal);
+ if (RightLength.isUndef())
return;
// If we know the two buffers are the same, we know the result is 0.
// First, get the two buffers' addresses. Another checker will have already
// made sure they're not undefined.
- DefinedOrUnknownSVal LV = s1Val.castAs<DefinedOrUnknownSVal>();
- DefinedOrUnknownSVal RV = s2Val.castAs<DefinedOrUnknownSVal>();
+ DefinedOrUnknownSVal LV = LeftVal.castAs<DefinedOrUnknownSVal>();
+ DefinedOrUnknownSVal RV = RightVal.castAs<DefinedOrUnknownSVal>();
// See if they are the same.
SValBuilder &svalBuilder = C.getSValBuilder();
@@ -1995,15 +1999,17 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
// For now, we only do this if they're both known string literals.
// Attempt to extract string literals from both expressions.
- const StringLiteral *s1StrLiteral = getCStringLiteral(C, state, s1, s1Val);
- const StringLiteral *s2StrLiteral = getCStringLiteral(C, state, s2, s2Val);
+ const StringLiteral *LeftStrLiteral =
+ getCStringLiteral(C, state, Left.Expression, LeftVal);
+ const StringLiteral *RightStrLiteral =
+ getCStringLiteral(C, state, Right.Expression, RightVal);
bool canComputeResult = false;
SVal resultVal = svalBuilder.conjureSymbolVal(nullptr, CE, LCtx,
C.blockCount());
- if (s1StrLiteral && s2StrLiteral) {
- StringRef s1StrRef = s1StrLiteral->getString();
- StringRef s2StrRef = s2StrLiteral->getString();
+ if (LeftStrLiteral && RightStrLiteral) {
+ StringRef LeftStrRef = LeftStrLiteral->getString();
+ StringRef RightStrRef = RightStrLiteral->getString();
if (IsBounded) {
// Get the max number of characters to compare.
@@ -2013,8 +2019,8 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
// If the length is known, we can get the right substrings.
if (const llvm::APSInt *len = svalBuilder.getKnownValue(state, lenVal)) {
// Create substrings of each to compare the prefix.
- s1StrRef = s1StrRef.substr(0, (size_t)len->getZExtValue());
- s2StrRef = s2StrRef.substr(0, (size_t)len->getZExtValue());
+ LeftStrRef = LeftStrRef.substr(0, (size_t)len->getZExtValue());
+ RightStrRef = RightStrRef.substr(0, (size_t)len->getZExtValue());
canComputeResult = true;
}
} else {
@@ -2024,17 +2030,17 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
if (canComputeResult) {
// Real strcmp stops at null characters.
- size_t s1Term = s1StrRef.find('\0');
+ size_t s1Term = LeftStrRef.find('\0');
if (s1Term != StringRef::npos)
- s1StrRef = s1StrRef.substr(0, s1Term);
+ LeftStrRef = LeftStrRef.substr(0, s1Term);
- size_t s2Term = s2StrRef.find('\0');
+ size_t s2Term = RightStrRef.find('\0');
if (s2Term != StringRef::npos)
- s2StrRef = s2StrRef.substr(0, s2Term);
+ RightStrRef = RightStrRef.substr(0, s2Term);
// Use StringRef's comparison methods to compute the actual result.
- int compareRes = IgnoreCase ? s1StrRef.compare_lower(s2StrRef)
- : s1StrRef.compare(s2StrRef);
+ int compareRes = IgnoreCase ? LeftStrRef.compare_lower(RightStrRef)
+ : LeftStrRef.compare(RightStrRef);
// The strcmp function returns an integer greater than, equal to, or less
// than zero, [c11, p7.24.4.2].
@@ -2064,8 +2070,9 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
void CStringChecker::evalStrsep(CheckerContext &C, const CallExpr *CE) const {
//char *strsep(char **stringp, const char *delim);
// Sanity: does the search string parameter match the return type?
- const Expr *SearchStrPtr = CE->getArg(0);
- QualType CharPtrTy = SearchStrPtr->getType()->getPointeeType();
+ SourceArgExpr SearchStrPtr = {CE->getArg(0), 0};
+
+ QualType CharPtrTy = SearchStrPtr.Expression->getType()->getPointeeType();
if (CharPtrTy.isNull() ||
CE->getType().getUnqualifiedType() != CharPtrTy.getUnqualifiedType())
return;
@@ -2076,15 +2083,15 @@ void CStringChecker::evalStrsep(CheckerContext &C, const CallExpr *CE) const {
// Check that the search string pointer is non-null (though it may point to
// a null string).
- SVal SearchStrVal = State->getSVal(SearchStrPtr, LCtx);
- State = checkNonNull(C, State, SearchStrPtr, SearchStrVal, 1);
+ SVal SearchStrVal = State->getSVal(SearchStrPtr.Expression, LCtx);
+ State = checkNonNull(C, State, SearchStrPtr, SearchStrVal);
if (!State)
return;
// Check that the delimiter string is non-null.
- const Expr *DelimStr = CE->getArg(1);
- SVal DelimStrVal = State->getSVal(DelimStr, LCtx);
- State = checkNonNull(C, State, DelimStr, DelimStrVal, 2);
+ AnyArgExpr DelimStr = {CE->getArg(1), 1};
+ SVal DelimStrVal = State->getSVal(DelimStr.Expression, LCtx);
+ State = checkNonNull(C, State, DelimStr, DelimStrVal);
if (!State)
return;
@@ -2096,8 +2103,8 @@ void CStringChecker::evalStrsep(CheckerContext &C, const CallExpr *CE) const {
// Invalidate the search string, representing the change of one delimiter
// character to NUL.
- State = InvalidateBuffer(C, State, SearchStrPtr, Result,
- /*IsSourceBuffer*/false, nullptr);
+ State = InvalidateBuffer(C, State, SearchStrPtr.Expression, Result,
+ /*IsSourceBuffer*/ false, nullptr);
// Overwrite the search string pointer. The new value is either an address
// further along in the same string, or NULL if there are no more tokens.
@@ -2158,65 +2165,67 @@ void CStringChecker::evalStdCopyCommon(CheckerContext &C,
}
void CStringChecker::evalMemset(CheckerContext &C, const CallExpr *CE) const {
+ // void *memset(void *s, int c, size_t n);
CurrentFunctionDescription = "memory set function";
- const Expr *Mem = CE->getArg(0);
- const Expr *CharE = CE->getArg(1);
- const Expr *Size = CE->getArg(2);
+ DestinationArgExpr Buffer = {CE->getArg(0), 0};
+ AnyArgExpr CharE = {CE->getArg(1), 1};
+ SizeArgExpr Size = {CE->getArg(2), 2};
+
ProgramStateRef State = C.getState();
// See if the size argument is zero.
const LocationContext *LCtx = C.getLocationContext();
- SVal SizeVal = State->getSVal(Size, LCtx);
- QualType SizeTy = Size->getType();
+ SVal SizeVal = C.getSVal(Size.Expression);
+ QualType SizeTy = Size.Expression->getType();
- ProgramStateRef StateZeroSize, StateNonZeroSize;
- std::tie(StateZeroSize, StateNonZeroSize) =
- assumeZero(C, State, SizeVal, SizeTy);
+ ProgramStateRef ZeroSize, NonZeroSize;
+ std::tie(ZeroSize, NonZeroSize) = assumeZero(C, State, SizeVal, SizeTy);
// Get the value of the memory area.
- SVal MemVal = State->getSVal(Mem, LCtx);
+ SVal BufferPtrVal = C.getSVal(Buffer.Expression);
// If the size is zero, there won't be any actual memory access, so
- // just bind the return value to the Mem buffer and return.
- if (StateZeroSize && !StateNonZeroSize) {
- StateZeroSize = StateZeroSize->BindExpr(CE, LCtx, MemVal);
- C.addTransition(StateZeroSize);
+ // just bind the return value to the buffer and return.
+ if (ZeroSize && !NonZeroSize) {
+ ZeroSize = ZeroSize->BindExpr(CE, LCtx, BufferPtrVal);
+ C.addTransition(ZeroSize);
return;
}
// Ensure the memory area is not null.
// If it is NULL there will be a NULL pointer dereference.
- State = checkNonNull(C, StateNonZeroSize, Mem, MemVal, 1);
+ State = checkNonNull(C, NonZeroSize, Buffer, BufferPtrVal);
if (!State)
return;
- State = CheckBufferAccess(C, State, Size, Mem);
+ State = CheckBufferAccess(C, State, Buffer, Size, AccessKind::write);
if (!State)
return;
// According to the values of the arguments, bind the value of the second
// argument to the destination buffer and set string length, or just
// invalidate the destination buffer.
- if (!memsetAux(Mem, C.getSVal(CharE), Size, C, State))
+ if (!memsetAux(Buffer.Expression, C.getSVal(CharE.Expression),
+ Size.Expression, C, State))
return;
- State = State->BindExpr(CE, LCtx, MemVal);
+ State = State->BindExpr(CE, LCtx, BufferPtrVal);
C.addTransition(State);
}
void CStringChecker::evalBzero(CheckerContext &C, const CallExpr *CE) const {
CurrentFunctionDescription = "memory clearance function";
- const Expr *Mem = CE->getArg(0);
- const Expr *Size = CE->getArg(1);
+ DestinationArgExpr Buffer = {CE->getArg(0), 0};
+ SizeArgExpr Size = {CE->getArg(1), 1};
SVal Zero = C.getSValBuilder().makeZeroVal(C.getASTContext().IntTy);
ProgramStateRef State = C.getState();
// See if the size argument is zero.
- SVal SizeVal = C.getSVal(Size);
- QualType SizeTy = Size->getType();
+ SVal SizeVal = C.getSVal(Size.Expression);
+ QualType SizeTy = Size.Expression->getType();
ProgramStateRef StateZeroSize, StateNonZeroSize;
std::tie(StateZeroSize, StateNonZeroSize) =
@@ -2230,19 +2239,19 @@ void CStringChecker::evalBzero(CheckerContext &C, const CallExpr *CE) const {
}
// Get the value of the memory area.
- SVal MemVal = C.getSVal(Mem);
+ SVal MemVal = C.getSVal(Buffer.Expression);
// Ensure the memory area is not null.
// If it is NULL there will be a NULL pointer dereference.
- State = checkNonNull(C, StateNonZeroSize, Mem, MemVal, 1);
+ State = checkNonNull(C, StateNonZeroSize, Buffer, MemVal);
if (!State)
return;
- State = CheckBufferAccess(C, State, Size, Mem);
+ State = CheckBufferAccess(C, State, Buffer, Size, AccessKind::write);
if (!State)
return;
- if (!memsetAux(Mem, Zero, Size, C, State))
+ if (!memsetAux(Buffer.Expression, Zero, Size.Expression, C, State))
return;
C.addTransition(State);
@@ -2434,7 +2443,7 @@ void ento::registerCStringModeling(CheckerManager &Mgr) {
Mgr.registerChecker<CStringChecker>();
}
-bool ento::shouldRegisterCStringModeling(const LangOptions &LO) {
+bool ento::shouldRegisterCStringModeling(const CheckerManager &mgr) {
return true;
}
@@ -2445,7 +2454,7 @@ bool ento::shouldRegisterCStringModeling(const LangOptions &LO) {
checker->Filter.CheckName##name = mgr.getCurrentCheckerName(); \
} \
\
- bool ento::shouldRegister##name(const LangOptions &LO) { return true; }
+ bool ento::shouldRegister##name(const CheckerManager &mgr) { return true; }
REGISTER_CHECKER(CStringNullArg)
REGISTER_CHECKER(CStringOutOfBounds)
diff --git a/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
index d84fcc69a492..888724f7ea3b 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
@@ -291,6 +291,6 @@ void ento::registerCStringSyntaxChecker(CheckerManager &mgr) {
mgr.registerChecker<CStringSyntaxChecker>();
}
-bool ento::shouldRegisterCStringSyntaxChecker(const LangOptions &LO) {
+bool ento::shouldRegisterCStringSyntaxChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp
index 01f5b9c889e3..24776338ce10 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp
@@ -53,21 +53,21 @@ void CXXSelfAssignmentChecker::checkBeginFunction(CheckerContext &C) const {
ProgramStateRef SelfAssignState = State->bindLoc(Param, ThisVal, LCtx);
const NoteTag *SelfAssignTag =
- C.getNoteTag([MD](BugReport &BR) -> std::string {
+ C.getNoteTag([MD](PathSensitiveBugReport &BR) -> std::string {
SmallString<256> Msg;
llvm::raw_svector_ostream Out(Msg);
Out << "Assuming " << MD->getParamDecl(0)->getName() << " == *this";
- return Out.str();
+ return std::string(Out.str());
});
C.addTransition(SelfAssignState, SelfAssignTag);
ProgramStateRef NonSelfAssignState = State->bindLoc(Param, ParamVal, LCtx);
const NoteTag *NonSelfAssignTag =
- C.getNoteTag([MD](BugReport &BR) -> std::string {
+ C.getNoteTag([MD](PathSensitiveBugReport &BR) -> std::string {
SmallString<256> Msg;
llvm::raw_svector_ostream Out(Msg);
Out << "Assuming " << MD->getParamDecl(0)->getName() << " != *this";
- return Out.str();
+ return std::string(Out.str());
});
C.addTransition(NonSelfAssignState, NonSelfAssignTag);
}
@@ -76,6 +76,6 @@ void ento::registerCXXSelfAssignmentChecker(CheckerManager &Mgr) {
Mgr.registerChecker<CXXSelfAssignmentChecker>();
}
-bool ento::shouldRegisterCXXSelfAssignmentChecker(const LangOptions &LO) {
+bool ento::shouldRegisterCXXSelfAssignmentChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
index 2fcb765cd4ee..3e46e2372516 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
@@ -11,9 +11,10 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/AST/ExprCXX.h"
#include "clang/AST/ParentMap.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -21,6 +22,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -29,11 +31,8 @@ using namespace ento;
namespace {
class CallAndMessageChecker
- : public Checker< check::PreStmt<CallExpr>,
- check::PreStmt<CXXDeleteExpr>,
- check::PreObjCMessage,
- check::ObjCMessageNil,
- check::PreCall > {
+ : public Checker<check::PreObjCMessage, check::ObjCMessageNil,
+ check::PreCall> {
mutable std::unique_ptr<BugType> BT_call_null;
mutable std::unique_ptr<BugType> BT_call_undef;
mutable std::unique_ptr<BugType> BT_cxx_call_null;
@@ -48,11 +47,37 @@ class CallAndMessageChecker
mutable std::unique_ptr<BugType> BT_call_few_args;
public:
- DefaultBool Check_CallAndMessageUnInitRefArg;
- CheckerNameRef CheckName_CallAndMessageUnInitRefArg;
+ // These correspond with the checker options. Looking at other checkers such
+ // as MallocChecker and CStringChecker, this is similar as to how they pull
+ // off having a modeling class, but emitting diagnostics under a smaller
+ // checker's name that can be safely disabled without disturbing the
+ // underlaying modeling engine.
+ // The reason behind having *checker options* rather then actual *checkers*
+ // here is that CallAndMessage is among the oldest checkers out there, and can
+ // be responsible for the majority of the reports on any given project. This
+ // is obviously not ideal, but changing checker name has the consequence of
+ // changing the issue hashes associated with the reports, and databases
+ // relying on this (CodeChecker, for instance) would suffer greatly.
+ // If we ever end up making changes to the issue hash generation algorithm, or
+ // the warning messages here, we should totally jump on the opportunity to
+ // convert these to actual checkers.
+ enum CheckKind {
+ CK_FunctionPointer,
+ CK_ParameterCount,
+ CK_CXXThisMethodCall,
+ CK_CXXDeallocationArg,
+ CK_ArgInitializedness,
+ CK_ArgPointeeInitializedness,
+ CK_NilReceiver,
+ CK_UndefReceiver,
+ CK_NumCheckKinds
+ };
+
+ DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ // The original core.CallAndMessage checker name. This should rather be an
+ // array, as seen in MallocChecker and CStringChecker.
+ CheckerNameRef OriginalName;
- void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
- void checkPreStmt(const CXXDeleteExpr *DE, CheckerContext &C) const;
void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
/// Fill in the return value that results from messaging nil based on the
@@ -62,6 +87,25 @@ public:
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ ProgramStateRef checkFunctionPointerCall(const CallExpr *CE,
+ CheckerContext &C,
+ ProgramStateRef State) const;
+
+ ProgramStateRef checkCXXMethodCall(const CXXInstanceCall *CC,
+ CheckerContext &C,
+ ProgramStateRef State) const;
+
+ ProgramStateRef checkParameterCount(const CallEvent &Call, CheckerContext &C,
+ ProgramStateRef State) const;
+
+ ProgramStateRef checkCXXDeallocation(const CXXDeallocatorCall *DC,
+ CheckerContext &C,
+ ProgramStateRef State) const;
+
+ ProgramStateRef checkArgInitializedness(const CallEvent &Call,
+ CheckerContext &C,
+ ProgramStateRef State) const;
+
private:
bool PreVisitProcessArg(CheckerContext &C, SVal V, SourceRange ArgRange,
const Expr *ArgEx, int ArgumentNumber,
@@ -79,7 +123,7 @@ private:
void LazyInit_BT(const char *desc, std::unique_ptr<BugType> &BT) const {
if (!BT)
- BT.reset(new BuiltinBug(this, desc));
+ BT.reset(new BuiltinBug(OriginalName, desc));
}
bool uninitRefOrPointer(CheckerContext &C, const SVal &V,
SourceRange ArgRange, const Expr *ArgEx,
@@ -144,7 +188,10 @@ bool CallAndMessageChecker::uninitRefOrPointer(
CheckerContext &C, const SVal &V, SourceRange ArgRange, const Expr *ArgEx,
std::unique_ptr<BugType> &BT, const ParmVarDecl *ParamDecl, const char *BD,
int ArgumentNumber) const {
- if (!Check_CallAndMessageUnInitRefArg)
+
+ // The pointee being uninitialized is a sign of code smell, not a bug, no need
+ // to sink here.
+ if (!ChecksEnabled[CK_ArgPointeeInitializedness])
return false;
// No parameter declaration available, i.e. variadic function argument.
@@ -246,6 +293,10 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
return true;
if (V.isUndef()) {
+ if (!ChecksEnabled[CK_ArgInitializedness]) {
+ C.addSink();
+ return true;
+ }
if (ExplodedNode *N = C.generateErrorNode()) {
LazyInit_BT(BD, BT);
// Generate a report for this bug.
@@ -272,6 +323,10 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
D->getStore());
if (F.Find(D->getRegion())) {
+ if (!ChecksEnabled[CK_ArgInitializedness]) {
+ C.addSink();
+ return true;
+ }
if (ExplodedNode *N = C.generateErrorNode()) {
LazyInit_BT(BD, BT);
SmallString<512> Str;
@@ -311,126 +366,158 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
return false;
}
-void CallAndMessageChecker::checkPreStmt(const CallExpr *CE,
- CheckerContext &C) const{
+ProgramStateRef CallAndMessageChecker::checkFunctionPointerCall(
+ const CallExpr *CE, CheckerContext &C, ProgramStateRef State) const {
const Expr *Callee = CE->getCallee()->IgnoreParens();
- ProgramStateRef State = C.getState();
const LocationContext *LCtx = C.getLocationContext();
SVal L = State->getSVal(Callee, LCtx);
if (L.isUndef()) {
+ if (!ChecksEnabled[CK_FunctionPointer]) {
+ C.addSink(State);
+ return nullptr;
+ }
if (!BT_call_undef)
BT_call_undef.reset(new BuiltinBug(
- this, "Called function pointer is an uninitialized pointer value"));
+ OriginalName,
+ "Called function pointer is an uninitialized pointer value"));
emitBadCall(BT_call_undef.get(), C, Callee);
- return;
+ return nullptr;
}
ProgramStateRef StNonNull, StNull;
std::tie(StNonNull, StNull) = State->assume(L.castAs<DefinedOrUnknownSVal>());
if (StNull && !StNonNull) {
+ if (!ChecksEnabled[CK_FunctionPointer]) {
+ C.addSink(StNull);
+ return nullptr;
+ }
if (!BT_call_null)
BT_call_null.reset(new BuiltinBug(
- this, "Called function pointer is null (null dereference)"));
+ OriginalName, "Called function pointer is null (null dereference)"));
emitBadCall(BT_call_null.get(), C, Callee);
- return;
+ return nullptr;
}
- C.addTransition(StNonNull);
+ return StNonNull;
}
-void CallAndMessageChecker::checkPreStmt(const CXXDeleteExpr *DE,
- CheckerContext &C) const {
+ProgramStateRef CallAndMessageChecker::checkParameterCount(
+ const CallEvent &Call, CheckerContext &C, ProgramStateRef State) const {
- SVal Arg = C.getSVal(DE->getArgument());
- if (Arg.isUndef()) {
- StringRef Desc;
- ExplodedNode *N = C.generateErrorNode();
- if (!N)
- return;
- if (!BT_cxx_delete_undef)
- BT_cxx_delete_undef.reset(
- new BuiltinBug(this, "Uninitialized argument value"));
- if (DE->isArrayFormAsWritten())
- Desc = "Argument to 'delete[]' is uninitialized";
- else
- Desc = "Argument to 'delete' is uninitialized";
- BugType *BT = BT_cxx_delete_undef.get();
- auto R = std::make_unique<PathSensitiveBugReport>(*BT, Desc, N);
- bugreporter::trackExpressionValue(N, DE, *R);
- C.emitReport(std::move(R));
- return;
+ // If we have a function or block declaration, we can make sure we pass
+ // enough parameters.
+ unsigned Params = Call.parameters().size();
+ if (Call.getNumArgs() >= Params)
+ return State;
+
+ if (!ChecksEnabled[CK_ParameterCount]) {
+ C.addSink(State);
+ return nullptr;
+ }
+
+ ExplodedNode *N = C.generateErrorNode();
+ if (!N)
+ return nullptr;
+
+ LazyInit_BT("Function call with too few arguments", BT_call_few_args);
+
+ SmallString<512> Str;
+ llvm::raw_svector_ostream os(Str);
+ if (isa<AnyFunctionCall>(Call)) {
+ os << "Function ";
+ } else {
+ assert(isa<BlockCall>(Call));
+ os << "Block ";
}
+ os << "taking " << Params << " argument" << (Params == 1 ? "" : "s")
+ << " is called with fewer (" << Call.getNumArgs() << ")";
+
+ C.emitReport(
+ std::make_unique<PathSensitiveBugReport>(*BT_call_few_args, os.str(), N));
+ return nullptr;
}
-void CallAndMessageChecker::checkPreCall(const CallEvent &Call,
- CheckerContext &C) const {
- ProgramStateRef State = C.getState();
+ProgramStateRef CallAndMessageChecker::checkCXXMethodCall(
+ const CXXInstanceCall *CC, CheckerContext &C, ProgramStateRef State) const {
- // If this is a call to a C++ method, check if the callee is null or
- // undefined.
- if (const CXXInstanceCall *CC = dyn_cast<CXXInstanceCall>(&Call)) {
- SVal V = CC->getCXXThisVal();
- if (V.isUndef()) {
- if (!BT_cxx_call_undef)
- BT_cxx_call_undef.reset(
- new BuiltinBug(this, "Called C++ object pointer is uninitialized"));
- emitBadCall(BT_cxx_call_undef.get(), C, CC->getCXXThisExpr());
- return;
+ SVal V = CC->getCXXThisVal();
+ if (V.isUndef()) {
+ if (!ChecksEnabled[CK_CXXThisMethodCall]) {
+ C.addSink(State);
+ return nullptr;
}
+ if (!BT_cxx_call_undef)
+ BT_cxx_call_undef.reset(new BuiltinBug(
+ OriginalName, "Called C++ object pointer is uninitialized"));
+ emitBadCall(BT_cxx_call_undef.get(), C, CC->getCXXThisExpr());
+ return nullptr;
+ }
- ProgramStateRef StNonNull, StNull;
- std::tie(StNonNull, StNull) =
- State->assume(V.castAs<DefinedOrUnknownSVal>());
+ ProgramStateRef StNonNull, StNull;
+ std::tie(StNonNull, StNull) = State->assume(V.castAs<DefinedOrUnknownSVal>());
- if (StNull && !StNonNull) {
- if (!BT_cxx_call_null)
- BT_cxx_call_null.reset(
- new BuiltinBug(this, "Called C++ object pointer is null"));
- emitBadCall(BT_cxx_call_null.get(), C, CC->getCXXThisExpr());
- return;
+ if (StNull && !StNonNull) {
+ if (!ChecksEnabled[CK_CXXThisMethodCall]) {
+ C.addSink(StNull);
+ return nullptr;
}
-
- State = StNonNull;
+ if (!BT_cxx_call_null)
+ BT_cxx_call_null.reset(
+ new BuiltinBug(OriginalName, "Called C++ object pointer is null"));
+ emitBadCall(BT_cxx_call_null.get(), C, CC->getCXXThisExpr());
+ return nullptr;
}
- const Decl *D = Call.getDecl();
- if (D && (isa<FunctionDecl>(D) || isa<BlockDecl>(D))) {
- // If we have a function or block declaration, we can make sure we pass
- // enough parameters.
- unsigned Params = Call.parameters().size();
- if (Call.getNumArgs() < Params) {
- ExplodedNode *N = C.generateErrorNode();
- if (!N)
- return;
-
- LazyInit_BT("Function call with too few arguments", BT_call_few_args);
-
- SmallString<512> Str;
- llvm::raw_svector_ostream os(Str);
- if (isa<FunctionDecl>(D)) {
- os << "Function ";
- } else {
- assert(isa<BlockDecl>(D));
- os << "Block ";
- }
- os << "taking " << Params << " argument"
- << (Params == 1 ? "" : "s") << " is called with fewer ("
- << Call.getNumArgs() << ")";
+ return StNonNull;
+}
- C.emitReport(std::make_unique<PathSensitiveBugReport>(*BT_call_few_args,
- os.str(), N));
- }
+ProgramStateRef
+CallAndMessageChecker::checkCXXDeallocation(const CXXDeallocatorCall *DC,
+ CheckerContext &C,
+ ProgramStateRef State) const {
+ const CXXDeleteExpr *DE = DC->getOriginExpr();
+ assert(DE);
+ SVal Arg = C.getSVal(DE->getArgument());
+ if (!Arg.isUndef())
+ return State;
+
+ if (!ChecksEnabled[CK_CXXDeallocationArg]) {
+ C.addSink(State);
+ return nullptr;
}
+ StringRef Desc;
+ ExplodedNode *N = C.generateErrorNode();
+ if (!N)
+ return nullptr;
+ if (!BT_cxx_delete_undef)
+ BT_cxx_delete_undef.reset(
+ new BuiltinBug(OriginalName, "Uninitialized argument value"));
+ if (DE->isArrayFormAsWritten())
+ Desc = "Argument to 'delete[]' is uninitialized";
+ else
+ Desc = "Argument to 'delete' is uninitialized";
+ BugType *BT = BT_cxx_delete_undef.get();
+ auto R = std::make_unique<PathSensitiveBugReport>(*BT, Desc, N);
+ bugreporter::trackExpressionValue(N, DE, *R);
+ C.emitReport(std::move(R));
+ return nullptr;
+}
+
+ProgramStateRef CallAndMessageChecker::checkArgInitializedness(
+ const CallEvent &Call, CheckerContext &C, ProgramStateRef State) const {
+
+ const Decl *D = Call.getDecl();
+
// Don't check for uninitialized field values in arguments if the
// caller has a body that is available and we have the chance to inline it.
  // This is a hack, but is a reasonable compromise between sometimes warning
// and sometimes not depending on if we decide to inline a function.
const bool checkUninitFields =
- !(C.getAnalysisManager().shouldInlineCall() && (D && D->getBody()));
+ !(C.getAnalysisManager().shouldInlineCall() && (D && D->getBody()));
std::unique_ptr<BugType> *BT;
if (isa<ObjCMethodCall>(Call))
@@ -441,13 +528,45 @@ void CallAndMessageChecker::checkPreCall(const CallEvent &Call,
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
for (unsigned i = 0, e = Call.getNumArgs(); i != e; ++i) {
const ParmVarDecl *ParamDecl = nullptr;
- if(FD && i < FD->getNumParams())
+ if (FD && i < FD->getNumParams())
ParamDecl = FD->getParamDecl(i);
if (PreVisitProcessArg(C, Call.getArgSVal(i), Call.getArgSourceRange(i),
- Call.getArgExpr(i), i,
- checkUninitFields, Call, *BT, ParamDecl))
- return;
+ Call.getArgExpr(i), i, checkUninitFields, Call, *BT,
+ ParamDecl))
+ return nullptr;
}
+ return State;
+}
+
+void CallAndMessageChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+
+ if (const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr()))
+ State = checkFunctionPointerCall(CE, C, State);
+
+ if (!State)
+ return;
+
+ if (Call.getDecl())
+ State = checkParameterCount(Call, C, State);
+
+ if (!State)
+ return;
+
+ if (const auto *CC = dyn_cast<CXXInstanceCall>(&Call))
+ State = checkCXXMethodCall(CC, C, State);
+
+ if (!State)
+ return;
+
+ if (const auto *DC = dyn_cast<CXXDeallocatorCall>(&Call))
+ State = checkCXXDeallocation(DC, C, State);
+
+ if (!State)
+ return;
+
+ State = checkArgInitializedness(Call, C, State);
// If we make it here, record our assumptions about the callee.
C.addTransition(State);
@@ -457,12 +576,16 @@ void CallAndMessageChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
CheckerContext &C) const {
SVal recVal = msg.getReceiverSVal();
if (recVal.isUndef()) {
+ if (!ChecksEnabled[CK_UndefReceiver]) {
+ C.addSink();
+ return;
+ }
if (ExplodedNode *N = C.generateErrorNode()) {
BugType *BT = nullptr;
switch (msg.getMessageKind()) {
case OCM_Message:
if (!BT_msg_undef)
- BT_msg_undef.reset(new BuiltinBug(this,
+ BT_msg_undef.reset(new BuiltinBug(OriginalName,
"Receiver in message expression "
"is an uninitialized value"));
BT = BT_msg_undef.get();
@@ -470,13 +593,15 @@ void CallAndMessageChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
case OCM_PropertyAccess:
if (!BT_objc_prop_undef)
BT_objc_prop_undef.reset(new BuiltinBug(
- this, "Property access on an uninitialized object pointer"));
+ OriginalName,
+ "Property access on an uninitialized object pointer"));
BT = BT_objc_prop_undef.get();
break;
case OCM_Subscript:
if (!BT_objc_subscript_undef)
BT_objc_subscript_undef.reset(new BuiltinBug(
- this, "Subscript access on an uninitialized object pointer"));
+ OriginalName,
+ "Subscript access on an uninitialized object pointer"));
BT = BT_objc_subscript_undef.get();
break;
}
@@ -503,10 +628,14 @@ void CallAndMessageChecker::checkObjCMessageNil(const ObjCMethodCall &msg,
void CallAndMessageChecker::emitNilReceiverBug(CheckerContext &C,
const ObjCMethodCall &msg,
ExplodedNode *N) const {
+ if (!ChecksEnabled[CK_NilReceiver]) {
+ C.addSink();
+ return;
+ }
if (!BT_msg_ret)
- BT_msg_ret.reset(
- new BuiltinBug(this, "Receiver in message expression is 'nil'"));
+ BT_msg_ret.reset(new BuiltinBug(OriginalName,
+ "Receiver in message expression is 'nil'"));
const ObjCMessageExpr *ME = msg.getOriginExpr();
@@ -601,20 +730,34 @@ void CallAndMessageChecker::HandleNilReceiver(CheckerContext &C,
C.addTransition(state);
}
-void ento::registerCallAndMessageChecker(CheckerManager &mgr) {
+void ento::registerCallAndMessageModeling(CheckerManager &mgr) {
mgr.registerChecker<CallAndMessageChecker>();
}
-bool ento::shouldRegisterCallAndMessageChecker(const LangOptions &LO) {
+bool ento::shouldRegisterCallAndMessageModeling(const CheckerManager &mgr) {
return true;
}
-void ento::registerCallAndMessageUnInitRefArg(CheckerManager &mgr) {
- CallAndMessageChecker *Checker = mgr.getChecker<CallAndMessageChecker>();
- Checker->Check_CallAndMessageUnInitRefArg = true;
- Checker->CheckName_CallAndMessageUnInitRefArg = mgr.getCurrentCheckerName();
+void ento::registerCallAndMessageChecker(CheckerManager &mgr) {
+ CallAndMessageChecker *checker = mgr.getChecker<CallAndMessageChecker>();
+
+ checker->OriginalName = mgr.getCurrentCheckerName();
+
+#define QUERY_CHECKER_OPTION(OPTION) \
+ checker->ChecksEnabled[CallAndMessageChecker::CK_##OPTION] = \
+ mgr.getAnalyzerOptions().getCheckerBooleanOption( \
+ mgr.getCurrentCheckerName(), #OPTION);
+
+ QUERY_CHECKER_OPTION(FunctionPointer)
+ QUERY_CHECKER_OPTION(ParameterCount)
+ QUERY_CHECKER_OPTION(CXXThisMethodCall)
+ QUERY_CHECKER_OPTION(CXXDeallocationArg)
+ QUERY_CHECKER_OPTION(ArgInitializedness)
+ QUERY_CHECKER_OPTION(ArgPointeeInitializedness)
+ QUERY_CHECKER_OPTION(NilReceiver)
+ QUERY_CHECKER_OPTION(UndefReceiver)
}
-bool ento::shouldRegisterCallAndMessageUnInitRefArg(const LangOptions &LO) {
+bool ento::shouldRegisterCallAndMessageChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
index 51c1d4409929..a498f252e693 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
@@ -10,12 +10,14 @@
// whether the size of the symbolic region is a multiple of the size of T.
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+
#include "clang/AST/CharUnits.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
using namespace clang;
using namespace ento;
@@ -109,12 +111,13 @@ void CastSizeChecker::checkPreStmt(const CastExpr *CE,CheckerContext &C) const {
return;
SValBuilder &svalBuilder = C.getSValBuilder();
- SVal extent = SR->getExtent(svalBuilder);
- const llvm::APSInt *extentInt = svalBuilder.getKnownValue(state, extent);
- if (!extentInt)
+
+ DefinedOrUnknownSVal Size = getDynamicSize(state, SR, svalBuilder);
+ const llvm::APSInt *SizeInt = svalBuilder.getKnownValue(state, Size);
+ if (!SizeInt)
return;
- CharUnits regionSize = CharUnits::fromQuantity(extentInt->getSExtValue());
+ CharUnits regionSize = CharUnits::fromQuantity(SizeInt->getZExtValue());
CharUnits typeSize = C.getASTContext().getTypeSizeInChars(ToPointeeTy);
// Ignore void, and a few other un-sizeable types.
@@ -143,10 +146,11 @@ void ento::registerCastSizeChecker(CheckerManager &mgr) {
mgr.registerChecker<CastSizeChecker>();
}
-bool ento::shouldRegisterCastSizeChecker(const LangOptions &LO) {
+bool ento::shouldRegisterCastSizeChecker(const CheckerManager &mgr) {
// PR31226: C++ is more complicated than what this checker currently supports.
// There are derived-to-base casts, there are different rules for 0-size
// structures, no flexible arrays, etc.
// FIXME: Disabled on C++ for now.
+ const LangOptions &LO = mgr.getLangOpts();
return !LO.CPlusPlus;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
index 93665596be29..e674ec43bcd9 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
@@ -120,6 +120,6 @@ void ento::registerCastToStructChecker(CheckerManager &mgr) {
mgr.registerChecker<CastToStructChecker>();
}
-bool ento::shouldRegisterCastToStructChecker(const LangOptions &LO) {
+bool ento::shouldRegisterCastToStructChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp
index cc1c9a66b90e..1ef70b650414 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp
@@ -30,7 +30,7 @@ using namespace clang;
using namespace ento;
namespace {
-class CastValueChecker : public Checker<eval::Call> {
+class CastValueChecker : public Checker<check::DeadSymbols, eval::Call> {
enum class CallKind { Function, Method, InstanceOf };
using CastCheck =
@@ -51,6 +51,7 @@ public:
// 1) isa: The parameter is non-null, returns boolean.
// 2) isa_and_nonnull: The parameter is null or non-null, returns boolean.
bool evalCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
private:
// These are known in the LLVM project. The pairs are in the following form:
@@ -129,7 +130,7 @@ static const NoteTag *getNoteTag(CheckerContext &C,
Out << ' ' << (CastSucceeds ? "is a" : "is not a") << " '" << CastToName
<< '\'';
- return Out.str();
+ return std::string(Out.str());
},
/*IsPrunable=*/true);
}
@@ -432,10 +433,15 @@ bool CastValueChecker::evalCall(const CallEvent &Call,
return true;
}
+void CastValueChecker::checkDeadSymbols(SymbolReaper &SR,
+ CheckerContext &C) const {
+ C.addTransition(removeDeadCasts(C.getState(), SR));
+}
+
void ento::registerCastValueChecker(CheckerManager &Mgr) {
Mgr.registerChecker<CastValueChecker>();
}
-bool ento::shouldRegisterCastValueChecker(const LangOptions &LO) {
+bool ento::shouldRegisterCastValueChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
index 50b872bd8682..13836f08a61e 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -1088,7 +1088,8 @@ void ento::registerObjCDeallocChecker(CheckerManager &Mgr) {
Mgr.registerChecker<ObjCDeallocChecker>();
}
-bool ento::shouldRegisterObjCDeallocChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCDeallocChecker(const CheckerManager &mgr) {
 // This checker only makes sense under MRR.
+ const LangOptions &LO = mgr.getLangOpts();
return LO.getGC() != LangOptions::GCOnly && !LO.ObjCAutoRefCount;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp b/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
index 1694c237cda4..175dfcef0df4 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
@@ -138,6 +138,6 @@ void ento::registerObjCMethSigsChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCMethSigsChecker>();
}
-bool ento::shouldRegisterObjCMethSigsChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCMethSigsChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/CheckPlacementNew.cpp b/clang/lib/StaticAnalyzer/Checkers/CheckPlacementNew.cpp
index 48fee4a0ffb7..dc9cd717be9e 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CheckPlacementNew.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CheckPlacementNew.cpp
@@ -1,6 +1,19 @@
+//==- CheckPlacementNew.cpp - Check for placement new operation --*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a check for misuse of the default placement new operator.
+//
+//===----------------------------------------------------------------------===//
+
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
#include "llvm/Support/FormatVariadic.h"
using namespace clang;
@@ -12,51 +25,59 @@ public:
void checkPreStmt(const CXXNewExpr *NE, CheckerContext &C) const;
private:
+ bool checkPlaceCapacityIsSufficient(const CXXNewExpr *NE,
+ CheckerContext &C) const;
+
+ bool checkPlaceIsAlignedProperly(const CXXNewExpr *NE,
+ CheckerContext &C) const;
+
// Returns the size of the target in a placement new expression.
// E.g. in "new (&s) long" it returns the size of `long`.
- SVal getExtentSizeOfNewTarget(const CXXNewExpr *NE, ProgramStateRef State,
- CheckerContext &C) const;
+ SVal getExtentSizeOfNewTarget(const CXXNewExpr *NE, CheckerContext &C,
+ bool &IsArray) const;
// Returns the size of the place in a placement new expression.
// E.g. in "new (&s) long" it returns the size of `s`.
- SVal getExtentSizeOfPlace(const Expr *NE, ProgramStateRef State,
- CheckerContext &C) const;
- BugType BT{this, "Insufficient storage for placement new",
- categories::MemoryError};
+ SVal getExtentSizeOfPlace(const CXXNewExpr *NE, CheckerContext &C) const;
+
+ void emitBadAlignReport(const Expr *P, CheckerContext &C,
+ unsigned AllocatedTAlign,
+ unsigned StorageTAlign) const;
+ unsigned getStorageAlign(CheckerContext &C, const ValueDecl *VD) const;
+
+ void checkElementRegionAlign(const ElementRegion *R, CheckerContext &C,
+ const Expr *P, unsigned AllocatedTAlign) const;
+
+ void checkFieldRegionAlign(const FieldRegion *R, CheckerContext &C,
+ const Expr *P, unsigned AllocatedTAlign) const;
+
+ bool isVarRegionAlignedProperly(const VarRegion *R, CheckerContext &C,
+ const Expr *P,
+ unsigned AllocatedTAlign) const;
+
+ BugType SBT{this, "Insufficient storage for placement new",
+ categories::MemoryError};
+ BugType ABT{this, "Bad align storage for placement new",
+ categories::MemoryError};
};
} // namespace
-SVal PlacementNewChecker::getExtentSizeOfPlace(const Expr *Place,
- ProgramStateRef State,
+SVal PlacementNewChecker::getExtentSizeOfPlace(const CXXNewExpr *NE,
CheckerContext &C) const {
- const MemRegion *MRegion = C.getSVal(Place).getAsRegion();
- if (!MRegion)
- return UnknownVal();
- RegionOffset Offset = MRegion->getAsOffset();
- if (Offset.hasSymbolicOffset())
- return UnknownVal();
- const MemRegion *BaseRegion = MRegion->getBaseRegion();
- if (!BaseRegion)
- return UnknownVal();
-
- SValBuilder &SvalBuilder = C.getSValBuilder();
- NonLoc OffsetInBytes = SvalBuilder.makeArrayIndex(
- Offset.getOffset() / C.getASTContext().getCharWidth());
- DefinedOrUnknownSVal ExtentInBytes =
- BaseRegion->castAs<SubRegion>()->getExtent(SvalBuilder);
-
- return SvalBuilder.evalBinOp(State, BinaryOperator::Opcode::BO_Sub,
- ExtentInBytes, OffsetInBytes,
- SvalBuilder.getArrayIndexType());
+ const Expr *Place = NE->getPlacementArg(0);
+ return getDynamicSizeWithOffset(C.getState(), C.getSVal(Place));
}
SVal PlacementNewChecker::getExtentSizeOfNewTarget(const CXXNewExpr *NE,
- ProgramStateRef State,
- CheckerContext &C) const {
+ CheckerContext &C,
+ bool &IsArray) const {
+ ProgramStateRef State = C.getState();
SValBuilder &SvalBuilder = C.getSValBuilder();
QualType ElementType = NE->getAllocatedType();
ASTContext &AstContext = C.getASTContext();
CharUnits TypeSize = AstContext.getTypeSizeInChars(ElementType);
+ IsArray = false;
if (NE->isArray()) {
+ IsArray = true;
const Expr *SizeExpr = *NE->getArraySize();
SVal ElementCount = C.getSVal(SizeExpr);
if (auto ElementCountNL = ElementCount.getAs<NonLoc>()) {
@@ -78,44 +99,218 @@ SVal PlacementNewChecker::getExtentSizeOfNewTarget(const CXXNewExpr *NE,
return UnknownVal();
}
-void PlacementNewChecker::checkPreStmt(const CXXNewExpr *NE,
- CheckerContext &C) const {
- // Check only the default placement new.
- if (!NE->getOperatorNew()->isReservedGlobalPlacementOperator())
- return;
- if (NE->getNumPlacementArgs() == 0)
- return;
-
- ProgramStateRef State = C.getState();
- SVal SizeOfTarget = getExtentSizeOfNewTarget(NE, State, C);
- const Expr *Place = NE->getPlacementArg(0);
- SVal SizeOfPlace = getExtentSizeOfPlace(Place, State, C);
+bool PlacementNewChecker::checkPlaceCapacityIsSufficient(
+ const CXXNewExpr *NE, CheckerContext &C) const {
+ bool IsArrayTypeAllocated;
+ SVal SizeOfTarget = getExtentSizeOfNewTarget(NE, C, IsArrayTypeAllocated);
+ SVal SizeOfPlace = getExtentSizeOfPlace(NE, C);
const auto SizeOfTargetCI = SizeOfTarget.getAs<nonloc::ConcreteInt>();
if (!SizeOfTargetCI)
- return;
+ return true;
const auto SizeOfPlaceCI = SizeOfPlace.getAs<nonloc::ConcreteInt>();
if (!SizeOfPlaceCI)
- return;
+ return true;
- if (SizeOfPlaceCI->getValue() < SizeOfTargetCI->getValue()) {
- if (ExplodedNode *N = C.generateErrorNode(State)) {
- std::string Msg =
- llvm::formatv("Storage provided to placement new is only {0} bytes, "
- "whereas the allocated type requires {1} bytes",
- SizeOfPlaceCI->getValue(), SizeOfTargetCI->getValue());
+ if ((SizeOfPlaceCI->getValue() < SizeOfTargetCI->getValue()) ||
+ (IsArrayTypeAllocated &&
+ SizeOfPlaceCI->getValue() >= SizeOfTargetCI->getValue())) {
+ if (ExplodedNode *N = C.generateErrorNode(C.getState())) {
+ std::string Msg;
+ // TODO: use clang constant
+ if (IsArrayTypeAllocated &&
+ SizeOfPlaceCI->getValue() > SizeOfTargetCI->getValue())
+ Msg = std::string(llvm::formatv(
+ "{0} bytes is possibly not enough for array allocation which "
+ "requires {1} bytes. Current overhead requires the size of {2} "
+ "bytes",
+ SizeOfPlaceCI->getValue(), SizeOfTargetCI->getValue(),
+ SizeOfPlaceCI->getValue() - SizeOfTargetCI->getValue()));
+ else if (IsArrayTypeAllocated &&
+ SizeOfPlaceCI->getValue() == SizeOfTargetCI->getValue())
+ Msg = std::string(llvm::formatv(
+ "Storage provided to placement new is only {0} bytes, "
+ "whereas the allocated array type requires more space for "
+ "internal needs",
+ SizeOfPlaceCI->getValue(), SizeOfTargetCI->getValue()));
+ else
+ Msg = std::string(llvm::formatv(
+ "Storage provided to placement new is only {0} bytes, "
+ "whereas the allocated type requires {1} bytes",
+ SizeOfPlaceCI->getValue(), SizeOfTargetCI->getValue()));
- auto R = std::make_unique<PathSensitiveBugReport>(BT, Msg, N);
- bugreporter::trackExpressionValue(N, Place, *R);
+ auto R = std::make_unique<PathSensitiveBugReport>(SBT, Msg, N);
+ bugreporter::trackExpressionValue(N, NE->getPlacementArg(0), *R);
C.emitReport(std::move(R));
+
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void PlacementNewChecker::emitBadAlignReport(const Expr *P, CheckerContext &C,
+ unsigned AllocatedTAlign,
+ unsigned StorageTAlign) const {
+ ProgramStateRef State = C.getState();
+ if (ExplodedNode *N = C.generateErrorNode(State)) {
+ std::string Msg(llvm::formatv("Storage type is aligned to {0} bytes but "
+ "allocated type is aligned to {1} bytes",
+ StorageTAlign, AllocatedTAlign));
+
+ auto R = std::make_unique<PathSensitiveBugReport>(ABT, Msg, N);
+ bugreporter::trackExpressionValue(N, P, *R);
+ C.emitReport(std::move(R));
+ }
+}
+
+unsigned PlacementNewChecker::getStorageAlign(CheckerContext &C,
+ const ValueDecl *VD) const {
+ unsigned StorageTAlign = C.getASTContext().getTypeAlign(VD->getType());
+ if (unsigned SpecifiedAlignment = VD->getMaxAlignment())
+ StorageTAlign = SpecifiedAlignment;
+
+ return StorageTAlign / C.getASTContext().getCharWidth();
+}
+
+void PlacementNewChecker::checkElementRegionAlign(
+ const ElementRegion *R, CheckerContext &C, const Expr *P,
+ unsigned AllocatedTAlign) const {
+ auto IsBaseRegionAlignedProperly = [this, R, &C, P,
+ AllocatedTAlign]() -> bool {
+ // Unwind nested ElementRegion`s to get the type.
+ const MemRegion *SuperRegion = R;
+ while (true) {
+ if (SuperRegion->getKind() == MemRegion::ElementRegionKind) {
+ SuperRegion = cast<SubRegion>(SuperRegion)->getSuperRegion();
+ continue;
+ }
+
+ break;
+ }
+
+ const DeclRegion *TheElementDeclRegion = SuperRegion->getAs<DeclRegion>();
+ if (!TheElementDeclRegion)
+ return false;
+
+ const DeclRegion *BaseDeclRegion = R->getBaseRegion()->getAs<DeclRegion>();
+ if (!BaseDeclRegion)
+ return false;
+
+ unsigned BaseRegionAlign = 0;
+    // We must use the alignment of TheElementDeclRegion if it has its own
+    // alignment specifier.
+ if (TheElementDeclRegion->getDecl()->getMaxAlignment())
+ BaseRegionAlign = getStorageAlign(C, TheElementDeclRegion->getDecl());
+ else
+ BaseRegionAlign = getStorageAlign(C, BaseDeclRegion->getDecl());
+
+ if (AllocatedTAlign > BaseRegionAlign) {
+ emitBadAlignReport(P, C, AllocatedTAlign, BaseRegionAlign);
+ return false;
+ }
+
+ return true;
+ };
+
+ auto CheckElementRegionOffset = [this, R, &C, P, AllocatedTAlign]() -> void {
+ RegionOffset TheOffsetRegion = R->getAsOffset();
+ if (TheOffsetRegion.hasSymbolicOffset())
return;
+
+ unsigned Offset =
+ TheOffsetRegion.getOffset() / C.getASTContext().getCharWidth();
+ unsigned AddressAlign = Offset % AllocatedTAlign;
+ if (AddressAlign != 0) {
+ emitBadAlignReport(P, C, AllocatedTAlign, AddressAlign);
+ return;
+ }
+ };
+
+ if (IsBaseRegionAlignedProperly()) {
+ CheckElementRegionOffset();
+ }
+}
+
+void PlacementNewChecker::checkFieldRegionAlign(
+ const FieldRegion *R, CheckerContext &C, const Expr *P,
+ unsigned AllocatedTAlign) const {
+ const MemRegion *BaseRegion = R->getBaseRegion();
+ if (!BaseRegion)
+ return;
+
+ if (const VarRegion *TheVarRegion = BaseRegion->getAs<VarRegion>()) {
+ if (isVarRegionAlignedProperly(TheVarRegion, C, P, AllocatedTAlign)) {
+      // We've checked the type's alignment, but unless the FieldRegion's
+      // offset is zero, we also need to check the alignment of the field
+      // itself.
+ RegionOffset Offset = R->getAsOffset();
+ if (Offset.hasSymbolicOffset())
+ return;
+
+ int64_t OffsetValue =
+ Offset.getOffset() / C.getASTContext().getCharWidth();
+ unsigned AddressAlign = OffsetValue % AllocatedTAlign;
+ if (AddressAlign != 0)
+ emitBadAlignReport(P, C, AllocatedTAlign, AddressAlign);
}
}
}
+bool PlacementNewChecker::isVarRegionAlignedProperly(
+ const VarRegion *R, CheckerContext &C, const Expr *P,
+ unsigned AllocatedTAlign) const {
+ const VarDecl *TheVarDecl = R->getDecl();
+ unsigned StorageTAlign = getStorageAlign(C, TheVarDecl);
+ if (AllocatedTAlign > StorageTAlign) {
+ emitBadAlignReport(P, C, AllocatedTAlign, StorageTAlign);
+
+ return false;
+ }
+
+ return true;
+}
+
+bool PlacementNewChecker::checkPlaceIsAlignedProperly(const CXXNewExpr *NE,
+ CheckerContext &C) const {
+ const Expr *Place = NE->getPlacementArg(0);
+
+ QualType AllocatedT = NE->getAllocatedType();
+ unsigned AllocatedTAlign = C.getASTContext().getTypeAlign(AllocatedT) /
+ C.getASTContext().getCharWidth();
+
+ SVal PlaceVal = C.getSVal(Place);
+ if (const MemRegion *MRegion = PlaceVal.getAsRegion()) {
+ if (const ElementRegion *TheElementRegion = MRegion->getAs<ElementRegion>())
+ checkElementRegionAlign(TheElementRegion, C, Place, AllocatedTAlign);
+ else if (const FieldRegion *TheFieldRegion = MRegion->getAs<FieldRegion>())
+ checkFieldRegionAlign(TheFieldRegion, C, Place, AllocatedTAlign);
+ else if (const VarRegion *TheVarRegion = MRegion->getAs<VarRegion>())
+ isVarRegionAlignedProperly(TheVarRegion, C, Place, AllocatedTAlign);
+ }
+
+ return true;
+}
+
+void PlacementNewChecker::checkPreStmt(const CXXNewExpr *NE,
+ CheckerContext &C) const {
+ // Check only the default placement new.
+ if (!NE->getOperatorNew()->isReservedGlobalPlacementOperator())
+ return;
+
+ if (NE->getNumPlacementArgs() == 0)
+ return;
+
+ if (!checkPlaceCapacityIsSufficient(NE, C))
+ return;
+
+ checkPlaceIsAlignedProperly(NE, C);
+}
+
void ento::registerPlacementNewChecker(CheckerManager &mgr) {
mgr.registerChecker<PlacementNewChecker>();
}
-bool ento::shouldRegisterPlacementNewChecker(const LangOptions &LO) {
+bool ento::shouldRegisterPlacementNewChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp b/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
index d9ffa562c0aa..d06c87631bfb 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
@@ -1076,7 +1076,7 @@ void ento::registerSecuritySyntaxChecker(CheckerManager &mgr) {
mgr.registerChecker<SecuritySyntaxChecker>();
}
-bool ento::shouldRegisterSecuritySyntaxChecker(const LangOptions &LO) {
+bool ento::shouldRegisterSecuritySyntaxChecker(const CheckerManager &mgr) {
return true;
}
@@ -1087,7 +1087,7 @@ bool ento::shouldRegisterSecuritySyntaxChecker(const LangOptions &LO) {
checker->filter.checkName_##name = mgr.getCurrentCheckerName(); \
} \
\
- bool ento::shouldRegister##name(const LangOptions &LO) { return true; }
+ bool ento::shouldRegister##name(const CheckerManager &mgr) { return true; }
REGISTER_CHECKER(bcmp)
REGISTER_CHECKER(bcopy)
diff --git a/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp b/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
index ec401cfa8985..0d2551f11583 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
@@ -91,6 +91,6 @@ void ento::registerSizeofPointerChecker(CheckerManager &mgr) {
mgr.registerChecker<SizeofPointerChecker>();
}
-bool ento::shouldRegisterSizeofPointerChecker(const LangOptions &LO) {
+bool ento::shouldRegisterSizeofPointerChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
index 7a41a7b6b216..fd53c04f4bbf 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
@@ -136,6 +136,6 @@ void ento::registerChrootChecker(CheckerManager &mgr) {
mgr.registerChecker<ChrootChecker>();
}
-bool ento::shouldRegisterChrootChecker(const LangOptions &LO) {
+bool ento::shouldRegisterChrootChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/CloneChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/CloneChecker.cpp
index ce45b5be34c9..7968aed85e1b 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CloneChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CloneChecker.cpp
@@ -208,6 +208,6 @@ void ento::registerCloneChecker(CheckerManager &Mgr) {
.getCheckerStringOption(Checker, "IgnoredFilesPattern");
}
-bool ento::shouldRegisterCloneChecker(const LangOptions &LO) {
+bool ento::shouldRegisterCloneChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp b/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp
new file mode 100644
index 000000000000..73c6517fd0eb
--- /dev/null
+++ b/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp
@@ -0,0 +1,1083 @@
+//===-- ContainerModeling.cpp ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines a modeling checker for STL-like containers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h"
+
+#include "Iterator.h"
+
+#include <utility>
+
+using namespace clang;
+using namespace ento;
+using namespace iterator;
+
+namespace {
+
+class ContainerModeling
+ : public Checker<check::PostCall, check::LiveSymbols, check::DeadSymbols> {
+
+ void handleBegin(CheckerContext &C, const Expr *CE, SVal RetVal,
+ SVal Cont) const;
+ void handleEnd(CheckerContext &C, const Expr *CE, SVal RetVal,
+ SVal Cont) const;
+ void handleAssignment(CheckerContext &C, SVal Cont, const Expr *CE = nullptr,
+ SVal OldCont = UndefinedVal()) const;
+ void handleAssign(CheckerContext &C, SVal Cont, const Expr *ContE) const;
+ void handleClear(CheckerContext &C, SVal Cont, const Expr *ContE) const;
+ void handlePushBack(CheckerContext &C, SVal Cont, const Expr *ContE) const;
+ void handlePopBack(CheckerContext &C, SVal Cont, const Expr *ContE) const;
+ void handlePushFront(CheckerContext &C, SVal Cont, const Expr *ContE) const;
+ void handlePopFront(CheckerContext &C, SVal Cont, const Expr *ContE) const;
+ void handleInsert(CheckerContext &C, SVal Cont, SVal Iter) const;
+ void handleErase(CheckerContext &C, SVal Cont, SVal Iter) const;
+ void handleErase(CheckerContext &C, SVal Cont, SVal Iter1, SVal Iter2) const;
+ void handleEraseAfter(CheckerContext &C, SVal Cont, SVal Iter) const;
+ void handleEraseAfter(CheckerContext &C, SVal Cont, SVal Iter1,
+ SVal Iter2) const;
+ const NoteTag *getChangeTag(CheckerContext &C, StringRef Text,
+ const MemRegion *ContReg,
+ const Expr *ContE) const;
+ void printState(raw_ostream &Out, ProgramStateRef State, const char *NL,
+ const char *Sep) const override;
+
+public:
+ ContainerModeling() = default;
+
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkLiveSymbols(ProgramStateRef State, SymbolReaper &SR) const;
+ void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
+
+ using NoItParamFn = void (ContainerModeling::*)(CheckerContext &, SVal,
+ const Expr *) const;
+ using OneItParamFn = void (ContainerModeling::*)(CheckerContext &, SVal,
+ SVal) const;
+ using TwoItParamFn = void (ContainerModeling::*)(CheckerContext &, SVal, SVal,
+ SVal) const;
+
+ CallDescriptionMap<NoItParamFn> NoIterParamFunctions = {
+ {{0, "clear", 0},
+ &ContainerModeling::handleClear},
+ {{0, "assign", 2},
+ &ContainerModeling::handleAssign},
+ {{0, "push_back", 1},
+ &ContainerModeling::handlePushBack},
+ {{0, "emplace_back", 1},
+ &ContainerModeling::handlePushBack},
+ {{0, "pop_back", 0},
+ &ContainerModeling::handlePopBack},
+ {{0, "push_front", 1},
+ &ContainerModeling::handlePushFront},
+ {{0, "emplace_front", 1},
+ &ContainerModeling::handlePushFront},
+ {{0, "pop_front", 0},
+ &ContainerModeling::handlePopFront},
+ };
+
+ CallDescriptionMap<OneItParamFn> OneIterParamFunctions = {
+ {{0, "insert", 2},
+ &ContainerModeling::handleInsert},
+ {{0, "emplace", 2},
+ &ContainerModeling::handleInsert},
+ {{0, "erase", 1},
+ &ContainerModeling::handleErase},
+ {{0, "erase_after", 1},
+ &ContainerModeling::handleEraseAfter},
+ };
+
+ CallDescriptionMap<TwoItParamFn> TwoIterParamFunctions = {
+ {{0, "erase", 2},
+ &ContainerModeling::handleErase},
+ {{0, "erase_after", 2},
+ &ContainerModeling::handleEraseAfter},
+ };
+
+};
+
+bool isBeginCall(const FunctionDecl *Func);
+bool isEndCall(const FunctionDecl *Func);
+bool hasSubscriptOperator(ProgramStateRef State, const MemRegion *Reg);
+bool frontModifiable(ProgramStateRef State, const MemRegion *Reg);
+bool backModifiable(ProgramStateRef State, const MemRegion *Reg);
+SymbolRef getContainerBegin(ProgramStateRef State, const MemRegion *Cont);
+SymbolRef getContainerEnd(ProgramStateRef State, const MemRegion *Cont);
+ProgramStateRef createContainerBegin(ProgramStateRef State,
+ const MemRegion *Cont, const Expr *E,
+ QualType T, const LocationContext *LCtx,
+ unsigned BlockCount);
+ProgramStateRef createContainerEnd(ProgramStateRef State, const MemRegion *Cont,
+ const Expr *E, QualType T,
+ const LocationContext *LCtx,
+ unsigned BlockCount);
+ProgramStateRef setContainerData(ProgramStateRef State, const MemRegion *Cont,
+ const ContainerData &CData);
+ProgramStateRef invalidateAllIteratorPositions(ProgramStateRef State,
+ const MemRegion *Cont);
+ProgramStateRef
+invalidateAllIteratorPositionsExcept(ProgramStateRef State,
+ const MemRegion *Cont, SymbolRef Offset,
+ BinaryOperator::Opcode Opc);
+ProgramStateRef invalidateIteratorPositions(ProgramStateRef State,
+ SymbolRef Offset,
+ BinaryOperator::Opcode Opc);
+ProgramStateRef invalidateIteratorPositions(ProgramStateRef State,
+ SymbolRef Offset1,
+ BinaryOperator::Opcode Opc1,
+ SymbolRef Offset2,
+ BinaryOperator::Opcode Opc2);
+ProgramStateRef reassignAllIteratorPositions(ProgramStateRef State,
+ const MemRegion *Cont,
+ const MemRegion *NewCont);
+ProgramStateRef reassignAllIteratorPositionsUnless(ProgramStateRef State,
+ const MemRegion *Cont,
+ const MemRegion *NewCont,
+ SymbolRef Offset,
+ BinaryOperator::Opcode Opc);
+ProgramStateRef rebaseSymbolInIteratorPositionsIf(
+ ProgramStateRef State, SValBuilder &SVB, SymbolRef OldSym,
+ SymbolRef NewSym, SymbolRef CondSym, BinaryOperator::Opcode Opc);
+SymbolRef rebaseSymbol(ProgramStateRef State, SValBuilder &SVB, SymbolRef Expr,
+ SymbolRef OldSym, SymbolRef NewSym);
+bool hasLiveIterators(ProgramStateRef State, const MemRegion *Cont);
+
+} // namespace
+
+void ContainerModeling::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ const auto *Func = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
+ if (!Func)
+ return;
+
+ if (Func->isOverloadedOperator()) {
+ const auto Op = Func->getOverloadedOperator();
+ if (Op == OO_Equal) {
+ // Overloaded 'operator=' must be a non-static member function.
+ const auto *InstCall = cast<CXXInstanceCall>(&Call);
+ if (cast<CXXMethodDecl>(Func)->isMoveAssignmentOperator()) {
+ handleAssignment(C, InstCall->getCXXThisVal(), Call.getOriginExpr(),
+ Call.getArgSVal(0));
+ return;
+ }
+
+ handleAssignment(C, InstCall->getCXXThisVal());
+ return;
+ }
+ } else {
+ if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+ const NoItParamFn *Handler0 = NoIterParamFunctions.lookup(Call);
+ if (Handler0) {
+ (this->**Handler0)(C, InstCall->getCXXThisVal(),
+ InstCall->getCXXThisExpr());
+ return;
+ }
+
+ const OneItParamFn *Handler1 = OneIterParamFunctions.lookup(Call);
+ if (Handler1) {
+ (this->**Handler1)(C, InstCall->getCXXThisVal(), Call.getArgSVal(0));
+ return;
+ }
+
+ const TwoItParamFn *Handler2 = TwoIterParamFunctions.lookup(Call);
+ if (Handler2) {
+ (this->**Handler2)(C, InstCall->getCXXThisVal(), Call.getArgSVal(0),
+ Call.getArgSVal(1));
+ return;
+ }
+
+ const auto *OrigExpr = Call.getOriginExpr();
+ if (!OrigExpr)
+ return;
+
+ if (isBeginCall(Func)) {
+ handleBegin(C, OrigExpr, Call.getReturnValue(),
+ InstCall->getCXXThisVal());
+ return;
+ }
+
+ if (isEndCall(Func)) {
+ handleEnd(C, OrigExpr, Call.getReturnValue(),
+ InstCall->getCXXThisVal());
+ return;
+ }
+ }
+ }
+}
+
+void ContainerModeling::checkLiveSymbols(ProgramStateRef State,
+ SymbolReaper &SR) const {
+ // Keep symbolic expressions of container begins and ends alive
+ auto ContMap = State->get<ContainerMap>();
+ for (const auto &Cont : ContMap) {
+ const auto CData = Cont.second;
+ if (CData.getBegin()) {
+ SR.markLive(CData.getBegin());
+ if(const auto *SIE = dyn_cast<SymIntExpr>(CData.getBegin()))
+ SR.markLive(SIE->getLHS());
+ }
+ if (CData.getEnd()) {
+ SR.markLive(CData.getEnd());
+ if(const auto *SIE = dyn_cast<SymIntExpr>(CData.getEnd()))
+ SR.markLive(SIE->getLHS());
+ }
+ }
+}
+
+void ContainerModeling::checkDeadSymbols(SymbolReaper &SR,
+ CheckerContext &C) const {
+ // Cleanup
+ auto State = C.getState();
+
+ auto ContMap = State->get<ContainerMap>();
+ for (const auto &Cont : ContMap) {
+ if (!SR.isLiveRegion(Cont.first)) {
+ // We must keep the container data while it has live iterators to be able
+ // to compare them to the begin and the end of the container.
+ if (!hasLiveIterators(State, Cont.first)) {
+ State = State->remove<ContainerMap>(Cont.first);
+ }
+ }
+ }
+
+ C.addTransition(State);
+}
+
+void ContainerModeling::handleBegin(CheckerContext &C, const Expr *CE,
+ SVal RetVal, SVal Cont) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ // If the container already has a begin symbol then use it. Otherwise first
+ // create a new one.
+ auto State = C.getState();
+ auto BeginSym = getContainerBegin(State, ContReg);
+ if (!BeginSym) {
+ State = createContainerBegin(State, ContReg, CE, C.getASTContext().LongTy,
+ C.getLocationContext(), C.blockCount());
+ BeginSym = getContainerBegin(State, ContReg);
+ }
+ State = setIteratorPosition(State, RetVal,
+ IteratorPosition::getPosition(ContReg, BeginSym));
+ C.addTransition(State);
+}
+
+void ContainerModeling::handleEnd(CheckerContext &C, const Expr *CE,
+ SVal RetVal, SVal Cont) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ // If the container already has an end symbol then use it. Otherwise first
+ // create a new one.
+ auto State = C.getState();
+ auto EndSym = getContainerEnd(State, ContReg);
+ if (!EndSym) {
+ State = createContainerEnd(State, ContReg, CE, C.getASTContext().LongTy,
+ C.getLocationContext(), C.blockCount());
+ EndSym = getContainerEnd(State, ContReg);
+ }
+ State = setIteratorPosition(State, RetVal,
+ IteratorPosition::getPosition(ContReg, EndSym));
+ C.addTransition(State);
+}
+
+void ContainerModeling::handleAssignment(CheckerContext &C, SVal Cont,
+ const Expr *CE, SVal OldCont) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ // Assignment of a new value to a container always invalidates all its
+ // iterators
+ auto State = C.getState();
+ const auto CData = getContainerData(State, ContReg);
+ if (CData) {
+ State = invalidateAllIteratorPositions(State, ContReg);
+ }
+
+ // In case of move, iterators of the old container (except the past-end
+ // iterators) remain valid but refer to the new container
+ if (!OldCont.isUndef()) {
+ const auto *OldContReg = OldCont.getAsRegion();
+ if (OldContReg) {
+ OldContReg = OldContReg->getMostDerivedObjectRegion();
+ const auto OldCData = getContainerData(State, OldContReg);
+ if (OldCData) {
+ if (const auto OldEndSym = OldCData->getEnd()) {
+ // If we already assigned an "end" symbol to the old container, then
+ // first reassign all iterator positions to the new container which
+ // are not past the container (thus not greater or equal to the
+ // current "end" symbol).
+ State = reassignAllIteratorPositionsUnless(State, OldContReg, ContReg,
+ OldEndSym, BO_GE);
+ auto &SymMgr = C.getSymbolManager();
+ auto &SVB = C.getSValBuilder();
+ // Then generate and assign a new "end" symbol for the new container.
+ auto NewEndSym =
+ SymMgr.conjureSymbol(CE, C.getLocationContext(),
+ C.getASTContext().LongTy, C.blockCount());
+ State = assumeNoOverflow(State, NewEndSym, 4);
+ if (CData) {
+ State = setContainerData(State, ContReg, CData->newEnd(NewEndSym));
+ } else {
+ State = setContainerData(State, ContReg,
+ ContainerData::fromEnd(NewEndSym));
+ }
+ // Finally, replace the old "end" symbol in the already reassigned
+ // iterator positions with the new "end" symbol.
+ State = rebaseSymbolInIteratorPositionsIf(
+ State, SVB, OldEndSym, NewEndSym, OldEndSym, BO_LT);
+ } else {
+ // There was no "end" symbol assigned yet to the old container,
+ // so reassign all iterator positions to the new container.
+ State = reassignAllIteratorPositions(State, OldContReg, ContReg);
+ }
+ if (const auto OldBeginSym = OldCData->getBegin()) {
+ // If we already assigned a "begin" symbol to the old container, then
+ // assign it to the new container and remove it from the old one.
+ if (CData) {
+ State =
+ setContainerData(State, ContReg, CData->newBegin(OldBeginSym));
+ } else {
+ State = setContainerData(State, ContReg,
+ ContainerData::fromBegin(OldBeginSym));
+ }
+ State =
+ setContainerData(State, OldContReg, OldCData->newBegin(nullptr));
+ }
+ } else {
+ // There was neither "begin" nor "end" symbol assigned yet to the old
+ // container, so reassign all iterator positions to the new container.
+ State = reassignAllIteratorPositions(State, OldContReg, ContReg);
+ }
+ }
+ }
+ C.addTransition(State);
+}
+
+void ContainerModeling::handleAssign(CheckerContext &C, SVal Cont,
+ const Expr *ContE) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ // The assign() operation invalidates all the iterators
+ auto State = C.getState();
+ State = invalidateAllIteratorPositions(State, ContReg);
+ C.addTransition(State);
+}
+
+void ContainerModeling::handleClear(CheckerContext &C, SVal Cont,
+ const Expr *ContE) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ // The clear() operation invalidates all the iterators, except the past-end
+ // iterators of list-like containers
+ auto State = C.getState();
+ if (!hasSubscriptOperator(State, ContReg) ||
+ !backModifiable(State, ContReg)) {
+ const auto CData = getContainerData(State, ContReg);
+ if (CData) {
+ if (const auto EndSym = CData->getEnd()) {
+ State =
+ invalidateAllIteratorPositionsExcept(State, ContReg, EndSym, BO_GE);
+ C.addTransition(State);
+ return;
+ }
+ }
+ }
+ const NoteTag *ChangeTag =
+ getChangeTag(C, "became empty", ContReg, ContE);
+ State = invalidateAllIteratorPositions(State, ContReg);
+ C.addTransition(State, ChangeTag);
+}
+
+void ContainerModeling::handlePushBack(CheckerContext &C, SVal Cont,
+ const Expr *ContE) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ // For deque-like containers invalidate all iterator positions
+ auto State = C.getState();
+ if (hasSubscriptOperator(State, ContReg) && frontModifiable(State, ContReg)) {
+ State = invalidateAllIteratorPositions(State, ContReg);
+ C.addTransition(State);
+ return;
+ }
+
+ const auto CData = getContainerData(State, ContReg);
+ if (!CData)
+ return;
+
+ // For vector-like containers invalidate the past-end iterator positions
+ if (const auto EndSym = CData->getEnd()) {
+ if (hasSubscriptOperator(State, ContReg)) {
+ State = invalidateIteratorPositions(State, EndSym, BO_GE);
+ }
+ auto &SymMgr = C.getSymbolManager();
+ auto &BVF = SymMgr.getBasicVals();
+ auto &SVB = C.getSValBuilder();
+ const auto newEndSym =
+ SVB.evalBinOp(State, BO_Add,
+ nonloc::SymbolVal(EndSym),
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
+ SymMgr.getType(EndSym)).getAsSymbol();
+ const NoteTag *ChangeTag =
+ getChangeTag(C, "extended to the back by 1 position", ContReg, ContE);
+ State = setContainerData(State, ContReg, CData->newEnd(newEndSym));
+ C.addTransition(State, ChangeTag);
+ }
+}
+
+void ContainerModeling::handlePopBack(CheckerContext &C, SVal Cont,
+ const Expr *ContE) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ auto State = C.getState();
+ const auto CData = getContainerData(State, ContReg);
+ if (!CData)
+ return;
+
+ if (const auto EndSym = CData->getEnd()) {
+ auto &SymMgr = C.getSymbolManager();
+ auto &BVF = SymMgr.getBasicVals();
+ auto &SVB = C.getSValBuilder();
+ const auto BackSym =
+ SVB.evalBinOp(State, BO_Sub,
+ nonloc::SymbolVal(EndSym),
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
+ SymMgr.getType(EndSym)).getAsSymbol();
+ const NoteTag *ChangeTag =
+ getChangeTag(C, "shrank from the back by 1 position", ContReg, ContE);
+ // For vector-like and deque-like containers invalidate the last and the
+ // past-end iterator positions. For list-like containers only invalidate
+ // the last position
+ if (hasSubscriptOperator(State, ContReg) &&
+ backModifiable(State, ContReg)) {
+ State = invalidateIteratorPositions(State, BackSym, BO_GE);
+ State = setContainerData(State, ContReg, CData->newEnd(nullptr));
+ } else {
+ State = invalidateIteratorPositions(State, BackSym, BO_EQ);
+ }
+ auto newEndSym = BackSym;
+ State = setContainerData(State, ContReg, CData->newEnd(newEndSym));
+ C.addTransition(State, ChangeTag);
+ }
+}
+
+void ContainerModeling::handlePushFront(CheckerContext &C, SVal Cont,
+ const Expr *ContE) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ // For deque-like containers invalidate all iterator positions
+ auto State = C.getState();
+ if (hasSubscriptOperator(State, ContReg)) {
+ State = invalidateAllIteratorPositions(State, ContReg);
+ C.addTransition(State);
+ } else {
+ const auto CData = getContainerData(State, ContReg);
+ if (!CData)
+ return;
+
+ if (const auto BeginSym = CData->getBegin()) {
+ auto &SymMgr = C.getSymbolManager();
+ auto &BVF = SymMgr.getBasicVals();
+ auto &SVB = C.getSValBuilder();
+ const auto newBeginSym =
+ SVB.evalBinOp(State, BO_Sub,
+ nonloc::SymbolVal(BeginSym),
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
+ SymMgr.getType(BeginSym)).getAsSymbol();
+ const NoteTag *ChangeTag =
+ getChangeTag(C, "extended to the front by 1 position", ContReg, ContE);
+ State = setContainerData(State, ContReg, CData->newBegin(newBeginSym));
+ C.addTransition(State, ChangeTag);
+ }
+ }
+}
+
+void ContainerModeling::handlePopFront(CheckerContext &C, SVal Cont,
+ const Expr *ContE) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ auto State = C.getState();
+ const auto CData = getContainerData(State, ContReg);
+ if (!CData)
+ return;
+
+  // For deque-like containers invalidate all iterator positions. For list-like
+  // containers only invalidate the first position.
+ if (const auto BeginSym = CData->getBegin()) {
+ if (hasSubscriptOperator(State, ContReg)) {
+ State = invalidateIteratorPositions(State, BeginSym, BO_LE);
+ } else {
+ State = invalidateIteratorPositions(State, BeginSym, BO_EQ);
+ }
+ auto &SymMgr = C.getSymbolManager();
+ auto &BVF = SymMgr.getBasicVals();
+ auto &SVB = C.getSValBuilder();
+ const auto newBeginSym =
+ SVB.evalBinOp(State, BO_Add,
+ nonloc::SymbolVal(BeginSym),
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
+ SymMgr.getType(BeginSym)).getAsSymbol();
+ const NoteTag *ChangeTag =
+ getChangeTag(C, "shrank from the front by 1 position", ContReg, ContE);
+ State = setContainerData(State, ContReg, CData->newBegin(newBeginSym));
+ C.addTransition(State, ChangeTag);
+ }
+}
+
+void ContainerModeling::handleInsert(CheckerContext &C, SVal Cont,
+ SVal Iter) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ auto State = C.getState();
+ const auto *Pos = getIteratorPosition(State, Iter);
+ if (!Pos)
+ return;
+
+ // For deque-like containers invalidate all iterator positions. For
+ // vector-like containers invalidate iterator positions after the insertion.
+ if (hasSubscriptOperator(State, ContReg) && backModifiable(State, ContReg)) {
+ if (frontModifiable(State, ContReg)) {
+ State = invalidateAllIteratorPositions(State, ContReg);
+ } else {
+ State = invalidateIteratorPositions(State, Pos->getOffset(), BO_GE);
+ }
+ if (const auto *CData = getContainerData(State, ContReg)) {
+ if (const auto EndSym = CData->getEnd()) {
+ State = invalidateIteratorPositions(State, EndSym, BO_GE);
+ State = setContainerData(State, ContReg, CData->newEnd(nullptr));
+ }
+ }
+ C.addTransition(State);
+ }
+}
+
+void ContainerModeling::handleErase(CheckerContext &C, SVal Cont,
+ SVal Iter) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ auto State = C.getState();
+ const auto *Pos = getIteratorPosition(State, Iter);
+ if (!Pos)
+ return;
+
+ // For deque-like containers invalidate all iterator positions. For
+ // vector-like containers invalidate iterator positions at and after the
+ // deletion. For list-like containers only invalidate the deleted position.
+ if (hasSubscriptOperator(State, ContReg) && backModifiable(State, ContReg)) {
+ if (frontModifiable(State, ContReg)) {
+ State = invalidateAllIteratorPositions(State, ContReg);
+ } else {
+ State = invalidateIteratorPositions(State, Pos->getOffset(), BO_GE);
+ }
+ if (const auto *CData = getContainerData(State, ContReg)) {
+ if (const auto EndSym = CData->getEnd()) {
+ State = invalidateIteratorPositions(State, EndSym, BO_GE);
+ State = setContainerData(State, ContReg, CData->newEnd(nullptr));
+ }
+ }
+ } else {
+ State = invalidateIteratorPositions(State, Pos->getOffset(), BO_EQ);
+ }
+ C.addTransition(State);
+}
+
+void ContainerModeling::handleErase(CheckerContext &C, SVal Cont, SVal Iter1,
+ SVal Iter2) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+ auto State = C.getState();
+ const auto *Pos1 = getIteratorPosition(State, Iter1);
+ const auto *Pos2 = getIteratorPosition(State, Iter2);
+ if (!Pos1 || !Pos2)
+ return;
+
+ // For deque-like containers invalidate all iterator positions. For
+  // vector-like containers invalidate iterator positions at and after the
+  // deletion range. For list-like containers only invalidate the deleted
+  // half-open position range [first, last).
+ if (hasSubscriptOperator(State, ContReg) && backModifiable(State, ContReg)) {
+ if (frontModifiable(State, ContReg)) {
+ State = invalidateAllIteratorPositions(State, ContReg);
+ } else {
+ State = invalidateIteratorPositions(State, Pos1->getOffset(), BO_GE);
+ }
+ if (const auto *CData = getContainerData(State, ContReg)) {
+ if (const auto EndSym = CData->getEnd()) {
+ State = invalidateIteratorPositions(State, EndSym, BO_GE);
+ State = setContainerData(State, ContReg, CData->newEnd(nullptr));
+ }
+ }
+ } else {
+ State = invalidateIteratorPositions(State, Pos1->getOffset(), BO_GE,
+ Pos2->getOffset(), BO_LT);
+ }
+ C.addTransition(State);
+}
+
+void ContainerModeling::handleEraseAfter(CheckerContext &C, SVal Cont,
+ SVal Iter) const {
+ auto State = C.getState();
+ const auto *Pos = getIteratorPosition(State, Iter);
+ if (!Pos)
+ return;
+
+ // Invalidate the deleted iterator position, which is the position of the
+ // parameter plus one.
+ auto &SymMgr = C.getSymbolManager();
+ auto &BVF = SymMgr.getBasicVals();
+ auto &SVB = C.getSValBuilder();
+ const auto NextSym =
+ SVB.evalBinOp(State, BO_Add,
+ nonloc::SymbolVal(Pos->getOffset()),
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
+ SymMgr.getType(Pos->getOffset())).getAsSymbol();
+ State = invalidateIteratorPositions(State, NextSym, BO_EQ);
+ C.addTransition(State);
+}
+
+void ContainerModeling::handleEraseAfter(CheckerContext &C, SVal Cont,
+ SVal Iter1, SVal Iter2) const {
+ auto State = C.getState();
+ const auto *Pos1 = getIteratorPosition(State, Iter1);
+ const auto *Pos2 = getIteratorPosition(State, Iter2);
+ if (!Pos1 || !Pos2)
+ return;
+
+ // Invalidate the deleted iterator position range (first..last)
+ State = invalidateIteratorPositions(State, Pos1->getOffset(), BO_GT,
+ Pos2->getOffset(), BO_LT);
+ C.addTransition(State);
+}
+
+const NoteTag *ContainerModeling::getChangeTag(CheckerContext &C,
+ StringRef Text,
+ const MemRegion *ContReg,
+ const Expr *ContE) const {
+ StringRef Name;
+ // First try to get the name of the variable from the region
+ if (const auto *DR = dyn_cast<DeclRegion>(ContReg)) {
+ Name = DR->getDecl()->getName();
+ // If the region is not a `DeclRegion` then use the expression instead
+ } else if (const auto *DRE =
+ dyn_cast<DeclRefExpr>(ContE->IgnoreParenCasts())) {
+ Name = DRE->getDecl()->getName();
+ }
+
+ return C.getNoteTag(
+ [Text, Name, ContReg](PathSensitiveBugReport &BR) -> std::string {
+ if (!BR.isInteresting(ContReg))
+ return "";
+
+ SmallString<256> Msg;
+ llvm::raw_svector_ostream Out(Msg);
+ Out << "Container " << (!Name.empty() ? ("'" + Name.str() + "' ") : "" )
+ << Text;
+ return std::string(Out.str());
+ });
+}
+
+void ContainerModeling::printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const {
+ auto ContMap = State->get<ContainerMap>();
+
+ if (!ContMap.isEmpty()) {
+ Out << Sep << "Container Data :" << NL;
+ for (const auto &Cont : ContMap) {
+ Cont.first->dumpToStream(Out);
+ Out << " : [ ";
+ const auto CData = Cont.second;
+ if (CData.getBegin())
+ CData.getBegin()->dumpToStream(Out);
+ else
+ Out << "<Unknown>";
+ Out << " .. ";
+ if (CData.getEnd())
+ CData.getEnd()->dumpToStream(Out);
+ else
+ Out << "<Unknown>";
+ Out << " ]";
+ }
+ }
+}
+
+namespace {
+
+bool isBeginCall(const FunctionDecl *Func) {
+ const auto *IdInfo = Func->getIdentifier();
+ if (!IdInfo)
+ return false;
+ return IdInfo->getName().endswith_lower("begin");
+}
+
+bool isEndCall(const FunctionDecl *Func) {
+ const auto *IdInfo = Func->getIdentifier();
+ if (!IdInfo)
+ return false;
+ return IdInfo->getName().endswith_lower("end");
+}
+
+const CXXRecordDecl *getCXXRecordDecl(ProgramStateRef State,
+ const MemRegion *Reg) {
+ auto TI = getDynamicTypeInfo(State, Reg);
+ if (!TI.isValid())
+ return nullptr;
+
+ auto Type = TI.getType();
+ if (const auto *RefT = Type->getAs<ReferenceType>()) {
+ Type = RefT->getPointeeType();
+ }
+
+ return Type->getUnqualifiedDesugaredType()->getAsCXXRecordDecl();
+}
+
+bool hasSubscriptOperator(ProgramStateRef State, const MemRegion *Reg) {
+ const auto *CRD = getCXXRecordDecl(State, Reg);
+ if (!CRD)
+ return false;
+
+ for (const auto *Method : CRD->methods()) {
+ if (!Method->isOverloadedOperator())
+ continue;
+ const auto OPK = Method->getOverloadedOperator();
+ if (OPK == OO_Subscript) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool frontModifiable(ProgramStateRef State, const MemRegion *Reg) {
+ const auto *CRD = getCXXRecordDecl(State, Reg);
+ if (!CRD)
+ return false;
+
+ for (const auto *Method : CRD->methods()) {
+ if (!Method->getDeclName().isIdentifier())
+ continue;
+ if (Method->getName() == "push_front" || Method->getName() == "pop_front") {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool backModifiable(ProgramStateRef State, const MemRegion *Reg) {
+ const auto *CRD = getCXXRecordDecl(State, Reg);
+ if (!CRD)
+ return false;
+
+ for (const auto *Method : CRD->methods()) {
+ if (!Method->getDeclName().isIdentifier())
+ continue;
+ if (Method->getName() == "push_back" || Method->getName() == "pop_back") {
+ return true;
+ }
+ }
+ return false;
+}
+
+SymbolRef getContainerBegin(ProgramStateRef State, const MemRegion *Cont) {
+ const auto *CDataPtr = getContainerData(State, Cont);
+ if (!CDataPtr)
+ return nullptr;
+
+ return CDataPtr->getBegin();
+}
+
+SymbolRef getContainerEnd(ProgramStateRef State, const MemRegion *Cont) {
+ const auto *CDataPtr = getContainerData(State, Cont);
+ if (!CDataPtr)
+ return nullptr;
+
+ return CDataPtr->getEnd();
+}
+
+ProgramStateRef createContainerBegin(ProgramStateRef State,
+ const MemRegion *Cont, const Expr *E,
+ QualType T, const LocationContext *LCtx,
+ unsigned BlockCount) {
+ // Only create if it does not exist
+ const auto *CDataPtr = getContainerData(State, Cont);
+ if (CDataPtr && CDataPtr->getBegin())
+ return State;
+
+ auto &SymMgr = State->getSymbolManager();
+ const SymbolConjured *Sym = SymMgr.conjureSymbol(E, LCtx, T, BlockCount,
+ "begin");
+ State = assumeNoOverflow(State, Sym, 4);
+
+ if (CDataPtr) {
+ const auto CData = CDataPtr->newBegin(Sym);
+ return setContainerData(State, Cont, CData);
+ }
+
+ const auto CData = ContainerData::fromBegin(Sym);
+ return setContainerData(State, Cont, CData);
+}
+
+ProgramStateRef createContainerEnd(ProgramStateRef State, const MemRegion *Cont,
+ const Expr *E, QualType T,
+ const LocationContext *LCtx,
+ unsigned BlockCount) {
+ // Only create if it does not exist
+ const auto *CDataPtr = getContainerData(State, Cont);
+ if (CDataPtr && CDataPtr->getEnd())
+ return State;
+
+ auto &SymMgr = State->getSymbolManager();
+ const SymbolConjured *Sym = SymMgr.conjureSymbol(E, LCtx, T, BlockCount,
+ "end");
+ State = assumeNoOverflow(State, Sym, 4);
+
+ if (CDataPtr) {
+ const auto CData = CDataPtr->newEnd(Sym);
+ return setContainerData(State, Cont, CData);
+ }
+
+ const auto CData = ContainerData::fromEnd(Sym);
+ return setContainerData(State, Cont, CData);
+}
+
+ProgramStateRef setContainerData(ProgramStateRef State, const MemRegion *Cont,
+ const ContainerData &CData) {
+ return State->set<ContainerMap>(Cont, CData);
+}
+
+template <typename Condition, typename Process>
+ProgramStateRef processIteratorPositions(ProgramStateRef State, Condition Cond,
+ Process Proc) {
+ auto &RegionMapFactory = State->get_context<IteratorRegionMap>();
+ auto RegionMap = State->get<IteratorRegionMap>();
+ bool Changed = false;
+ for (const auto &Reg : RegionMap) {
+ if (Cond(Reg.second)) {
+ RegionMap = RegionMapFactory.add(RegionMap, Reg.first, Proc(Reg.second));
+ Changed = true;
+ }
+ }
+
+ if (Changed)
+ State = State->set<IteratorRegionMap>(RegionMap);
+
+ auto &SymbolMapFactory = State->get_context<IteratorSymbolMap>();
+ auto SymbolMap = State->get<IteratorSymbolMap>();
+ Changed = false;
+ for (const auto &Sym : SymbolMap) {
+ if (Cond(Sym.second)) {
+ SymbolMap = SymbolMapFactory.add(SymbolMap, Sym.first, Proc(Sym.second));
+ Changed = true;
+ }
+ }
+
+ if (Changed)
+ State = State->set<IteratorSymbolMap>(SymbolMap);
+
+ return State;
+}
+
+ProgramStateRef invalidateAllIteratorPositions(ProgramStateRef State,
+ const MemRegion *Cont) {
+ auto MatchCont = [&](const IteratorPosition &Pos) {
+ return Pos.getContainer() == Cont;
+ };
+ auto Invalidate = [&](const IteratorPosition &Pos) {
+ return Pos.invalidate();
+ };
+ return processIteratorPositions(State, MatchCont, Invalidate);
+}
+
+ProgramStateRef
+invalidateAllIteratorPositionsExcept(ProgramStateRef State,
+ const MemRegion *Cont, SymbolRef Offset,
+ BinaryOperator::Opcode Opc) {
+ auto MatchContAndCompare = [&](const IteratorPosition &Pos) {
+ return Pos.getContainer() == Cont &&
+ !compare(State, Pos.getOffset(), Offset, Opc);
+ };
+ auto Invalidate = [&](const IteratorPosition &Pos) {
+ return Pos.invalidate();
+ };
+ return processIteratorPositions(State, MatchContAndCompare, Invalidate);
+}
+
+ProgramStateRef invalidateIteratorPositions(ProgramStateRef State,
+ SymbolRef Offset,
+ BinaryOperator::Opcode Opc) {
+ auto Compare = [&](const IteratorPosition &Pos) {
+ return compare(State, Pos.getOffset(), Offset, Opc);
+ };
+ auto Invalidate = [&](const IteratorPosition &Pos) {
+ return Pos.invalidate();
+ };
+ return processIteratorPositions(State, Compare, Invalidate);
+}
+
+ProgramStateRef invalidateIteratorPositions(ProgramStateRef State,
+ SymbolRef Offset1,
+ BinaryOperator::Opcode Opc1,
+ SymbolRef Offset2,
+ BinaryOperator::Opcode Opc2) {
+ auto Compare = [&](const IteratorPosition &Pos) {
+ return compare(State, Pos.getOffset(), Offset1, Opc1) &&
+ compare(State, Pos.getOffset(), Offset2, Opc2);
+ };
+ auto Invalidate = [&](const IteratorPosition &Pos) {
+ return Pos.invalidate();
+ };
+ return processIteratorPositions(State, Compare, Invalidate);
+}
+
+ProgramStateRef reassignAllIteratorPositions(ProgramStateRef State,
+ const MemRegion *Cont,
+ const MemRegion *NewCont) {
+ auto MatchCont = [&](const IteratorPosition &Pos) {
+ return Pos.getContainer() == Cont;
+ };
+ auto ReAssign = [&](const IteratorPosition &Pos) {
+ return Pos.reAssign(NewCont);
+ };
+ return processIteratorPositions(State, MatchCont, ReAssign);
+}
+
+ProgramStateRef reassignAllIteratorPositionsUnless(ProgramStateRef State,
+ const MemRegion *Cont,
+ const MemRegion *NewCont,
+ SymbolRef Offset,
+ BinaryOperator::Opcode Opc) {
+ auto MatchContAndCompare = [&](const IteratorPosition &Pos) {
+ return Pos.getContainer() == Cont &&
+ !compare(State, Pos.getOffset(), Offset, Opc);
+ };
+ auto ReAssign = [&](const IteratorPosition &Pos) {
+ return Pos.reAssign(NewCont);
+ };
+ return processIteratorPositions(State, MatchContAndCompare, ReAssign);
+}
+
+// This function rebases symbolic expression `OldSym + Int` to `NewSym + Int`,
+// `OldSym - Int` to `NewSym - Int` and `OldSym` to `NewSym` in any iterator
+// position offsets where `CondSym` is true.
+ProgramStateRef rebaseSymbolInIteratorPositionsIf(
+ ProgramStateRef State, SValBuilder &SVB, SymbolRef OldSym,
+ SymbolRef NewSym, SymbolRef CondSym, BinaryOperator::Opcode Opc) {
+ auto LessThanEnd = [&](const IteratorPosition &Pos) {
+ return compare(State, Pos.getOffset(), CondSym, Opc);
+ };
+ auto RebaseSymbol = [&](const IteratorPosition &Pos) {
+ return Pos.setTo(rebaseSymbol(State, SVB, Pos.getOffset(), OldSym,
+ NewSym));
+ };
+ return processIteratorPositions(State, LessThanEnd, RebaseSymbol);
+}
+
+// This function rebases symbolic expression `OldExpr + Int` to `NewExpr + Int`,
+// `OldExpr - Int` to `NewExpr - Int` and `OldExpr` to `NewExpr` in expression
+// `OrigExpr`.
+SymbolRef rebaseSymbol(ProgramStateRef State, SValBuilder &SVB,
+ SymbolRef OrigExpr, SymbolRef OldExpr,
+ SymbolRef NewSym) {
+ auto &SymMgr = SVB.getSymbolManager();
+ auto Diff = SVB.evalBinOpNN(State, BO_Sub, nonloc::SymbolVal(OrigExpr),
+ nonloc::SymbolVal(OldExpr),
+ SymMgr.getType(OrigExpr));
+
+ const auto DiffInt = Diff.getAs<nonloc::ConcreteInt>();
+ if (!DiffInt)
+ return OrigExpr;
+
+ return SVB.evalBinOpNN(State, BO_Add, *DiffInt, nonloc::SymbolVal(NewSym),
+ SymMgr.getType(OrigExpr)).getAsSymbol();
+}
+
+bool hasLiveIterators(ProgramStateRef State, const MemRegion *Cont) {
+ auto RegionMap = State->get<IteratorRegionMap>();
+ for (const auto &Reg : RegionMap) {
+ if (Reg.second.getContainer() == Cont)
+ return true;
+ }
+
+ auto SymbolMap = State->get<IteratorSymbolMap>();
+ for (const auto &Sym : SymbolMap) {
+ if (Sym.second.getContainer() == Cont)
+ return true;
+ }
+
+ return false;
+}
+
+} // namespace
+
+void ento::registerContainerModeling(CheckerManager &mgr) {
+ mgr.registerChecker<ContainerModeling>();
+}
+
+bool ento::shouldRegisterContainerModeling(const CheckerManager &mgr) {
+ if (!mgr.getLangOpts().CPlusPlus)
+ return false;
+
+ if (!mgr.getAnalyzerOptions().ShouldAggressivelySimplifyBinaryOperation) {
+ mgr.getASTContext().getDiagnostics().Report(
+ diag::err_analyzer_checker_incompatible_analyzer_option)
+ << "aggressive-binary-operation-simplification" << "false";
+ return false;
+ }
+
+ return true;
+}
diff --git a/clang/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp
index 8dd3132f07e2..4216a6883119 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp
@@ -196,6 +196,6 @@ void ento::registerConversionChecker(CheckerManager &mgr) {
mgr.registerChecker<ConversionChecker>();
}
-bool ento::shouldRegisterConversionChecker(const LangOptions &LO) {
+bool ento::shouldRegisterConversionChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
index 61441889fc64..6bc186aa2755 100644
--- a/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
@@ -540,6 +540,6 @@ void ento::registerDeadStoresChecker(CheckerManager &Mgr) {
AnOpts.getCheckerBooleanOption(Chk, "ShowFixIts");
}
-bool ento::shouldRegisterDeadStoresChecker(const LangOptions &LO) {
+bool ento::shouldRegisterDeadStoresChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp b/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
index 0cb4be2c7fdc..03b7cbd1c833 100644
--- a/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
@@ -47,7 +47,7 @@ void ento::registerDominatorsTreeDumper(CheckerManager &mgr) {
mgr.registerChecker<DominatorsTreeDumper>();
}
-bool ento::shouldRegisterDominatorsTreeDumper(const LangOptions &LO) {
+bool ento::shouldRegisterDominatorsTreeDumper(const CheckerManager &mgr) {
return true;
}
@@ -73,7 +73,7 @@ void ento::registerPostDominatorsTreeDumper(CheckerManager &mgr) {
mgr.registerChecker<PostDominatorsTreeDumper>();
}
-bool ento::shouldRegisterPostDominatorsTreeDumper(const LangOptions &LO) {
+bool ento::shouldRegisterPostDominatorsTreeDumper(const CheckerManager &mgr) {
return true;
}
@@ -98,7 +98,7 @@ void ento::registerControlDependencyTreeDumper(CheckerManager &mgr) {
mgr.registerChecker<ControlDependencyTreeDumper>();
}
-bool ento::shouldRegisterControlDependencyTreeDumper(const LangOptions &LO) {
+bool ento::shouldRegisterControlDependencyTreeDumper(const CheckerManager &mgr) {
return true;
}
@@ -122,7 +122,7 @@ void ento::registerLiveVariablesDumper(CheckerManager &mgr) {
mgr.registerChecker<LiveVariablesDumper>();
}
-bool ento::shouldRegisterLiveVariablesDumper(const LangOptions &LO) {
+bool ento::shouldRegisterLiveVariablesDumper(const CheckerManager &mgr) {
return true;
}
@@ -145,7 +145,7 @@ void ento::registerLiveStatementsDumper(CheckerManager &mgr) {
mgr.registerChecker<LiveStatementsDumper>();
}
-bool ento::shouldRegisterLiveStatementsDumper(const LangOptions &LO) {
+bool ento::shouldRegisterLiveStatementsDumper(const CheckerManager &mgr) {
return true;
}
@@ -169,7 +169,7 @@ void ento::registerCFGViewer(CheckerManager &mgr) {
mgr.registerChecker<CFGViewer>();
}
-bool ento::shouldRegisterCFGViewer(const LangOptions &LO) {
+bool ento::shouldRegisterCFGViewer(const CheckerManager &mgr) {
return true;
}
@@ -199,7 +199,7 @@ void ento::registerCFGDumper(CheckerManager &mgr) {
mgr.registerChecker<CFGDumper>();
}
-bool ento::shouldRegisterCFGDumper(const LangOptions &LO) {
+bool ento::shouldRegisterCFGDumper(const CheckerManager &mgr) {
return true;
}
@@ -223,7 +223,7 @@ void ento::registerCallGraphViewer(CheckerManager &mgr) {
mgr.registerChecker<CallGraphViewer>();
}
-bool ento::shouldRegisterCallGraphViewer(const LangOptions &LO) {
+bool ento::shouldRegisterCallGraphViewer(const CheckerManager &mgr) {
return true;
}
@@ -247,7 +247,7 @@ void ento::registerCallGraphDumper(CheckerManager &mgr) {
mgr.registerChecker<CallGraphDumper>();
}
-bool ento::shouldRegisterCallGraphDumper(const LangOptions &LO) {
+bool ento::shouldRegisterCallGraphDumper(const CheckerManager &mgr) {
return true;
}
@@ -281,8 +281,6 @@ public:
llvm::errs() << Keys[I]->getKey() << " = "
<< (Keys[I]->second.empty() ? "\"\"" : Keys[I]->second)
<< '\n';
-
- llvm::errs() << "[stats]\n" << "num-entries = " << Keys.size() << '\n';
}
};
}
@@ -291,7 +289,7 @@ void ento::registerConfigDumper(CheckerManager &mgr) {
mgr.registerChecker<ConfigDumper>();
}
-bool ento::shouldRegisterConfigDumper(const LangOptions &LO) {
+bool ento::shouldRegisterConfigDumper(const CheckerManager &mgr) {
return true;
}
@@ -314,7 +312,7 @@ void ento::registerExplodedGraphViewer(CheckerManager &mgr) {
mgr.registerChecker<ExplodedGraphViewer>();
}
-bool ento::shouldRegisterExplodedGraphViewer(const LangOptions &LO) {
+bool ento::shouldRegisterExplodedGraphViewer(const CheckerManager &mgr) {
return true;
}
@@ -346,6 +344,6 @@ void ento::registerReportStmts(CheckerManager &mgr) {
mgr.registerChecker<ReportStmts>();
}
-bool ento::shouldRegisterReportStmts(const LangOptions &LO) {
+bool ento::shouldRegisterReportStmts(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/DebugContainerModeling.cpp b/clang/lib/StaticAnalyzer/Checkers/DebugContainerModeling.cpp
new file mode 100644
index 000000000000..6fed999ffc80
--- /dev/null
+++ b/clang/lib/StaticAnalyzer/Checkers/DebugContainerModeling.cpp
@@ -0,0 +1,150 @@
+//==-- DebugContainerModeling.cpp ---------------------------------*- C++ -*--//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines a checker for debugging iterator modeling.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+#include "Iterator.h"
+
+using namespace clang;
+using namespace ento;
+using namespace iterator;
+
+namespace {
+
+class DebugContainerModeling
+ : public Checker<eval::Call> {
+
+ std::unique_ptr<BugType> DebugMsgBugType;
+
+ template <typename Getter>
+ void analyzerContainerDataField(const CallExpr *CE, CheckerContext &C,
+ Getter get) const;
+ void analyzerContainerBegin(const CallExpr *CE, CheckerContext &C) const;
+ void analyzerContainerEnd(const CallExpr *CE, CheckerContext &C) const;
+ ExplodedNode *reportDebugMsg(llvm::StringRef Msg, CheckerContext &C) const;
+
+ typedef void (DebugContainerModeling::*FnCheck)(const CallExpr *,
+ CheckerContext &) const;
+
+ CallDescriptionMap<FnCheck> Callbacks = {
+ {{0, "clang_analyzer_container_begin", 1},
+ &DebugContainerModeling::analyzerContainerBegin},
+ {{0, "clang_analyzer_container_end", 1},
+ &DebugContainerModeling::analyzerContainerEnd},
+ };
+
+public:
+ DebugContainerModeling();
+
+ bool evalCall(const CallEvent &Call, CheckerContext &C) const;
+};
+
+} //namespace
+
+DebugContainerModeling::DebugContainerModeling() {
+ DebugMsgBugType.reset(
+ new BugType(this, "Checking analyzer assumptions", "debug",
+ /*SuppressOnSink=*/true));
+}
+
+bool DebugContainerModeling::evalCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ const auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return false;
+
+ const FnCheck *Handler = Callbacks.lookup(Call);
+ if (!Handler)
+ return false;
+
+ (this->**Handler)(CE, C);
+ return true;
+}
+
+template <typename Getter>
+void DebugContainerModeling::analyzerContainerDataField(const CallExpr *CE,
+ CheckerContext &C,
+ Getter get) const {
+ if (CE->getNumArgs() == 0) {
+ reportDebugMsg("Missing container argument", C);
+ return;
+ }
+
+ auto State = C.getState();
+ const MemRegion *Cont = C.getSVal(CE->getArg(0)).getAsRegion();
+ if (Cont) {
+ const auto *Data = getContainerData(State, Cont);
+ if (Data) {
+ SymbolRef Field = get(Data);
+ if (Field) {
+ State = State->BindExpr(CE, C.getLocationContext(),
+ nonloc::SymbolVal(Field));
+
+      // Propagate interestingness from the container's data (marked
+      // interesting by an `ExprInspection` debug call) to the container
+      // itself.
+ const NoteTag *InterestingTag =
+ C.getNoteTag(
+ [Cont, Field](PathSensitiveBugReport &BR) -> std::string {
+ if (BR.isInteresting(Field)) {
+ BR.markInteresting(Cont);
+ }
+ return "";
+ });
+ C.addTransition(State, InterestingTag);
+ return;
+ }
+ }
+ }
+
+ auto &BVF = C.getSValBuilder().getBasicValueFactory();
+ State = State->BindExpr(CE, C.getLocationContext(),
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(0))));
+}
+
+void DebugContainerModeling::analyzerContainerBegin(const CallExpr *CE,
+ CheckerContext &C) const {
+ analyzerContainerDataField(CE, C, [](const ContainerData *D) {
+ return D->getBegin();
+ });
+}
+
+void DebugContainerModeling::analyzerContainerEnd(const CallExpr *CE,
+ CheckerContext &C) const {
+ analyzerContainerDataField(CE, C, [](const ContainerData *D) {
+ return D->getEnd();
+ });
+}
+
+ExplodedNode *DebugContainerModeling::reportDebugMsg(llvm::StringRef Msg,
+ CheckerContext &C) const {
+ ExplodedNode *N = C.generateNonFatalErrorNode();
+ if (!N)
+ return nullptr;
+
+ auto &BR = C.getBugReporter();
+ BR.emitReport(std::make_unique<PathSensitiveBugReport>(*DebugMsgBugType,
+ Msg, N));
+ return N;
+}
+
+void ento::registerDebugContainerModeling(CheckerManager &mgr) {
+ mgr.registerChecker<DebugContainerModeling>();
+}
+
+bool ento::shouldRegisterDebugContainerModeling(const CheckerManager &mgr) {
+ return true;
+}
diff --git a/clang/lib/StaticAnalyzer/Checkers/DebugIteratorModeling.cpp b/clang/lib/StaticAnalyzer/Checkers/DebugIteratorModeling.cpp
index 4717fef96341..5833eea56da8 100644
--- a/clang/lib/StaticAnalyzer/Checkers/DebugIteratorModeling.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/DebugIteratorModeling.cpp
@@ -30,11 +30,6 @@ class DebugIteratorModeling
std::unique_ptr<BugType> DebugMsgBugType;
template <typename Getter>
- void analyzerContainerDataField(const CallExpr *CE, CheckerContext &C,
- Getter get) const;
- void analyzerContainerBegin(const CallExpr *CE, CheckerContext &C) const;
- void analyzerContainerEnd(const CallExpr *CE, CheckerContext &C) const;
- template <typename Getter>
void analyzerIteratorDataField(const CallExpr *CE, CheckerContext &C,
Getter get, SVal Default) const;
void analyzerIteratorPosition(const CallExpr *CE, CheckerContext &C) const;
@@ -46,10 +41,6 @@ class DebugIteratorModeling
CheckerContext &) const;
CallDescriptionMap<FnCheck> Callbacks = {
- {{0, "clang_analyzer_container_begin", 1},
- &DebugIteratorModeling::analyzerContainerBegin},
- {{0, "clang_analyzer_container_end", 1},
- &DebugIteratorModeling::analyzerContainerEnd},
{{0, "clang_analyzer_iterator_position", 1},
&DebugIteratorModeling::analyzerIteratorPosition},
{{0, "clang_analyzer_iterator_container", 1},
@@ -87,49 +78,6 @@ bool DebugIteratorModeling::evalCall(const CallEvent &Call,
}
template <typename Getter>
-void DebugIteratorModeling::analyzerContainerDataField(const CallExpr *CE,
- CheckerContext &C,
- Getter get) const {
- if (CE->getNumArgs() == 0) {
- reportDebugMsg("Missing container argument", C);
- return;
- }
-
- auto State = C.getState();
- const MemRegion *Cont = C.getSVal(CE->getArg(0)).getAsRegion();
- if (Cont) {
- const auto *Data = getContainerData(State, Cont);
- if (Data) {
- SymbolRef Field = get(Data);
- if (Field) {
- State = State->BindExpr(CE, C.getLocationContext(),
- nonloc::SymbolVal(Field));
- C.addTransition(State);
- return;
- }
- }
- }
-
- auto &BVF = C.getSValBuilder().getBasicValueFactory();
- State = State->BindExpr(CE, C.getLocationContext(),
- nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(0))));
-}
-
-void DebugIteratorModeling::analyzerContainerBegin(const CallExpr *CE,
- CheckerContext &C) const {
- analyzerContainerDataField(CE, C, [](const ContainerData *D) {
- return D->getBegin();
- });
-}
-
-void DebugIteratorModeling::analyzerContainerEnd(const CallExpr *CE,
- CheckerContext &C) const {
- analyzerContainerDataField(CE, C, [](const ContainerData *D) {
- return D->getEnd();
- });
-}
-
-template <typename Getter>
void DebugIteratorModeling::analyzerIteratorDataField(const CallExpr *CE,
CheckerContext &C,
Getter get,
@@ -191,6 +139,6 @@ void ento::registerDebugIteratorModeling(CheckerManager &mgr) {
mgr.registerChecker<DebugIteratorModeling>();
}
-bool ento::shouldRegisterDebugIteratorModeling(const LangOptions &LO) {
+bool ento::shouldRegisterDebugIteratorModeling(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp
index 45c1984c5e15..7c5833762008 100644
--- a/clang/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp
@@ -92,6 +92,8 @@ void DeleteWithNonVirtualDtorChecker::checkPreStmt(const CXXDeleteExpr *DE,
"Logic error"));
ExplodedNode *N = C.generateNonFatalErrorNode();
+ if (!N)
+ return;
auto R = std::make_unique<PathSensitiveBugReport>(*BT, BT->getDescription(), N);
// Mark region of problematic base class for later use in the BugVisitor.
@@ -148,6 +150,6 @@ void ento::registerDeleteWithNonVirtualDtorChecker(CheckerManager &mgr) {
}
bool ento::shouldRegisterDeleteWithNonVirtualDtorChecker(
- const LangOptions &LO) {
+ const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
index 46100cd1dace..2411f0e2d058 100644
--- a/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
@@ -304,6 +304,6 @@ void ento::registerDereferenceChecker(CheckerManager &mgr) {
mgr.registerChecker<DereferenceChecker>();
}
-bool ento::shouldRegisterDereferenceChecker(const LangOptions &LO) {
+bool ento::shouldRegisterDereferenceChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp b/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
index 0c46447e1985..df88b71ff063 100644
--- a/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
@@ -219,19 +219,12 @@ static bool AttrFilter(const ObjCMethodDecl *M) {
// Register the checker that checks for direct accesses in all functions,
// except for the initialization and copy routines.
void ento::registerDirectIvarAssignment(CheckerManager &mgr) {
- mgr.registerChecker<DirectIvarAssignment>();
+ auto Chk = mgr.registerChecker<DirectIvarAssignment>();
+ if (mgr.getAnalyzerOptions().getCheckerBooleanOption(Chk,
+ "AnnotatedFunctions"))
+ Chk->ShouldSkipMethod = &AttrFilter;
}
-bool ento::shouldRegisterDirectIvarAssignment(const LangOptions &LO) {
- return true;
-}
-
-void ento::registerDirectIvarAssignmentForAnnotatedFunctions(
- CheckerManager &mgr) {
- mgr.getChecker<DirectIvarAssignment>()->ShouldSkipMethod = &AttrFilter;
-}
-
-bool ento::shouldRegisterDirectIvarAssignmentForAnnotatedFunctions(
- const LangOptions &LO) {
+bool ento::shouldRegisterDirectIvarAssignment(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
index 8798bde88dcd..2b3164ba4a2c 100644
--- a/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
@@ -101,6 +101,6 @@ void ento::registerDivZeroChecker(CheckerManager &mgr) {
mgr.registerChecker<DivZeroChecker>();
}
-bool ento::shouldRegisterDivZeroChecker(const LangOptions &LO) {
+bool ento::shouldRegisterDivZeroChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
index 8cc38f9735f3..dbc930d7d37b 100644
--- a/clang/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
@@ -203,6 +203,6 @@ void ento::registerDynamicTypeChecker(CheckerManager &mgr) {
mgr.registerChecker<DynamicTypeChecker>();
}
-bool ento::shouldRegisterDynamicTypeChecker(const LangOptions &LO) {
+bool ento::shouldRegisterDynamicTypeChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp b/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
index cce3449b8873..14ba5d769969 100644
--- a/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
@@ -55,6 +55,7 @@ class DynamicTypePropagation:
check::PostStmt<CXXNewExpr>,
check::PreObjCMessage,
check::PostObjCMessage > {
+
const ObjCObjectType *getObjectTypeForAllocAndNew(const ObjCMessageExpr *MsgE,
CheckerContext &C) const;
@@ -69,8 +70,8 @@ class DynamicTypePropagation:
mutable std::unique_ptr<BugType> ObjCGenericsBugType;
void initBugType() const {
if (!ObjCGenericsBugType)
- ObjCGenericsBugType.reset(
- new BugType(this, "Generics", categories::CoreFoundationObjectiveC));
+ ObjCGenericsBugType.reset(new BugType(
+ GenericCheckName, "Generics", categories::CoreFoundationObjectiveC));
}
class GenericsBugVisitor : public BugReporterVisitor {
@@ -108,12 +109,129 @@ public:
/// This value is set to true, when the Generics checker is turned on.
DefaultBool CheckGenerics;
+ CheckerNameRef GenericCheckName;
+};
+
+bool isObjCClassType(QualType Type) {
+ if (const auto *PointerType = dyn_cast<ObjCObjectPointerType>(Type)) {
+ return PointerType->getObjectType()->isObjCClass();
+ }
+ return false;
+}
+
+struct RuntimeType {
+ const ObjCObjectType *Type = nullptr;
+ bool Precise = false;
+
+ operator bool() const { return Type != nullptr; }
};
+
+RuntimeType inferReceiverType(const ObjCMethodCall &Message,
+ CheckerContext &C) {
+ const ObjCMessageExpr *MessageExpr = Message.getOriginExpr();
+
+ // Check if we can statically infer the actual type precisely.
+ //
+ // 1. Class is written directly in the message:
+ // \code
+ // [ActualClass classMethod];
+ // \endcode
+ if (MessageExpr->getReceiverKind() == ObjCMessageExpr::Class) {
+ return {MessageExpr->getClassReceiver()->getAs<ObjCObjectType>(),
+ /*Precise=*/true};
+ }
+
+ // 2. Receiver is 'super' from a class method (a.k.a 'super' is a
+ // class object).
+ // \code
+ // [super classMethod];
+ // \endcode
+ if (MessageExpr->getReceiverKind() == ObjCMessageExpr::SuperClass) {
+ return {MessageExpr->getSuperType()->getAs<ObjCObjectType>(),
+ /*Precise=*/true};
+ }
+
+ // 3. Receiver is 'super' from an instance method (a.k.a 'super' is an
+ // instance of a super class).
+ // \code
+ // [super instanceMethod];
+  //   \endcode
+ if (MessageExpr->getReceiverKind() == ObjCMessageExpr::SuperInstance) {
+ if (const auto *ObjTy =
+ MessageExpr->getSuperType()->getAs<ObjCObjectPointerType>())
+ return {ObjTy->getObjectType(), /*Precise=*/true};
+ }
+
+ const Expr *RecE = MessageExpr->getInstanceReceiver();
+
+ if (!RecE)
+ return {};
+
+ // Otherwise, let's try to get type information from our estimations of
+ // runtime types.
+ QualType InferredType;
+ SVal ReceiverSVal = C.getSVal(RecE);
+ ProgramStateRef State = C.getState();
+
+ if (const MemRegion *ReceiverRegion = ReceiverSVal.getAsRegion()) {
+ if (DynamicTypeInfo DTI = getDynamicTypeInfo(State, ReceiverRegion)) {
+ InferredType = DTI.getType().getCanonicalType();
+ }
+ }
+
+ if (SymbolRef ReceiverSymbol = ReceiverSVal.getAsSymbol()) {
+ if (InferredType.isNull()) {
+ InferredType = ReceiverSymbol->getType();
+ }
+
+ // If receiver is a Class object, we want to figure out the type it
+ // represents.
+ if (isObjCClassType(InferredType)) {
+ // We actually might have some info on what type is contained in there.
+ if (DynamicTypeInfo DTI =
+ getClassObjectDynamicTypeInfo(State, ReceiverSymbol)) {
+
+ // Types in Class objects can be ONLY Objective-C types
+ return {cast<ObjCObjectType>(DTI.getType()), !DTI.canBeASubClass()};
+ }
+
+ SVal SelfSVal = State->getSelfSVal(C.getLocationContext());
+
+ // Another way we can guess what is in Class object, is when it is a
+ // 'self' variable of the current class method.
+ if (ReceiverSVal == SelfSVal) {
+ // In this case, we should return the type of the enclosing class
+ // declaration.
+ if (const ObjCMethodDecl *MD =
+ dyn_cast<ObjCMethodDecl>(C.getStackFrame()->getDecl()))
+ if (const ObjCObjectType *ObjTy = dyn_cast<ObjCObjectType>(
+ MD->getClassInterface()->getTypeForDecl()))
+ return {ObjTy};
+ }
+ }
+ }
+
+ // Unfortunately, it seems like we have no idea what that type is.
+ if (InferredType.isNull()) {
+ return {};
+ }
+
+ // We can end up here if we got some dynamic type info and the
+ // receiver is not one of the known Class objects.
+ if (const auto *ReceiverInferredType =
+ dyn_cast<ObjCObjectPointerType>(InferredType)) {
+ return {ReceiverInferredType->getObjectType()};
+ }
+
+ // Any other type (like 'Class') is not really useful at this point.
+ return {};
+}
} // end anonymous namespace
void DynamicTypePropagation::checkDeadSymbols(SymbolReaper &SR,
CheckerContext &C) const {
ProgramStateRef State = removeDeadTypes(C.getState(), SR);
+ State = removeDeadClassObjectTypes(State, SR);
MostSpecializedTypeArgsMapTy TyArgMap =
State->get<MostSpecializedTypeArgsMap>();
@@ -209,12 +327,21 @@ void DynamicTypePropagation::checkPostCall(const CallEvent &Call,
case OMF_alloc:
case OMF_new: {
// Get the type of object that will get created.
- const ObjCMessageExpr *MsgE = Msg->getOriginExpr();
- const ObjCObjectType *ObjTy = getObjectTypeForAllocAndNew(MsgE, C);
+ RuntimeType ObjTy = inferReceiverType(*Msg, C);
+
if (!ObjTy)
return;
+
QualType DynResTy =
- C.getASTContext().getObjCObjectPointerType(QualType(ObjTy, 0));
+ C.getASTContext().getObjCObjectPointerType(QualType(ObjTy.Type, 0));
+ // We used to assume that whatever type we got from inferring the
+ // type is actually precise (and it is not exactly correct).
+ // A big portion of the existing behavior depends on that assumption
+ // (e.g. certain inlining won't take place). For this reason, we don't
+ // use ObjTy.Precise flag here.
+ //
+ // TODO: We should mitigate this problem some time in the future
+ // and replace hardcoded 'false' with '!ObjTy.Precise'.
C.addTransition(setDynamicTypeInfo(State, RetReg, DynResTy, false));
break;
}
@@ -303,40 +430,6 @@ void DynamicTypePropagation::checkPostStmt(const CXXNewExpr *NewE,
/*CanBeSubClassed=*/false));
}
-const ObjCObjectType *
-DynamicTypePropagation::getObjectTypeForAllocAndNew(const ObjCMessageExpr *MsgE,
- CheckerContext &C) const {
- if (MsgE->getReceiverKind() == ObjCMessageExpr::Class) {
- if (const ObjCObjectType *ObjTy
- = MsgE->getClassReceiver()->getAs<ObjCObjectType>())
- return ObjTy;
- }
-
- if (MsgE->getReceiverKind() == ObjCMessageExpr::SuperClass) {
- if (const ObjCObjectType *ObjTy
- = MsgE->getSuperType()->getAs<ObjCObjectType>())
- return ObjTy;
- }
-
- const Expr *RecE = MsgE->getInstanceReceiver();
- if (!RecE)
- return nullptr;
-
- RecE= RecE->IgnoreParenImpCasts();
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(RecE)) {
- const StackFrameContext *SFCtx = C.getStackFrame();
- // Are we calling [self alloc]? If this is self, get the type of the
- // enclosing ObjC class.
- if (DRE->getDecl() == SFCtx->getSelfDecl()) {
- if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(SFCtx->getDecl()))
- if (const ObjCObjectType *ObjTy =
- dyn_cast<ObjCObjectType>(MD->getClassInterface()->getTypeForDecl()))
- return ObjTy;
- }
- }
- return nullptr;
-}
-
// Return a better dynamic type if one can be derived from the cast.
// Compare the current dynamic type of the region and the new type to which we
// are casting. If the new type is lower in the inheritance hierarchy, pick it.
@@ -821,25 +914,56 @@ void DynamicTypePropagation::checkPostObjCMessage(const ObjCMethodCall &M,
Selector Sel = MessageExpr->getSelector();
ProgramStateRef State = C.getState();
- // Inference for class variables.
- // We are only interested in cases where the class method is invoked on a
- // class. This method is provided by the runtime and available on all classes.
- if (MessageExpr->getReceiverKind() == ObjCMessageExpr::Class &&
- Sel.getAsString() == "class") {
- QualType ReceiverType = MessageExpr->getClassReceiver();
- const auto *ReceiverClassType = ReceiverType->castAs<ObjCObjectType>();
- if (!ReceiverClassType->isSpecialized())
- return;
- QualType ReceiverClassPointerType =
- C.getASTContext().getObjCObjectPointerType(
- QualType(ReceiverClassType, 0));
- const auto *InferredType =
- ReceiverClassPointerType->castAs<ObjCObjectPointerType>();
+ // Here we try to propagate information on Class objects.
+ if (Sel.getAsString() == "class") {
+ // We try to figure out the type from the receiver of the 'class' message.
+ if (RuntimeType ReceiverRuntimeType = inferReceiverType(M, C)) {
+
+ ReceiverRuntimeType.Type->getSuperClassType();
+ QualType ReceiverClassType(ReceiverRuntimeType.Type, 0);
+
+ // We want to consider only precise information on generics.
+ if (ReceiverRuntimeType.Type->isSpecialized() &&
+ ReceiverRuntimeType.Precise) {
+ QualType ReceiverClassPointerType =
+ C.getASTContext().getObjCObjectPointerType(ReceiverClassType);
+ const auto *InferredType =
+ ReceiverClassPointerType->castAs<ObjCObjectPointerType>();
+ State = State->set<MostSpecializedTypeArgsMap>(RetSym, InferredType);
+ }
- State = State->set<MostSpecializedTypeArgsMap>(RetSym, InferredType);
- C.addTransition(State);
- return;
+ // Constrain the resulting class object to the inferred type.
+ State = setClassObjectDynamicTypeInfo(State, RetSym, ReceiverClassType,
+ !ReceiverRuntimeType.Precise);
+
+ C.addTransition(State);
+ return;
+ }
+ }
+
+ if (Sel.getAsString() == "superclass") {
+ // We try to figure out the type from the receiver of the 'superclass'
+ // message.
+ if (RuntimeType ReceiverRuntimeType = inferReceiverType(M, C)) {
+
+ // Result type would be a super class of the receiver's type.
+ QualType ReceiversSuperClass =
+ ReceiverRuntimeType.Type->getSuperClassType();
+
+ // Check if it really had super class.
+ //
+ // TODO: we can probably pay closer attention to cases when the class
+ // object can be 'nil' as the result of such message.
+ if (!ReceiversSuperClass.isNull()) {
+ // Constrain the resulting class object to the inferred type.
+ State = setClassObjectDynamicTypeInfo(
+ State, RetSym, ReceiversSuperClass, !ReceiverRuntimeType.Precise);
+
+ C.addTransition(State);
+ }
+ return;
+ }
}
// Tracking for return types.
@@ -979,9 +1103,10 @@ PathDiagnosticPieceRef DynamicTypePropagation::GenericsBugVisitor::VisitNode(
void ento::registerObjCGenericsChecker(CheckerManager &mgr) {
DynamicTypePropagation *checker = mgr.getChecker<DynamicTypePropagation>();
checker->CheckGenerics = true;
+ checker->GenericCheckName = mgr.getCurrentCheckerName();
}
-bool ento::shouldRegisterObjCGenericsChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCGenericsChecker(const CheckerManager &mgr) {
return true;
}
@@ -989,6 +1114,6 @@ void ento::registerDynamicTypePropagation(CheckerManager &mgr) {
mgr.registerChecker<DynamicTypePropagation>();
}
-bool ento::shouldRegisterDynamicTypePropagation(const LangOptions &LO) {
+bool ento::shouldRegisterDynamicTypePropagation(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp
index 481a5685a71f..0e94b915a468 100644
--- a/clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp
@@ -142,6 +142,6 @@ void ento::registerEnumCastOutOfRangeChecker(CheckerManager &mgr) {
mgr.registerChecker<EnumCastOutOfRangeChecker>();
}
-bool ento::shouldRegisterEnumCastOutOfRangeChecker(const LangOptions &LO) {
+bool ento::shouldRegisterEnumCastOutOfRangeChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
index 17c813962a23..4225d890c47a 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
@@ -6,6 +6,7 @@
//
//===----------------------------------------------------------------------===//
+#include "Taint.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Checkers/SValExplainer.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
@@ -13,6 +14,7 @@
#include "clang/StaticAnalyzer/Core/IssueHash.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ScopedPrinter.h"
@@ -45,13 +47,17 @@ class ExprInspectionChecker : public Checker<eval::Call, check::DeadSymbols,
void analyzerHashDump(const CallExpr *CE, CheckerContext &C) const;
void analyzerDenote(const CallExpr *CE, CheckerContext &C) const;
void analyzerExpress(const CallExpr *CE, CheckerContext &C) const;
+ void analyzerIsTainted(const CallExpr *CE, CheckerContext &C) const;
typedef void (ExprInspectionChecker::*FnCheck)(const CallExpr *,
CheckerContext &C) const;
- ExplodedNode *reportBug(llvm::StringRef Msg, CheckerContext &C) const;
+ // Optional parameter `ExprVal` for expression value to be marked interesting.
+ ExplodedNode *reportBug(llvm::StringRef Msg, CheckerContext &C,
+ Optional<SVal> ExprVal = None) const;
ExplodedNode *reportBug(llvm::StringRef Msg, BugReporter &BR,
- ExplodedNode *N) const;
+ ExplodedNode *N,
+ Optional<SVal> ExprVal = None) const;
public:
bool evalCall(const CallEvent &Call, CheckerContext &C) const;
@@ -72,26 +78,34 @@ bool ExprInspectionChecker::evalCall(const CallEvent &Call,
// These checks should have no effect on the surrounding environment
// (globals should not be invalidated, etc), hence the use of evalCall.
- FnCheck Handler = llvm::StringSwitch<FnCheck>(C.getCalleeName(CE))
- .Case("clang_analyzer_eval", &ExprInspectionChecker::analyzerEval)
- .Case("clang_analyzer_checkInlined",
- &ExprInspectionChecker::analyzerCheckInlined)
- .Case("clang_analyzer_crash", &ExprInspectionChecker::analyzerCrash)
- .Case("clang_analyzer_warnIfReached",
- &ExprInspectionChecker::analyzerWarnIfReached)
- .Case("clang_analyzer_warnOnDeadSymbol",
- &ExprInspectionChecker::analyzerWarnOnDeadSymbol)
- .StartsWith("clang_analyzer_explain", &ExprInspectionChecker::analyzerExplain)
- .StartsWith("clang_analyzer_dump", &ExprInspectionChecker::analyzerDump)
- .Case("clang_analyzer_getExtent", &ExprInspectionChecker::analyzerGetExtent)
- .Case("clang_analyzer_printState",
- &ExprInspectionChecker::analyzerPrintState)
- .Case("clang_analyzer_numTimesReached",
- &ExprInspectionChecker::analyzerNumTimesReached)
- .Case("clang_analyzer_hashDump", &ExprInspectionChecker::analyzerHashDump)
- .Case("clang_analyzer_denote", &ExprInspectionChecker::analyzerDenote)
- .Case("clang_analyzer_express", &ExprInspectionChecker::analyzerExpress)
- .Default(nullptr);
+ FnCheck Handler =
+ llvm::StringSwitch<FnCheck>(C.getCalleeName(CE))
+ .Case("clang_analyzer_eval", &ExprInspectionChecker::analyzerEval)
+ .Case("clang_analyzer_checkInlined",
+ &ExprInspectionChecker::analyzerCheckInlined)
+ .Case("clang_analyzer_crash", &ExprInspectionChecker::analyzerCrash)
+ .Case("clang_analyzer_warnIfReached",
+ &ExprInspectionChecker::analyzerWarnIfReached)
+ .Case("clang_analyzer_warnOnDeadSymbol",
+ &ExprInspectionChecker::analyzerWarnOnDeadSymbol)
+ .StartsWith("clang_analyzer_explain",
+ &ExprInspectionChecker::analyzerExplain)
+ .StartsWith("clang_analyzer_dump",
+ &ExprInspectionChecker::analyzerDump)
+ .Case("clang_analyzer_getExtent",
+ &ExprInspectionChecker::analyzerGetExtent)
+ .Case("clang_analyzer_printState",
+ &ExprInspectionChecker::analyzerPrintState)
+ .Case("clang_analyzer_numTimesReached",
+ &ExprInspectionChecker::analyzerNumTimesReached)
+ .Case("clang_analyzer_hashDump",
+ &ExprInspectionChecker::analyzerHashDump)
+ .Case("clang_analyzer_denote", &ExprInspectionChecker::analyzerDenote)
+ .Case("clang_analyzer_express",
+ &ExprInspectionChecker::analyzerExpress)
+ .StartsWith("clang_analyzer_isTainted",
+ &ExprInspectionChecker::analyzerIsTainted)
+ .Default(nullptr);
if (!Handler)
return false;
@@ -133,22 +147,28 @@ static const char *getArgumentValueString(const CallExpr *CE,
}
ExplodedNode *ExprInspectionChecker::reportBug(llvm::StringRef Msg,
- CheckerContext &C) const {
+ CheckerContext &C,
+ Optional<SVal> ExprVal) const {
ExplodedNode *N = C.generateNonFatalErrorNode();
- reportBug(Msg, C.getBugReporter(), N);
+ reportBug(Msg, C.getBugReporter(), N, ExprVal);
return N;
}
ExplodedNode *ExprInspectionChecker::reportBug(llvm::StringRef Msg,
BugReporter &BR,
- ExplodedNode *N) const {
+ ExplodedNode *N,
+ Optional<SVal> ExprVal) const {
if (!N)
return nullptr;
if (!BT)
BT.reset(new BugType(this, "Checking analyzer assumptions", "debug"));
- BR.emitReport(std::make_unique<PathSensitiveBugReport>(*BT, Msg, N));
+ auto R = std::make_unique<PathSensitiveBugReport>(*BT, Msg, N);
+ if (ExprVal) {
+ R->markInteresting(*ExprVal);
+ }
+ BR.emitReport(std::move(R));
return N;
}
@@ -234,8 +254,9 @@ void ExprInspectionChecker::analyzerGetExtent(const CallExpr *CE,
}
ProgramStateRef State = C.getState();
- State = State->BindExpr(CE, C.getLocationContext(),
- MR->getExtent(C.getSValBuilder()));
+ DefinedOrUnknownSVal Size = getDynamicSize(State, MR, C.getSValBuilder());
+
+ State = State->BindExpr(CE, C.getLocationContext(), Size);
C.addTransition(State);
}
@@ -394,7 +415,8 @@ void ExprInspectionChecker::analyzerExpress(const CallExpr *CE,
return;
}
- SymbolRef Sym = C.getSVal(CE->getArg(0)).getAsSymbol();
+ SVal ArgVal = C.getSVal(CE->getArg(0));
+ SymbolRef Sym = ArgVal.getAsSymbol();
if (!Sym) {
reportBug("Not a symbol", C);
return;
@@ -407,13 +429,24 @@ void ExprInspectionChecker::analyzerExpress(const CallExpr *CE,
return;
}
- reportBug(*Str, C);
+ reportBug(*Str, C, ArgVal);
+}
+
+void ExprInspectionChecker::analyzerIsTainted(const CallExpr *CE,
+ CheckerContext &C) const {
+ if (CE->getNumArgs() != 1) {
+ reportBug("clang_analyzer_isTainted() requires exactly one argument", C);
+ return;
+ }
+ const bool IsTainted =
+ taint::isTainted(C.getState(), CE->getArg(0), C.getLocationContext());
+ reportBug(IsTainted ? "YES" : "NO", C);
}
void ento::registerExprInspectionChecker(CheckerManager &Mgr) {
Mgr.registerChecker<ExprInspectionChecker>();
}
-bool ento::shouldRegisterExprInspectionChecker(const LangOptions &LO) {
+bool ento::shouldRegisterExprInspectionChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
index b315a8452285..6275e49e51ae 100644
--- a/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
@@ -66,6 +66,6 @@ void ento::registerFixedAddressChecker(CheckerManager &mgr) {
mgr.registerChecker<FixedAddressChecker>();
}
-bool ento::shouldRegisterFixedAddressChecker(const LangOptions &LO) {
+bool ento::shouldRegisterFixedAddressChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp
index 3c04983df443..fc35082705fa 100644
--- a/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp
@@ -90,6 +90,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
+#include "llvm/ADT/StringExtras.h"
using namespace clang;
using namespace ento;
@@ -149,6 +150,10 @@ public:
CASE(Kind::Released)
CASE(Kind::Escaped)
}
+ if (ErrorSym) {
+ OS << " ErrorSym: ";
+ ErrorSym->dumpToStream(OS);
+ }
}
LLVM_DUMP_METHOD void dump() const { dump(llvm::errs()); }
@@ -314,6 +319,17 @@ void FuchsiaHandleChecker::checkPostCall(const CallEvent &Call,
// Function returns an open handle.
if (hasFuchsiaAttr<AcquireHandleAttr>(FuncDecl)) {
SymbolRef RetSym = Call.getReturnValue().getAsSymbol();
+ Notes.push_back([RetSym, FuncDecl](BugReport &BR) -> std::string {
+ auto *PathBR = static_cast<PathSensitiveBugReport *>(&BR);
+ if (auto IsInteresting = PathBR->getInterestingnessKind(RetSym)) {
+ std::string SBuf;
+ llvm::raw_string_ostream OS(SBuf);
+ OS << "Function '" << FuncDecl->getNameAsString()
+ << "' returns an open handle";
+ return OS.str();
+ } else
+ return "";
+ });
State =
State->set<HStateMap>(RetSym, HandleState::getMaybeAllocated(nullptr));
}
@@ -322,6 +338,7 @@ void FuchsiaHandleChecker::checkPostCall(const CallEvent &Call,
if (Arg >= FuncDecl->getNumParams())
break;
const ParmVarDecl *PVD = FuncDecl->getParamDecl(Arg);
+ unsigned ParamDiagIdx = PVD->getFunctionScopeIndex() + 1;
SymbolRef Handle =
getFuchsiaHandleSymbol(PVD->getType(), Call.getArgSVal(Arg), State);
if (!Handle)
@@ -335,20 +352,28 @@ void FuchsiaHandleChecker::checkPostCall(const CallEvent &Call,
reportDoubleRelease(Handle, Call.getArgSourceRange(Arg), C);
return;
} else {
- Notes.push_back([Handle](BugReport &BR) {
+ Notes.push_back([Handle, ParamDiagIdx](BugReport &BR) -> std::string {
auto *PathBR = static_cast<PathSensitiveBugReport *>(&BR);
if (auto IsInteresting = PathBR->getInterestingnessKind(Handle)) {
- return "Handle released here.";
+ std::string SBuf;
+ llvm::raw_string_ostream OS(SBuf);
+ OS << "Handle released through " << ParamDiagIdx
+ << llvm::getOrdinalSuffix(ParamDiagIdx) << " parameter";
+ return OS.str();
} else
return "";
});
State = State->set<HStateMap>(Handle, HandleState::getReleased());
}
} else if (hasFuchsiaAttr<AcquireHandleAttr>(PVD)) {
- Notes.push_back([Handle](BugReport &BR) {
+ Notes.push_back([Handle, ParamDiagIdx](BugReport &BR) -> std::string {
auto *PathBR = static_cast<PathSensitiveBugReport *>(&BR);
if (auto IsInteresting = PathBR->getInterestingnessKind(Handle)) {
- return "Handle allocated here.";
+ std::string SBuf;
+ llvm::raw_string_ostream OS(SBuf);
+ OS << "Handle allocated through " << ParamDiagIdx
+ << llvm::getOrdinalSuffix(ParamDiagIdx) << " parameter";
+ return OS.str();
} else
return "";
});
@@ -358,8 +383,8 @@ void FuchsiaHandleChecker::checkPostCall(const CallEvent &Call,
}
const NoteTag *T = nullptr;
if (!Notes.empty()) {
- T = C.getNoteTag(
- [this, Notes{std::move(Notes)}](BugReport &BR) -> std::string {
+ T = C.getNoteTag([this, Notes{std::move(Notes)}](
+ PathSensitiveBugReport &BR) -> std::string {
if (&BR.getBugType() != &UseAfterReleaseBugType &&
&BR.getBugType() != &LeakBugType &&
&BR.getBugType() != &DoubleReleaseBugType)
@@ -381,7 +406,13 @@ void FuchsiaHandleChecker::checkDeadSymbols(SymbolReaper &SymReaper,
SmallVector<SymbolRef, 2> LeakedSyms;
HStateMapTy TrackedHandles = State->get<HStateMap>();
for (auto &CurItem : TrackedHandles) {
- if (!SymReaper.isDead(CurItem.first))
+ SymbolRef ErrorSym = CurItem.second.getErrorSym();
+ // Keeping zombie handle symbols. In case the error symbol is dying later
+ // than the handle symbol we might produce spurious leak warnings (in case
+ // we find out later from the status code that the handle allocation failed
+ // in the first place).
+ if (!SymReaper.isDead(CurItem.first) ||
+ (ErrorSym && !SymReaper.isDead(ErrorSym)))
continue;
if (CurItem.second.isAllocated() || CurItem.second.maybeAllocated())
LeakedSyms.push_back(CurItem.first);
@@ -535,7 +566,7 @@ void ento::registerFuchsiaHandleChecker(CheckerManager &mgr) {
mgr.registerChecker<FuchsiaHandleChecker>();
}
-bool ento::shouldRegisterFuchsiaHandleChecker(const LangOptions &LO) {
+bool ento::shouldRegisterFuchsiaHandleChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp
index d471c23b83bf..63fbe75fd498 100644
--- a/clang/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp
@@ -52,18 +52,16 @@ public:
BugReporter &BR) const;
};
-auto callsName(const char *FunctionName)
- -> decltype(callee(functionDecl())) {
+decltype(auto) callsName(const char *FunctionName) {
return callee(functionDecl(hasName(FunctionName)));
}
-auto equalsBoundArgDecl(int ArgIdx, const char *DeclName)
- -> decltype(hasArgument(0, expr())) {
+decltype(auto) equalsBoundArgDecl(int ArgIdx, const char *DeclName) {
return hasArgument(ArgIdx, ignoringParenCasts(declRefExpr(
to(varDecl(equalsBoundNode(DeclName))))));
}
-auto bindAssignmentToDecl(const char *DeclName) -> decltype(hasLHS(expr())) {
+decltype(auto) bindAssignmentToDecl(const char *DeclName) {
return hasLHS(ignoringParenImpCasts(
declRefExpr(to(varDecl().bind(DeclName)))));
}
@@ -227,6 +225,6 @@ void ento::registerGCDAntipattern(CheckerManager &Mgr) {
Mgr.registerChecker<GCDAntipatternChecker>();
}
-bool ento::shouldRegisterGCDAntipattern(const LangOptions &LO) {
+bool ento::shouldRegisterGCDAntipattern(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/GTestChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/GTestChecker.cpp
index f4308f510f0b..8d9afbe88aa8 100644
--- a/clang/lib/StaticAnalyzer/Checkers/GTestChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/GTestChecker.cpp
@@ -291,8 +291,9 @@ void ento::registerGTestChecker(CheckerManager &Mgr) {
Mgr.registerChecker<GTestChecker>();
}
-bool ento::shouldRegisterGTestChecker(const LangOptions &LO) {
+bool ento::shouldRegisterGTestChecker(const CheckerManager &mgr) {
// gtest is a C++ API so there is no sense running the checker
// if not compiling for C++.
+ const LangOptions &LO = mgr.getLangOpts();
return LO.CPlusPlus;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
index 302d5bb1bea8..c06d2fcd8e7d 100644
--- a/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
@@ -22,11 +22,14 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "llvm/Support/YAMLTraits.h"
+
#include <algorithm>
#include <limits>
+#include <memory>
#include <unordered_map>
#include <utility>
@@ -35,17 +38,15 @@ using namespace ento;
using namespace taint;
namespace {
-class GenericTaintChecker
- : public Checker<check::PostStmt<CallExpr>, check::PreStmt<CallExpr>> {
+class GenericTaintChecker : public Checker<check::PreCall, check::PostCall> {
public:
static void *getTag() {
static int Tag;
return &Tag;
}
- void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
-
- void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
void printState(raw_ostream &Out, ProgramStateRef State, const char *NL,
const char *Sep) const override;
@@ -81,7 +82,7 @@ public:
/// Convert SignedArgVector to ArgVector.
ArgVector convertToArgVector(CheckerManager &Mgr, const std::string &Option,
- SignedArgVector Args);
+ const SignedArgVector &Args);
/// Parse the config.
void parseConfiguration(CheckerManager &Mgr, const std::string &Option,
@@ -96,7 +97,8 @@ private:
mutable std::unique_ptr<BugType> BT;
void initBugType() const {
if (!BT)
- BT.reset(new BugType(this, "Use of Untrusted Data", "Untrusted Data"));
+ BT = std::make_unique<BugType>(this, "Use of Untrusted Data",
+ "Untrusted Data");
}
struct FunctionData {
@@ -106,9 +108,12 @@ private:
FunctionData &operator=(const FunctionData &) = delete;
FunctionData &operator=(FunctionData &&) = delete;
- static Optional<FunctionData> create(const CallExpr *CE,
+ static Optional<FunctionData> create(const CallEvent &Call,
const CheckerContext &C) {
- const FunctionDecl *FDecl = C.getCalleeDecl(CE);
+ if (!Call.getDecl())
+ return None;
+
+ const FunctionDecl *FDecl = Call.getDecl()->getAsFunction();
if (!FDecl || (FDecl->getKind() != Decl::Function &&
FDecl->getKind() != Decl::CXXMethod))
return None;
@@ -132,33 +137,33 @@ private:
/// Catch taint related bugs. Check if tainted data is passed to a
/// system call etc. Returns true on matching.
- bool checkPre(const CallExpr *CE, const FunctionData &FData,
+ bool checkPre(const CallEvent &Call, const FunctionData &FData,
CheckerContext &C) const;
/// Add taint sources on a pre-visit. Returns true on matching.
- bool addSourcesPre(const CallExpr *CE, const FunctionData &FData,
+ bool addSourcesPre(const CallEvent &Call, const FunctionData &FData,
CheckerContext &C) const;
/// Mark filter's arguments not tainted on a pre-visit. Returns true on
/// matching.
- bool addFiltersPre(const CallExpr *CE, const FunctionData &FData,
+ bool addFiltersPre(const CallEvent &Call, const FunctionData &FData,
CheckerContext &C) const;
/// Propagate taint generated at pre-visit. Returns true on matching.
- bool propagateFromPre(const CallExpr *CE, CheckerContext &C) const;
+ static bool propagateFromPre(const CallEvent &Call, CheckerContext &C);
/// Check if the region the expression evaluates to is the standard input,
/// and thus, is tainted.
static bool isStdin(const Expr *E, CheckerContext &C);
/// Given a pointer argument, return the value it points to.
- static Optional<SVal> getPointedToSVal(CheckerContext &C, const Expr *Arg);
+ static Optional<SVal> getPointeeOf(CheckerContext &C, const Expr *Arg);
/// Check for CWE-134: Uncontrolled Format String.
static constexpr llvm::StringLiteral MsgUncontrolledFormatString =
"Untrusted data is used as a format string "
"(CWE-134: Uncontrolled Format String)";
- bool checkUncontrolledFormatString(const CallExpr *CE,
+ bool checkUncontrolledFormatString(const CallEvent &Call,
CheckerContext &C) const;
/// Check for:
@@ -167,7 +172,7 @@ private:
static constexpr llvm::StringLiteral MsgSanitizeSystemArgs =
"Untrusted data is passed to a system call "
"(CERT/STR02-C. Sanitize data passed to complex subsystems)";
- bool checkSystemCall(const CallExpr *CE, StringRef Name,
+ bool checkSystemCall(const CallEvent &Call, StringRef Name,
CheckerContext &C) const;
/// Check if tainted data is used as a buffer size ins strn.. functions,
@@ -176,13 +181,12 @@ private:
"Untrusted data is used to specify the buffer size "
"(CERT/STR31-C. Guarantee that storage for strings has sufficient space "
"for character data and the null terminator)";
- bool checkTaintedBufferSize(const CallExpr *CE, const FunctionDecl *FDecl,
- CheckerContext &C) const;
+ bool checkTaintedBufferSize(const CallEvent &Call, CheckerContext &C) const;
/// Check if tainted data is used as a custom sink's parameter.
static constexpr llvm::StringLiteral MsgCustomSink =
"Untrusted data is passed to a user-defined sink";
- bool checkCustomSinks(const CallExpr *CE, const FunctionData &FData,
+ bool checkCustomSinks(const CallEvent &Call, const FunctionData &FData,
CheckerContext &C) const;
/// Generate a report if the expression is tainted or points to tainted data.
@@ -212,7 +216,7 @@ private:
/// ReturnValueIndex is added to the dst list, the return value will be
/// tainted.
struct TaintPropagationRule {
- using PropagationFuncType = bool (*)(bool IsTainted, const CallExpr *,
+ using PropagationFuncType = bool (*)(bool IsTainted, const CallEvent &Call,
CheckerContext &C);
/// List of arguments which can be taint sources and should be checked.
@@ -256,7 +260,8 @@ private:
return (llvm::find(DstArgs, ArgNum) != DstArgs.end());
}
- static bool isTaintedOrPointsToTainted(const Expr *E, ProgramStateRef State,
+ static bool isTaintedOrPointsToTainted(const Expr *E,
+ const ProgramStateRef &State,
CheckerContext &C) {
if (isTainted(State, E, C.getLocationContext()) || isStdin(E, C))
return true;
@@ -264,16 +269,16 @@ private:
if (!E->getType().getTypePtr()->isPointerType())
return false;
- Optional<SVal> V = getPointedToSVal(C, E);
+ Optional<SVal> V = getPointeeOf(C, E);
return (V && isTainted(State, *V));
}
/// Pre-process a function which propagates taint according to the
/// taint rule.
- ProgramStateRef process(const CallExpr *CE, CheckerContext &C) const;
+ ProgramStateRef process(const CallEvent &Call, CheckerContext &C) const;
// Functions for custom taintedness propagation.
- static bool postSocket(bool IsTainted, const CallExpr *CE,
+ static bool postSocket(bool IsTainted, const CallEvent &Call,
CheckerContext &C);
};
@@ -351,8 +356,10 @@ template <> struct MappingTraits<TaintConfig::NameScopeArgs> {
/// points to data, which should be tainted on return.
REGISTER_SET_WITH_PROGRAMSTATE(TaintArgsOnPostVisit, unsigned)
-GenericTaintChecker::ArgVector GenericTaintChecker::convertToArgVector(
- CheckerManager &Mgr, const std::string &Option, SignedArgVector Args) {
+GenericTaintChecker::ArgVector
+GenericTaintChecker::convertToArgVector(CheckerManager &Mgr,
+ const std::string &Option,
+ const SignedArgVector &Args) {
ArgVector Result;
for (int Arg : Args) {
if (Arg == -1)
@@ -396,7 +403,7 @@ void GenericTaintChecker::parseConfiguration(CheckerManager &Mgr,
template <typename T>
auto GenericTaintChecker::findFunctionInConfig(const ConfigDataMap<T> &Map,
const FunctionData &FData) {
- auto Range = Map.equal_range(FData.Name);
+ auto Range = Map.equal_range(std::string(FData.Name));
auto It =
std::find_if(Range.first, Range.second, [&FData](const auto &Entry) {
const auto &Value = Entry.second;
@@ -419,125 +426,125 @@ GenericTaintChecker::TaintPropagationRule::getTaintPropagationRule(
llvm::StringSwitch<TaintPropagationRule>(FData.FullName)
// Source functions
// TODO: Add support for vfscanf & family.
- .Case("fdopen", TaintPropagationRule({}, {ReturnValueIndex}))
- .Case("fopen", TaintPropagationRule({}, {ReturnValueIndex}))
- .Case("freopen", TaintPropagationRule({}, {ReturnValueIndex}))
- .Case("getch", TaintPropagationRule({}, {ReturnValueIndex}))
- .Case("getchar", TaintPropagationRule({}, {ReturnValueIndex}))
- .Case("getchar_unlocked",
- TaintPropagationRule({}, {ReturnValueIndex}))
- .Case("getenv", TaintPropagationRule({}, {ReturnValueIndex}))
- .Case("gets", TaintPropagationRule({}, {0, ReturnValueIndex}))
- .Case("scanf", TaintPropagationRule({}, {}, VariadicType::Dst, 1))
- .Case("socket",
- TaintPropagationRule({}, {ReturnValueIndex}, VariadicType::None,
- InvalidArgIndex,
- &TaintPropagationRule::postSocket))
- .Case("wgetch", TaintPropagationRule({}, {ReturnValueIndex}))
+ .Case("fdopen", {{}, {ReturnValueIndex}})
+ .Case("fopen", {{}, {ReturnValueIndex}})
+ .Case("freopen", {{}, {ReturnValueIndex}})
+ .Case("getch", {{}, {ReturnValueIndex}})
+ .Case("getchar", {{}, {ReturnValueIndex}})
+ .Case("getchar_unlocked", {{}, {ReturnValueIndex}})
+ .Case("getenv", {{}, {ReturnValueIndex}})
+ .Case("gets", {{}, {0, ReturnValueIndex}})
+ .Case("scanf", {{}, {}, VariadicType::Dst, 1})
+ .Case("socket", {{},
+ {ReturnValueIndex},
+ VariadicType::None,
+ InvalidArgIndex,
+ &TaintPropagationRule::postSocket})
+ .Case("wgetch", {{}, {ReturnValueIndex}})
// Propagating functions
- .Case("atoi", TaintPropagationRule({0}, {ReturnValueIndex}))
- .Case("atol", TaintPropagationRule({0}, {ReturnValueIndex}))
- .Case("atoll", TaintPropagationRule({0}, {ReturnValueIndex}))
- .Case("fgetc", TaintPropagationRule({0}, {ReturnValueIndex}))
- .Case("fgetln", TaintPropagationRule({0}, {ReturnValueIndex}))
- .Case("fgets", TaintPropagationRule({2}, {0, ReturnValueIndex}))
- .Case("fscanf", TaintPropagationRule({0}, {}, VariadicType::Dst, 2))
- .Case("getc", TaintPropagationRule({0}, {ReturnValueIndex}))
- .Case("getc_unlocked", TaintPropagationRule({0}, {ReturnValueIndex}))
- .Case("getdelim", TaintPropagationRule({3}, {0}))
- .Case("getline", TaintPropagationRule({2}, {0}))
- .Case("getw", TaintPropagationRule({0}, {ReturnValueIndex}))
- .Case("pread",
- TaintPropagationRule({0, 1, 2, 3}, {1, ReturnValueIndex}))
- .Case("read", TaintPropagationRule({0, 2}, {1, ReturnValueIndex}))
- .Case("strchr", TaintPropagationRule({0}, {ReturnValueIndex}))
- .Case("strrchr", TaintPropagationRule({0}, {ReturnValueIndex}))
- .Case("tolower", TaintPropagationRule({0}, {ReturnValueIndex}))
- .Case("toupper", TaintPropagationRule({0}, {ReturnValueIndex}))
- .Default(TaintPropagationRule());
+ .Case("atoi", {{0}, {ReturnValueIndex}})
+ .Case("atol", {{0}, {ReturnValueIndex}})
+ .Case("atoll", {{0}, {ReturnValueIndex}})
+ .Case("fgetc", {{0}, {ReturnValueIndex}})
+ .Case("fgetln", {{0}, {ReturnValueIndex}})
+ .Case("fgets", {{2}, {0, ReturnValueIndex}})
+ .Case("fscanf", {{0}, {}, VariadicType::Dst, 2})
+ .Case("sscanf", {{0}, {}, VariadicType::Dst, 2})
+ .Case("getc", {{0}, {ReturnValueIndex}})
+ .Case("getc_unlocked", {{0}, {ReturnValueIndex}})
+ .Case("getdelim", {{3}, {0}})
+ .Case("getline", {{2}, {0}})
+ .Case("getw", {{0}, {ReturnValueIndex}})
+ .Case("pread", {{0, 1, 2, 3}, {1, ReturnValueIndex}})
+ .Case("read", {{0, 2}, {1, ReturnValueIndex}})
+ .Case("strchr", {{0}, {ReturnValueIndex}})
+ .Case("strrchr", {{0}, {ReturnValueIndex}})
+ .Case("tolower", {{0}, {ReturnValueIndex}})
+ .Case("toupper", {{0}, {ReturnValueIndex}})
+ .Default({});
if (!Rule.isNull())
return Rule;
+ assert(FData.FDecl);
// Check if it's one of the memory setting/copying functions.
// This check is specialized but faster then calling isCLibraryFunction.
const FunctionDecl *FDecl = FData.FDecl;
unsigned BId = 0;
- if ((BId = FDecl->getMemoryFunctionKind()))
+ if ((BId = FDecl->getMemoryFunctionKind())) {
switch (BId) {
case Builtin::BImemcpy:
case Builtin::BImemmove:
case Builtin::BIstrncpy:
case Builtin::BIstrncat:
- return TaintPropagationRule({1, 2}, {0, ReturnValueIndex});
+ return {{1, 2}, {0, ReturnValueIndex}};
case Builtin::BIstrlcpy:
case Builtin::BIstrlcat:
- return TaintPropagationRule({1, 2}, {0});
+ return {{1, 2}, {0}};
case Builtin::BIstrndup:
- return TaintPropagationRule({0, 1}, {ReturnValueIndex});
+ return {{0, 1}, {ReturnValueIndex}};
default:
break;
- };
+ }
+ }
// Process all other functions which could be defined as builtins.
if (Rule.isNull()) {
- if (C.isCLibraryFunction(FDecl, "snprintf"))
- return TaintPropagationRule({1}, {0, ReturnValueIndex}, VariadicType::Src,
- 3);
- else if (C.isCLibraryFunction(FDecl, "sprintf"))
- return TaintPropagationRule({}, {0, ReturnValueIndex}, VariadicType::Src,
- 2);
- else if (C.isCLibraryFunction(FDecl, "strcpy") ||
- C.isCLibraryFunction(FDecl, "stpcpy") ||
- C.isCLibraryFunction(FDecl, "strcat"))
- return TaintPropagationRule({1}, {0, ReturnValueIndex});
- else if (C.isCLibraryFunction(FDecl, "bcopy"))
- return TaintPropagationRule({0, 2}, {1});
- else if (C.isCLibraryFunction(FDecl, "strdup") ||
- C.isCLibraryFunction(FDecl, "strdupa"))
- return TaintPropagationRule({0}, {ReturnValueIndex});
- else if (C.isCLibraryFunction(FDecl, "wcsdup"))
- return TaintPropagationRule({0}, {ReturnValueIndex});
+ const auto OneOf = [FDecl](const auto &... Name) {
+ // FIXME: use fold expression in C++17
+ using unused = int[];
+ bool ret = false;
+ static_cast<void>(unused{
+ 0, (ret |= CheckerContext::isCLibraryFunction(FDecl, Name), 0)...});
+ return ret;
+ };
+ if (OneOf("snprintf"))
+ return {{1}, {0, ReturnValueIndex}, VariadicType::Src, 3};
+ if (OneOf("sprintf"))
+ return {{}, {0, ReturnValueIndex}, VariadicType::Src, 2};
+ if (OneOf("strcpy", "stpcpy", "strcat"))
+ return {{1}, {0, ReturnValueIndex}};
+ if (OneOf("bcopy"))
+ return {{0, 2}, {1}};
+ if (OneOf("strdup", "strdupa", "wcsdup"))
+ return {{0}, {ReturnValueIndex}};
}
- // Skipping the following functions, since they might be used for cleansing
- // or smart memory copy:
+ // Skipping the following functions, since they might be used for cleansing or
+ // smart memory copy:
// - memccpy - copying until hitting a special character.
auto It = findFunctionInConfig(CustomPropagations, FData);
- if (It != CustomPropagations.end()) {
- const auto &Value = It->second;
- return Value.second;
- }
-
- return TaintPropagationRule();
+ if (It != CustomPropagations.end())
+ return It->second.second;
+ return {};
}
-void GenericTaintChecker::checkPreStmt(const CallExpr *CE,
+void GenericTaintChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
- Optional<FunctionData> FData = FunctionData::create(CE, C);
+ Optional<FunctionData> FData = FunctionData::create(Call, C);
if (!FData)
return;
// Check for taintedness related errors first: system call, uncontrolled
// format string, tainted buffer size.
- if (checkPre(CE, *FData, C))
+ if (checkPre(Call, *FData, C))
return;
// Marks the function's arguments and/or return value tainted if it present in
// the list.
- if (addSourcesPre(CE, *FData, C))
+ if (addSourcesPre(Call, *FData, C))
return;
- addFiltersPre(CE, *FData, C);
+ addFiltersPre(Call, *FData, C);
}
-void GenericTaintChecker::checkPostStmt(const CallExpr *CE,
+void GenericTaintChecker::checkPostCall(const CallEvent &Call,
CheckerContext &C) const {
// Set the marked values as tainted. The return value only accessible from
// checkPostStmt.
- propagateFromPre(CE, C);
+ propagateFromPre(Call, C);
}
void GenericTaintChecker::printState(raw_ostream &Out, ProgramStateRef State,
@@ -545,14 +552,14 @@ void GenericTaintChecker::printState(raw_ostream &Out, ProgramStateRef State,
printTaint(State, Out, NL, Sep);
}
-bool GenericTaintChecker::addSourcesPre(const CallExpr *CE,
+bool GenericTaintChecker::addSourcesPre(const CallEvent &Call,
const FunctionData &FData,
CheckerContext &C) const {
// First, try generating a propagation rule for this function.
TaintPropagationRule Rule = TaintPropagationRule::getTaintPropagationRule(
this->CustomPropagations, FData, C);
if (!Rule.isNull()) {
- ProgramStateRef State = Rule.process(CE, C);
+ ProgramStateRef State = Rule.process(Call, C);
if (State) {
C.addTransition(State);
return true;
@@ -561,7 +568,7 @@ bool GenericTaintChecker::addSourcesPre(const CallExpr *CE,
return false;
}
-bool GenericTaintChecker::addFiltersPre(const CallExpr *CE,
+bool GenericTaintChecker::addFiltersPre(const CallEvent &Call,
const FunctionData &FData,
CheckerContext &C) const {
auto It = findFunctionInConfig(CustomFilters, FData);
@@ -572,11 +579,11 @@ bool GenericTaintChecker::addFiltersPre(const CallExpr *CE,
const auto &Value = It->second;
const ArgVector &Args = Value.second;
for (unsigned ArgNum : Args) {
- if (ArgNum >= CE->getNumArgs())
+ if (ArgNum >= Call.getNumArgs())
continue;
- const Expr *Arg = CE->getArg(ArgNum);
- Optional<SVal> V = getPointedToSVal(C, Arg);
+ const Expr *Arg = Call.getArgExpr(ArgNum);
+ Optional<SVal> V = getPointeeOf(C, Arg);
if (V)
State = removeTaint(State, *V);
}
@@ -588,8 +595,8 @@ bool GenericTaintChecker::addFiltersPre(const CallExpr *CE,
return false;
}
-bool GenericTaintChecker::propagateFromPre(const CallExpr *CE,
- CheckerContext &C) const {
+bool GenericTaintChecker::propagateFromPre(const CallEvent &Call,
+ CheckerContext &C) {
ProgramStateRef State = C.getState();
// Depending on what was tainted at pre-visit, we determined a set of
@@ -602,16 +609,16 @@ bool GenericTaintChecker::propagateFromPre(const CallExpr *CE,
for (unsigned ArgNum : TaintArgs) {
// Special handling for the tainted return value.
if (ArgNum == ReturnValueIndex) {
- State = addTaint(State, CE, C.getLocationContext());
+ State = addTaint(State, Call.getReturnValue());
continue;
}
// The arguments are pointer arguments. The data they are pointing at is
// tainted after the call.
- if (CE->getNumArgs() < (ArgNum + 1))
+ if (Call.getNumArgs() < (ArgNum + 1))
return false;
- const Expr *Arg = CE->getArg(ArgNum);
- Optional<SVal> V = getPointedToSVal(C, Arg);
+ const Expr *Arg = Call.getArgExpr(ArgNum);
+ Optional<SVal> V = getPointeeOf(C, Arg);
if (V)
State = addTaint(State, *V);
}
@@ -626,27 +633,23 @@ bool GenericTaintChecker::propagateFromPre(const CallExpr *CE,
return false;
}
-bool GenericTaintChecker::checkPre(const CallExpr *CE,
+bool GenericTaintChecker::checkPre(const CallEvent &Call,
const FunctionData &FData,
CheckerContext &C) const {
-
- if (checkUncontrolledFormatString(CE, C))
- return true;
-
- if (checkSystemCall(CE, FData.Name, C))
+ if (checkUncontrolledFormatString(Call, C))
return true;
- if (checkTaintedBufferSize(CE, FData.FDecl, C))
+ if (checkSystemCall(Call, FData.Name, C))
return true;
- if (checkCustomSinks(CE, FData, C))
+ if (checkTaintedBufferSize(Call, C))
return true;
- return false;
+ return checkCustomSinks(Call, FData, C);
}
-Optional<SVal> GenericTaintChecker::getPointedToSVal(CheckerContext &C,
- const Expr *Arg) {
+Optional<SVal> GenericTaintChecker::getPointeeOf(CheckerContext &C,
+ const Expr *Arg) {
ProgramStateRef State = C.getState();
SVal AddrVal = C.getSVal(Arg->IgnoreParens());
if (AddrVal.isUnknownOrUndef())
@@ -671,31 +674,33 @@ Optional<SVal> GenericTaintChecker::getPointedToSVal(CheckerContext &C,
}
ProgramStateRef
-GenericTaintChecker::TaintPropagationRule::process(const CallExpr *CE,
+GenericTaintChecker::TaintPropagationRule::process(const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
// Check for taint in arguments.
bool IsTainted = true;
for (unsigned ArgNum : SrcArgs) {
- if (ArgNum >= CE->getNumArgs())
+ if (ArgNum >= Call.getNumArgs())
continue;
- if ((IsTainted = isTaintedOrPointsToTainted(CE->getArg(ArgNum), State, C)))
+ if ((IsTainted =
+ isTaintedOrPointsToTainted(Call.getArgExpr(ArgNum), State, C)))
break;
}
// Check for taint in variadic arguments.
if (!IsTainted && VariadicType::Src == VarType) {
// Check if any of the arguments is tainted
- for (unsigned i = VariadicIndex; i < CE->getNumArgs(); ++i) {
- if ((IsTainted = isTaintedOrPointsToTainted(CE->getArg(i), State, C)))
+ for (unsigned i = VariadicIndex; i < Call.getNumArgs(); ++i) {
+ if ((IsTainted =
+ isTaintedOrPointsToTainted(Call.getArgExpr(i), State, C)))
break;
}
}
if (PropagationFunc)
- IsTainted = PropagationFunc(IsTainted, CE, C);
+ IsTainted = PropagationFunc(IsTainted, Call, C);
if (!IsTainted)
return State;
@@ -708,7 +713,7 @@ GenericTaintChecker::TaintPropagationRule::process(const CallExpr *CE,
continue;
}
- if (ArgNum >= CE->getNumArgs())
+ if (ArgNum >= Call.getNumArgs())
continue;
// Mark the given argument.
@@ -721,14 +726,15 @@ GenericTaintChecker::TaintPropagationRule::process(const CallExpr *CE,
// If they are not pointing to const data, mark data as tainted.
// TODO: So far we are just going one level down; ideally we'd need to
// recurse here.
- for (unsigned i = VariadicIndex; i < CE->getNumArgs(); ++i) {
- const Expr *Arg = CE->getArg(i);
+ for (unsigned i = VariadicIndex; i < Call.getNumArgs(); ++i) {
+ const Expr *Arg = Call.getArgExpr(i);
// Process pointer argument.
const Type *ArgTy = Arg->getType().getTypePtr();
QualType PType = ArgTy->getPointeeType();
if ((!PType.isNull() && !PType.isConstQualified()) ||
- (ArgTy->isReferenceType() && !Arg->getType().isConstQualified()))
+ (ArgTy->isReferenceType() && !Arg->getType().isConstQualified())) {
State = State->add<TaintArgsOnPostVisit>(i);
+ }
}
}
@@ -736,16 +742,14 @@ GenericTaintChecker::TaintPropagationRule::process(const CallExpr *CE,
}
// If argument 0(protocol domain) is network, the return value should get taint.
-bool GenericTaintChecker::TaintPropagationRule::postSocket(bool /*IsTainted*/,
- const CallExpr *CE,
- CheckerContext &C) {
- SourceLocation DomLoc = CE->getArg(0)->getExprLoc();
+bool GenericTaintChecker::TaintPropagationRule::postSocket(
+ bool /*IsTainted*/, const CallEvent &Call, CheckerContext &C) {
+ SourceLocation DomLoc = Call.getArgExpr(0)->getExprLoc();
StringRef DomName = C.getMacroNameOrSpelling(DomLoc);
// White list the internal communication protocols.
if (DomName.equals("AF_SYSTEM") || DomName.equals("AF_LOCAL") ||
DomName.equals("AF_UNIX") || DomName.equals("AF_RESERVED_36"))
return false;
-
return true;
}
@@ -757,16 +761,15 @@ bool GenericTaintChecker::isStdin(const Expr *E, CheckerContext &C) {
const MemRegion *MemReg = Val.getAsRegion();
// The region should be symbolic, we do not know it's value.
- const SymbolicRegion *SymReg = dyn_cast_or_null<SymbolicRegion>(MemReg);
+ const auto *SymReg = dyn_cast_or_null<SymbolicRegion>(MemReg);
if (!SymReg)
return false;
// Get it's symbol and find the declaration region it's pointing to.
- const SymbolRegionValue *Sm =
- dyn_cast<SymbolRegionValue>(SymReg->getSymbol());
+ const auto *Sm = dyn_cast<SymbolRegionValue>(SymReg->getSymbol());
if (!Sm)
return false;
- const DeclRegion *DeclReg = dyn_cast_or_null<DeclRegion>(Sm->getRegion());
+ const auto *DeclReg = dyn_cast_or_null<DeclRegion>(Sm->getRegion());
if (!DeclReg)
return false;
@@ -784,23 +787,24 @@ bool GenericTaintChecker::isStdin(const Expr *E, CheckerContext &C) {
return false;
}
-static bool getPrintfFormatArgumentNum(const CallExpr *CE,
+static bool getPrintfFormatArgumentNum(const CallEvent &Call,
const CheckerContext &C,
unsigned &ArgNum) {
// Find if the function contains a format string argument.
// Handles: fprintf, printf, sprintf, snprintf, vfprintf, vprintf, vsprintf,
// vsnprintf, syslog, custom annotated functions.
- const FunctionDecl *FDecl = C.getCalleeDecl(CE);
+ const FunctionDecl *FDecl = Call.getDecl()->getAsFunction();
if (!FDecl)
return false;
for (const auto *Format : FDecl->specific_attrs<FormatAttr>()) {
ArgNum = Format->getFormatIdx() - 1;
- if ((Format->getType()->getName() == "printf") && CE->getNumArgs() > ArgNum)
+ if ((Format->getType()->getName() == "printf") &&
+ Call.getNumArgs() > ArgNum)
return true;
}
// Or if a function is named setproctitle (this is a heuristic).
- if (C.getCalleeName(CE).find("setproctitle") != StringRef::npos) {
+ if (C.getCalleeName(FDecl).find("setproctitle") != StringRef::npos) {
ArgNum = 0;
return true;
}
@@ -814,7 +818,7 @@ bool GenericTaintChecker::generateReportIfTainted(const Expr *E, StringRef Msg,
// Check for taint.
ProgramStateRef State = C.getState();
- Optional<SVal> PointedToSVal = getPointedToSVal(C, E);
+ Optional<SVal> PointedToSVal = getPointeeOf(C, E);
SVal TaintedSVal;
if (PointedToSVal && isTainted(State, *PointedToSVal))
TaintedSVal = *PointedToSVal;
@@ -836,19 +840,19 @@ bool GenericTaintChecker::generateReportIfTainted(const Expr *E, StringRef Msg,
}
bool GenericTaintChecker::checkUncontrolledFormatString(
- const CallExpr *CE, CheckerContext &C) const {
+ const CallEvent &Call, CheckerContext &C) const {
// Check if the function contains a format string argument.
unsigned ArgNum = 0;
- if (!getPrintfFormatArgumentNum(CE, C, ArgNum))
+ if (!getPrintfFormatArgumentNum(Call, C, ArgNum))
return false;
// If either the format string content or the pointer itself are tainted,
// warn.
- return generateReportIfTainted(CE->getArg(ArgNum),
+ return generateReportIfTainted(Call.getArgExpr(ArgNum),
MsgUncontrolledFormatString, C);
}
-bool GenericTaintChecker::checkSystemCall(const CallExpr *CE, StringRef Name,
+bool GenericTaintChecker::checkSystemCall(const CallEvent &Call, StringRef Name,
CheckerContext &C) const {
// TODO: It might make sense to run this check on demand. In some cases,
// we should check if the environment has been cleansed here. We also might
@@ -866,21 +870,22 @@ bool GenericTaintChecker::checkSystemCall(const CallExpr *CE, StringRef Name,
.Case("dlopen", 0)
.Default(InvalidArgIndex);
- if (ArgNum == InvalidArgIndex || CE->getNumArgs() < (ArgNum + 1))
+ if (ArgNum == InvalidArgIndex || Call.getNumArgs() < (ArgNum + 1))
return false;
- return generateReportIfTainted(CE->getArg(ArgNum), MsgSanitizeSystemArgs, C);
+ return generateReportIfTainted(Call.getArgExpr(ArgNum), MsgSanitizeSystemArgs,
+ C);
}
// TODO: Should this check be a part of the CString checker?
// If yes, should taint be a global setting?
-bool GenericTaintChecker::checkTaintedBufferSize(const CallExpr *CE,
- const FunctionDecl *FDecl,
+bool GenericTaintChecker::checkTaintedBufferSize(const CallEvent &Call,
CheckerContext &C) const {
+ const auto *FDecl = Call.getDecl()->getAsFunction();
// If the function has a buffer size argument, set ArgNum.
unsigned ArgNum = InvalidArgIndex;
unsigned BId = 0;
- if ((BId = FDecl->getMemoryFunctionKind()))
+ if ((BId = FDecl->getMemoryFunctionKind())) {
switch (BId) {
case Builtin::BImemcpy:
case Builtin::BImemmove:
@@ -892,26 +897,29 @@ bool GenericTaintChecker::checkTaintedBufferSize(const CallExpr *CE,
break;
default:
break;
- };
+ }
+ }
if (ArgNum == InvalidArgIndex) {
- if (C.isCLibraryFunction(FDecl, "malloc") ||
- C.isCLibraryFunction(FDecl, "calloc") ||
- C.isCLibraryFunction(FDecl, "alloca"))
+ using CCtx = CheckerContext;
+ if (CCtx::isCLibraryFunction(FDecl, "malloc") ||
+ CCtx::isCLibraryFunction(FDecl, "calloc") ||
+ CCtx::isCLibraryFunction(FDecl, "alloca"))
ArgNum = 0;
- else if (C.isCLibraryFunction(FDecl, "memccpy"))
+ else if (CCtx::isCLibraryFunction(FDecl, "memccpy"))
ArgNum = 3;
- else if (C.isCLibraryFunction(FDecl, "realloc"))
+ else if (CCtx::isCLibraryFunction(FDecl, "realloc"))
ArgNum = 1;
- else if (C.isCLibraryFunction(FDecl, "bcopy"))
+ else if (CCtx::isCLibraryFunction(FDecl, "bcopy"))
ArgNum = 2;
}
- return ArgNum != InvalidArgIndex && CE->getNumArgs() > ArgNum &&
- generateReportIfTainted(CE->getArg(ArgNum), MsgTaintedBufferSize, C);
+ return ArgNum != InvalidArgIndex && Call.getNumArgs() > ArgNum &&
+ generateReportIfTainted(Call.getArgExpr(ArgNum), MsgTaintedBufferSize,
+ C);
}
-bool GenericTaintChecker::checkCustomSinks(const CallExpr *CE,
+bool GenericTaintChecker::checkCustomSinks(const CallEvent &Call,
const FunctionData &FData,
CheckerContext &C) const {
auto It = findFunctionInConfig(CustomSinks, FData);
@@ -921,10 +929,10 @@ bool GenericTaintChecker::checkCustomSinks(const CallExpr *CE,
const auto &Value = It->second;
const GenericTaintChecker::ArgVector &Args = Value.second;
for (unsigned ArgNum : Args) {
- if (ArgNum >= CE->getNumArgs())
+ if (ArgNum >= Call.getNumArgs())
continue;
- if (generateReportIfTainted(CE->getArg(ArgNum), MsgCustomSink, C))
+ if (generateReportIfTainted(Call.getArgExpr(ArgNum), MsgCustomSink, C))
return true;
}
@@ -942,6 +950,6 @@ void ento::registerGenericTaintChecker(CheckerManager &Mgr) {
Checker->parseConfiguration(Mgr, Option, std::move(Config.getValue()));
}
-bool ento::shouldRegisterGenericTaintChecker(const LangOptions &LO) {
+bool ento::shouldRegisterGenericTaintChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
index cc2cfb774227..1cf81b54e77d 100644
--- a/clang/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
@@ -351,6 +351,8 @@ static bool isIdenticalStmt(const ASTContext &Ctx, const Stmt *Stmt1,
case Stmt::CallExprClass:
case Stmt::ArraySubscriptExprClass:
case Stmt::OMPArraySectionExprClass:
+ case Stmt::OMPArrayShapingExprClass:
+ case Stmt::OMPIteratorExprClass:
case Stmt::ImplicitCastExprClass:
case Stmt::ParenExprClass:
case Stmt::BreakStmtClass:
@@ -513,6 +515,6 @@ void ento::registerIdenticalExprChecker(CheckerManager &Mgr) {
Mgr.registerChecker<FindIdenticalExprChecker>();
}
-bool ento::shouldRegisterIdenticalExprChecker(const LangOptions &LO) {
+bool ento::shouldRegisterIdenticalExprChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
index dd89c53478e8..65e52e139ee4 100644
--- a/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
@@ -307,6 +307,6 @@ void ento::registerInnerPointerChecker(CheckerManager &Mgr) {
Mgr.registerChecker<InnerPointerChecker>();
}
-bool ento::shouldRegisterInnerPointerChecker(const LangOptions &LO) {
+bool ento::shouldRegisterInnerPointerChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h b/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h
index 9642588d6a41..99731d6044a0 100644
--- a/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h
+++ b/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h
@@ -11,13 +11,19 @@
#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_INTERCHECKERAPI_H
#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_INTERCHECKERAPI_H
-namespace clang {
-class CheckerManager;
+// FIXME: This file goes against how a checker should be implemented either in
+// a single file, or be exposed in a header file. Let's try to get rid of it!
+
+namespace clang {
namespace ento {
+class CheckerManager;
+
/// Register the part of MallocChecker connected to InnerPointerChecker.
void registerInnerPointerCheckerAux(CheckerManager &Mgr);
-}}
+} // namespace ento
+} // namespace clang
+
#endif /* INTERCHECKERAPI_H_ */
diff --git a/clang/lib/StaticAnalyzer/Checkers/InvalidatedIteratorChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/InvalidatedIteratorChecker.cpp
index d1a9a7df071d..6955ba11a28f 100644
--- a/clang/lib/StaticAnalyzer/Checkers/InvalidatedIteratorChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/InvalidatedIteratorChecker.cpp
@@ -26,7 +26,10 @@ using namespace iterator;
namespace {
class InvalidatedIteratorChecker
- : public Checker<check::PreCall> {
+ : public Checker<check::PreCall, check::PreStmt<UnaryOperator>,
+ check::PreStmt<BinaryOperator>,
+ check::PreStmt<ArraySubscriptExpr>,
+ check::PreStmt<MemberExpr>> {
std::unique_ptr<BugType> InvalidatedBugType;
@@ -37,6 +40,10 @@ public:
InvalidatedIteratorChecker();
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkPreStmt(const UnaryOperator *UO, CheckerContext &C) const;
+ void checkPreStmt(const BinaryOperator *BO, CheckerContext &C) const;
+ void checkPreStmt(const ArraySubscriptExpr *ASE, CheckerContext &C) const;
+ void checkPreStmt(const MemberExpr *ME, CheckerContext &C) const;
};
@@ -65,6 +72,48 @@ void InvalidatedIteratorChecker::checkPreCall(const CallEvent &Call,
}
}
+void InvalidatedIteratorChecker::checkPreStmt(const UnaryOperator *UO,
+ CheckerContext &C) const {
+ if (isa<CXXThisExpr>(UO->getSubExpr()))
+ return;
+
+ ProgramStateRef State = C.getState();
+ UnaryOperatorKind OK = UO->getOpcode();
+ SVal SubVal = State->getSVal(UO->getSubExpr(), C.getLocationContext());
+
+ if (isAccessOperator(OK)) {
+ verifyAccess(C, SubVal);
+ }
+}
+
+void InvalidatedIteratorChecker::checkPreStmt(const BinaryOperator *BO,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ BinaryOperatorKind OK = BO->getOpcode();
+ SVal LVal = State->getSVal(BO->getLHS(), C.getLocationContext());
+
+ if (isAccessOperator(OK)) {
+ verifyAccess(C, LVal);
+ }
+}
+
+void InvalidatedIteratorChecker::checkPreStmt(const ArraySubscriptExpr *ASE,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SVal LVal = State->getSVal(ASE->getLHS(), C.getLocationContext());
+ verifyAccess(C, LVal);
+}
+
+void InvalidatedIteratorChecker::checkPreStmt(const MemberExpr *ME,
+ CheckerContext &C) const {
+ if (!ME->isArrow() || ME->isImplicitAccess())
+ return;
+
+ ProgramStateRef State = C.getState();
+ SVal BaseVal = State->getSVal(ME->getBase(), C.getLocationContext());
+ verifyAccess(C, BaseVal);
+}
+
void InvalidatedIteratorChecker::verifyAccess(CheckerContext &C, const SVal &Val) const {
auto State = C.getState();
const auto *Pos = getIteratorPosition(State, Val);
@@ -90,6 +139,6 @@ void ento::registerInvalidatedIteratorChecker(CheckerManager &mgr) {
mgr.registerChecker<InvalidatedIteratorChecker>();
}
-bool ento::shouldRegisterInvalidatedIteratorChecker(const LangOptions &LO) {
+bool ento::shouldRegisterInvalidatedIteratorChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp b/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp
index 6bca5515724c..ac0f24603dd9 100644
--- a/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp
@@ -128,24 +128,54 @@ bool isAccessOperator(OverloadedOperatorKind OK) {
isDecrementOperator(OK) || isRandomIncrOrDecrOperator(OK);
}
+bool isAccessOperator(UnaryOperatorKind OK) {
+ return isDereferenceOperator(OK) || isIncrementOperator(OK) ||
+ isDecrementOperator(OK);
+}
+
+bool isAccessOperator(BinaryOperatorKind OK) {
+ return isDereferenceOperator(OK) || isRandomIncrOrDecrOperator(OK);
+}
+
bool isDereferenceOperator(OverloadedOperatorKind OK) {
return OK == OO_Star || OK == OO_Arrow || OK == OO_ArrowStar ||
OK == OO_Subscript;
}
+bool isDereferenceOperator(UnaryOperatorKind OK) {
+ return OK == UO_Deref;
+}
+
+bool isDereferenceOperator(BinaryOperatorKind OK) {
+ return OK == BO_PtrMemI;
+}
+
bool isIncrementOperator(OverloadedOperatorKind OK) {
return OK == OO_PlusPlus;
}
+bool isIncrementOperator(UnaryOperatorKind OK) {
+ return OK == UO_PreInc || OK == UO_PostInc;
+}
+
bool isDecrementOperator(OverloadedOperatorKind OK) {
return OK == OO_MinusMinus;
}
+bool isDecrementOperator(UnaryOperatorKind OK) {
+ return OK == UO_PreDec || OK == UO_PostDec;
+}
+
bool isRandomIncrOrDecrOperator(OverloadedOperatorKind OK) {
return OK == OO_Plus || OK == OO_PlusEqual || OK == OO_Minus ||
OK == OO_MinusEqual;
}
+bool isRandomIncrOrDecrOperator(BinaryOperatorKind OK) {
+ return OK == BO_Add || OK == BO_AddAssign ||
+ OK == BO_Sub || OK == BO_SubAssign;
+}
+
const ContainerData *getContainerData(ProgramStateRef State,
const MemRegion *Cont) {
return State->get<ContainerMap>(Cont);
@@ -177,6 +207,20 @@ ProgramStateRef setIteratorPosition(ProgramStateRef State, const SVal &Val,
return nullptr;
}
+ProgramStateRef createIteratorPosition(ProgramStateRef State, const SVal &Val,
+ const MemRegion *Cont, const Stmt* S,
+ const LocationContext *LCtx,
+ unsigned blockCount) {
+ auto &StateMgr = State->getStateManager();
+ auto &SymMgr = StateMgr.getSymbolManager();
+ auto &ACtx = StateMgr.getContext();
+
+ auto Sym = SymMgr.conjureSymbol(S, LCtx, ACtx.LongTy, blockCount);
+ State = assumeNoOverflow(State, Sym, 4);
+ return setIteratorPosition(State, Val,
+ IteratorPosition::getPosition(Cont, Sym));
+}
+
ProgramStateRef advancePosition(ProgramStateRef State, const SVal &Iter,
OverloadedOperatorKind Op,
const SVal &Distance) {
@@ -186,22 +230,70 @@ ProgramStateRef advancePosition(ProgramStateRef State, const SVal &Iter,
auto &SymMgr = State->getStateManager().getSymbolManager();
auto &SVB = State->getStateManager().getSValBuilder();
+ auto &BVF = State->getStateManager().getBasicVals();
assert ((Op == OO_Plus || Op == OO_PlusEqual ||
Op == OO_Minus || Op == OO_MinusEqual) &&
"Advance operator must be one of +, -, += and -=.");
auto BinOp = (Op == OO_Plus || Op == OO_PlusEqual) ? BO_Add : BO_Sub;
- if (const auto IntDist = Distance.getAs<nonloc::ConcreteInt>()) {
- // For concrete integers we can calculate the new position
- const auto NewPos =
- Pos->setTo(SVB.evalBinOp(State, BinOp,
- nonloc::SymbolVal(Pos->getOffset()),
- *IntDist, SymMgr.getType(Pos->getOffset()))
- .getAsSymbol());
- return setIteratorPosition(State, Iter, NewPos);
+ const auto IntDistOp = Distance.getAs<nonloc::ConcreteInt>();
+ if (!IntDistOp)
+ return nullptr;
+
+ // For concrete integers we can calculate the new position
+ nonloc::ConcreteInt IntDist = *IntDistOp;
+
+ if (IntDist.getValue().isNegative()) {
+ IntDist = nonloc::ConcreteInt(BVF.getValue(-IntDist.getValue()));
+ BinOp = (BinOp == BO_Add) ? BO_Sub : BO_Add;
}
+ const auto NewPos =
+ Pos->setTo(SVB.evalBinOp(State, BinOp,
+ nonloc::SymbolVal(Pos->getOffset()),
+ IntDist, SymMgr.getType(Pos->getOffset()))
+ .getAsSymbol());
+ return setIteratorPosition(State, Iter, NewPos);
+}
- return nullptr;
+// This function tells the analyzer's engine that symbols produced by our
+// checker, most notably iterator positions, are relatively small.
+// A distance between items in the container should not be very large.
+// By assuming that it is within around 1/8 of the address space,
+// we can help the analyzer perform operations on these symbols
+// without being afraid of integer overflows.
+// FIXME: Should we provide it as an API, so that all checkers could use it?
+ProgramStateRef assumeNoOverflow(ProgramStateRef State, SymbolRef Sym,
+ long Scale) {
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
+ BasicValueFactory &BV = SVB.getBasicValueFactory();
+
+ QualType T = Sym->getType();
+ assert(T->isSignedIntegerOrEnumerationType());
+ APSIntType AT = BV.getAPSIntType(T);
+
+ ProgramStateRef NewState = State;
+
+ llvm::APSInt Max = AT.getMaxValue() / AT.getValue(Scale);
+ SVal IsCappedFromAbove =
+ SVB.evalBinOpNN(State, BO_LE, nonloc::SymbolVal(Sym),
+ nonloc::ConcreteInt(Max), SVB.getConditionType());
+ if (auto DV = IsCappedFromAbove.getAs<DefinedSVal>()) {
+ NewState = NewState->assume(*DV, true);
+ if (!NewState)
+ return State;
+ }
+
+ llvm::APSInt Min = -Max;
+ SVal IsCappedFromBelow =
+ SVB.evalBinOpNN(State, BO_GE, nonloc::SymbolVal(Sym),
+ nonloc::ConcreteInt(Min), SVB.getConditionType());
+ if (auto DV = IsCappedFromBelow.getAs<DefinedSVal>()) {
+ NewState = NewState->assume(*DV, true);
+ if (!NewState)
+ return State;
+ }
+
+ return NewState;
}
bool compare(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2,
diff --git a/clang/lib/StaticAnalyzer/Checkers/Iterator.h b/clang/lib/StaticAnalyzer/Checkers/Iterator.h
index c10d86691693..37157492fe3e 100644
--- a/clang/lib/StaticAnalyzer/Checkers/Iterator.h
+++ b/clang/lib/StaticAnalyzer/Checkers/Iterator.h
@@ -115,9 +115,12 @@ class IteratorSymbolMap {};
class IteratorRegionMap {};
class ContainerMap {};
-using IteratorSymbolMapTy = CLANG_ENTO_PROGRAMSTATE_MAP(SymbolRef, IteratorPosition);
-using IteratorRegionMapTy = CLANG_ENTO_PROGRAMSTATE_MAP(const MemRegion *, IteratorPosition);
-using ContainerMapTy = CLANG_ENTO_PROGRAMSTATE_MAP(const MemRegion *, ContainerData);
+using IteratorSymbolMapTy =
+ CLANG_ENTO_PROGRAMSTATE_MAP(SymbolRef, IteratorPosition);
+using IteratorRegionMapTy =
+ CLANG_ENTO_PROGRAMSTATE_MAP(const MemRegion *, IteratorPosition);
+using ContainerMapTy =
+ CLANG_ENTO_PROGRAMSTATE_MAP(const MemRegion *, ContainerData);
} // namespace iterator
@@ -149,20 +152,33 @@ bool isEraseCall(const FunctionDecl *Func);
bool isEraseAfterCall(const FunctionDecl *Func);
bool isEmplaceCall(const FunctionDecl *Func);
bool isAccessOperator(OverloadedOperatorKind OK);
+bool isAccessOperator(UnaryOperatorKind OK);
+bool isAccessOperator(BinaryOperatorKind OK);
bool isDereferenceOperator(OverloadedOperatorKind OK);
+bool isDereferenceOperator(UnaryOperatorKind OK);
+bool isDereferenceOperator(BinaryOperatorKind OK);
bool isIncrementOperator(OverloadedOperatorKind OK);
+bool isIncrementOperator(UnaryOperatorKind OK);
bool isDecrementOperator(OverloadedOperatorKind OK);
+bool isDecrementOperator(UnaryOperatorKind OK);
bool isRandomIncrOrDecrOperator(OverloadedOperatorKind OK);
+bool isRandomIncrOrDecrOperator(BinaryOperatorKind OK);
const ContainerData *getContainerData(ProgramStateRef State,
const MemRegion *Cont);
const IteratorPosition *getIteratorPosition(ProgramStateRef State,
const SVal &Val);
ProgramStateRef setIteratorPosition(ProgramStateRef State, const SVal &Val,
const IteratorPosition &Pos);
+ProgramStateRef createIteratorPosition(ProgramStateRef State, const SVal &Val,
+ const MemRegion *Cont, const Stmt* S,
+ const LocationContext *LCtx,
+ unsigned blockCount);
ProgramStateRef advancePosition(ProgramStateRef State,
const SVal &Iter,
OverloadedOperatorKind Op,
const SVal &Distance);
+ProgramStateRef assumeNoOverflow(ProgramStateRef State, SymbolRef Sym,
+ long Scale);
bool compare(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2,
BinaryOperator::Opcode Opc);
bool compare(ProgramStateRef State, NonLoc NL1, NonLoc NL2,
diff --git a/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp b/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
index eb962a2ffd9e..fd8cbd694b24 100644
--- a/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
@@ -6,8 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// Defines a checker for using iterators outside their range (past end). Usage
-// means here dereferencing, incrementing etc.
+// Defines a modeling-checker for modeling STL iterator-like iterators.
//
//===----------------------------------------------------------------------===//
//
@@ -84,9 +83,20 @@ using namespace iterator;
namespace {
class IteratorModeling
- : public Checker<check::PostCall, check::PostStmt<MaterializeTemporaryExpr>,
+ : public Checker<check::PostCall, check::PostStmt<UnaryOperator>,
+ check::PostStmt<BinaryOperator>,
+ check::PostStmt<MaterializeTemporaryExpr>,
check::Bind, check::LiveSymbols, check::DeadSymbols> {
+ using AdvanceFn = void (IteratorModeling::*)(CheckerContext &, const Expr *,
+ SVal, SVal, SVal) const;
+
+ void handleOverloadedOperator(CheckerContext &C, const CallEvent &Call,
+ OverloadedOperatorKind Op) const;
+ void handleAdvanceLikeFunction(CheckerContext &C, const CallEvent &Call,
+ const Expr *OrigExpr,
+ const AdvanceFn *Handler) const;
+
void handleComparison(CheckerContext &C, const Expr *CE, SVal RetVal,
const SVal &LVal, const SVal &RVal,
OverloadedOperatorKind Op) const;
@@ -100,35 +110,46 @@ class IteratorModeling
void handleRandomIncrOrDecr(CheckerContext &C, const Expr *CE,
OverloadedOperatorKind Op, const SVal &RetVal,
const SVal &LHS, const SVal &RHS) const;
- void handleBegin(CheckerContext &C, const Expr *CE, const SVal &RetVal,
- const SVal &Cont) const;
- void handleEnd(CheckerContext &C, const Expr *CE, const SVal &RetVal,
- const SVal &Cont) const;
+ void handlePtrIncrOrDecr(CheckerContext &C, const Expr *Iterator,
+ OverloadedOperatorKind OK, SVal Offset) const;
+ void handleAdvance(CheckerContext &C, const Expr *CE, SVal RetVal, SVal Iter,
+ SVal Amount) const;
+ void handlePrev(CheckerContext &C, const Expr *CE, SVal RetVal, SVal Iter,
+ SVal Amount) const;
+ void handleNext(CheckerContext &C, const Expr *CE, SVal RetVal, SVal Iter,
+ SVal Amount) const;
void assignToContainer(CheckerContext &C, const Expr *CE, const SVal &RetVal,
const MemRegion *Cont) const;
- void handleAssign(CheckerContext &C, const SVal &Cont,
- const Expr *CE = nullptr,
- const SVal &OldCont = UndefinedVal()) const;
- void handleClear(CheckerContext &C, const SVal &Cont) const;
- void handlePushBack(CheckerContext &C, const SVal &Cont) const;
- void handlePopBack(CheckerContext &C, const SVal &Cont) const;
- void handlePushFront(CheckerContext &C, const SVal &Cont) const;
- void handlePopFront(CheckerContext &C, const SVal &Cont) const;
- void handleInsert(CheckerContext &C, const SVal &Iter) const;
- void handleErase(CheckerContext &C, const SVal &Iter) const;
- void handleErase(CheckerContext &C, const SVal &Iter1,
- const SVal &Iter2) const;
- void handleEraseAfter(CheckerContext &C, const SVal &Iter) const;
- void handleEraseAfter(CheckerContext &C, const SVal &Iter1,
- const SVal &Iter2) const;
+ bool noChangeInAdvance(CheckerContext &C, SVal Iter, const Expr *CE) const;
void printState(raw_ostream &Out, ProgramStateRef State, const char *NL,
const char *Sep) const override;
+ // std::advance, std::prev & std::next
+ CallDescriptionMap<AdvanceFn> AdvanceLikeFunctions = {
+ // template<class InputIt, class Distance>
+ // void advance(InputIt& it, Distance n);
+ {{{"std", "advance"}, 2}, &IteratorModeling::handleAdvance},
+
+ // template<class BidirIt>
+ // BidirIt prev(
+ // BidirIt it,
+ // typename std::iterator_traits<BidirIt>::difference_type n = 1);
+ {{{"std", "prev"}, 2}, &IteratorModeling::handlePrev},
+
+ // template<class ForwardIt>
+ // ForwardIt next(
+ // ForwardIt it,
+ // typename std::iterator_traits<ForwardIt>::difference_type n = 1);
+ {{{"std", "next"}, 2}, &IteratorModeling::handleNext},
+ };
+
public:
- IteratorModeling() {}
+ IteratorModeling() = default;
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &C) const;
+ void checkPostStmt(const UnaryOperator *UO, CheckerContext &C) const;
+ void checkPostStmt(const BinaryOperator *BO, CheckerContext &C) const;
void checkPostStmt(const CXXConstructExpr *CCE, CheckerContext &C) const;
void checkPostStmt(const DeclStmt *DS, CheckerContext &C) const;
void checkPostStmt(const MaterializeTemporaryExpr *MTE,
@@ -137,68 +158,14 @@ public:
void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
};
-bool isBeginCall(const FunctionDecl *Func);
-bool isEndCall(const FunctionDecl *Func);
-bool isAssignCall(const FunctionDecl *Func);
-bool isClearCall(const FunctionDecl *Func);
-bool isPushBackCall(const FunctionDecl *Func);
-bool isEmplaceBackCall(const FunctionDecl *Func);
-bool isPopBackCall(const FunctionDecl *Func);
-bool isPushFrontCall(const FunctionDecl *Func);
-bool isEmplaceFrontCall(const FunctionDecl *Func);
-bool isPopFrontCall(const FunctionDecl *Func);
-bool isAssignmentOperator(OverloadedOperatorKind OK);
bool isSimpleComparisonOperator(OverloadedOperatorKind OK);
-bool hasSubscriptOperator(ProgramStateRef State, const MemRegion *Reg);
-bool frontModifiable(ProgramStateRef State, const MemRegion *Reg);
-bool backModifiable(ProgramStateRef State, const MemRegion *Reg);
-SymbolRef getContainerBegin(ProgramStateRef State, const MemRegion *Cont);
-SymbolRef getContainerEnd(ProgramStateRef State, const MemRegion *Cont);
-ProgramStateRef createContainerBegin(ProgramStateRef State,
- const MemRegion *Cont, const Expr *E,
- QualType T, const LocationContext *LCtx,
- unsigned BlockCount);
-ProgramStateRef createContainerEnd(ProgramStateRef State, const MemRegion *Cont,
- const Expr *E, QualType T,
- const LocationContext *LCtx,
- unsigned BlockCount);
-ProgramStateRef setContainerData(ProgramStateRef State, const MemRegion *Cont,
- const ContainerData &CData);
+bool isSimpleComparisonOperator(BinaryOperatorKind OK);
ProgramStateRef removeIteratorPosition(ProgramStateRef State, const SVal &Val);
-ProgramStateRef assumeNoOverflow(ProgramStateRef State, SymbolRef Sym,
- long Scale);
-ProgramStateRef invalidateAllIteratorPositions(ProgramStateRef State,
- const MemRegion *Cont);
-ProgramStateRef
-invalidateAllIteratorPositionsExcept(ProgramStateRef State,
- const MemRegion *Cont, SymbolRef Offset,
- BinaryOperator::Opcode Opc);
-ProgramStateRef invalidateIteratorPositions(ProgramStateRef State,
- SymbolRef Offset,
- BinaryOperator::Opcode Opc);
-ProgramStateRef invalidateIteratorPositions(ProgramStateRef State,
- SymbolRef Offset1,
- BinaryOperator::Opcode Opc1,
- SymbolRef Offset2,
- BinaryOperator::Opcode Opc2);
-ProgramStateRef reassignAllIteratorPositions(ProgramStateRef State,
- const MemRegion *Cont,
- const MemRegion *NewCont);
-ProgramStateRef reassignAllIteratorPositionsUnless(ProgramStateRef State,
- const MemRegion *Cont,
- const MemRegion *NewCont,
- SymbolRef Offset,
- BinaryOperator::Opcode Opc);
-ProgramStateRef rebaseSymbolInIteratorPositionsIf(
- ProgramStateRef State, SValBuilder &SVB, SymbolRef OldSym,
- SymbolRef NewSym, SymbolRef CondSym, BinaryOperator::Opcode Opc);
ProgramStateRef relateSymbols(ProgramStateRef State, SymbolRef Sym1,
SymbolRef Sym2, bool Equal);
-SymbolRef rebaseSymbol(ProgramStateRef State, SValBuilder &SVB, SymbolRef Expr,
- SymbolRef OldSym, SymbolRef NewSym);
-bool hasLiveIterators(ProgramStateRef State, const MemRegion *Cont);
bool isBoundThroughLazyCompoundVal(const Environment &Env,
const MemRegion *Reg);
+const ExplodedNode *findCallEnter(const ExplodedNode *Node, const Expr *Call);
} // namespace
@@ -211,189 +178,57 @@ void IteratorModeling::checkPostCall(const CallEvent &Call,
if (Func->isOverloadedOperator()) {
const auto Op = Func->getOverloadedOperator();
- if (isAssignmentOperator(Op)) {
- // Overloaded 'operator=' must be a non-static member function.
- const auto *InstCall = cast<CXXInstanceCall>(&Call);
- if (cast<CXXMethodDecl>(Func)->isMoveAssignmentOperator()) {
- handleAssign(C, InstCall->getCXXThisVal(), Call.getOriginExpr(),
- Call.getArgSVal(0));
- return;
- }
-
- handleAssign(C, InstCall->getCXXThisVal());
- return;
- } else if (isSimpleComparisonOperator(Op)) {
- const auto *OrigExpr = Call.getOriginExpr();
- if (!OrigExpr)
- return;
-
- if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
- handleComparison(C, OrigExpr, Call.getReturnValue(),
- InstCall->getCXXThisVal(), Call.getArgSVal(0), Op);
- return;
- }
-
- handleComparison(C, OrigExpr, Call.getReturnValue(), Call.getArgSVal(0),
- Call.getArgSVal(1), Op);
- return;
- } else if (isRandomIncrOrDecrOperator(Func->getOverloadedOperator())) {
- const auto *OrigExpr = Call.getOriginExpr();
- if (!OrigExpr)
- return;
-
- if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
- if (Call.getNumArgs() >= 1 &&
- Call.getArgExpr(0)->getType()->isIntegralOrEnumerationType()) {
- handleRandomIncrOrDecr(C, OrigExpr, Func->getOverloadedOperator(),
- Call.getReturnValue(),
- InstCall->getCXXThisVal(), Call.getArgSVal(0));
- return;
- }
- } else {
- if (Call.getNumArgs() >= 2 &&
- Call.getArgExpr(1)->getType()->isIntegralOrEnumerationType()) {
- handleRandomIncrOrDecr(C, OrigExpr, Func->getOverloadedOperator(),
- Call.getReturnValue(), Call.getArgSVal(0),
- Call.getArgSVal(1));
- return;
- }
- }
- } else if (isIncrementOperator(Func->getOverloadedOperator())) {
- if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
- handleIncrement(C, Call.getReturnValue(), InstCall->getCXXThisVal(),
- Call.getNumArgs());
- return;
- }
-
- handleIncrement(C, Call.getReturnValue(), Call.getArgSVal(0),
- Call.getNumArgs());
- return;
- } else if (isDecrementOperator(Func->getOverloadedOperator())) {
- if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
- handleDecrement(C, Call.getReturnValue(), InstCall->getCXXThisVal(),
- Call.getNumArgs());
- return;
- }
-
- handleDecrement(C, Call.getReturnValue(), Call.getArgSVal(0),
- Call.getNumArgs());
- return;
- }
- } else {
- if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
- if (isAssignCall(Func)) {
- handleAssign(C, InstCall->getCXXThisVal());
- return;
- }
-
- if (isClearCall(Func)) {
- handleClear(C, InstCall->getCXXThisVal());
- return;
- }
-
- if (isPushBackCall(Func) || isEmplaceBackCall(Func)) {
- handlePushBack(C, InstCall->getCXXThisVal());
- return;
- }
-
- if (isPopBackCall(Func)) {
- handlePopBack(C, InstCall->getCXXThisVal());
- return;
- }
-
- if (isPushFrontCall(Func) || isEmplaceFrontCall(Func)) {
- handlePushFront(C, InstCall->getCXXThisVal());
- return;
- }
+ handleOverloadedOperator(C, Call, Op);
+ return;
+ }
- if (isPopFrontCall(Func)) {
- handlePopFront(C, InstCall->getCXXThisVal());
- return;
- }
+ const auto *OrigExpr = Call.getOriginExpr();
+ if (!OrigExpr)
+ return;
- if (isInsertCall(Func) || isEmplaceCall(Func)) {
- handleInsert(C, Call.getArgSVal(0));
- return;
- }
+ const AdvanceFn *Handler = AdvanceLikeFunctions.lookup(Call);
+ if (Handler) {
+ handleAdvanceLikeFunction(C, Call, OrigExpr, Handler);
+ return;
+ }
- if (isEraseCall(Func)) {
- if (Call.getNumArgs() == 1) {
- handleErase(C, Call.getArgSVal(0));
- return;
- }
+ if (!isIteratorType(Call.getResultType()))
+ return;
- if (Call.getNumArgs() == 2) {
- handleErase(C, Call.getArgSVal(0), Call.getArgSVal(1));
- return;
- }
- }
+ auto State = C.getState();
- if (isEraseAfterCall(Func)) {
- if (Call.getNumArgs() == 1) {
- handleEraseAfter(C, Call.getArgSVal(0));
- return;
- }
+ // Already bound to container?
+ if (getIteratorPosition(State, Call.getReturnValue()))
+ return;
- if (Call.getNumArgs() == 2) {
- handleEraseAfter(C, Call.getArgSVal(0), Call.getArgSVal(1));
- return;
- }
+ // Copy-like and move constructors
+ if (isa<CXXConstructorCall>(&Call) && Call.getNumArgs() == 1) {
+ if (const auto *Pos = getIteratorPosition(State, Call.getArgSVal(0))) {
+ State = setIteratorPosition(State, Call.getReturnValue(), *Pos);
+ if (cast<CXXConstructorDecl>(Func)->isMoveConstructor()) {
+ State = removeIteratorPosition(State, Call.getArgSVal(0));
}
- }
-
- const auto *OrigExpr = Call.getOriginExpr();
- if (!OrigExpr)
- return;
-
- if (!isIteratorType(Call.getResultType()))
+ C.addTransition(State);
return;
-
- auto State = C.getState();
-
- if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
- if (isBeginCall(Func)) {
- handleBegin(C, OrigExpr, Call.getReturnValue(),
- InstCall->getCXXThisVal());
- return;
- }
-
- if (isEndCall(Func)) {
- handleEnd(C, OrigExpr, Call.getReturnValue(),
- InstCall->getCXXThisVal());
- return;
- }
}
+ }
- // Already bound to container?
- if (getIteratorPosition(State, Call.getReturnValue()))
- return;
-
- // Copy-like and move constructors
- if (isa<CXXConstructorCall>(&Call) && Call.getNumArgs() == 1) {
- if (const auto *Pos = getIteratorPosition(State, Call.getArgSVal(0))) {
- State = setIteratorPosition(State, Call.getReturnValue(), *Pos);
- if (cast<CXXConstructorDecl>(Func)->isMoveConstructor()) {
- State = removeIteratorPosition(State, Call.getArgSVal(0));
- }
- C.addTransition(State);
+ // Assumption: if return value is an iterator which is not yet bound to a
+ // container, then look for the first iterator argument of the
+ // same type as the return value and bind the return value to
+ // the same container. This approach works for STL algorithms.
+ // FIXME: Add a more conservative mode
+ for (unsigned i = 0; i < Call.getNumArgs(); ++i) {
+ if (isIteratorType(Call.getArgExpr(i)->getType()) &&
+ Call.getArgExpr(i)->getType().getNonReferenceType().getDesugaredType(
+ C.getASTContext()).getTypePtr() ==
+ Call.getResultType().getDesugaredType(C.getASTContext()).getTypePtr()) {
+ if (const auto *Pos = getIteratorPosition(State, Call.getArgSVal(i))) {
+ assignToContainer(C, OrigExpr, Call.getReturnValue(),
+ Pos->getContainer());
return;
}
}
-
- // Assumption: if return value is an iterator which is not yet bound to a
- // container, then look for the first iterator argument, and
- // bind the return value to the same container. This approach
- // works for STL algorithms.
- // FIXME: Add a more conservative mode
- for (unsigned i = 0; i < Call.getNumArgs(); ++i) {
- if (isIteratorType(Call.getArgExpr(i)->getType())) {
- if (const auto *Pos = getIteratorPosition(State, Call.getArgSVal(i))) {
- assignToContainer(C, OrigExpr, Call.getReturnValue(),
- Pos->getContainer());
- return;
- }
- }
- }
}
}
@@ -413,6 +248,35 @@ void IteratorModeling::checkBind(SVal Loc, SVal Val, const Stmt *S,
}
}
+void IteratorModeling::checkPostStmt(const UnaryOperator *UO,
+ CheckerContext &C) const {
+ UnaryOperatorKind OK = UO->getOpcode();
+ if (!isIncrementOperator(OK) && !isDecrementOperator(OK))
+ return;
+
+ auto &SVB = C.getSValBuilder();
+ handlePtrIncrOrDecr(C, UO->getSubExpr(),
+ isIncrementOperator(OK) ? OO_Plus : OO_Minus,
+ SVB.makeArrayIndex(1));
+}
+
+void IteratorModeling::checkPostStmt(const BinaryOperator *BO,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ BinaryOperatorKind OK = BO->getOpcode();
+ SVal RVal = State->getSVal(BO->getRHS(), C.getLocationContext());
+
+ if (isSimpleComparisonOperator(BO->getOpcode())) {
+ SVal LVal = State->getSVal(BO->getLHS(), C.getLocationContext());
+ SVal Result = State->getSVal(BO, C.getLocationContext());
+ handleComparison(C, BO, Result, LVal, RVal,
+ BinaryOperator::getOverloadedOperator(OK));
+ } else if (isRandomIncrOrDecrOperator(OK)) {
+ handlePtrIncrOrDecr(C, BO->getLHS(),
+ BinaryOperator::getOverloadedOperator(OK), RVal);
+ }
+}
+
void IteratorModeling::checkPostStmt(const MaterializeTemporaryExpr *MTE,
CheckerContext &C) const {
/* Transfer iterator state to temporary objects */
@@ -426,8 +290,7 @@ void IteratorModeling::checkPostStmt(const MaterializeTemporaryExpr *MTE,
void IteratorModeling::checkLiveSymbols(ProgramStateRef State,
SymbolReaper &SR) const {
- // Keep symbolic expressions of iterator positions, container begins and ends
- // alive
+ // Keep symbolic expressions of iterator positions alive
auto RegionMap = State->get<IteratorRegionMap>();
for (const auto &Reg : RegionMap) {
const auto Offset = Reg.second.getOffset();
@@ -444,20 +307,6 @@ void IteratorModeling::checkLiveSymbols(ProgramStateRef State,
SR.markLive(*i);
}
- auto ContMap = State->get<ContainerMap>();
- for (const auto &Cont : ContMap) {
- const auto CData = Cont.second;
- if (CData.getBegin()) {
- SR.markLive(CData.getBegin());
- if(const auto *SIE = dyn_cast<SymIntExpr>(CData.getBegin()))
- SR.markLive(SIE->getLHS());
- }
- if (CData.getEnd()) {
- SR.markLive(CData.getEnd());
- if(const auto *SIE = dyn_cast<SymIntExpr>(CData.getEnd()))
- SR.markLive(SIE->getLHS());
- }
- }
}
void IteratorModeling::checkDeadSymbols(SymbolReaper &SR,
@@ -484,18 +333,92 @@ void IteratorModeling::checkDeadSymbols(SymbolReaper &SR,
}
}
- auto ContMap = State->get<ContainerMap>();
- for (const auto &Cont : ContMap) {
- if (!SR.isLiveRegion(Cont.first)) {
- // We must keep the container data while it has live iterators to be able
- // to compare them to the begin and the end of the container.
- if (!hasLiveIterators(State, Cont.first)) {
- State = State->remove<ContainerMap>(Cont.first);
+ C.addTransition(State);
+}
+
+void
+IteratorModeling::handleOverloadedOperator(CheckerContext &C,
+ const CallEvent &Call,
+ OverloadedOperatorKind Op) const {
+ if (isSimpleComparisonOperator(Op)) {
+ const auto *OrigExpr = Call.getOriginExpr();
+ if (!OrigExpr)
+ return;
+
+ if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+ handleComparison(C, OrigExpr, Call.getReturnValue(),
+ InstCall->getCXXThisVal(), Call.getArgSVal(0), Op);
+ return;
}
+
+ handleComparison(C, OrigExpr, Call.getReturnValue(), Call.getArgSVal(0),
+ Call.getArgSVal(1), Op);
+ return;
+ } else if (isRandomIncrOrDecrOperator(Op)) {
+ const auto *OrigExpr = Call.getOriginExpr();
+ if (!OrigExpr)
+ return;
+
+ if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+ if (Call.getNumArgs() >= 1 &&
+ Call.getArgExpr(0)->getType()->isIntegralOrEnumerationType()) {
+ handleRandomIncrOrDecr(C, OrigExpr, Op, Call.getReturnValue(),
+ InstCall->getCXXThisVal(), Call.getArgSVal(0));
+ return;
+ }
+ } else {
+ if (Call.getNumArgs() >= 2 &&
+ Call.getArgExpr(1)->getType()->isIntegralOrEnumerationType()) {
+ handleRandomIncrOrDecr(C, OrigExpr, Op, Call.getReturnValue(),
+ Call.getArgSVal(0), Call.getArgSVal(1));
+ return;
+ }
+ }
+ } else if (isIncrementOperator(Op)) {
+ if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+ handleIncrement(C, Call.getReturnValue(), InstCall->getCXXThisVal(),
+ Call.getNumArgs());
+ return;
+ }
+
+ handleIncrement(C, Call.getReturnValue(), Call.getArgSVal(0),
+ Call.getNumArgs());
+ return;
+ } else if (isDecrementOperator(Op)) {
+ if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+ handleDecrement(C, Call.getReturnValue(), InstCall->getCXXThisVal(),
+ Call.getNumArgs());
+ return;
+ }
+
+ handleDecrement(C, Call.getReturnValue(), Call.getArgSVal(0),
+ Call.getNumArgs());
+ return;
}
+}
+
+void
+IteratorModeling::handleAdvanceLikeFunction(CheckerContext &C,
+ const CallEvent &Call,
+ const Expr *OrigExpr,
+ const AdvanceFn *Handler) const {
+ if (!C.wasInlined) {
+ (this->**Handler)(C, OrigExpr, Call.getReturnValue(),
+ Call.getArgSVal(0), Call.getArgSVal(1));
+ return;
}
- C.addTransition(State);
+ // If std::advance() was inlined, but a non-standard function it calls inside
+ // was not, then we have to model it explicitly
+ const auto *IdInfo = cast<FunctionDecl>(Call.getDecl())->getIdentifier();
+ if (IdInfo) {
+ if (IdInfo->getName() == "advance") {
+ if (noChangeInAdvance(C, Call.getArgSVal(0), OrigExpr)) {
+ (this->**Handler)(C, OrigExpr, Call.getReturnValue(),
+ Call.getArgSVal(0), Call.getArgSVal(1));
+ }
+ }
+ }
}
void IteratorModeling::handleComparison(CheckerContext &C, const Expr *CE,
@@ -518,7 +441,7 @@ void IteratorModeling::handleComparison(CheckerContext &C, const Expr *CE,
if (!Cont)
return;
- // At least one of the iterators have recorded positions. If one of them has
+ // At least one of the iterators has recorded positions. If one of them does
// not then create a new symbol for the offset.
SymbolRef Sym;
if (!LPos || !RPos) {
@@ -538,7 +461,7 @@ void IteratorModeling::handleComparison(CheckerContext &C, const Expr *CE,
RPos = getIteratorPosition(State, RVal);
}
- // We cannot make assumpotions on `UnknownVal`. Let us conjure a symbol
+ // We cannot make assumptions on `UnknownVal`. Let us conjure a symbol
// instead.
if (RetVal.isUnknown()) {
auto &SymMgr = C.getSymbolManager();
@@ -574,7 +497,7 @@ void IteratorModeling::processComparison(CheckerContext &C,
StateTrue = StateTrue->assume(*ConditionVal, true);
C.addTransition(StateTrue);
}
-
+
if (auto StateFalse = relateSymbols(State, Sym1, Sym2, Op != OO_EqualEqual)) {
StateFalse = StateFalse->assume(*ConditionVal, false);
C.addTransition(StateFalse);
@@ -648,481 +571,139 @@ void IteratorModeling::handleRandomIncrOrDecr(CheckerContext &C,
return;
const auto *value = &RHS;
+ SVal val;
if (auto loc = RHS.getAs<Loc>()) {
- const auto val = State->getRawSVal(*loc);
+ val = State->getRawSVal(*loc);
value = &val;
}
auto &TgtVal = (Op == OO_PlusEqual || Op == OO_MinusEqual) ? LHS : RetVal;
- auto NewState =
- advancePosition(State, LHS, Op, *value);
- if (NewState) {
- const auto *NewPos = getIteratorPosition(NewState, LHS);
+ // `AdvancedState` is a state where the position of `LHS` is advanced. We
+ // only need this state to retrieve the new position, but we do not want
+ // to change the position of `LHS` (in every case).
+ auto AdvancedState = advancePosition(State, LHS, Op, *value);
+ if (AdvancedState) {
+ const auto *NewPos = getIteratorPosition(AdvancedState, LHS);
assert(NewPos &&
"Iterator should have position after successful advancement");
- State = setIteratorPosition(NewState, TgtVal, *NewPos);
+ State = setIteratorPosition(State, TgtVal, *NewPos);
C.addTransition(State);
} else {
assignToContainer(C, CE, TgtVal, Pos->getContainer());
}
}
-void IteratorModeling::handleBegin(CheckerContext &C, const Expr *CE,
- const SVal &RetVal, const SVal &Cont) const {
- const auto *ContReg = Cont.getAsRegion();
- if (!ContReg)
- return;
-
- ContReg = ContReg->getMostDerivedObjectRegion();
-
- // If the container already has a begin symbol then use it. Otherwise first
- // create a new one.
- auto State = C.getState();
- auto BeginSym = getContainerBegin(State, ContReg);
- if (!BeginSym) {
- State = createContainerBegin(State, ContReg, CE, C.getASTContext().LongTy,
- C.getLocationContext(), C.blockCount());
- BeginSym = getContainerBegin(State, ContReg);
- }
- State = setIteratorPosition(State, RetVal,
- IteratorPosition::getPosition(ContReg, BeginSym));
- C.addTransition(State);
-}
-
-void IteratorModeling::handleEnd(CheckerContext &C, const Expr *CE,
- const SVal &RetVal, const SVal &Cont) const {
- const auto *ContReg = Cont.getAsRegion();
- if (!ContReg)
- return;
-
- ContReg = ContReg->getMostDerivedObjectRegion();
-
- // If the container already has an end symbol then use it. Otherwise first
- // create a new one.
- auto State = C.getState();
- auto EndSym = getContainerEnd(State, ContReg);
- if (!EndSym) {
- State = createContainerEnd(State, ContReg, CE, C.getASTContext().LongTy,
- C.getLocationContext(), C.blockCount());
- EndSym = getContainerEnd(State, ContReg);
- }
- State = setIteratorPosition(State, RetVal,
- IteratorPosition::getPosition(ContReg, EndSym));
- C.addTransition(State);
-}
-
-void IteratorModeling::assignToContainer(CheckerContext &C, const Expr *CE,
- const SVal &RetVal,
- const MemRegion *Cont) const {
- Cont = Cont->getMostDerivedObjectRegion();
-
- auto State = C.getState();
- auto &SymMgr = C.getSymbolManager();
- auto Sym = SymMgr.conjureSymbol(CE, C.getLocationContext(),
- C.getASTContext().LongTy, C.blockCount());
- State = assumeNoOverflow(State, Sym, 4);
- State = setIteratorPosition(State, RetVal,
- IteratorPosition::getPosition(Cont, Sym));
- C.addTransition(State);
-}
-
-void IteratorModeling::handleAssign(CheckerContext &C, const SVal &Cont,
- const Expr *CE, const SVal &OldCont) const {
- const auto *ContReg = Cont.getAsRegion();
- if (!ContReg)
- return;
-
- ContReg = ContReg->getMostDerivedObjectRegion();
-
- // Assignment of a new value to a container always invalidates all its
- // iterators
- auto State = C.getState();
- const auto CData = getContainerData(State, ContReg);
- if (CData) {
- State = invalidateAllIteratorPositions(State, ContReg);
- }
-
- // In case of move, iterators of the old container (except the past-end
- // iterators) remain valid but refer to the new container
- if (!OldCont.isUndef()) {
- const auto *OldContReg = OldCont.getAsRegion();
- if (OldContReg) {
- OldContReg = OldContReg->getMostDerivedObjectRegion();
- const auto OldCData = getContainerData(State, OldContReg);
- if (OldCData) {
- if (const auto OldEndSym = OldCData->getEnd()) {
- // If we already assigned an "end" symbol to the old container, then
- // first reassign all iterator positions to the new container which
- // are not past the container (thus not greater or equal to the
- // current "end" symbol).
- State = reassignAllIteratorPositionsUnless(State, OldContReg, ContReg,
- OldEndSym, BO_GE);
- auto &SymMgr = C.getSymbolManager();
- auto &SVB = C.getSValBuilder();
- // Then generate and assign a new "end" symbol for the new container.
- auto NewEndSym =
- SymMgr.conjureSymbol(CE, C.getLocationContext(),
- C.getASTContext().LongTy, C.blockCount());
- State = assumeNoOverflow(State, NewEndSym, 4);
- if (CData) {
- State = setContainerData(State, ContReg, CData->newEnd(NewEndSym));
- } else {
- State = setContainerData(State, ContReg,
- ContainerData::fromEnd(NewEndSym));
- }
- // Finally, replace the old "end" symbol in the already reassigned
- // iterator positions with the new "end" symbol.
- State = rebaseSymbolInIteratorPositionsIf(
- State, SVB, OldEndSym, NewEndSym, OldEndSym, BO_LT);
- } else {
- // There was no "end" symbol assigned yet to the old container,
- // so reassign all iterator positions to the new container.
- State = reassignAllIteratorPositions(State, OldContReg, ContReg);
- }
- if (const auto OldBeginSym = OldCData->getBegin()) {
- // If we already assigned a "begin" symbol to the old container, then
- // assign it to the new container and remove it from the old one.
- if (CData) {
- State =
- setContainerData(State, ContReg, CData->newBegin(OldBeginSym));
- } else {
- State = setContainerData(State, ContReg,
- ContainerData::fromBegin(OldBeginSym));
- }
- State =
- setContainerData(State, OldContReg, OldCData->newEnd(nullptr));
- }
- } else {
- // There was neither "begin" nor "end" symbol assigned yet to the old
- // container, so reassign all iterator positions to the new container.
- State = reassignAllIteratorPositions(State, OldContReg, ContReg);
- }
- }
- }
- C.addTransition(State);
-}
-
-void IteratorModeling::handleClear(CheckerContext &C, const SVal &Cont) const {
- const auto *ContReg = Cont.getAsRegion();
- if (!ContReg)
- return;
-
- ContReg = ContReg->getMostDerivedObjectRegion();
-
- // The clear() operation invalidates all the iterators, except the past-end
- // iterators of list-like containers
- auto State = C.getState();
- if (!hasSubscriptOperator(State, ContReg) ||
- !backModifiable(State, ContReg)) {
- const auto CData = getContainerData(State, ContReg);
- if (CData) {
- if (const auto EndSym = CData->getEnd()) {
- State =
- invalidateAllIteratorPositionsExcept(State, ContReg, EndSym, BO_GE);
- C.addTransition(State);
- return;
- }
- }
- }
- State = invalidateAllIteratorPositions(State, ContReg);
- C.addTransition(State);
-}
-
-void IteratorModeling::handlePushBack(CheckerContext &C,
- const SVal &Cont) const {
- const auto *ContReg = Cont.getAsRegion();
- if (!ContReg)
+void IteratorModeling::handlePtrIncrOrDecr(CheckerContext &C,
+ const Expr *Iterator,
+ OverloadedOperatorKind OK,
+ SVal Offset) const {
+ QualType PtrType = Iterator->getType();
+ if (!PtrType->isPointerType())
return;
+ QualType ElementType = PtrType->getPointeeType();
- ContReg = ContReg->getMostDerivedObjectRegion();
+ ProgramStateRef State = C.getState();
+ SVal OldVal = State->getSVal(Iterator, C.getLocationContext());
- // For deque-like containers invalidate all iterator positions
- auto State = C.getState();
- if (hasSubscriptOperator(State, ContReg) && frontModifiable(State, ContReg)) {
- State = invalidateAllIteratorPositions(State, ContReg);
- C.addTransition(State);
+ const IteratorPosition *OldPos = getIteratorPosition(State, OldVal);
+ if (!OldPos)
return;
- }
- const auto CData = getContainerData(State, ContReg);
- if (!CData)
- return;
+ SVal NewVal;
+ if (OK == OO_Plus || OK == OO_PlusEqual)
+ NewVal = State->getLValue(ElementType, Offset, OldVal);
+ else {
+ const llvm::APSInt &OffsetInt =
+ Offset.castAs<nonloc::ConcreteInt>().getValue();
+ auto &BVF = C.getSymbolManager().getBasicVals();
+ SVal NegatedOffset = nonloc::ConcreteInt(BVF.getValue(-OffsetInt));
+ NewVal = State->getLValue(ElementType, NegatedOffset, OldVal);
+ }
+
+ // `AdvancedState` is a state where the position of `Old` is advanced. We
+ // only need this state to retrieve the new position, but we do not want
+ // ever to change the position of `OldVal`.
+ auto AdvancedState = advancePosition(State, OldVal, OK, Offset);
+ if (AdvancedState) {
+ const IteratorPosition *NewPos = getIteratorPosition(AdvancedState, OldVal);
+ assert(NewPos &&
+ "Iterator should have position after successful advancement");
- // For vector-like containers invalidate the past-end iterator positions
- if (const auto EndSym = CData->getEnd()) {
- if (hasSubscriptOperator(State, ContReg)) {
- State = invalidateIteratorPositions(State, EndSym, BO_GE);
- }
- auto &SymMgr = C.getSymbolManager();
- auto &BVF = SymMgr.getBasicVals();
- auto &SVB = C.getSValBuilder();
- const auto newEndSym =
- SVB.evalBinOp(State, BO_Add,
- nonloc::SymbolVal(EndSym),
- nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
- SymMgr.getType(EndSym)).getAsSymbol();
- State = setContainerData(State, ContReg, CData->newEnd(newEndSym));
+ ProgramStateRef NewState = setIteratorPosition(State, NewVal, *NewPos);
+ C.addTransition(NewState);
+ } else {
+ assignToContainer(C, Iterator, NewVal, OldPos->getContainer());
}
- C.addTransition(State);
}
-void IteratorModeling::handlePopBack(CheckerContext &C,
- const SVal &Cont) const {
- const auto *ContReg = Cont.getAsRegion();
- if (!ContReg)
- return;
-
- ContReg = ContReg->getMostDerivedObjectRegion();
-
- auto State = C.getState();
- const auto CData = getContainerData(State, ContReg);
- if (!CData)
- return;
-
- if (const auto EndSym = CData->getEnd()) {
- auto &SymMgr = C.getSymbolManager();
- auto &BVF = SymMgr.getBasicVals();
- auto &SVB = C.getSValBuilder();
- const auto BackSym =
- SVB.evalBinOp(State, BO_Sub,
- nonloc::SymbolVal(EndSym),
- nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
- SymMgr.getType(EndSym)).getAsSymbol();
- // For vector-like and deque-like containers invalidate the last and the
- // past-end iterator positions. For list-like containers only invalidate
- // the last position
- if (hasSubscriptOperator(State, ContReg) &&
- backModifiable(State, ContReg)) {
- State = invalidateIteratorPositions(State, BackSym, BO_GE);
- State = setContainerData(State, ContReg, CData->newEnd(nullptr));
- } else {
- State = invalidateIteratorPositions(State, BackSym, BO_EQ);
- }
- auto newEndSym = BackSym;
- State = setContainerData(State, ContReg, CData->newEnd(newEndSym));
- C.addTransition(State);
- }
+void IteratorModeling::handleAdvance(CheckerContext &C, const Expr *CE,
+ SVal RetVal, SVal Iter,
+ SVal Amount) const {
+ handleRandomIncrOrDecr(C, CE, OO_PlusEqual, RetVal, Iter, Amount);
}
-void IteratorModeling::handlePushFront(CheckerContext &C,
- const SVal &Cont) const {
- const auto *ContReg = Cont.getAsRegion();
- if (!ContReg)
- return;
-
- ContReg = ContReg->getMostDerivedObjectRegion();
-
- // For deque-like containers invalidate all iterator positions
- auto State = C.getState();
- if (hasSubscriptOperator(State, ContReg)) {
- State = invalidateAllIteratorPositions(State, ContReg);
- C.addTransition(State);
- } else {
- const auto CData = getContainerData(State, ContReg);
- if (!CData)
- return;
-
- if (const auto BeginSym = CData->getBegin()) {
- auto &SymMgr = C.getSymbolManager();
- auto &BVF = SymMgr.getBasicVals();
- auto &SVB = C.getSValBuilder();
- const auto newBeginSym =
- SVB.evalBinOp(State, BO_Sub,
- nonloc::SymbolVal(BeginSym),
- nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
- SymMgr.getType(BeginSym)).getAsSymbol();
- State = setContainerData(State, ContReg, CData->newBegin(newBeginSym));
- C.addTransition(State);
- }
- }
+void IteratorModeling::handlePrev(CheckerContext &C, const Expr *CE,
+ SVal RetVal, SVal Iter, SVal Amount) const {
+ handleRandomIncrOrDecr(C, CE, OO_Minus, RetVal, Iter, Amount);
}
-void IteratorModeling::handlePopFront(CheckerContext &C,
- const SVal &Cont) const {
- const auto *ContReg = Cont.getAsRegion();
- if (!ContReg)
- return;
-
- ContReg = ContReg->getMostDerivedObjectRegion();
-
- auto State = C.getState();
- const auto CData = getContainerData(State, ContReg);
- if (!CData)
- return;
-
- // For deque-like containers invalidate all iterator positions. For list-like
- // iterators only invalidate the first position
- if (const auto BeginSym = CData->getBegin()) {
- if (hasSubscriptOperator(State, ContReg)) {
- State = invalidateIteratorPositions(State, BeginSym, BO_LE);
- } else {
- State = invalidateIteratorPositions(State, BeginSym, BO_EQ);
- }
- auto &SymMgr = C.getSymbolManager();
- auto &BVF = SymMgr.getBasicVals();
- auto &SVB = C.getSValBuilder();
- const auto newBeginSym =
- SVB.evalBinOp(State, BO_Add,
- nonloc::SymbolVal(BeginSym),
- nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
- SymMgr.getType(BeginSym)).getAsSymbol();
- State = setContainerData(State, ContReg, CData->newBegin(newBeginSym));
- C.addTransition(State);
- }
+void IteratorModeling::handleNext(CheckerContext &C, const Expr *CE,
+ SVal RetVal, SVal Iter, SVal Amount) const {
+ handleRandomIncrOrDecr(C, CE, OO_Plus, RetVal, Iter, Amount);
}
-void IteratorModeling::handleInsert(CheckerContext &C, const SVal &Iter) const {
- auto State = C.getState();
- const auto *Pos = getIteratorPosition(State, Iter);
- if (!Pos)
- return;
-
- // For deque-like containers invalidate all iterator positions. For
- // vector-like containers invalidate iterator positions after the insertion.
- const auto *Cont = Pos->getContainer();
- if (hasSubscriptOperator(State, Cont) && backModifiable(State, Cont)) {
- if (frontModifiable(State, Cont)) {
- State = invalidateAllIteratorPositions(State, Cont);
- } else {
- State = invalidateIteratorPositions(State, Pos->getOffset(), BO_GE);
- }
- if (const auto *CData = getContainerData(State, Cont)) {
- if (const auto EndSym = CData->getEnd()) {
- State = invalidateIteratorPositions(State, EndSym, BO_GE);
- State = setContainerData(State, Cont, CData->newEnd(nullptr));
- }
- }
- C.addTransition(State);
- }
-}
+void IteratorModeling::assignToContainer(CheckerContext &C, const Expr *CE,
+ const SVal &RetVal,
+ const MemRegion *Cont) const {
+ Cont = Cont->getMostDerivedObjectRegion();
-void IteratorModeling::handleErase(CheckerContext &C, const SVal &Iter) const {
auto State = C.getState();
- const auto *Pos = getIteratorPosition(State, Iter);
- if (!Pos)
- return;
+ const auto *LCtx = C.getLocationContext();
+ State = createIteratorPosition(State, RetVal, Cont, CE, LCtx, C.blockCount());
- // For deque-like containers invalidate all iterator positions. For
- // vector-like containers invalidate iterator positions at and after the
- // deletion. For list-like containers only invalidate the deleted position.
- const auto *Cont = Pos->getContainer();
- if (hasSubscriptOperator(State, Cont) && backModifiable(State, Cont)) {
- if (frontModifiable(State, Cont)) {
- State = invalidateAllIteratorPositions(State, Cont);
- } else {
- State = invalidateIteratorPositions(State, Pos->getOffset(), BO_GE);
- }
- if (const auto *CData = getContainerData(State, Cont)) {
- if (const auto EndSym = CData->getEnd()) {
- State = invalidateIteratorPositions(State, EndSym, BO_GE);
- State = setContainerData(State, Cont, CData->newEnd(nullptr));
- }
- }
- } else {
- State = invalidateIteratorPositions(State, Pos->getOffset(), BO_EQ);
- }
C.addTransition(State);
}
-void IteratorModeling::handleErase(CheckerContext &C, const SVal &Iter1,
- const SVal &Iter2) const {
- auto State = C.getState();
- const auto *Pos1 = getIteratorPosition(State, Iter1);
- const auto *Pos2 = getIteratorPosition(State, Iter2);
- if (!Pos1 || !Pos2)
- return;
+bool IteratorModeling::noChangeInAdvance(CheckerContext &C, SVal Iter,
+ const Expr *CE) const {
+ // Compare the iterator position before and after the call. (To be called
+ // from `checkPostCall()`.)
+ const auto StateAfter = C.getState();
- // For deque-like containers invalidate all iterator positions. For
- // vector-like containers invalidate iterator positions at and after the
- // deletion range. For list-like containers only invalidate the deleted
- // position range [first..last].
- const auto *Cont = Pos1->getContainer();
- if (hasSubscriptOperator(State, Cont) && backModifiable(State, Cont)) {
- if (frontModifiable(State, Cont)) {
- State = invalidateAllIteratorPositions(State, Cont);
- } else {
- State = invalidateIteratorPositions(State, Pos1->getOffset(), BO_GE);
- }
- if (const auto *CData = getContainerData(State, Cont)) {
- if (const auto EndSym = CData->getEnd()) {
- State = invalidateIteratorPositions(State, EndSym, BO_GE);
- State = setContainerData(State, Cont, CData->newEnd(nullptr));
- }
- }
- } else {
- State = invalidateIteratorPositions(State, Pos1->getOffset(), BO_GE,
- Pos2->getOffset(), BO_LT);
- }
- C.addTransition(State);
-}
+ const auto *PosAfter = getIteratorPosition(StateAfter, Iter);
+ // If we have no position after the call of `std::advance`, then we are not
+ // interested. (Modeling of an inlined `std::advance()` should not remove the
+ // position in any case.)
+ if (!PosAfter)
+ return false;
-void IteratorModeling::handleEraseAfter(CheckerContext &C,
- const SVal &Iter) const {
- auto State = C.getState();
- const auto *Pos = getIteratorPosition(State, Iter);
- if (!Pos)
- return;
+ const ExplodedNode *N = findCallEnter(C.getPredecessor(), CE);
+ assert(N && "Any call should have a `CallEnter` node.");
- // Invalidate the deleted iterator position, which is the position of the
- // parameter plus one.
- auto &SymMgr = C.getSymbolManager();
- auto &BVF = SymMgr.getBasicVals();
- auto &SVB = C.getSValBuilder();
- const auto NextSym =
- SVB.evalBinOp(State, BO_Add,
- nonloc::SymbolVal(Pos->getOffset()),
- nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
- SymMgr.getType(Pos->getOffset())).getAsSymbol();
- State = invalidateIteratorPositions(State, NextSym, BO_EQ);
- C.addTransition(State);
-}
+ const auto StateBefore = N->getState();
+ const auto *PosBefore = getIteratorPosition(StateBefore, Iter);
-void IteratorModeling::handleEraseAfter(CheckerContext &C, const SVal &Iter1,
- const SVal &Iter2) const {
- auto State = C.getState();
- const auto *Pos1 = getIteratorPosition(State, Iter1);
- const auto *Pos2 = getIteratorPosition(State, Iter2);
- if (!Pos1 || !Pos2)
- return;
+ assert(PosBefore && "`std::advance() should not create new iterator "
+ "position but change existing ones");
- // Invalidate the deleted iterator position range (first..last)
- State = invalidateIteratorPositions(State, Pos1->getOffset(), BO_GT,
- Pos2->getOffset(), BO_LT);
- C.addTransition(State);
+ return PosBefore->getOffset() == PosAfter->getOffset();
}
void IteratorModeling::printState(raw_ostream &Out, ProgramStateRef State,
const char *NL, const char *Sep) const {
-
- auto ContMap = State->get<ContainerMap>();
-
- if (!ContMap.isEmpty()) {
- Out << Sep << "Container Data :" << NL;
- for (const auto &Cont : ContMap) {
- Cont.first->dumpToStream(Out);
- Out << " : [ ";
- const auto CData = Cont.second;
- if (CData.getBegin())
- CData.getBegin()->dumpToStream(Out);
- else
- Out << "<Unknown>";
- Out << " .. ";
- if (CData.getEnd())
- CData.getEnd()->dumpToStream(Out);
- else
- Out << "<Unknown>";
- Out << " ]" << NL;
- }
- }
-
auto SymbolMap = State->get<IteratorSymbolMap>();
auto RegionMap = State->get<IteratorRegionMap>();
+ // Use a counter to add newlines before every line except the first one.
+ unsigned Count = 0;
if (!SymbolMap.isEmpty() || !RegionMap.isEmpty()) {
Out << Sep << "Iterator Positions :" << NL;
for (const auto &Sym : SymbolMap) {
+ if (Count++)
+ Out << NL;
+
Sym.first->dumpToStream(Out);
Out << " : ";
const auto Pos = Sym.second;
@@ -1133,6 +714,9 @@ void IteratorModeling::printState(raw_ostream &Out, ProgramStateRef State,
}
for (const auto &Reg : RegionMap) {
+ if (Count++)
+ Out << NL;
+
Reg.first->dumpToStream(Out);
Out << " : ";
const auto Pos = Reg.second;
@@ -1144,229 +728,14 @@ void IteratorModeling::printState(raw_ostream &Out, ProgramStateRef State,
}
}
-
namespace {
-const CXXRecordDecl *getCXXRecordDecl(ProgramStateRef State,
- const MemRegion *Reg);
-
-bool isBeginCall(const FunctionDecl *Func) {
- const auto *IdInfo = Func->getIdentifier();
- if (!IdInfo)
- return false;
- return IdInfo->getName().endswith_lower("begin");
-}
-
-bool isEndCall(const FunctionDecl *Func) {
- const auto *IdInfo = Func->getIdentifier();
- if (!IdInfo)
- return false;
- return IdInfo->getName().endswith_lower("end");
-}
-
-bool isAssignCall(const FunctionDecl *Func) {
- const auto *IdInfo = Func->getIdentifier();
- if (!IdInfo)
- return false;
- if (Func->getNumParams() > 2)
- return false;
- return IdInfo->getName() == "assign";
-}
-
-bool isClearCall(const FunctionDecl *Func) {
- const auto *IdInfo = Func->getIdentifier();
- if (!IdInfo)
- return false;
- if (Func->getNumParams() > 0)
- return false;
- return IdInfo->getName() == "clear";
-}
-
-bool isPushBackCall(const FunctionDecl *Func) {
- const auto *IdInfo = Func->getIdentifier();
- if (!IdInfo)
- return false;
- if (Func->getNumParams() != 1)
- return false;
- return IdInfo->getName() == "push_back";
-}
-
-bool isEmplaceBackCall(const FunctionDecl *Func) {
- const auto *IdInfo = Func->getIdentifier();
- if (!IdInfo)
- return false;
- if (Func->getNumParams() < 1)
- return false;
- return IdInfo->getName() == "emplace_back";
-}
-
-bool isPopBackCall(const FunctionDecl *Func) {
- const auto *IdInfo = Func->getIdentifier();
- if (!IdInfo)
- return false;
- if (Func->getNumParams() > 0)
- return false;
- return IdInfo->getName() == "pop_back";
-}
-
-bool isPushFrontCall(const FunctionDecl *Func) {
- const auto *IdInfo = Func->getIdentifier();
- if (!IdInfo)
- return false;
- if (Func->getNumParams() != 1)
- return false;
- return IdInfo->getName() == "push_front";
-}
-
-bool isEmplaceFrontCall(const FunctionDecl *Func) {
- const auto *IdInfo = Func->getIdentifier();
- if (!IdInfo)
- return false;
- if (Func->getNumParams() < 1)
- return false;
- return IdInfo->getName() == "emplace_front";
-}
-
-bool isPopFrontCall(const FunctionDecl *Func) {
- const auto *IdInfo = Func->getIdentifier();
- if (!IdInfo)
- return false;
- if (Func->getNumParams() > 0)
- return false;
- return IdInfo->getName() == "pop_front";
-}
-
-bool isAssignmentOperator(OverloadedOperatorKind OK) { return OK == OO_Equal; }
-
bool isSimpleComparisonOperator(OverloadedOperatorKind OK) {
return OK == OO_EqualEqual || OK == OO_ExclaimEqual;
}
-bool hasSubscriptOperator(ProgramStateRef State, const MemRegion *Reg) {
- const auto *CRD = getCXXRecordDecl(State, Reg);
- if (!CRD)
- return false;
-
- for (const auto *Method : CRD->methods()) {
- if (!Method->isOverloadedOperator())
- continue;
- const auto OPK = Method->getOverloadedOperator();
- if (OPK == OO_Subscript) {
- return true;
- }
- }
- return false;
-}
-
-bool frontModifiable(ProgramStateRef State, const MemRegion *Reg) {
- const auto *CRD = getCXXRecordDecl(State, Reg);
- if (!CRD)
- return false;
-
- for (const auto *Method : CRD->methods()) {
- if (!Method->getDeclName().isIdentifier())
- continue;
- if (Method->getName() == "push_front" || Method->getName() == "pop_front") {
- return true;
- }
- }
- return false;
-}
-
-bool backModifiable(ProgramStateRef State, const MemRegion *Reg) {
- const auto *CRD = getCXXRecordDecl(State, Reg);
- if (!CRD)
- return false;
-
- for (const auto *Method : CRD->methods()) {
- if (!Method->getDeclName().isIdentifier())
- continue;
- if (Method->getName() == "push_back" || Method->getName() == "pop_back") {
- return true;
- }
- }
- return false;
-}
-
-const CXXRecordDecl *getCXXRecordDecl(ProgramStateRef State,
- const MemRegion *Reg) {
- auto TI = getDynamicTypeInfo(State, Reg);
- if (!TI.isValid())
- return nullptr;
-
- auto Type = TI.getType();
- if (const auto *RefT = Type->getAs<ReferenceType>()) {
- Type = RefT->getPointeeType();
- }
-
- return Type->getUnqualifiedDesugaredType()->getAsCXXRecordDecl();
-}
-
-SymbolRef getContainerBegin(ProgramStateRef State, const MemRegion *Cont) {
- const auto *CDataPtr = getContainerData(State, Cont);
- if (!CDataPtr)
- return nullptr;
-
- return CDataPtr->getBegin();
-}
-
-SymbolRef getContainerEnd(ProgramStateRef State, const MemRegion *Cont) {
- const auto *CDataPtr = getContainerData(State, Cont);
- if (!CDataPtr)
- return nullptr;
-
- return CDataPtr->getEnd();
-}
-
-ProgramStateRef createContainerBegin(ProgramStateRef State,
- const MemRegion *Cont, const Expr *E,
- QualType T, const LocationContext *LCtx,
- unsigned BlockCount) {
- // Only create if it does not exist
- const auto *CDataPtr = getContainerData(State, Cont);
- if (CDataPtr && CDataPtr->getBegin())
- return State;
-
- auto &SymMgr = State->getSymbolManager();
- const SymbolConjured *Sym = SymMgr.conjureSymbol(E, LCtx, T, BlockCount,
- "begin");
- State = assumeNoOverflow(State, Sym, 4);
-
- if (CDataPtr) {
- const auto CData = CDataPtr->newBegin(Sym);
- return setContainerData(State, Cont, CData);
- }
-
- const auto CData = ContainerData::fromBegin(Sym);
- return setContainerData(State, Cont, CData);
-}
-
-ProgramStateRef createContainerEnd(ProgramStateRef State, const MemRegion *Cont,
- const Expr *E, QualType T,
- const LocationContext *LCtx,
- unsigned BlockCount) {
- // Only create if it does not exist
- const auto *CDataPtr = getContainerData(State, Cont);
- if (CDataPtr && CDataPtr->getEnd())
- return State;
-
- auto &SymMgr = State->getSymbolManager();
- const SymbolConjured *Sym = SymMgr.conjureSymbol(E, LCtx, T, BlockCount,
- "end");
- State = assumeNoOverflow(State, Sym, 4);
-
- if (CDataPtr) {
- const auto CData = CDataPtr->newEnd(Sym);
- return setContainerData(State, Cont, CData);
- }
-
- const auto CData = ContainerData::fromEnd(Sym);
- return setContainerData(State, Cont, CData);
-}
-
-ProgramStateRef setContainerData(ProgramStateRef State, const MemRegion *Cont,
- const ContainerData &CData) {
- return State->set<ContainerMap>(Cont, CData);
+bool isSimpleComparisonOperator(BinaryOperatorKind OK) {
+ return OK == BO_EQ || OK == BO_NE;
}
ProgramStateRef removeIteratorPosition(ProgramStateRef State, const SVal &Val) {
@@ -1381,47 +750,6 @@ ProgramStateRef removeIteratorPosition(ProgramStateRef State, const SVal &Val) {
return nullptr;
}
-// This function tells the analyzer's engine that symbols produced by our
-// checker, most notably iterator positions, are relatively small.
-// A distance between items in the container should not be very large.
-// By assuming that it is within around 1/8 of the address space,
-// we can help the analyzer perform operations on these symbols
-// without being afraid of integer overflows.
-// FIXME: Should we provide it as an API, so that all checkers could use it?
-ProgramStateRef assumeNoOverflow(ProgramStateRef State, SymbolRef Sym,
- long Scale) {
- SValBuilder &SVB = State->getStateManager().getSValBuilder();
- BasicValueFactory &BV = SVB.getBasicValueFactory();
-
- QualType T = Sym->getType();
- assert(T->isSignedIntegerOrEnumerationType());
- APSIntType AT = BV.getAPSIntType(T);
-
- ProgramStateRef NewState = State;
-
- llvm::APSInt Max = AT.getMaxValue() / AT.getValue(Scale);
- SVal IsCappedFromAbove =
- SVB.evalBinOpNN(State, BO_LE, nonloc::SymbolVal(Sym),
- nonloc::ConcreteInt(Max), SVB.getConditionType());
- if (auto DV = IsCappedFromAbove.getAs<DefinedSVal>()) {
- NewState = NewState->assume(*DV, true);
- if (!NewState)
- return State;
- }
-
- llvm::APSInt Min = -Max;
- SVal IsCappedFromBelow =
- SVB.evalBinOpNN(State, BO_GE, nonloc::SymbolVal(Sym),
- nonloc::ConcreteInt(Min), SVB.getConditionType());
- if (auto DV = IsCappedFromBelow.getAs<DefinedSVal>()) {
- NewState = NewState->assume(*DV, true);
- if (!NewState)
- return State;
- }
-
- return NewState;
-}
-
ProgramStateRef relateSymbols(ProgramStateRef State, SymbolRef Sym1,
SymbolRef Sym2, bool Equal) {
auto &SVB = State->getStateManager().getSValBuilder();
@@ -1454,22 +782,6 @@ ProgramStateRef relateSymbols(ProgramStateRef State, SymbolRef Sym1,
return NewState;
}
-bool hasLiveIterators(ProgramStateRef State, const MemRegion *Cont) {
- auto RegionMap = State->get<IteratorRegionMap>();
- for (const auto &Reg : RegionMap) {
- if (Reg.second.getContainer() == Cont)
- return true;
- }
-
- auto SymbolMap = State->get<IteratorSymbolMap>();
- for (const auto &Sym : SymbolMap) {
- if (Sym.second.getContainer() == Cont)
- return true;
- }
-
- return false;
-}
-
bool isBoundThroughLazyCompoundVal(const Environment &Env,
const MemRegion *Reg) {
for (const auto &Binding : Env) {
@@ -1482,150 +794,18 @@ bool isBoundThroughLazyCompoundVal(const Environment &Env,
return false;
}
-template <typename Condition, typename Process>
-ProgramStateRef processIteratorPositions(ProgramStateRef State, Condition Cond,
- Process Proc) {
- auto &RegionMapFactory = State->get_context<IteratorRegionMap>();
- auto RegionMap = State->get<IteratorRegionMap>();
- bool Changed = false;
- for (const auto &Reg : RegionMap) {
- if (Cond(Reg.second)) {
- RegionMap = RegionMapFactory.add(RegionMap, Reg.first, Proc(Reg.second));
- Changed = true;
+const ExplodedNode *findCallEnter(const ExplodedNode *Node, const Expr *Call) {
+ while (Node) {
+ ProgramPoint PP = Node->getLocation();
+ if (auto Enter = PP.getAs<CallEnter>()) {
+ if (Enter->getCallExpr() == Call)
+ break;
}
- }
-
- if (Changed)
- State = State->set<IteratorRegionMap>(RegionMap);
- auto &SymbolMapFactory = State->get_context<IteratorSymbolMap>();
- auto SymbolMap = State->get<IteratorSymbolMap>();
- Changed = false;
- for (const auto &Sym : SymbolMap) {
- if (Cond(Sym.second)) {
- SymbolMap = SymbolMapFactory.add(SymbolMap, Sym.first, Proc(Sym.second));
- Changed = true;
- }
+ Node = Node->getFirstPred();
}
- if (Changed)
- State = State->set<IteratorSymbolMap>(SymbolMap);
-
- return State;
-}
-
-ProgramStateRef invalidateAllIteratorPositions(ProgramStateRef State,
- const MemRegion *Cont) {
- auto MatchCont = [&](const IteratorPosition &Pos) {
- return Pos.getContainer() == Cont;
- };
- auto Invalidate = [&](const IteratorPosition &Pos) {
- return Pos.invalidate();
- };
- return processIteratorPositions(State, MatchCont, Invalidate);
-}
-
-ProgramStateRef
-invalidateAllIteratorPositionsExcept(ProgramStateRef State,
- const MemRegion *Cont, SymbolRef Offset,
- BinaryOperator::Opcode Opc) {
- auto MatchContAndCompare = [&](const IteratorPosition &Pos) {
- return Pos.getContainer() == Cont &&
- !compare(State, Pos.getOffset(), Offset, Opc);
- };
- auto Invalidate = [&](const IteratorPosition &Pos) {
- return Pos.invalidate();
- };
- return processIteratorPositions(State, MatchContAndCompare, Invalidate);
-}
-
-ProgramStateRef invalidateIteratorPositions(ProgramStateRef State,
- SymbolRef Offset,
- BinaryOperator::Opcode Opc) {
- auto Compare = [&](const IteratorPosition &Pos) {
- return compare(State, Pos.getOffset(), Offset, Opc);
- };
- auto Invalidate = [&](const IteratorPosition &Pos) {
- return Pos.invalidate();
- };
- return processIteratorPositions(State, Compare, Invalidate);
-}
-
-ProgramStateRef invalidateIteratorPositions(ProgramStateRef State,
- SymbolRef Offset1,
- BinaryOperator::Opcode Opc1,
- SymbolRef Offset2,
- BinaryOperator::Opcode Opc2) {
- auto Compare = [&](const IteratorPosition &Pos) {
- return compare(State, Pos.getOffset(), Offset1, Opc1) &&
- compare(State, Pos.getOffset(), Offset2, Opc2);
- };
- auto Invalidate = [&](const IteratorPosition &Pos) {
- return Pos.invalidate();
- };
- return processIteratorPositions(State, Compare, Invalidate);
-}
-
-ProgramStateRef reassignAllIteratorPositions(ProgramStateRef State,
- const MemRegion *Cont,
- const MemRegion *NewCont) {
- auto MatchCont = [&](const IteratorPosition &Pos) {
- return Pos.getContainer() == Cont;
- };
- auto ReAssign = [&](const IteratorPosition &Pos) {
- return Pos.reAssign(NewCont);
- };
- return processIteratorPositions(State, MatchCont, ReAssign);
-}
-
-ProgramStateRef reassignAllIteratorPositionsUnless(ProgramStateRef State,
- const MemRegion *Cont,
- const MemRegion *NewCont,
- SymbolRef Offset,
- BinaryOperator::Opcode Opc) {
- auto MatchContAndCompare = [&](const IteratorPosition &Pos) {
- return Pos.getContainer() == Cont &&
- !compare(State, Pos.getOffset(), Offset, Opc);
- };
- auto ReAssign = [&](const IteratorPosition &Pos) {
- return Pos.reAssign(NewCont);
- };
- return processIteratorPositions(State, MatchContAndCompare, ReAssign);
-}
-
-// This function rebases symbolic expression `OldSym + Int` to `NewSym + Int`,
-// `OldSym - Int` to `NewSym - Int` and `OldSym` to `NewSym` in any iterator
-// position offsets where `CondSym` is true.
-ProgramStateRef rebaseSymbolInIteratorPositionsIf(
- ProgramStateRef State, SValBuilder &SVB, SymbolRef OldSym,
- SymbolRef NewSym, SymbolRef CondSym, BinaryOperator::Opcode Opc) {
- auto LessThanEnd = [&](const IteratorPosition &Pos) {
- return compare(State, Pos.getOffset(), CondSym, Opc);
- };
- auto RebaseSymbol = [&](const IteratorPosition &Pos) {
- return Pos.setTo(rebaseSymbol(State, SVB, Pos.getOffset(), OldSym,
- NewSym));
- };
- return processIteratorPositions(State, LessThanEnd, RebaseSymbol);
-}
-
-// This function rebases symbolic expression `OldExpr + Int` to `NewExpr + Int`,
-// `OldExpr - Int` to `NewExpr - Int` and `OldExpr` to `NewExpr` in expression
-// `OrigExpr`.
-SymbolRef rebaseSymbol(ProgramStateRef State, SValBuilder &SVB,
- SymbolRef OrigExpr, SymbolRef OldExpr,
- SymbolRef NewSym) {
- auto &SymMgr = SVB.getSymbolManager();
- auto Diff = SVB.evalBinOpNN(State, BO_Sub, nonloc::SymbolVal(OrigExpr),
- nonloc::SymbolVal(OldExpr),
- SymMgr.getType(OrigExpr));
-
- const auto DiffInt = Diff.getAs<nonloc::ConcreteInt>();
- if (!DiffInt)
- return OrigExpr;
-
- return SVB.evalBinOpNN(State, BO_Add, *DiffInt, nonloc::SymbolVal(NewSym),
- SymMgr.getType(OrigExpr)).getAsSymbol();
+ return Node;
}
} // namespace
@@ -1634,6 +814,6 @@ void ento::registerIteratorModeling(CheckerManager &mgr) {
mgr.registerChecker<IteratorModeling>();
}
-bool ento::shouldRegisterIteratorModeling(const LangOptions &LO) {
+bool ento::shouldRegisterIteratorModeling(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp
index bd8b84d464b6..df8e379d1f20 100644
--- a/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp
@@ -27,22 +27,41 @@ using namespace iterator;
namespace {
class IteratorRangeChecker
- : public Checker<check::PreCall> {
+ : public Checker<check::PreCall, check::PreStmt<UnaryOperator>,
+ check::PreStmt<BinaryOperator>,
+ check::PreStmt<ArraySubscriptExpr>,
+ check::PreStmt<MemberExpr>> {
std::unique_ptr<BugType> OutOfRangeBugType;
- void verifyDereference(CheckerContext &C, const SVal &Val) const;
- void verifyIncrement(CheckerContext &C, const SVal &Iter) const;
- void verifyDecrement(CheckerContext &C, const SVal &Iter) const;
+ void verifyDereference(CheckerContext &C, SVal Val) const;
+ void verifyIncrement(CheckerContext &C, SVal Iter) const;
+ void verifyDecrement(CheckerContext &C, SVal Iter) const;
void verifyRandomIncrOrDecr(CheckerContext &C, OverloadedOperatorKind Op,
- const SVal &LHS, const SVal &RHS) const;
- void reportBug(const StringRef &Message, const SVal &Val,
- CheckerContext &C, ExplodedNode *ErrNode) const;
+ SVal LHS, SVal RHS) const;
+ void verifyAdvance(CheckerContext &C, SVal LHS, SVal RHS) const;
+ void verifyPrev(CheckerContext &C, SVal LHS, SVal RHS) const;
+ void verifyNext(CheckerContext &C, SVal LHS, SVal RHS) const;
+ void reportBug(const StringRef &Message, SVal Val, CheckerContext &C,
+ ExplodedNode *ErrNode) const;
+
public:
IteratorRangeChecker();
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
-
+ void checkPreStmt(const UnaryOperator *UO, CheckerContext &C) const;
+ void checkPreStmt(const BinaryOperator *BO, CheckerContext &C) const;
+ void checkPreStmt(const ArraySubscriptExpr *ASE, CheckerContext &C) const;
+ void checkPreStmt(const MemberExpr *ME, CheckerContext &C) const;
+
+ using AdvanceFn = void (IteratorRangeChecker::*)(CheckerContext &, SVal,
+ SVal) const;
+
+ CallDescriptionMap<AdvanceFn> AdvanceFunctions = {
+ {{{"std", "advance"}, 2}, &IteratorRangeChecker::verifyAdvance},
+ {{{"std", "prev"}, 2}, &IteratorRangeChecker::verifyPrev},
+ {{{"std", "next"}, 2}, &IteratorRangeChecker::verifyNext},
+ };
};
bool isPastTheEnd(ProgramStateRef State, const IteratorPosition &Pos);
@@ -107,11 +126,73 @@ void IteratorRangeChecker::checkPreCall(const CallEvent &Call,
verifyDereference(C, Call.getArgSVal(0));
}
}
+ } else {
+ const AdvanceFn *Verifier = AdvanceFunctions.lookup(Call);
+ if (Verifier) {
+ if (Call.getNumArgs() > 1) {
+ (this->**Verifier)(C, Call.getArgSVal(0), Call.getArgSVal(1));
+ } else {
+ auto &BVF = C.getSValBuilder().getBasicValueFactory();
+ (this->**Verifier)(
+ C, Call.getArgSVal(0),
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))));
+ }
+ }
+ }
+}
+
+void IteratorRangeChecker::checkPreStmt(const UnaryOperator *UO,
+ CheckerContext &C) const {
+ if (isa<CXXThisExpr>(UO->getSubExpr()))
+ return;
+
+ ProgramStateRef State = C.getState();
+ UnaryOperatorKind OK = UO->getOpcode();
+ SVal SubVal = State->getSVal(UO->getSubExpr(), C.getLocationContext());
+
+ if (isDereferenceOperator(OK)) {
+ verifyDereference(C, SubVal);
+ } else if (isIncrementOperator(OK)) {
+ verifyIncrement(C, SubVal);
+ } else if (isDecrementOperator(OK)) {
+ verifyDecrement(C, SubVal);
+ }
+}
+
+void IteratorRangeChecker::checkPreStmt(const BinaryOperator *BO,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ BinaryOperatorKind OK = BO->getOpcode();
+ SVal LVal = State->getSVal(BO->getLHS(), C.getLocationContext());
+
+ if (isDereferenceOperator(OK)) {
+ verifyDereference(C, LVal);
+ } else if (isRandomIncrOrDecrOperator(OK)) {
+ SVal RVal = State->getSVal(BO->getRHS(), C.getLocationContext());
+ verifyRandomIncrOrDecr(C, BinaryOperator::getOverloadedOperator(OK), LVal,
+ RVal);
}
}
+void IteratorRangeChecker::checkPreStmt(const ArraySubscriptExpr *ASE,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SVal LVal = State->getSVal(ASE->getLHS(), C.getLocationContext());
+ verifyDereference(C, LVal);
+}
+
+void IteratorRangeChecker::checkPreStmt(const MemberExpr *ME,
+ CheckerContext &C) const {
+ if (!ME->isArrow() || ME->isImplicitAccess())
+ return;
+
+ ProgramStateRef State = C.getState();
+ SVal BaseVal = State->getSVal(ME->getBase(), C.getLocationContext());
+ verifyDereference(C, BaseVal);
+}
+
void IteratorRangeChecker::verifyDereference(CheckerContext &C,
- const SVal &Val) const {
+ SVal Val) const {
auto State = C.getState();
const auto *Pos = getIteratorPosition(State, Val);
if (Pos && isPastTheEnd(State, *Pos)) {
@@ -123,24 +204,21 @@ void IteratorRangeChecker::verifyDereference(CheckerContext &C,
}
}
-void IteratorRangeChecker::verifyIncrement(CheckerContext &C,
- const SVal &Iter) const {
+void IteratorRangeChecker::verifyIncrement(CheckerContext &C, SVal Iter) const {
auto &BVF = C.getSValBuilder().getBasicValueFactory();
verifyRandomIncrOrDecr(C, OO_Plus, Iter,
nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))));
}
-void IteratorRangeChecker::verifyDecrement(CheckerContext &C,
- const SVal &Iter) const {
+void IteratorRangeChecker::verifyDecrement(CheckerContext &C, SVal Iter) const {
auto &BVF = C.getSValBuilder().getBasicValueFactory();
verifyRandomIncrOrDecr(C, OO_Minus, Iter,
nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))));
}
void IteratorRangeChecker::verifyRandomIncrOrDecr(CheckerContext &C,
- OverloadedOperatorKind Op,
- const SVal &LHS,
- const SVal &RHS) const {
+ OverloadedOperatorKind Op,
+ SVal LHS, SVal RHS) const {
auto State = C.getState();
auto Value = RHS;
@@ -180,12 +258,32 @@ void IteratorRangeChecker::verifyRandomIncrOrDecr(CheckerContext &C,
}
}
-void IteratorRangeChecker::reportBug(const StringRef &Message,
- const SVal &Val, CheckerContext &C,
- ExplodedNode *ErrNode) const {
+void IteratorRangeChecker::verifyAdvance(CheckerContext &C, SVal LHS,
+ SVal RHS) const {
+ verifyRandomIncrOrDecr(C, OO_PlusEqual, LHS, RHS);
+}
+
+void IteratorRangeChecker::verifyPrev(CheckerContext &C, SVal LHS,
+ SVal RHS) const {
+ verifyRandomIncrOrDecr(C, OO_Minus, LHS, RHS);
+}
+
+void IteratorRangeChecker::verifyNext(CheckerContext &C, SVal LHS,
+ SVal RHS) const {
+ verifyRandomIncrOrDecr(C, OO_Plus, LHS, RHS);
+}
+
+void IteratorRangeChecker::reportBug(const StringRef &Message, SVal Val,
+ CheckerContext &C,
+ ExplodedNode *ErrNode) const {
auto R = std::make_unique<PathSensitiveBugReport>(*OutOfRangeBugType, Message,
ErrNode);
+
+ const auto *Pos = getIteratorPosition(C.getState(), Val);
+ assert(Pos && "Iterator without known position cannot be out-of-range.");
+
R->markInteresting(Val);
+ R->markInteresting(Pos->getContainer());
C.emitReport(std::move(R));
}
@@ -268,6 +366,6 @@ void ento::registerIteratorRangeChecker(CheckerManager &mgr) {
mgr.registerChecker<IteratorRangeChecker>();
}
-bool ento::shouldRegisterIteratorRangeChecker(const LangOptions &LO) {
+bool ento::shouldRegisterIteratorRangeChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
index 0d64fbd6f62e..3e6756efe0e6 100644
--- a/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
@@ -739,7 +739,7 @@ void ento::registerIvarInvalidationModeling(CheckerManager &mgr) {
mgr.registerChecker<IvarInvalidationChecker>();
}
-bool ento::shouldRegisterIvarInvalidationModeling(const LangOptions &LO) {
+bool ento::shouldRegisterIvarInvalidationModeling(const CheckerManager &mgr) {
return true;
}
@@ -751,7 +751,7 @@ bool ento::shouldRegisterIvarInvalidationModeling(const LangOptions &LO) {
checker->Filter.checkName_##name = mgr.getCurrentCheckerName(); \
} \
\
- bool ento::shouldRegister##name(const LangOptions &LO) { return true; }
+ bool ento::shouldRegister##name(const CheckerManager &mgr) { return true; }
REGISTER_CHECKER(InstanceVariableInvalidation)
REGISTER_CHECKER(MissingInvalidationMethod)
diff --git a/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
index 7522fdd0a99b..1f3d8844d330 100644
--- a/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
@@ -314,6 +314,6 @@ void ento::registerLLVMConventionsChecker(CheckerManager &mgr) {
mgr.registerChecker<LLVMConventionsChecker>();
}
-bool ento::shouldRegisterLLVMConventionsChecker(const LangOptions &LO) {
+bool ento::shouldRegisterLLVMConventionsChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
index 79de1844e745..252377f24bd7 100644
--- a/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
@@ -1403,7 +1403,7 @@ void ento::registerNonLocalizedStringChecker(CheckerManager &mgr) {
checker, "AggressiveReport");
}
-bool ento::shouldRegisterNonLocalizedStringChecker(const LangOptions &LO) {
+bool ento::shouldRegisterNonLocalizedStringChecker(const CheckerManager &mgr) {
return true;
}
@@ -1412,7 +1412,7 @@ void ento::registerEmptyLocalizationContextChecker(CheckerManager &mgr) {
}
bool ento::shouldRegisterEmptyLocalizationContextChecker(
- const LangOptions &LO) {
+ const CheckerManager &mgr) {
return true;
}
@@ -1420,6 +1420,6 @@ void ento::registerPluralMisuseChecker(CheckerManager &mgr) {
mgr.registerChecker<PluralMisuseChecker>();
}
-bool ento::shouldRegisterPluralMisuseChecker(const LangOptions &LO) {
+bool ento::shouldRegisterPluralMisuseChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp
index d73e2eb92d42..837213875a60 100644
--- a/clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp
@@ -210,15 +210,16 @@ void MIGChecker::checkPostCall(const CallEvent &Call, CheckerContext &C) const {
if (!PVD || State->contains<RefCountedParameters>(PVD))
return;
- const NoteTag *T = C.getNoteTag([this, PVD](BugReport &BR) -> std::string {
- if (&BR.getBugType() != &BT)
- return "";
- SmallString<64> Str;
- llvm::raw_svector_ostream OS(Str);
- OS << "Value passed through parameter '" << PVD->getName()
- << "\' is deallocated";
- return OS.str();
- });
+ const NoteTag *T =
+ C.getNoteTag([this, PVD](PathSensitiveBugReport &BR) -> std::string {
+ if (&BR.getBugType() != &BT)
+ return "";
+ SmallString<64> Str;
+ llvm::raw_svector_ostream OS(Str);
+ OS << "Value passed through parameter '" << PVD->getName()
+ << "\' is deallocated";
+ return std::string(OS.str());
+ });
C.addTransition(State->set<ReleasedParameter>(true), T);
}
@@ -292,6 +293,6 @@ void ento::registerMIGChecker(CheckerManager &Mgr) {
Mgr.registerChecker<MIGChecker>();
}
-bool ento::shouldRegisterMIGChecker(const LangOptions &LO) {
+bool ento::shouldRegisterMIGChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
index 7f9ba0de1dc2..7ac7a38dacf3 100644
--- a/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
@@ -16,6 +16,7 @@
#include "MPIChecker.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
namespace clang {
namespace ento {
@@ -146,7 +147,7 @@ void MPIChecker::allRegionsUsedByWait(
llvm::SmallVector<const MemRegion *, 2> &ReqRegions,
const MemRegion *const MR, const CallEvent &CE, CheckerContext &Ctx) const {
- MemRegionManager *const RegionManager = MR->getMemRegionManager();
+ MemRegionManager &RegionManager = MR->getMemRegionManager();
if (FuncClassifier->isMPI_Waitall(CE.getCalleeIdentifier())) {
const SubRegion *SuperRegion{nullptr};
@@ -160,15 +161,16 @@ void MPIChecker::allRegionsUsedByWait(
return;
}
- const auto &Size = Ctx.getStoreManager().getSizeInElements(
- Ctx.getState(), SuperRegion,
+ DefinedOrUnknownSVal ElementCount = getDynamicElementCount(
+ Ctx.getState(), SuperRegion, Ctx.getSValBuilder(),
CE.getArgExpr(1)->getType()->getPointeeType());
- const llvm::APSInt &ArrSize = Size.getAs<nonloc::ConcreteInt>()->getValue();
+ const llvm::APSInt &ArrSize =
+ ElementCount.getAs<nonloc::ConcreteInt>()->getValue();
for (size_t i = 0; i < ArrSize; ++i) {
const NonLoc Idx = Ctx.getSValBuilder().makeArrayIndex(i);
- const ElementRegion *const ER = RegionManager->getElementRegion(
+ const ElementRegion *const ER = RegionManager.getElementRegion(
CE.getArgExpr(1)->getType()->getPointeeType(), Idx, SuperRegion,
Ctx.getASTContext());
@@ -188,6 +190,6 @@ void clang::ento::registerMPIChecker(CheckerManager &MGR) {
MGR.registerChecker<clang::ento::mpi::MPIChecker>();
}
-bool clang::ento::shouldRegisterMPIChecker(const LangOptions &LO) {
+bool clang::ento::shouldRegisterMPIChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
index e064ca6bd88f..87477e96d2d1 100644
--- a/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
@@ -66,7 +66,7 @@ public:
ProgramStateRef evalAssume(ProgramStateRef state, SVal Cond,
bool Assumption) const;
void printState(raw_ostream &Out, ProgramStateRef State,
- const char *NL, const char *Sep) const;
+ const char *NL, const char *Sep) const override;
private:
typedef std::pair<SymbolRef, const AllocationState*> AllocationPair;
@@ -667,6 +667,6 @@ void ento::registerMacOSKeychainAPIChecker(CheckerManager &mgr) {
mgr.registerChecker<MacOSKeychainAPIChecker>();
}
-bool ento::shouldRegisterMacOSKeychainAPIChecker(const LangOptions &LO) {
+bool ento::shouldRegisterMacOSKeychainAPIChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
index 410721d8b6ff..04e7f8dec8d7 100644
--- a/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
@@ -176,6 +176,6 @@ void ento::registerMacOSXAPIChecker(CheckerManager &mgr) {
mgr.registerChecker<MacOSXAPIChecker>();
}
-bool ento::shouldRegisterMacOSXAPIChecker(const LangOptions &LO) {
+bool ento::shouldRegisterMacOSXAPIChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index 09306383d53f..d5b0a5b2220f 100644
--- a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -44,34 +44,49 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "AllocationState.h"
#include "InterCheckerAPI.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
#include "clang/AST/ParentMap.h"
+#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Lexer.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
-#include "AllocationState.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
#include <climits>
+#include <functional>
#include <utility>
using namespace clang;
using namespace ento;
+using namespace std::placeholders;
//===----------------------------------------------------------------------===//
-// The types of allocation we're modeling.
+// The types of allocation we're modeling. This is used to check whether a
+// dynamically allocated object is deallocated with the correct function, like
+// not using operator delete on an object created by malloc(), or alloca regions
+// aren't ever deallocated manually.
//===----------------------------------------------------------------------===//
namespace {
@@ -87,26 +102,16 @@ enum AllocationFamily {
AF_InnerBuffer
};
-struct MemFunctionInfoTy;
-
} // end of anonymous namespace
-/// Determine family of a deallocation expression.
-static AllocationFamily
-getAllocationFamily(const MemFunctionInfoTy &MemFunctionInfo, CheckerContext &C,
- const Stmt *S);
-
/// Print names of allocators and deallocators.
///
/// \returns true on success.
-static bool printAllocDeallocName(raw_ostream &os, CheckerContext &C,
- const Expr *E);
+static bool printMemFnName(raw_ostream &os, CheckerContext &C, const Expr *E);
-/// Print expected name of an allocator based on the deallocator's
-/// family derived from the DeallocExpr.
-static void printExpectedAllocName(raw_ostream &os,
- const MemFunctionInfoTy &MemFunctionInfo,
- CheckerContext &C, const Expr *E);
+/// Print expected name of an allocator based on the deallocator's family
+/// derived from the DeallocExpr.
+static void printExpectedAllocName(raw_ostream &os, AllocationFamily Family);
/// Print expected name of a deallocator based on the allocator's
/// family.
@@ -207,7 +212,7 @@ static bool isReleased(SymbolRef Sym, CheckerContext &C);
/// value; if unspecified, the value of expression \p E is used.
static ProgramStateRef MallocUpdateRefState(CheckerContext &C, const Expr *E,
ProgramStateRef State,
- AllocationFamily Family = AF_Malloc,
+ AllocationFamily Family,
Optional<SVal> RetVal = None);
//===----------------------------------------------------------------------===//
@@ -265,60 +270,14 @@ struct ReallocPair {
REGISTER_MAP_WITH_PROGRAMSTATE(ReallocPairs, SymbolRef, ReallocPair)
-//===----------------------------------------------------------------------===//
-// Kinds of memory operations, information about resource managing functions.
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-enum class MemoryOperationKind { MOK_Allocate, MOK_Free, MOK_Any };
-
-struct MemFunctionInfoTy {
- /// The value of the MallocChecker:Optimistic is stored in this variable.
- ///
- /// In pessimistic mode, the checker assumes that it does not know which
- /// functions might free the memory.
- /// In optimistic mode, the checker assumes that all user-defined functions
- /// which might free a pointer are annotated.
- DefaultBool ShouldIncludeOwnershipAnnotatedFunctions;
-
- // TODO: Change these to CallDescription, and get rid of lazy initialization.
- mutable IdentifierInfo *II_alloca = nullptr, *II_win_alloca = nullptr,
- *II_malloc = nullptr, *II_free = nullptr,
- *II_realloc = nullptr, *II_calloc = nullptr,
- *II_valloc = nullptr, *II_reallocf = nullptr,
- *II_strndup = nullptr, *II_strdup = nullptr,
- *II_win_strdup = nullptr, *II_kmalloc = nullptr,
- *II_if_nameindex = nullptr,
- *II_if_freenameindex = nullptr, *II_wcsdup = nullptr,
- *II_win_wcsdup = nullptr, *II_g_malloc = nullptr,
- *II_g_malloc0 = nullptr, *II_g_realloc = nullptr,
- *II_g_try_malloc = nullptr,
- *II_g_try_malloc0 = nullptr,
- *II_g_try_realloc = nullptr, *II_g_free = nullptr,
- *II_g_memdup = nullptr, *II_g_malloc_n = nullptr,
- *II_g_malloc0_n = nullptr, *II_g_realloc_n = nullptr,
- *II_g_try_malloc_n = nullptr,
- *II_g_try_malloc0_n = nullptr, *II_kfree = nullptr,
- *II_g_try_realloc_n = nullptr;
-
- void initIdentifierInfo(ASTContext &C) const;
-
- ///@{
- /// Check if this is one of the functions which can allocate/reallocate
- /// memory pointed to by one of its arguments.
- bool isMemFunction(const FunctionDecl *FD, ASTContext &C) const;
- bool isCMemFunction(const FunctionDecl *FD, ASTContext &C,
- AllocationFamily Family,
- MemoryOperationKind MemKind) const;
-
- /// Tells if the callee is one of the builtin new/delete operators, including
- /// placement operators and other standard overloads.
- bool isStandardNewDelete(const FunctionDecl *FD, ASTContext &C) const;
- ///@}
-};
-
-} // end of anonymous namespace
+/// Tells if the callee is one of the builtin new/delete operators, including
+/// placement operators and other standard overloads.
+static bool isStandardNewDelete(const FunctionDecl *FD);
+static bool isStandardNewDelete(const CallEvent &Call) {
+ if (!Call.getDecl() || !isa<FunctionDecl>(Call.getDecl()))
+ return false;
+ return isStandardNewDelete(cast<FunctionDecl>(Call.getDecl()));
+}
//===----------------------------------------------------------------------===//
// Definition of the MallocChecker class.
@@ -329,13 +288,15 @@ namespace {
class MallocChecker
: public Checker<check::DeadSymbols, check::PointerEscape,
check::ConstPointerEscape, check::PreStmt<ReturnStmt>,
- check::EndFunction, check::PreCall,
- check::PostStmt<CallExpr>, check::PostStmt<CXXNewExpr>,
- check::NewAllocator, check::PreStmt<CXXDeleteExpr>,
- check::PostStmt<BlockExpr>, check::PostObjCMessage,
- check::Location, eval::Assume> {
+ check::EndFunction, check::PreCall, check::PostCall,
+ check::NewAllocator, check::PostStmt<BlockExpr>,
+ check::PostObjCMessage, check::Location, eval::Assume> {
public:
- MemFunctionInfoTy MemFunctionInfo;
+ /// In pessimistic mode, the checker assumes that it does not know which
+ /// functions might free the memory.
+ /// In optimistic mode, the checker assumes that all user-defined functions
+ /// which might free a pointer are annotated.
+ DefaultBool ShouldIncludeOwnershipAnnotatedFunctions;
/// Many checkers are essentially built into this one, so enabling them will
/// make MallocChecker perform additional modeling and reporting.
@@ -357,11 +318,8 @@ public:
CheckerNameRef CheckNames[CK_NumCheckKinds];
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
- void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
- void checkPostStmt(const CXXNewExpr *NE, CheckerContext &C) const;
- void checkNewAllocator(const CXXNewExpr *NE, SVal Target,
- CheckerContext &C) const;
- void checkPreStmt(const CXXDeleteExpr *DE, CheckerContext &C) const;
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkNewAllocator(const CXXAllocatorCall &Call, CheckerContext &C) const;
void checkPostObjCMessage(const ObjCMethodCall &Call, CheckerContext &C) const;
void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
@@ -395,23 +353,107 @@ private:
mutable std::unique_ptr<BugType> BT_OffsetFree[CK_NumCheckKinds];
mutable std::unique_ptr<BugType> BT_UseZerroAllocated[CK_NumCheckKinds];
+#define CHECK_FN(NAME) \
+ void NAME(const CallEvent &Call, CheckerContext &C) const;
+
+ CHECK_FN(checkFree)
+ CHECK_FN(checkIfNameIndex)
+ CHECK_FN(checkBasicAlloc)
+ CHECK_FN(checkKernelMalloc)
+ CHECK_FN(checkCalloc)
+ CHECK_FN(checkAlloca)
+ CHECK_FN(checkStrdup)
+ CHECK_FN(checkIfFreeNameIndex)
+ CHECK_FN(checkCXXNewOrCXXDelete)
+ CHECK_FN(checkGMalloc0)
+ CHECK_FN(checkGMemdup)
+ CHECK_FN(checkGMallocN)
+ CHECK_FN(checkGMallocN0)
+ CHECK_FN(checkReallocN)
+ CHECK_FN(checkOwnershipAttr)
+
+ void checkRealloc(const CallEvent &Call, CheckerContext &C,
+ bool ShouldFreeOnFail) const;
+
+ using CheckFn = std::function<void(const MallocChecker *,
+ const CallEvent &Call, CheckerContext &C)>;
+
+ const CallDescriptionMap<CheckFn> FreeingMemFnMap{
+ {{"free", 1}, &MallocChecker::checkFree},
+ {{"if_freenameindex", 1}, &MallocChecker::checkIfFreeNameIndex},
+ {{"kfree", 1}, &MallocChecker::checkFree},
+ {{"g_free", 1}, &MallocChecker::checkFree},
+ };
+
+ bool isFreeingCall(const CallEvent &Call) const;
+
+ CallDescriptionMap<CheckFn> AllocatingMemFnMap{
+ {{"alloca", 1}, &MallocChecker::checkAlloca},
+ {{"_alloca", 1}, &MallocChecker::checkAlloca},
+ {{"malloc", 1}, &MallocChecker::checkBasicAlloc},
+ {{"malloc", 3}, &MallocChecker::checkKernelMalloc},
+ {{"calloc", 2}, &MallocChecker::checkCalloc},
+ {{"valloc", 1}, &MallocChecker::checkBasicAlloc},
+ {{CDF_MaybeBuiltin, "strndup", 2}, &MallocChecker::checkStrdup},
+ {{CDF_MaybeBuiltin, "strdup", 1}, &MallocChecker::checkStrdup},
+ {{"_strdup", 1}, &MallocChecker::checkStrdup},
+ {{"kmalloc", 2}, &MallocChecker::checkKernelMalloc},
+ {{"if_nameindex", 1}, &MallocChecker::checkIfNameIndex},
+ {{CDF_MaybeBuiltin, "wcsdup", 1}, &MallocChecker::checkStrdup},
+ {{CDF_MaybeBuiltin, "_wcsdup", 1}, &MallocChecker::checkStrdup},
+ {{"g_malloc", 1}, &MallocChecker::checkBasicAlloc},
+ {{"g_malloc0", 1}, &MallocChecker::checkGMalloc0},
+ {{"g_try_malloc", 1}, &MallocChecker::checkBasicAlloc},
+ {{"g_try_malloc0", 1}, &MallocChecker::checkGMalloc0},
+ {{"g_memdup", 2}, &MallocChecker::checkGMemdup},
+ {{"g_malloc_n", 2}, &MallocChecker::checkGMallocN},
+ {{"g_malloc0_n", 2}, &MallocChecker::checkGMallocN0},
+ {{"g_try_malloc_n", 2}, &MallocChecker::checkGMallocN},
+ {{"g_try_malloc0_n", 2}, &MallocChecker::checkGMallocN0},
+ };
+
+ CallDescriptionMap<CheckFn> ReallocatingMemFnMap{
+ {{"realloc", 2},
+ std::bind(&MallocChecker::checkRealloc, _1, _2, _3, false)},
+ {{"reallocf", 2},
+ std::bind(&MallocChecker::checkRealloc, _1, _2, _3, true)},
+ {{"g_realloc", 2},
+ std::bind(&MallocChecker::checkRealloc, _1, _2, _3, false)},
+ {{"g_try_realloc", 2},
+ std::bind(&MallocChecker::checkRealloc, _1, _2, _3, false)},
+ {{"g_realloc_n", 3}, &MallocChecker::checkReallocN},
+ {{"g_try_realloc_n", 3}, &MallocChecker::checkReallocN},
+ };
+
+ bool isMemCall(const CallEvent &Call) const;
+
// TODO: Remove mutable by moving the initializtaion to the registry function.
mutable Optional<uint64_t> KernelZeroFlagVal;
+ using KernelZeroSizePtrValueTy = Optional<int>;
+ /// Store the value of macro called `ZERO_SIZE_PTR`.
+ /// The value is initialized at first use, before first use the outer
+ /// Optional is empty, afterwards it contains another Optional that indicates
+ /// if the macro value could be determined, and if yes the value itself.
+ mutable Optional<KernelZeroSizePtrValueTy> KernelZeroSizePtrValue;
+
/// Process C++ operator new()'s allocation, which is the part of C++
/// new-expression that goes before the constructor.
- void processNewAllocation(const CXXNewExpr *NE, CheckerContext &C,
- SVal Target) const;
+ LLVM_NODISCARD
+ ProgramStateRef processNewAllocation(const CXXAllocatorCall &Call,
+ CheckerContext &C,
+ AllocationFamily Family) const;
/// Perform a zero-allocation check.
///
- /// \param [in] E The expression that allocates memory.
+ /// \param [in] Call The expression that allocates memory.
/// \param [in] IndexOfSizeArg Index of the argument that specifies the size
/// of the memory that needs to be allocated. E.g. for malloc, this would be
/// 0.
/// \param [in] RetVal Specifies the newly allocated pointer value;
/// if unspecified, the value of expression \p E is used.
- static ProgramStateRef ProcessZeroAllocCheck(CheckerContext &C, const Expr *E,
+ LLVM_NODISCARD
+ static ProgramStateRef ProcessZeroAllocCheck(const CallEvent &Call,
const unsigned IndexOfSizeArg,
ProgramStateRef State,
Optional<SVal> RetVal = None);
@@ -428,50 +470,54 @@ private:
/// - first: name of the resource (e.g. 'malloc')
/// - (OPTIONAL) second: size of the allocated region
///
- /// \param [in] CE The expression that allocates memory.
+ /// \param [in] Call The expression that allocates memory.
/// \param [in] Att The ownership_returns attribute.
/// \param [in] State The \c ProgramState right before allocation.
/// \returns The ProgramState right after allocation.
- ProgramStateRef MallocMemReturnsAttr(CheckerContext &C,
- const CallExpr *CE,
- const OwnershipAttr* Att,
+ LLVM_NODISCARD
+ ProgramStateRef MallocMemReturnsAttr(CheckerContext &C, const CallEvent &Call,
+ const OwnershipAttr *Att,
ProgramStateRef State) const;
/// Models memory allocation.
///
- /// \param [in] CE The expression that allocates memory.
+ /// \param [in] Call The expression that allocates memory.
/// \param [in] SizeEx Size of the memory that needs to be allocated.
/// \param [in] Init The value the allocated memory needs to be initialized.
/// with. For example, \c calloc initializes the allocated memory to 0,
/// malloc leaves it undefined.
/// \param [in] State The \c ProgramState right before allocation.
/// \returns The ProgramState right after allocation.
- static ProgramStateRef MallocMemAux(CheckerContext &C, const CallExpr *CE,
+ LLVM_NODISCARD
+ static ProgramStateRef MallocMemAux(CheckerContext &C, const CallEvent &Call,
const Expr *SizeEx, SVal Init,
ProgramStateRef State,
- AllocationFamily Family = AF_Malloc);
+ AllocationFamily Family);
/// Models memory allocation.
///
- /// \param [in] CE The expression that allocates memory.
+ /// \param [in] Call The expression that allocates memory.
/// \param [in] Size Size of the memory that needs to be allocated.
/// \param [in] Init The value the allocated memory needs to be initialized.
/// with. For example, \c calloc initializes the allocated memory to 0,
/// malloc leaves it undefined.
/// \param [in] State The \c ProgramState right before allocation.
/// \returns The ProgramState right after allocation.
- static ProgramStateRef MallocMemAux(CheckerContext &C, const CallExpr *CE,
+ LLVM_NODISCARD
+ static ProgramStateRef MallocMemAux(CheckerContext &C, const CallEvent &Call,
SVal Size, SVal Init,
ProgramStateRef State,
- AllocationFamily Family = AF_Malloc);
+ AllocationFamily Family);
+ LLVM_NODISCARD
static ProgramStateRef addExtentSize(CheckerContext &C, const CXXNewExpr *NE,
ProgramStateRef State, SVal Target);
// Check if this malloc() for special flags. At present that means M_ZERO or
// __GFP_ZERO (in which case, treat it like calloc).
+ LLVM_NODISCARD
llvm::Optional<ProgramStateRef>
- performKernelMalloc(const CallExpr *CE, CheckerContext &C,
+ performKernelMalloc(const CallEvent &Call, CheckerContext &C,
const ProgramStateRef &State) const;
/// Model functions with the ownership_takes and ownership_holds attributes.
@@ -487,17 +533,18 @@ private:
/// - first: name of the resource (e.g. 'malloc')
/// - second: index of the parameter the attribute applies to
///
- /// \param [in] CE The expression that frees memory.
+ /// \param [in] Call The expression that frees memory.
/// \param [in] Att The ownership_takes or ownership_holds attribute.
/// \param [in] State The \c ProgramState right before allocation.
/// \returns The ProgramState right after deallocation.
- ProgramStateRef FreeMemAttr(CheckerContext &C, const CallExpr *CE,
- const OwnershipAttr* Att,
+ LLVM_NODISCARD
+ ProgramStateRef FreeMemAttr(CheckerContext &C, const CallEvent &Call,
+ const OwnershipAttr *Att,
ProgramStateRef State) const;
/// Models memory deallocation.
///
- /// \param [in] CE The expression that frees memory.
+ /// \param [in] Call The expression that frees memory.
/// \param [in] State The \c ProgramState right before allocation.
/// \param [in] Num Index of the argument that needs to be freed. This is
/// normally 0, but for custom free functions it may be different.
@@ -514,15 +561,17 @@ private:
/// \param [in] ReturnsNullOnFailure Whether the memory deallocation function
/// we're modeling returns with Null on failure.
/// \returns The ProgramState right after deallocation.
- ProgramStateRef FreeMemAux(CheckerContext &C, const CallExpr *CE,
+ LLVM_NODISCARD
+ ProgramStateRef FreeMemAux(CheckerContext &C, const CallEvent &Call,
ProgramStateRef State, unsigned Num, bool Hold,
bool &IsKnownToBeAllocated,
+ AllocationFamily Family,
bool ReturnsNullOnFailure = false) const;
/// Models memory deallocation.
///
/// \param [in] ArgExpr The variable who's pointee needs to be freed.
- /// \param [in] ParentExpr The expression that frees the memory.
+ /// \param [in] Call The expression that frees the memory.
/// \param [in] State The \c ProgramState right before allocation.
/// normally 0, but for custom free functions it may be different.
/// \param [in] Hold Whether the parameter at \p Index has the ownership_holds
@@ -538,9 +587,11 @@ private:
/// \param [in] ReturnsNullOnFailure Whether the memory deallocation function
/// we're modeling returns with Null on failure.
/// \returns The ProgramState right after deallocation.
+ LLVM_NODISCARD
ProgramStateRef FreeMemAux(CheckerContext &C, const Expr *ArgExpr,
- const Expr *ParentExpr, ProgramStateRef State,
+ const CallEvent &Call, ProgramStateRef State,
bool Hold, bool &IsKnownToBeAllocated,
+ AllocationFamily Family,
bool ReturnsNullOnFailure = false) const;
// TODO: Needs some refactoring, as all other deallocation modeling
@@ -549,15 +600,17 @@ private:
//
/// Models memory reallocation.
///
- /// \param [in] CE The expression that reallocated memory
+ /// \param [in] Call The expression that reallocated memory
/// \param [in] ShouldFreeOnFail Whether if reallocation fails, the supplied
/// memory should be freed.
/// \param [in] State The \c ProgramState right before reallocation.
/// \param [in] SuffixWithN Whether the reallocation function we're modeling
/// has an '_n' suffix, such as g_realloc_n.
/// \returns The ProgramState right after reallocation.
- ProgramStateRef ReallocMemAux(CheckerContext &C, const CallExpr *CE,
+ LLVM_NODISCARD
+ ProgramStateRef ReallocMemAux(CheckerContext &C, const CallEvent &Call,
bool ShouldFreeOnFail, ProgramStateRef State,
+ AllocationFamily Family,
bool SuffixWithN = false) const;
/// Evaluates the buffer size that needs to be allocated.
@@ -565,20 +618,22 @@ private:
/// \param [in] Blocks The amount of blocks that needs to be allocated.
/// \param [in] BlockBytes The size of a block.
/// \returns The symbolic value of \p Blocks * \p BlockBytes.
+ LLVM_NODISCARD
static SVal evalMulForBufferSize(CheckerContext &C, const Expr *Blocks,
const Expr *BlockBytes);
/// Models zero initialized array allocation.
///
- /// \param [in] CE The expression that reallocated memory
+ /// \param [in] Call The expression that reallocated memory
/// \param [in] State The \c ProgramState right before reallocation.
/// \returns The ProgramState right after allocation.
- static ProgramStateRef CallocMem(CheckerContext &C, const CallExpr *CE,
+ LLVM_NODISCARD
+ static ProgramStateRef CallocMem(CheckerContext &C, const CallEvent &Call,
ProgramStateRef State);
/// See if deallocation happens in a suspicious context. If so, escape the
/// pointers that otherwise would have been deallocated and return true.
- bool suppressDeallocationsInSuspiciousContexts(const CallExpr *CE,
+ bool suppressDeallocationsInSuspiciousContexts(const CallEvent &Call,
CheckerContext &C) const;
/// If in \p S \p Sym is used, check whether \p Sym was already freed.
@@ -607,6 +662,7 @@ private:
SymbolRef &EscapingSymbol) const;
/// Implementation of the checkPointerEscape callbacks.
+ LLVM_NODISCARD
ProgramStateRef checkPointerEscapeAux(ProgramStateRef State,
const InvalidatedSymbols &Escaped,
const CallEvent *Call,
@@ -622,44 +678,53 @@ private:
/// family/call/symbol.
Optional<CheckKind> getCheckIfTracked(AllocationFamily Family,
bool IsALeakCheck = false) const;
- Optional<CheckKind> getCheckIfTracked(CheckerContext &C,
- const Stmt *AllocDeallocStmt,
- bool IsALeakCheck = false) const;
+
Optional<CheckKind> getCheckIfTracked(CheckerContext &C, SymbolRef Sym,
bool IsALeakCheck = false) const;
///@}
static bool SummarizeValue(raw_ostream &os, SVal V);
static bool SummarizeRegion(raw_ostream &os, const MemRegion *MR);
- void ReportBadFree(CheckerContext &C, SVal ArgVal, SourceRange Range,
- const Expr *DeallocExpr) const;
- void ReportFreeAlloca(CheckerContext &C, SVal ArgVal,
+ void HandleNonHeapDealloc(CheckerContext &C, SVal ArgVal, SourceRange Range,
+ const Expr *DeallocExpr,
+ AllocationFamily Family) const;
+
+ void HandleFreeAlloca(CheckerContext &C, SVal ArgVal,
SourceRange Range) const;
- void ReportMismatchedDealloc(CheckerContext &C, SourceRange Range,
+
+ void HandleMismatchedDealloc(CheckerContext &C, SourceRange Range,
const Expr *DeallocExpr, const RefState *RS,
SymbolRef Sym, bool OwnershipTransferred) const;
- void ReportOffsetFree(CheckerContext &C, SVal ArgVal, SourceRange Range,
- const Expr *DeallocExpr,
+
+ void HandleOffsetFree(CheckerContext &C, SVal ArgVal, SourceRange Range,
+ const Expr *DeallocExpr, AllocationFamily Family,
const Expr *AllocExpr = nullptr) const;
- void ReportUseAfterFree(CheckerContext &C, SourceRange Range,
+
+ void HandleUseAfterFree(CheckerContext &C, SourceRange Range,
SymbolRef Sym) const;
- void ReportDoubleFree(CheckerContext &C, SourceRange Range, bool Released,
+
+ void HandleDoubleFree(CheckerContext &C, SourceRange Range, bool Released,
SymbolRef Sym, SymbolRef PrevSym) const;
- void ReportDoubleDelete(CheckerContext &C, SymbolRef Sym) const;
+ void HandleDoubleDelete(CheckerContext &C, SymbolRef Sym) const;
- void ReportUseZeroAllocated(CheckerContext &C, SourceRange Range,
- SymbolRef Sym) const;
+ void HandleUseZeroAlloc(CheckerContext &C, SourceRange Range,
+ SymbolRef Sym) const;
- void ReportFunctionPointerFree(CheckerContext &C, SVal ArgVal,
- SourceRange Range, const Expr *FreeExpr) const;
+ void HandleFunctionPtrFree(CheckerContext &C, SVal ArgVal, SourceRange Range,
+ const Expr *FreeExpr,
+ AllocationFamily Family) const;
/// Find the location of the allocation for Sym on the path leading to the
/// exploded node N.
static LeakInfo getAllocationSite(const ExplodedNode *N, SymbolRef Sym,
CheckerContext &C);
- void reportLeak(SymbolRef Sym, ExplodedNode *N, CheckerContext &C) const;
+ void HandleLeak(SymbolRef Sym, ExplodedNode *N, CheckerContext &C) const;
+
+ /// Test if value in ArgVal equals to value in macro `ZERO_SIZE_PTR`.
+ bool isArgZERO_SIZE_PTR(ProgramStateRef State, CheckerContext &C,
+ SVal ArgVal) const;
};
//===----------------------------------------------------------------------===//
@@ -782,7 +847,7 @@ private:
os << "Reallocation of " << ArgIndex << llvm::getOrdinalSuffix(ArgIndex)
<< " parameter failed";
- return os.str();
+ return std::string(os.str());
}
std::string getMessageForReturn(const CallExpr *CallExpr) override {
@@ -800,6 +865,7 @@ REGISTER_MAP_WITH_PROGRAMSTATE(FreeReturnValue, SymbolRef, SymbolRef)
namespace {
class StopTrackingCallback final : public SymbolVisitor {
ProgramStateRef state;
+
public:
StopTrackingCallback(ProgramStateRef st) : state(std::move(st)) {}
ProgramStateRef getState() const { return state; }
@@ -811,160 +877,57 @@ public:
};
} // end anonymous namespace
-//===----------------------------------------------------------------------===//
-// Methods of MemFunctionInfoTy.
-//===----------------------------------------------------------------------===//
-
-void MemFunctionInfoTy::initIdentifierInfo(ASTContext &Ctx) const {
- if (II_malloc)
- return;
- II_alloca = &Ctx.Idents.get("alloca");
- II_malloc = &Ctx.Idents.get("malloc");
- II_free = &Ctx.Idents.get("free");
- II_realloc = &Ctx.Idents.get("realloc");
- II_reallocf = &Ctx.Idents.get("reallocf");
- II_calloc = &Ctx.Idents.get("calloc");
- II_valloc = &Ctx.Idents.get("valloc");
- II_strdup = &Ctx.Idents.get("strdup");
- II_strndup = &Ctx.Idents.get("strndup");
- II_wcsdup = &Ctx.Idents.get("wcsdup");
- II_kmalloc = &Ctx.Idents.get("kmalloc");
- II_kfree = &Ctx.Idents.get("kfree");
- II_if_nameindex = &Ctx.Idents.get("if_nameindex");
- II_if_freenameindex = &Ctx.Idents.get("if_freenameindex");
-
- //MSVC uses `_`-prefixed instead, so we check for them too.
- II_win_strdup = &Ctx.Idents.get("_strdup");
- II_win_wcsdup = &Ctx.Idents.get("_wcsdup");
- II_win_alloca = &Ctx.Idents.get("_alloca");
-
- // Glib
- II_g_malloc = &Ctx.Idents.get("g_malloc");
- II_g_malloc0 = &Ctx.Idents.get("g_malloc0");
- II_g_realloc = &Ctx.Idents.get("g_realloc");
- II_g_try_malloc = &Ctx.Idents.get("g_try_malloc");
- II_g_try_malloc0 = &Ctx.Idents.get("g_try_malloc0");
- II_g_try_realloc = &Ctx.Idents.get("g_try_realloc");
- II_g_free = &Ctx.Idents.get("g_free");
- II_g_memdup = &Ctx.Idents.get("g_memdup");
- II_g_malloc_n = &Ctx.Idents.get("g_malloc_n");
- II_g_malloc0_n = &Ctx.Idents.get("g_malloc0_n");
- II_g_realloc_n = &Ctx.Idents.get("g_realloc_n");
- II_g_try_malloc_n = &Ctx.Idents.get("g_try_malloc_n");
- II_g_try_malloc0_n = &Ctx.Idents.get("g_try_malloc0_n");
- II_g_try_realloc_n = &Ctx.Idents.get("g_try_realloc_n");
-}
-
-bool MemFunctionInfoTy::isMemFunction(const FunctionDecl *FD,
- ASTContext &C) const {
- if (isCMemFunction(FD, C, AF_Malloc, MemoryOperationKind::MOK_Any))
- return true;
-
- if (isCMemFunction(FD, C, AF_IfNameIndex, MemoryOperationKind::MOK_Any))
- return true;
-
- if (isCMemFunction(FD, C, AF_Alloca, MemoryOperationKind::MOK_Any))
- return true;
-
- if (isStandardNewDelete(FD, C))
- return true;
-
- return false;
-}
-
-bool MemFunctionInfoTy::isCMemFunction(const FunctionDecl *FD, ASTContext &C,
- AllocationFamily Family,
- MemoryOperationKind MemKind) const {
+static bool isStandardNewDelete(const FunctionDecl *FD) {
if (!FD)
return false;
- bool CheckFree = (MemKind == MemoryOperationKind::MOK_Any ||
- MemKind == MemoryOperationKind::MOK_Free);
- bool CheckAlloc = (MemKind == MemoryOperationKind::MOK_Any ||
- MemKind == MemoryOperationKind::MOK_Allocate);
-
- if (FD->getKind() == Decl::Function) {
- const IdentifierInfo *FunI = FD->getIdentifier();
- initIdentifierInfo(C);
-
- if (Family == AF_Malloc && CheckFree) {
- if (FunI == II_free || FunI == II_realloc || FunI == II_reallocf ||
- FunI == II_g_free || FunI == II_kfree)
- return true;
- }
-
- if (Family == AF_Malloc && CheckAlloc) {
- if (FunI == II_malloc || FunI == II_realloc || FunI == II_reallocf ||
- FunI == II_calloc || FunI == II_valloc || FunI == II_strdup ||
- FunI == II_win_strdup || FunI == II_strndup || FunI == II_wcsdup ||
- FunI == II_win_wcsdup || FunI == II_kmalloc ||
- FunI == II_g_malloc || FunI == II_g_malloc0 ||
- FunI == II_g_realloc || FunI == II_g_try_malloc ||
- FunI == II_g_try_malloc0 || FunI == II_g_try_realloc ||
- FunI == II_g_memdup || FunI == II_g_malloc_n ||
- FunI == II_g_malloc0_n || FunI == II_g_realloc_n ||
- FunI == II_g_try_malloc_n || FunI == II_g_try_malloc0_n ||
- FunI == II_g_try_realloc_n)
- return true;
- }
-
- if (Family == AF_IfNameIndex && CheckFree) {
- if (FunI == II_if_freenameindex)
- return true;
- }
+ OverloadedOperatorKind Kind = FD->getOverloadedOperator();
+ if (Kind != OO_New && Kind != OO_Array_New && Kind != OO_Delete &&
+ Kind != OO_Array_Delete)
+ return false;
- if (Family == AF_IfNameIndex && CheckAlloc) {
- if (FunI == II_if_nameindex)
- return true;
- }
+ // This is standard if and only if it's not defined in a user file.
+ SourceLocation L = FD->getLocation();
+ // If the header for operator delete is not included, it's still defined
+ // in an invalid source location. Check to make sure we don't crash.
+ return !L.isValid() ||
+ FD->getASTContext().getSourceManager().isInSystemHeader(L);
+}
- if (Family == AF_Alloca && CheckAlloc) {
- if (FunI == II_alloca || FunI == II_win_alloca)
- return true;
- }
- }
+//===----------------------------------------------------------------------===//
+// Methods of MallocChecker and MallocBugVisitor.
+//===----------------------------------------------------------------------===//
- if (Family != AF_Malloc)
- return false;
+bool MallocChecker::isFreeingCall(const CallEvent &Call) const {
+ if (FreeingMemFnMap.lookup(Call) || ReallocatingMemFnMap.lookup(Call))
+ return true;
- if (ShouldIncludeOwnershipAnnotatedFunctions && FD->hasAttrs()) {
- for (const auto *I : FD->specific_attrs<OwnershipAttr>()) {
+ const auto *Func = dyn_cast<FunctionDecl>(Call.getDecl());
+ if (Func && Func->hasAttrs()) {
+ for (const auto *I : Func->specific_attrs<OwnershipAttr>()) {
OwnershipAttr::OwnershipKind OwnKind = I->getOwnKind();
- if(OwnKind == OwnershipAttr::Takes || OwnKind == OwnershipAttr::Holds) {
- if (CheckFree)
- return true;
- } else if (OwnKind == OwnershipAttr::Returns) {
- if (CheckAlloc)
- return true;
- }
+ if (OwnKind == OwnershipAttr::Takes || OwnKind == OwnershipAttr::Holds)
+ return true;
}
}
-
return false;
}
-bool MemFunctionInfoTy::isStandardNewDelete(const FunctionDecl *FD,
- ASTContext &C) const {
- if (!FD)
- return false;
- OverloadedOperatorKind Kind = FD->getOverloadedOperator();
- if (Kind != OO_New && Kind != OO_Array_New &&
- Kind != OO_Delete && Kind != OO_Array_Delete)
+bool MallocChecker::isMemCall(const CallEvent &Call) const {
+ if (FreeingMemFnMap.lookup(Call) || AllocatingMemFnMap.lookup(Call) ||
+ ReallocatingMemFnMap.lookup(Call))
+ return true;
+
+ if (!ShouldIncludeOwnershipAnnotatedFunctions)
return false;
- // This is standard if and only if it's not defined in a user file.
- SourceLocation L = FD->getLocation();
- // If the header for operator delete is not included, it's still defined
- // in an invalid source location. Check to make sure we don't crash.
- return !L.isValid() || C.getSourceManager().isInSystemHeader(L);
+ const auto *Func = dyn_cast<FunctionDecl>(Call.getDecl());
+ return Func && Func->hasAttr<OwnershipAttr>();
}
-//===----------------------------------------------------------------------===//
-// Methods of MallocChecker and MallocBugVisitor.
-//===----------------------------------------------------------------------===//
-
-llvm::Optional<ProgramStateRef> MallocChecker::performKernelMalloc(
- const CallExpr *CE, CheckerContext &C, const ProgramStateRef &State) const {
+llvm::Optional<ProgramStateRef>
+MallocChecker::performKernelMalloc(const CallEvent &Call, CheckerContext &C,
+ const ProgramStateRef &State) const {
// 3-argument malloc(), as commonly used in {Free,Net,Open}BSD Kernels:
//
// void *malloc(unsigned long size, struct malloc_type *mtp, int flags);
@@ -1006,10 +969,10 @@ llvm::Optional<ProgramStateRef> MallocChecker::performKernelMalloc(
// We treat the last argument as the flags argument, and callers fall-back to
// normal malloc on a None return. This works for the FreeBSD kernel malloc
// as well as Linux kmalloc.
- if (CE->getNumArgs() < 2)
+ if (Call.getNumArgs() < 2)
return None;
- const Expr *FlagsEx = CE->getArg(CE->getNumArgs() - 1);
+ const Expr *FlagsEx = Call.getArgExpr(Call.getNumArgs() - 1);
const SVal V = C.getSVal(FlagsEx);
if (!V.getAs<NonLoc>()) {
// The case where 'V' can be a location can only be due to a bad header,
@@ -1035,7 +998,8 @@ llvm::Optional<ProgramStateRef> MallocChecker::performKernelMalloc(
// If M_ZERO is set, treat this like calloc (initialized).
if (TrueState && !FalseState) {
SVal ZeroVal = C.getSValBuilder().makeZeroVal(Ctx.CharTy);
- return MallocMemAux(C, CE, CE->getArg(0), ZeroVal, TrueState);
+ return MallocMemAux(C, Call, Call.getArgExpr(0), ZeroVal, TrueState,
+ AF_Malloc);
}
return None;
@@ -1052,161 +1016,234 @@ SVal MallocChecker::evalMulForBufferSize(CheckerContext &C, const Expr *Blocks,
return TotalSize;
}
-void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
- if (C.wasInlined)
+void MallocChecker::checkBasicAlloc(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ State = MallocMemAux(C, Call, Call.getArgExpr(0), UndefinedVal(), State,
+ AF_Malloc);
+ State = ProcessZeroAllocCheck(Call, 0, State);
+ C.addTransition(State);
+}
+
+void MallocChecker::checkKernelMalloc(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ llvm::Optional<ProgramStateRef> MaybeState =
+ performKernelMalloc(Call, C, State);
+ if (MaybeState.hasValue())
+ State = MaybeState.getValue();
+ else
+ State = MallocMemAux(C, Call, Call.getArgExpr(0), UndefinedVal(), State,
+ AF_Malloc);
+ C.addTransition(State);
+}
+
+static bool isStandardRealloc(const CallEvent &Call) {
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(Call.getDecl());
+ assert(FD);
+ ASTContext &AC = FD->getASTContext();
+
+ if (isa<CXXMethodDecl>(FD))
+ return false;
+
+ return FD->getDeclaredReturnType().getDesugaredType(AC) == AC.VoidPtrTy &&
+ FD->getParamDecl(0)->getType().getDesugaredType(AC) == AC.VoidPtrTy &&
+ FD->getParamDecl(1)->getType().getDesugaredType(AC) ==
+ AC.getSizeType();
+}
+
+static bool isGRealloc(const CallEvent &Call) {
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(Call.getDecl());
+ assert(FD);
+ ASTContext &AC = FD->getASTContext();
+
+ if (isa<CXXMethodDecl>(FD))
+ return false;
+
+ return FD->getDeclaredReturnType().getDesugaredType(AC) == AC.VoidPtrTy &&
+ FD->getParamDecl(0)->getType().getDesugaredType(AC) == AC.VoidPtrTy &&
+ FD->getParamDecl(1)->getType().getDesugaredType(AC) ==
+ AC.UnsignedLongTy;
+}
+
+void MallocChecker::checkRealloc(const CallEvent &Call, CheckerContext &C,
+ bool ShouldFreeOnFail) const {
+ // HACK: CallDescription currently recognizes non-standard realloc functions
+ // as standard because it doesn't check the type, or wether its a non-method
+ // function. This should be solved by making CallDescription smarter.
+ // Mind that this came from a bug report, and all other functions suffer from
+ // this.
+ // https://bugs.llvm.org/show_bug.cgi?id=46253
+ if (!isStandardRealloc(Call) && !isGRealloc(Call))
return;
+ ProgramStateRef State = C.getState();
+ State = ReallocMemAux(C, Call, ShouldFreeOnFail, State, AF_Malloc);
+ State = ProcessZeroAllocCheck(Call, 1, State);
+ C.addTransition(State);
+}
- const FunctionDecl *FD = C.getCalleeDecl(CE);
- if (!FD)
+void MallocChecker::checkCalloc(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ State = CallocMem(C, Call, State);
+ State = ProcessZeroAllocCheck(Call, 0, State);
+ State = ProcessZeroAllocCheck(Call, 1, State);
+ C.addTransition(State);
+}
+
+void MallocChecker::checkFree(const CallEvent &Call, CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ bool IsKnownToBeAllocatedMemory = false;
+ if (suppressDeallocationsInSuspiciousContexts(Call, C))
return;
+ State = FreeMemAux(C, Call, State, 0, false, IsKnownToBeAllocatedMemory,
+ AF_Malloc);
+ C.addTransition(State);
+}
+void MallocChecker::checkAlloca(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ State = MallocMemAux(C, Call, Call.getArgExpr(0), UndefinedVal(), State,
+ AF_Alloca);
+ State = ProcessZeroAllocCheck(Call, 0, State);
+ C.addTransition(State);
+}
+
+void MallocChecker::checkStrdup(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ const auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
+ State = MallocUpdateRefState(C, CE, State, AF_Malloc);
+
+ C.addTransition(State);
+}
+
+void MallocChecker::checkIfNameIndex(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ // Should we model this differently? We can allocate a fixed number of
+ // elements with zeros in the last one.
+ State =
+ MallocMemAux(C, Call, UnknownVal(), UnknownVal(), State, AF_IfNameIndex);
+
+ C.addTransition(State);
+}
+
+void MallocChecker::checkIfFreeNameIndex(const CallEvent &Call,
+ CheckerContext &C) const {
ProgramStateRef State = C.getState();
bool IsKnownToBeAllocatedMemory = false;
+ State = FreeMemAux(C, Call, State, 0, false, IsKnownToBeAllocatedMemory,
+ AF_IfNameIndex);
+ C.addTransition(State);
+}
- if (FD->getKind() == Decl::Function) {
- MemFunctionInfo.initIdentifierInfo(C.getASTContext());
- IdentifierInfo *FunI = FD->getIdentifier();
+void MallocChecker::checkCXXNewOrCXXDelete(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ bool IsKnownToBeAllocatedMemory = false;
+ const auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
- if (FunI == MemFunctionInfo.II_malloc ||
- FunI == MemFunctionInfo.II_g_malloc ||
- FunI == MemFunctionInfo.II_g_try_malloc) {
- switch (CE->getNumArgs()) {
- default:
- return;
- case 1:
- State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
- State = ProcessZeroAllocCheck(C, CE, 0, State);
- break;
- case 2:
- State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
- break;
- case 3:
- llvm::Optional<ProgramStateRef> MaybeState =
- performKernelMalloc(CE, C, State);
- if (MaybeState.hasValue())
- State = MaybeState.getValue();
- else
- State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
- break;
- }
- } else if (FunI == MemFunctionInfo.II_kmalloc) {
- if (CE->getNumArgs() < 1)
- return;
- llvm::Optional<ProgramStateRef> MaybeState =
- performKernelMalloc(CE, C, State);
- if (MaybeState.hasValue())
- State = MaybeState.getValue();
- else
- State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
- } else if (FunI == MemFunctionInfo.II_valloc) {
- if (CE->getNumArgs() < 1)
- return;
- State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
- State = ProcessZeroAllocCheck(C, CE, 0, State);
- } else if (FunI == MemFunctionInfo.II_realloc ||
- FunI == MemFunctionInfo.II_g_realloc ||
- FunI == MemFunctionInfo.II_g_try_realloc) {
- State = ReallocMemAux(C, CE, /*ShouldFreeOnFail*/ false, State);
- State = ProcessZeroAllocCheck(C, CE, 1, State);
- } else if (FunI == MemFunctionInfo.II_reallocf) {
- State = ReallocMemAux(C, CE, /*ShouldFreeOnFail*/ true, State);
- State = ProcessZeroAllocCheck(C, CE, 1, State);
- } else if (FunI == MemFunctionInfo.II_calloc) {
- State = CallocMem(C, CE, State);
- State = ProcessZeroAllocCheck(C, CE, 0, State);
- State = ProcessZeroAllocCheck(C, CE, 1, State);
- } else if (FunI == MemFunctionInfo.II_free ||
- FunI == MemFunctionInfo.II_g_free ||
- FunI == MemFunctionInfo.II_kfree) {
- if (suppressDeallocationsInSuspiciousContexts(CE, C))
- return;
+ assert(isStandardNewDelete(Call));
- State = FreeMemAux(C, CE, State, 0, false, IsKnownToBeAllocatedMemory);
- } else if (FunI == MemFunctionInfo.II_strdup ||
- FunI == MemFunctionInfo.II_win_strdup ||
- FunI == MemFunctionInfo.II_wcsdup ||
- FunI == MemFunctionInfo.II_win_wcsdup) {
- State = MallocUpdateRefState(C, CE, State);
- } else if (FunI == MemFunctionInfo.II_strndup) {
- State = MallocUpdateRefState(C, CE, State);
- } else if (FunI == MemFunctionInfo.II_alloca ||
- FunI == MemFunctionInfo.II_win_alloca) {
- if (CE->getNumArgs() < 1)
- return;
- State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State,
- AF_Alloca);
- State = ProcessZeroAllocCheck(C, CE, 0, State);
- } else if (MemFunctionInfo.isStandardNewDelete(FD, C.getASTContext())) {
- // Process direct calls to operator new/new[]/delete/delete[] functions
- // as distinct from new/new[]/delete/delete[] expressions that are
- // processed by the checkPostStmt callbacks for CXXNewExpr and
- // CXXDeleteExpr.
- switch (FD->getOverloadedOperator()) {
- case OO_New:
- State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State,
- AF_CXXNew);
- State = ProcessZeroAllocCheck(C, CE, 0, State);
- break;
- case OO_Array_New:
- State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State,
- AF_CXXNewArray);
- State = ProcessZeroAllocCheck(C, CE, 0, State);
- break;
- case OO_Delete:
- case OO_Array_Delete:
- State = FreeMemAux(C, CE, State, 0, false, IsKnownToBeAllocatedMemory);
- break;
- default:
- llvm_unreachable("not a new/delete operator");
- }
- } else if (FunI == MemFunctionInfo.II_if_nameindex) {
- // Should we model this differently? We can allocate a fixed number of
- // elements with zeros in the last one.
- State = MallocMemAux(C, CE, UnknownVal(), UnknownVal(), State,
- AF_IfNameIndex);
- } else if (FunI == MemFunctionInfo.II_if_freenameindex) {
- State = FreeMemAux(C, CE, State, 0, false, IsKnownToBeAllocatedMemory);
- } else if (FunI == MemFunctionInfo.II_g_malloc0 ||
- FunI == MemFunctionInfo.II_g_try_malloc0) {
- if (CE->getNumArgs() < 1)
- return;
- SValBuilder &svalBuilder = C.getSValBuilder();
- SVal zeroVal = svalBuilder.makeZeroVal(svalBuilder.getContext().CharTy);
- State = MallocMemAux(C, CE, CE->getArg(0), zeroVal, State);
- State = ProcessZeroAllocCheck(C, CE, 0, State);
- } else if (FunI == MemFunctionInfo.II_g_memdup) {
- if (CE->getNumArgs() < 2)
- return;
- State = MallocMemAux(C, CE, CE->getArg(1), UndefinedVal(), State);
- State = ProcessZeroAllocCheck(C, CE, 1, State);
- } else if (FunI == MemFunctionInfo.II_g_malloc_n ||
- FunI == MemFunctionInfo.II_g_try_malloc_n ||
- FunI == MemFunctionInfo.II_g_malloc0_n ||
- FunI == MemFunctionInfo.II_g_try_malloc0_n) {
- if (CE->getNumArgs() < 2)
- return;
- SVal Init = UndefinedVal();
- if (FunI == MemFunctionInfo.II_g_malloc0_n ||
- FunI == MemFunctionInfo.II_g_try_malloc0_n) {
- SValBuilder &SB = C.getSValBuilder();
- Init = SB.makeZeroVal(SB.getContext().CharTy);
- }
- SVal TotalSize = evalMulForBufferSize(C, CE->getArg(0), CE->getArg(1));
- State = MallocMemAux(C, CE, TotalSize, Init, State);
- State = ProcessZeroAllocCheck(C, CE, 0, State);
- State = ProcessZeroAllocCheck(C, CE, 1, State);
- } else if (FunI == MemFunctionInfo.II_g_realloc_n ||
- FunI == MemFunctionInfo.II_g_try_realloc_n) {
- if (CE->getNumArgs() < 3)
- return;
- State = ReallocMemAux(C, CE, /*ShouldFreeOnFail*/ false, State,
- /*SuffixWithN*/ true);
- State = ProcessZeroAllocCheck(C, CE, 1, State);
- State = ProcessZeroAllocCheck(C, CE, 2, State);
- }
+ // Process direct calls to operator new/new[]/delete/delete[] functions
+ // as distinct from new/new[]/delete/delete[] expressions that are
+ // processed by the checkPostStmt callbacks for CXXNewExpr and
+ // CXXDeleteExpr.
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ switch (FD->getOverloadedOperator()) {
+ case OO_New:
+ State =
+ MallocMemAux(C, Call, CE->getArg(0), UndefinedVal(), State, AF_CXXNew);
+ State = ProcessZeroAllocCheck(Call, 0, State);
+ break;
+ case OO_Array_New:
+ State = MallocMemAux(C, Call, CE->getArg(0), UndefinedVal(), State,
+ AF_CXXNewArray);
+ State = ProcessZeroAllocCheck(Call, 0, State);
+ break;
+ case OO_Delete:
+ State = FreeMemAux(C, Call, State, 0, false, IsKnownToBeAllocatedMemory,
+ AF_CXXNew);
+ break;
+ case OO_Array_Delete:
+ State = FreeMemAux(C, Call, State, 0, false, IsKnownToBeAllocatedMemory,
+ AF_CXXNewArray);
+ break;
+ default:
+ llvm_unreachable("not a new/delete operator");
}
- if (MemFunctionInfo.ShouldIncludeOwnershipAnnotatedFunctions ||
+ C.addTransition(State);
+}
+
+void MallocChecker::checkGMalloc0(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ SVal zeroVal = svalBuilder.makeZeroVal(svalBuilder.getContext().CharTy);
+ State = MallocMemAux(C, Call, Call.getArgExpr(0), zeroVal, State, AF_Malloc);
+ State = ProcessZeroAllocCheck(Call, 0, State);
+ C.addTransition(State);
+}
+
+void MallocChecker::checkGMemdup(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ State = MallocMemAux(C, Call, Call.getArgExpr(1), UndefinedVal(), State,
+ AF_Malloc);
+ State = ProcessZeroAllocCheck(Call, 1, State);
+ C.addTransition(State);
+}
+
+void MallocChecker::checkGMallocN(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SVal Init = UndefinedVal();
+ SVal TotalSize = evalMulForBufferSize(C, Call.getArgExpr(0), Call.getArgExpr(1));
+ State = MallocMemAux(C, Call, TotalSize, Init, State, AF_Malloc);
+ State = ProcessZeroAllocCheck(Call, 0, State);
+ State = ProcessZeroAllocCheck(Call, 1, State);
+ C.addTransition(State);
+}
+
+void MallocChecker::checkGMallocN0(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SValBuilder &SB = C.getSValBuilder();
+ SVal Init = SB.makeZeroVal(SB.getContext().CharTy);
+ SVal TotalSize = evalMulForBufferSize(C, Call.getArgExpr(0), Call.getArgExpr(1));
+ State = MallocMemAux(C, Call, TotalSize, Init, State, AF_Malloc);
+ State = ProcessZeroAllocCheck(Call, 0, State);
+ State = ProcessZeroAllocCheck(Call, 1, State);
+ C.addTransition(State);
+}
+
+void MallocChecker::checkReallocN(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ State = ReallocMemAux(C, Call, /*ShouldFreeOnFail=*/false, State, AF_Malloc,
+ /*SuffixWithN=*/true);
+ State = ProcessZeroAllocCheck(Call, 1, State);
+ State = ProcessZeroAllocCheck(Call, 2, State);
+ C.addTransition(State);
+}
+
+void MallocChecker::checkOwnershipAttr(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ const auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD)
+ return;
+ if (ShouldIncludeOwnershipAnnotatedFunctions ||
ChecksEnabled[CK_MismatchedDeallocatorChecker]) {
// Check all the attributes, if there are any.
// There can be multiple of these attributes.
@@ -1214,11 +1251,11 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
for (const auto *I : FD->specific_attrs<OwnershipAttr>()) {
switch (I->getOwnKind()) {
case OwnershipAttr::Returns:
- State = MallocMemReturnsAttr(C, CE, I, State);
+ State = MallocMemReturnsAttr(C, Call, I, State);
break;
case OwnershipAttr::Takes:
case OwnershipAttr::Holds:
- State = FreeMemAttr(C, CE, I, State);
+ State = FreeMemAttr(C, Call, I, State);
break;
}
}
@@ -1226,40 +1263,73 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
C.addTransition(State);
}
+void MallocChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ if (C.wasInlined)
+ return;
+ if (!Call.getOriginExpr())
+ return;
+
+ ProgramStateRef State = C.getState();
+
+ if (const CheckFn *Callback = FreeingMemFnMap.lookup(Call)) {
+ (*Callback)(this, Call, C);
+ return;
+ }
+
+ if (const CheckFn *Callback = AllocatingMemFnMap.lookup(Call)) {
+ (*Callback)(this, Call, C);
+ return;
+ }
+
+ if (const CheckFn *Callback = ReallocatingMemFnMap.lookup(Call)) {
+ (*Callback)(this, Call, C);
+ return;
+ }
+
+ if (isStandardNewDelete(Call)) {
+ checkCXXNewOrCXXDelete(Call, C);
+ return;
+ }
+
+ checkOwnershipAttr(Call, C);
+}
+
// Performs a 0-sized allocations check.
ProgramStateRef MallocChecker::ProcessZeroAllocCheck(
- CheckerContext &C, const Expr *E, const unsigned IndexOfSizeArg,
- ProgramStateRef State, Optional<SVal> RetVal) {
+ const CallEvent &Call, const unsigned IndexOfSizeArg, ProgramStateRef State,
+ Optional<SVal> RetVal) {
if (!State)
return nullptr;
if (!RetVal)
- RetVal = C.getSVal(E);
+ RetVal = Call.getReturnValue();
const Expr *Arg = nullptr;
- if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
+ if (const CallExpr *CE = dyn_cast<CallExpr>(Call.getOriginExpr())) {
Arg = CE->getArg(IndexOfSizeArg);
- }
- else if (const CXXNewExpr *NE = dyn_cast<CXXNewExpr>(E)) {
- if (NE->isArray())
+ } else if (const CXXNewExpr *NE =
+ dyn_cast<CXXNewExpr>(Call.getOriginExpr())) {
+ if (NE->isArray()) {
Arg = *NE->getArraySize();
- else
+ } else {
return State;
- }
- else
+ }
+ } else
llvm_unreachable("not a CallExpr or CXXNewExpr");
assert(Arg);
- Optional<DefinedSVal> DefArgVal = C.getSVal(Arg).getAs<DefinedSVal>();
+ auto DefArgVal =
+ State->getSVal(Arg, Call.getLocationContext()).getAs<DefinedSVal>();
if (!DefArgVal)
return State;
// Check if the allocation size is 0.
ProgramStateRef TrueState, FalseState;
- SValBuilder &SvalBuilder = C.getSValBuilder();
+ SValBuilder &SvalBuilder = State->getStateManager().getSValBuilder();
DefinedSVal Zero =
SvalBuilder.makeZeroVal(Arg->getType()).castAs<DefinedSVal>();
@@ -1330,44 +1400,43 @@ static bool hasNonTrivialConstructorCall(const CXXNewExpr *NE) {
return false;
}
-void MallocChecker::processNewAllocation(const CXXNewExpr *NE,
- CheckerContext &C,
- SVal Target) const {
- if (!MemFunctionInfo.isStandardNewDelete(NE->getOperatorNew(),
- C.getASTContext()))
- return;
+ProgramStateRef
+MallocChecker::processNewAllocation(const CXXAllocatorCall &Call,
+ CheckerContext &C,
+ AllocationFamily Family) const {
+ if (!isStandardNewDelete(Call))
+ return nullptr;
+ const CXXNewExpr *NE = Call.getOriginExpr();
const ParentMap &PM = C.getLocationContext()->getParentMap();
+ ProgramStateRef State = C.getState();
// Non-trivial constructors have a chance to escape 'this', but marking all
// invocations of trivial constructors as escaped would cause too great of
// reduction of true positives, so let's just do that for constructors that
// have an argument of a pointer-to-record type.
if (!PM.isConsumedExpr(NE) && hasNonTrivialConstructorCall(NE))
- return;
+ return State;
- ProgramStateRef State = C.getState();
// The return value from operator new is bound to a specified initialization
  // value (if any) and we don't want to lose this value. So we call
// MallocUpdateRefState() instead of MallocMemAux() which breaks the
// existing binding.
- State = MallocUpdateRefState(C, NE, State, NE->isArray() ? AF_CXXNewArray
- : AF_CXXNew, Target);
+ SVal Target = Call.getObjectUnderConstruction();
+ State = MallocUpdateRefState(C, NE, State, Family, Target);
State = addExtentSize(C, NE, State, Target);
- State = ProcessZeroAllocCheck(C, NE, 0, State, Target);
- C.addTransition(State);
-}
-
-void MallocChecker::checkPostStmt(const CXXNewExpr *NE,
- CheckerContext &C) const {
- if (!C.getAnalysisManager().getAnalyzerOptions().MayInlineCXXAllocator)
- processNewAllocation(NE, C, C.getSVal(NE));
+ State = ProcessZeroAllocCheck(Call, 0, State, Target);
+ return State;
}
-void MallocChecker::checkNewAllocator(const CXXNewExpr *NE, SVal Target,
+void MallocChecker::checkNewAllocator(const CXXAllocatorCall &Call,
CheckerContext &C) const {
- if (!C.wasInlined)
- processNewAllocation(NE, C, Target);
+ if (!C.wasInlined) {
+ ProgramStateRef State = processNewAllocation(
+ Call, C,
+ (Call.getOriginExpr()->isArray() ? AF_CXXNewArray : AF_CXXNew));
+ C.addTransition(State);
+ }
}
// Sets the extent value of the MemRegion allocated by
@@ -1402,38 +1471,20 @@ ProgramStateRef MallocChecker::addExtentSize(CheckerContext &C,
CharUnits TypeSize = AstContext.getTypeSizeInChars(ElementType);
if (ElementCount.getAs<NonLoc>()) {
- DefinedOrUnknownSVal Extent = Region->getExtent(svalBuilder);
+ DefinedOrUnknownSVal DynSize = getDynamicSize(State, Region, svalBuilder);
+
// size in Bytes = ElementCount*TypeSize
SVal SizeInBytes = svalBuilder.evalBinOpNN(
State, BO_Mul, ElementCount.castAs<NonLoc>(),
svalBuilder.makeArrayIndex(TypeSize.getQuantity()),
svalBuilder.getArrayIndexType());
- DefinedOrUnknownSVal extentMatchesSize = svalBuilder.evalEQ(
- State, Extent, SizeInBytes.castAs<DefinedOrUnknownSVal>());
- State = State->assume(extentMatchesSize, true);
+ DefinedOrUnknownSVal DynSizeMatchesSize = svalBuilder.evalEQ(
+ State, DynSize, SizeInBytes.castAs<DefinedOrUnknownSVal>());
+ State = State->assume(DynSizeMatchesSize, true);
}
return State;
}
-void MallocChecker::checkPreStmt(const CXXDeleteExpr *DE,
- CheckerContext &C) const {
-
- if (!ChecksEnabled[CK_NewDeleteChecker])
- if (SymbolRef Sym = C.getSVal(DE->getArgument()).getAsSymbol())
- checkUseAfterFree(Sym, C, DE->getArgument());
-
- if (!MemFunctionInfo.isStandardNewDelete(DE->getOperatorDelete(),
- C.getASTContext()))
- return;
-
- ProgramStateRef State = C.getState();
- bool IsKnownToBeAllocated;
- State = FreeMemAux(C, DE->getArgument(), DE, State,
- /*Hold*/ false, IsKnownToBeAllocated);
-
- C.addTransition(State);
-}
-
static bool isKnownDeallocObjCMethodName(const ObjCMethodCall &Call) {
// If the first selector piece is one of the names below, assume that the
// object takes ownership of the memory, promising to eventually deallocate it
@@ -1474,50 +1525,52 @@ void MallocChecker::checkPostObjCMessage(const ObjCMethodCall &Call,
bool IsKnownToBeAllocatedMemory;
ProgramStateRef State =
- FreeMemAux(C, Call.getArgExpr(0), Call.getOriginExpr(), C.getState(),
- /*Hold=*/true, IsKnownToBeAllocatedMemory,
+ FreeMemAux(C, Call.getArgExpr(0), Call, C.getState(),
+ /*Hold=*/true, IsKnownToBeAllocatedMemory, AF_Malloc,
/*RetNullOnFailure=*/true);
C.addTransition(State);
}
ProgramStateRef
-MallocChecker::MallocMemReturnsAttr(CheckerContext &C, const CallExpr *CE,
+MallocChecker::MallocMemReturnsAttr(CheckerContext &C, const CallEvent &Call,
const OwnershipAttr *Att,
ProgramStateRef State) const {
if (!State)
return nullptr;
- if (Att->getModule() != MemFunctionInfo.II_malloc)
+ if (Att->getModule()->getName() != "malloc")
return nullptr;
OwnershipAttr::args_iterator I = Att->args_begin(), E = Att->args_end();
if (I != E) {
- return MallocMemAux(C, CE, CE->getArg(I->getASTIndex()), UndefinedVal(),
- State);
+ return MallocMemAux(C, Call, Call.getArgExpr(I->getASTIndex()),
+ UndefinedVal(), State, AF_Malloc);
}
- return MallocMemAux(C, CE, UnknownVal(), UndefinedVal(), State);
+ return MallocMemAux(C, Call, UnknownVal(), UndefinedVal(), State, AF_Malloc);
}
ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
- const CallExpr *CE,
+ const CallEvent &Call,
const Expr *SizeEx, SVal Init,
ProgramStateRef State,
AllocationFamily Family) {
if (!State)
return nullptr;
- return MallocMemAux(C, CE, C.getSVal(SizeEx), Init, State, Family);
+ assert(SizeEx);
+ return MallocMemAux(C, Call, C.getSVal(SizeEx), Init, State, Family);
}
ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
- const CallExpr *CE,
- SVal Size, SVal Init,
- ProgramStateRef State,
- AllocationFamily Family) {
+ const CallEvent &Call, SVal Size,
+ SVal Init, ProgramStateRef State,
+ AllocationFamily Family) {
if (!State)
return nullptr;
+ const Expr *CE = Call.getOriginExpr();
+
// We expect the malloc functions to return a pointer.
if (!Loc::isLocType(CE->getType()))
return nullptr;
@@ -1542,12 +1595,12 @@ ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
return nullptr;
if (Optional<DefinedOrUnknownSVal> DefinedSize =
Size.getAs<DefinedOrUnknownSVal>()) {
- SValBuilder &svalBuilder = C.getSValBuilder();
- DefinedOrUnknownSVal Extent = R->getExtent(svalBuilder);
- DefinedOrUnknownSVal extentMatchesSize =
- svalBuilder.evalEQ(State, Extent, *DefinedSize);
+ DefinedOrUnknownSVal DynSize = getDynamicSize(State, R, svalBuilder);
+
+ DefinedOrUnknownSVal DynSizeMatchesSize =
+ svalBuilder.evalEQ(State, DynSize, *DefinedSize);
- State = State->assume(extentMatchesSize, true);
+ State = State->assume(DynSizeMatchesSize, true);
assert(State);
}
@@ -1579,39 +1632,42 @@ static ProgramStateRef MallocUpdateRefState(CheckerContext &C, const Expr *E,
}
ProgramStateRef MallocChecker::FreeMemAttr(CheckerContext &C,
- const CallExpr *CE,
+ const CallEvent &Call,
const OwnershipAttr *Att,
ProgramStateRef State) const {
if (!State)
return nullptr;
- if (Att->getModule() != MemFunctionInfo.II_malloc)
+ if (Att->getModule()->getName() != "malloc")
return nullptr;
bool IsKnownToBeAllocated = false;
for (const auto &Arg : Att->args()) {
- ProgramStateRef StateI = FreeMemAux(
- C, CE, State, Arg.getASTIndex(),
- Att->getOwnKind() == OwnershipAttr::Holds, IsKnownToBeAllocated);
+ ProgramStateRef StateI =
+ FreeMemAux(C, Call, State, Arg.getASTIndex(),
+ Att->getOwnKind() == OwnershipAttr::Holds,
+ IsKnownToBeAllocated, AF_Malloc);
if (StateI)
State = StateI;
}
return State;
}
-ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C, const CallExpr *CE,
+ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
+ const CallEvent &Call,
ProgramStateRef State, unsigned Num,
bool Hold, bool &IsKnownToBeAllocated,
+ AllocationFamily Family,
bool ReturnsNullOnFailure) const {
if (!State)
return nullptr;
- if (CE->getNumArgs() < (Num + 1))
+ if (Call.getNumArgs() < (Num + 1))
return nullptr;
- return FreeMemAux(C, CE->getArg(Num), CE, State, Hold, IsKnownToBeAllocated,
- ReturnsNullOnFailure);
+ return FreeMemAux(C, Call.getArgExpr(Num), Call, State, Hold,
+ IsKnownToBeAllocated, Family, ReturnsNullOnFailure);
}
/// Checks if the previous call to free on the given symbol failed - if free
@@ -1629,58 +1685,7 @@ static bool didPreviousFreeFail(ProgramStateRef State,
return false;
}
-static AllocationFamily
-getAllocationFamily(const MemFunctionInfoTy &MemFunctionInfo, CheckerContext &C,
- const Stmt *S) {
-
- if (!S)
- return AF_None;
-
- if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
- const FunctionDecl *FD = C.getCalleeDecl(CE);
-
- if (!FD)
- FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
-
- ASTContext &Ctx = C.getASTContext();
-
- if (MemFunctionInfo.isCMemFunction(FD, Ctx, AF_Malloc,
- MemoryOperationKind::MOK_Any))
- return AF_Malloc;
-
- if (MemFunctionInfo.isStandardNewDelete(FD, Ctx)) {
- OverloadedOperatorKind Kind = FD->getOverloadedOperator();
- if (Kind == OO_New || Kind == OO_Delete)
- return AF_CXXNew;
- else if (Kind == OO_Array_New || Kind == OO_Array_Delete)
- return AF_CXXNewArray;
- }
-
- if (MemFunctionInfo.isCMemFunction(FD, Ctx, AF_IfNameIndex,
- MemoryOperationKind::MOK_Any))
- return AF_IfNameIndex;
-
- if (MemFunctionInfo.isCMemFunction(FD, Ctx, AF_Alloca,
- MemoryOperationKind::MOK_Any))
- return AF_Alloca;
-
- return AF_None;
- }
-
- if (const CXXNewExpr *NE = dyn_cast<CXXNewExpr>(S))
- return NE->isArray() ? AF_CXXNewArray : AF_CXXNew;
-
- if (const CXXDeleteExpr *DE = dyn_cast<CXXDeleteExpr>(S))
- return DE->isArrayForm() ? AF_CXXNewArray : AF_CXXNew;
-
- if (isa<ObjCMessageExpr>(S))
- return AF_Malloc;
-
- return AF_None;
-}
-
-static bool printAllocDeallocName(raw_ostream &os, CheckerContext &C,
- const Expr *E) {
+static bool printMemFnName(raw_ostream &os, CheckerContext &C, const Expr *E) {
if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
// FIXME: This doesn't handle indirect calls.
const FunctionDecl *FD = CE->getDirectCallee();
@@ -1719,10 +1724,7 @@ static bool printAllocDeallocName(raw_ostream &os, CheckerContext &C,
return false;
}
-static void printExpectedAllocName(raw_ostream &os,
- const MemFunctionInfoTy &MemFunctionInfo,
- CheckerContext &C, const Expr *E) {
- AllocationFamily Family = getAllocationFamily(MemFunctionInfo, C, E);
+static void printExpectedAllocName(raw_ostream &os, AllocationFamily Family) {
switch(Family) {
case AF_Malloc: os << "malloc()"; return;
@@ -1747,12 +1749,10 @@ static void printExpectedDeallocName(raw_ostream &os, AllocationFamily Family) {
}
}
-ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
- const Expr *ArgExpr,
- const Expr *ParentExpr,
- ProgramStateRef State, bool Hold,
- bool &IsKnownToBeAllocated,
- bool ReturnsNullOnFailure) const {
+ProgramStateRef MallocChecker::FreeMemAux(
+ CheckerContext &C, const Expr *ArgExpr, const CallEvent &Call,
+ ProgramStateRef State, bool Hold, bool &IsKnownToBeAllocated,
+ AllocationFamily Family, bool ReturnsNullOnFailure) const {
if (!State)
return nullptr;
@@ -1778,11 +1778,28 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
return nullptr;
const MemRegion *R = ArgVal.getAsRegion();
+ const Expr *ParentExpr = Call.getOriginExpr();
+
+ // NOTE: We detected a bug, but the checker under whose name we would emit the
+ // error could be disabled. Generally speaking, the MallocChecker family is an
+ // integral part of the Static Analyzer, and disabling any part of it should
+ // only be done under exceptional circumstances, such as frequent false
+ // positives. If this is the case, we can reasonably believe that there are
+ // serious faults in our understanding of the source code, and even if we
+  // don't emit a warning, we should terminate further analysis with a sink
+ // node.
// Nonlocs can't be freed, of course.
// Non-region locations (labels and fixed addresses) also shouldn't be freed.
if (!R) {
- ReportBadFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr);
+ // Exception:
+ // If the macro ZERO_SIZE_PTR is defined, this could be a kernel source
+ // code. In that case, the ZERO_SIZE_PTR defines a special value used for a
+ // zero-sized memory block which is allowed to be freed, despite not being a
+ // null pointer.
+ if (Family != AF_Malloc || !isArgZERO_SIZE_PTR(State, C, ArgVal))
+ HandleNonHeapDealloc(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr,
+ Family);
return nullptr;
}
@@ -1790,7 +1807,8 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
// Blocks might show up as heap data, but should not be free()d
if (isa<BlockDataRegion>(R)) {
- ReportBadFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr);
+ HandleNonHeapDealloc(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr,
+ Family);
return nullptr;
}
@@ -1808,9 +1826,10 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
// False negatives are better than false positives.
if (isa<AllocaRegion>(R))
- ReportFreeAlloca(C, ArgVal, ArgExpr->getSourceRange());
+ HandleFreeAlloca(C, ArgVal, ArgExpr->getSourceRange());
else
- ReportBadFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr);
+ HandleNonHeapDealloc(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr,
+ Family);
return nullptr;
}
@@ -1832,14 +1851,14 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
// Memory returned by alloca() shouldn't be freed.
if (RsBase->getAllocationFamily() == AF_Alloca) {
- ReportFreeAlloca(C, ArgVal, ArgExpr->getSourceRange());
+ HandleFreeAlloca(C, ArgVal, ArgExpr->getSourceRange());
return nullptr;
}
// Check for double free first.
if ((RsBase->isReleased() || RsBase->isRelinquished()) &&
!didPreviousFreeFail(State, SymBase, PreviousRetStatusSymbol)) {
- ReportDoubleFree(C, ParentExpr->getSourceRange(), RsBase->isReleased(),
+ HandleDoubleFree(C, ParentExpr->getSourceRange(), RsBase->isReleased(),
SymBase, PreviousRetStatusSymbol);
return nullptr;
@@ -1849,12 +1868,10 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
RsBase->isEscaped()) {
// Check if an expected deallocation function matches the real one.
- bool DeallocMatchesAlloc =
- RsBase->getAllocationFamily() ==
- getAllocationFamily(MemFunctionInfo, C, ParentExpr);
+ bool DeallocMatchesAlloc = RsBase->getAllocationFamily() == Family;
if (!DeallocMatchesAlloc) {
- ReportMismatchedDealloc(C, ArgExpr->getSourceRange(),
- ParentExpr, RsBase, SymBase, Hold);
+ HandleMismatchedDealloc(C, ArgExpr->getSourceRange(), ParentExpr,
+ RsBase, SymBase, Hold);
return nullptr;
}
@@ -1865,15 +1882,16 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
!Offset.hasSymbolicOffset() &&
Offset.getOffset() != 0) {
const Expr *AllocExpr = cast<Expr>(RsBase->getStmt());
- ReportOffsetFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr,
- AllocExpr);
+ HandleOffsetFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr,
+ Family, AllocExpr);
return nullptr;
}
}
}
if (SymBase->getType()->isFunctionPointerType()) {
- ReportFunctionPointerFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr);
+ HandleFunctionPtrFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr,
+ Family);
return nullptr;
}
@@ -1891,9 +1909,12 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
}
}
- AllocationFamily Family =
- RsBase ? RsBase->getAllocationFamily()
- : getAllocationFamily(MemFunctionInfo, C, ParentExpr);
+ // If we don't know anything about this symbol, a free on it may be totally
+ // valid. If this is the case, lets assume that the allocation family of the
+  // freeing function is the same as the symbol's allocation family, and go with
+ // that.
+ assert(!RsBase || (RsBase && RsBase->getAllocationFamily() == Family));
+
// Normal free.
if (Hold)
return State->set<RegionState>(SymBase,
@@ -1940,14 +1961,6 @@ MallocChecker::getCheckIfTracked(AllocationFamily Family,
}
Optional<MallocChecker::CheckKind>
-MallocChecker::getCheckIfTracked(CheckerContext &C,
- const Stmt *AllocDeallocStmt,
- bool IsALeakCheck) const {
- return getCheckIfTracked(
- getAllocationFamily(MemFunctionInfo, C, AllocDeallocStmt), IsALeakCheck);
-}
-
-Optional<MallocChecker::CheckKind>
MallocChecker::getCheckIfTracked(CheckerContext &C, SymbolRef Sym,
bool IsALeakCheck) const {
if (C.getState()->contains<ReallocSizeZeroSymbols>(Sym))
@@ -2045,16 +2058,17 @@ bool MallocChecker::SummarizeRegion(raw_ostream &os,
}
}
-void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
- SourceRange Range,
- const Expr *DeallocExpr) const {
+void MallocChecker::HandleNonHeapDealloc(CheckerContext &C, SVal ArgVal,
+ SourceRange Range,
+ const Expr *DeallocExpr,
+ AllocationFamily Family) const {
- if (!ChecksEnabled[CK_MallocChecker] &&
- !ChecksEnabled[CK_NewDeleteChecker])
+ if (!ChecksEnabled[CK_MallocChecker] && !ChecksEnabled[CK_NewDeleteChecker]) {
+ C.addSink();
return;
+ }
- Optional<MallocChecker::CheckKind> CheckKind =
- getCheckIfTracked(C, DeallocExpr);
+ Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(Family);
if (!CheckKind.hasValue())
return;
@@ -2071,7 +2085,7 @@ void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
MR = ER->getSuperRegion();
os << "Argument to ";
- if (!printAllocDeallocName(os, C, DeallocExpr))
+ if (!printMemFnName(os, C, DeallocExpr))
os << "deallocator";
os << " is ";
@@ -2082,7 +2096,7 @@ void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
else
os << "not memory allocated by ";
- printExpectedAllocName(os, MemFunctionInfo, C, DeallocExpr);
+ printExpectedAllocName(os, Family);
auto R = std::make_unique<PathSensitiveBugReport>(*BT_BadFree[*CheckKind],
os.str(), N);
@@ -2092,7 +2106,7 @@ void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
}
}
-void MallocChecker::ReportFreeAlloca(CheckerContext &C, SVal ArgVal,
+void MallocChecker::HandleFreeAlloca(CheckerContext &C, SVal ArgVal,
SourceRange Range) const {
Optional<MallocChecker::CheckKind> CheckKind;
@@ -2101,8 +2115,10 @@ void MallocChecker::ReportFreeAlloca(CheckerContext &C, SVal ArgVal,
CheckKind = CK_MallocChecker;
else if (ChecksEnabled[CK_MismatchedDeallocatorChecker])
CheckKind = CK_MismatchedDeallocatorChecker;
- else
+ else {
+ C.addSink();
return;
+ }
if (ExplodedNode *N = C.generateErrorNode()) {
if (!BT_FreeAlloca[*CheckKind])
@@ -2118,15 +2134,16 @@ void MallocChecker::ReportFreeAlloca(CheckerContext &C, SVal ArgVal,
}
}
-void MallocChecker::ReportMismatchedDealloc(CheckerContext &C,
+void MallocChecker::HandleMismatchedDealloc(CheckerContext &C,
SourceRange Range,
const Expr *DeallocExpr,
- const RefState *RS,
- SymbolRef Sym,
+ const RefState *RS, SymbolRef Sym,
bool OwnershipTransferred) const {
- if (!ChecksEnabled[CK_MismatchedDeallocatorChecker])
+ if (!ChecksEnabled[CK_MismatchedDeallocatorChecker]) {
+ C.addSink();
return;
+ }
if (ExplodedNode *N = C.generateErrorNode()) {
if (!BT_MismatchedDealloc)
@@ -2144,25 +2161,25 @@ void MallocChecker::ReportMismatchedDealloc(CheckerContext &C,
llvm::raw_svector_ostream DeallocOs(DeallocBuf);
if (OwnershipTransferred) {
- if (printAllocDeallocName(DeallocOs, C, DeallocExpr))
+ if (printMemFnName(DeallocOs, C, DeallocExpr))
os << DeallocOs.str() << " cannot";
else
os << "Cannot";
os << " take ownership of memory";
- if (printAllocDeallocName(AllocOs, C, AllocExpr))
+ if (printMemFnName(AllocOs, C, AllocExpr))
os << " allocated by " << AllocOs.str();
} else {
os << "Memory";
- if (printAllocDeallocName(AllocOs, C, AllocExpr))
+ if (printMemFnName(AllocOs, C, AllocExpr))
os << " allocated by " << AllocOs.str();
os << " should be deallocated by ";
printExpectedDeallocName(os, RS->getAllocationFamily());
- if (printAllocDeallocName(DeallocOs, C, DeallocExpr))
- os << ", not " << DeallocOs.str();
+ if (printMemFnName(DeallocOs, C, DeallocExpr))
+ os << ", not " << DeallocOs.str();
}
auto R = std::make_unique<PathSensitiveBugReport>(*BT_MismatchedDealloc,
@@ -2174,17 +2191,17 @@ void MallocChecker::ReportMismatchedDealloc(CheckerContext &C,
}
}
-void MallocChecker::ReportOffsetFree(CheckerContext &C, SVal ArgVal,
+void MallocChecker::HandleOffsetFree(CheckerContext &C, SVal ArgVal,
SourceRange Range, const Expr *DeallocExpr,
+ AllocationFamily Family,
const Expr *AllocExpr) const {
-
- if (!ChecksEnabled[CK_MallocChecker] &&
- !ChecksEnabled[CK_NewDeleteChecker])
+ if (!ChecksEnabled[CK_MallocChecker] && !ChecksEnabled[CK_NewDeleteChecker]) {
+ C.addSink();
return;
+ }
- Optional<MallocChecker::CheckKind> CheckKind =
- getCheckIfTracked(C, AllocExpr);
+ Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(Family);
if (!CheckKind.hasValue())
return;
@@ -2213,14 +2230,14 @@ void MallocChecker::ReportOffsetFree(CheckerContext &C, SVal ArgVal,
int offsetBytes = Offset.getOffset() / C.getASTContext().getCharWidth();
os << "Argument to ";
- if (!printAllocDeallocName(os, C, DeallocExpr))
+ if (!printMemFnName(os, C, DeallocExpr))
os << "deallocator";
os << " is offset by "
<< offsetBytes
<< " "
<< ((abs(offsetBytes) > 1) ? "bytes" : "byte")
<< " from the start of ";
- if (AllocExpr && printAllocDeallocName(AllocNameOs, C, AllocExpr))
+ if (AllocExpr && printMemFnName(AllocNameOs, C, AllocExpr))
os << "memory allocated by " << AllocNameOs.str();
else
os << "allocated memory";
@@ -2232,13 +2249,14 @@ void MallocChecker::ReportOffsetFree(CheckerContext &C, SVal ArgVal,
C.emitReport(std::move(R));
}
-void MallocChecker::ReportUseAfterFree(CheckerContext &C, SourceRange Range,
+void MallocChecker::HandleUseAfterFree(CheckerContext &C, SourceRange Range,
SymbolRef Sym) const {
- if (!ChecksEnabled[CK_MallocChecker] &&
- !ChecksEnabled[CK_NewDeleteChecker] &&
- !ChecksEnabled[CK_InnerPointerChecker])
+ if (!ChecksEnabled[CK_MallocChecker] && !ChecksEnabled[CK_NewDeleteChecker] &&
+ !ChecksEnabled[CK_InnerPointerChecker]) {
+ C.addSink();
return;
+ }
Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
if (!CheckKind.hasValue())
@@ -2270,13 +2288,14 @@ void MallocChecker::ReportUseAfterFree(CheckerContext &C, SourceRange Range,
}
}
-void MallocChecker::ReportDoubleFree(CheckerContext &C, SourceRange Range,
+void MallocChecker::HandleDoubleFree(CheckerContext &C, SourceRange Range,
bool Released, SymbolRef Sym,
SymbolRef PrevSym) const {
- if (!ChecksEnabled[CK_MallocChecker] &&
- !ChecksEnabled[CK_NewDeleteChecker])
+ if (!ChecksEnabled[CK_MallocChecker] && !ChecksEnabled[CK_NewDeleteChecker]) {
+ C.addSink();
return;
+ }
Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
if (!CheckKind.hasValue())
@@ -2301,10 +2320,12 @@ void MallocChecker::ReportDoubleFree(CheckerContext &C, SourceRange Range,
}
}
-void MallocChecker::ReportDoubleDelete(CheckerContext &C, SymbolRef Sym) const {
+void MallocChecker::HandleDoubleDelete(CheckerContext &C, SymbolRef Sym) const {
- if (!ChecksEnabled[CK_NewDeleteChecker])
+ if (!ChecksEnabled[CK_NewDeleteChecker]) {
+ C.addSink();
return;
+ }
Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
if (!CheckKind.hasValue())
@@ -2325,13 +2346,13 @@ void MallocChecker::ReportDoubleDelete(CheckerContext &C, SymbolRef Sym) const {
}
}
-void MallocChecker::ReportUseZeroAllocated(CheckerContext &C,
- SourceRange Range,
- SymbolRef Sym) const {
+void MallocChecker::HandleUseZeroAlloc(CheckerContext &C, SourceRange Range,
+ SymbolRef Sym) const {
- if (!ChecksEnabled[CK_MallocChecker] &&
- !ChecksEnabled[CK_NewDeleteChecker])
+ if (!ChecksEnabled[CK_MallocChecker] && !ChecksEnabled[CK_NewDeleteChecker]) {
+ C.addSink();
return;
+ }
Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
@@ -2356,13 +2377,16 @@ void MallocChecker::ReportUseZeroAllocated(CheckerContext &C,
}
}
-void MallocChecker::ReportFunctionPointerFree(CheckerContext &C, SVal ArgVal,
- SourceRange Range,
- const Expr *FreeExpr) const {
- if (!ChecksEnabled[CK_MallocChecker])
+void MallocChecker::HandleFunctionPtrFree(CheckerContext &C, SVal ArgVal,
+ SourceRange Range,
+ const Expr *FreeExpr,
+ AllocationFamily Family) const {
+ if (!ChecksEnabled[CK_MallocChecker]) {
+ C.addSink();
return;
+ }
- Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, FreeExpr);
+ Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(Family);
if (!CheckKind.hasValue())
return;
@@ -2379,7 +2403,7 @@ void MallocChecker::ReportFunctionPointerFree(CheckerContext &C, SVal ArgVal,
MR = ER->getSuperRegion();
Os << "Argument to ";
- if (!printAllocDeallocName(Os, C, FreeExpr))
+ if (!printMemFnName(Os, C, FreeExpr))
Os << "deallocator";
Os << " is a function pointer";
@@ -2392,14 +2416,15 @@ void MallocChecker::ReportFunctionPointerFree(CheckerContext &C, SVal ArgVal,
}
}
-ProgramStateRef MallocChecker::ReallocMemAux(CheckerContext &C,
- const CallExpr *CE,
- bool ShouldFreeOnFail,
- ProgramStateRef State,
- bool SuffixWithN) const {
+ProgramStateRef
+MallocChecker::ReallocMemAux(CheckerContext &C, const CallEvent &Call,
+ bool ShouldFreeOnFail, ProgramStateRef State,
+ AllocationFamily Family, bool SuffixWithN) const {
if (!State)
return nullptr;
+ const CallExpr *CE = cast<CallExpr>(Call.getOriginExpr());
+
if (SuffixWithN && CE->getNumArgs() < 3)
return nullptr;
else if (CE->getNumArgs() < 2)
@@ -2443,21 +2468,15 @@ ProgramStateRef MallocChecker::ReallocMemAux(CheckerContext &C,
// If the ptr is NULL and the size is not 0, the call is equivalent to
// malloc(size).
if (PrtIsNull && !SizeIsZero) {
- ProgramStateRef stateMalloc = MallocMemAux(C, CE, TotalSize,
- UndefinedVal(), StatePtrIsNull);
+ ProgramStateRef stateMalloc = MallocMemAux(
+ C, Call, TotalSize, UndefinedVal(), StatePtrIsNull, Family);
return stateMalloc;
}
if (PrtIsNull && SizeIsZero)
return State;
- // Get the from and to pointer symbols as in toPtr = realloc(fromPtr, size).
assert(!PrtIsNull);
- SymbolRef FromPtr = arg0Val.getAsSymbol();
- SVal RetVal = C.getSVal(CE);
- SymbolRef ToPtr = RetVal.getAsSymbol();
- if (!FromPtr || !ToPtr)
- return nullptr;
bool IsKnownToBeAllocated = false;
@@ -2467,16 +2486,16 @@ ProgramStateRef MallocChecker::ReallocMemAux(CheckerContext &C,
// If size was equal to 0, either NULL or a pointer suitable to be passed
// to free() is returned. We just free the input pointer and do not add
// any constrains on the output pointer.
- if (ProgramStateRef stateFree =
- FreeMemAux(C, CE, StateSizeIsZero, 0, false, IsKnownToBeAllocated))
+ if (ProgramStateRef stateFree = FreeMemAux(
+ C, Call, StateSizeIsZero, 0, false, IsKnownToBeAllocated, Family))
return stateFree;
// Default behavior.
if (ProgramStateRef stateFree =
- FreeMemAux(C, CE, State, 0, false, IsKnownToBeAllocated)) {
+ FreeMemAux(C, Call, State, 0, false, IsKnownToBeAllocated, Family)) {
- ProgramStateRef stateRealloc = MallocMemAux(C, CE, TotalSize,
- UnknownVal(), stateFree);
+ ProgramStateRef stateRealloc =
+ MallocMemAux(C, Call, TotalSize, UnknownVal(), stateFree, Family);
if (!stateRealloc)
return nullptr;
@@ -2486,6 +2505,14 @@ ProgramStateRef MallocChecker::ReallocMemAux(CheckerContext &C,
else if (!IsKnownToBeAllocated)
Kind = OAR_DoNotTrackAfterFailure;
+ // Get the from and to pointer symbols as in toPtr = realloc(fromPtr, size).
+ SymbolRef FromPtr = arg0Val.getLocSymbolInBase();
+ SVal RetVal = C.getSVal(CE);
+ SymbolRef ToPtr = RetVal.getAsSymbol();
+ assert(FromPtr && ToPtr &&
+ "By this point, FreeMemAux and MallocMemAux should have checked "
+ "whether the argument or the return value is symbolic!");
+
// Record the info about the reallocated symbol so that we could properly
// process failed reallocation.
stateRealloc = stateRealloc->set<ReallocPairs>(ToPtr,
@@ -2497,19 +2524,21 @@ ProgramStateRef MallocChecker::ReallocMemAux(CheckerContext &C,
return nullptr;
}
-ProgramStateRef MallocChecker::CallocMem(CheckerContext &C, const CallExpr *CE,
+ProgramStateRef MallocChecker::CallocMem(CheckerContext &C,
+ const CallEvent &Call,
ProgramStateRef State) {
if (!State)
return nullptr;
- if (CE->getNumArgs() < 2)
+ if (Call.getNumArgs() < 2)
return nullptr;
SValBuilder &svalBuilder = C.getSValBuilder();
SVal zeroVal = svalBuilder.makeZeroVal(svalBuilder.getContext().CharTy);
- SVal TotalSize = evalMulForBufferSize(C, CE->getArg(0), CE->getArg(1));
+ SVal TotalSize =
+ evalMulForBufferSize(C, Call.getArgExpr(0), Call.getArgExpr(1));
- return MallocMemAux(C, CE, TotalSize, zeroVal, State);
+ return MallocMemAux(C, Call, TotalSize, zeroVal, State, AF_Malloc);
}
MallocChecker::LeakInfo MallocChecker::getAllocationSite(const ExplodedNode *N,
@@ -2553,7 +2582,7 @@ MallocChecker::LeakInfo MallocChecker::getAllocationSite(const ExplodedNode *N,
return LeakInfo(AllocNode, ReferenceRegion);
}
-void MallocChecker::reportLeak(SymbolRef Sym, ExplodedNode *N,
+void MallocChecker::HandleLeak(SymbolRef Sym, ExplodedNode *N,
CheckerContext &C) const {
if (!ChecksEnabled[CK_MallocChecker] &&
@@ -2669,7 +2698,7 @@ void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
if (N) {
for (SmallVectorImpl<SymbolRef>::iterator
I = Errors.begin(), E = Errors.end(); I != E; ++I) {
- reportLeak(*I, N, C);
+ HandleLeak(*I, N, C);
}
}
}
@@ -2680,7 +2709,27 @@ void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
void MallocChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
- if (const CXXDestructorCall *DC = dyn_cast<CXXDestructorCall>(&Call)) {
+ if (const auto *DC = dyn_cast<CXXDeallocatorCall>(&Call)) {
+ const CXXDeleteExpr *DE = DC->getOriginExpr();
+
+ if (!ChecksEnabled[CK_NewDeleteChecker])
+ if (SymbolRef Sym = C.getSVal(DE->getArgument()).getAsSymbol())
+ checkUseAfterFree(Sym, C, DE->getArgument());
+
+ if (!isStandardNewDelete(DC->getDecl()))
+ return;
+
+ ProgramStateRef State = C.getState();
+ bool IsKnownToBeAllocated;
+ State = FreeMemAux(C, DE->getArgument(), Call, State,
+ /*Hold*/ false, IsKnownToBeAllocated,
+ (DE->isArrayForm() ? AF_CXXNewArray : AF_CXXNew));
+
+ C.addTransition(State);
+ return;
+ }
+
+ if (const auto *DC = dyn_cast<CXXDestructorCall>(&Call)) {
SymbolRef Sym = DC->getCXXThisVal().getAsSymbol();
if (!Sym || checkDoubleDelete(Sym, C))
return;
@@ -2692,12 +2741,7 @@ void MallocChecker::checkPreCall(const CallEvent &Call,
if (!FD)
return;
- ASTContext &Ctx = C.getASTContext();
- if (ChecksEnabled[CK_MallocChecker] &&
- (MemFunctionInfo.isCMemFunction(FD, Ctx, AF_Malloc,
- MemoryOperationKind::MOK_Free) ||
- MemFunctionInfo.isCMemFunction(FD, Ctx, AF_IfNameIndex,
- MemoryOperationKind::MOK_Free)))
+ if (ChecksEnabled[CK_MallocChecker] && isFreeingCall(Call))
return;
}
@@ -2807,8 +2851,8 @@ static bool isReleased(SymbolRef Sym, CheckerContext &C) {
}
bool MallocChecker::suppressDeallocationsInSuspiciousContexts(
- const CallExpr *CE, CheckerContext &C) const {
- if (CE->getNumArgs() == 0)
+ const CallEvent &Call, CheckerContext &C) const {
+ if (Call.getNumArgs() == 0)
return false;
StringRef FunctionStr = "";
@@ -2826,7 +2870,7 @@ bool MallocChecker::suppressDeallocationsInSuspiciousContexts(
ProgramStateRef State = C.getState();
- for (const Expr *Arg : CE->arguments())
+ for (const Expr *Arg : cast<CallExpr>(Call.getOriginExpr())->arguments())
if (SymbolRef Sym = C.getSVal(Arg).getAsSymbol())
if (const RefState *RS = State->get<RegionState>(Sym))
State = State->set<RegionState>(Sym, RefState::getEscaped(RS));
@@ -2839,7 +2883,7 @@ bool MallocChecker::checkUseAfterFree(SymbolRef Sym, CheckerContext &C,
const Stmt *S) const {
if (isReleased(Sym, C)) {
- ReportUseAfterFree(C, S->getSourceRange(), Sym);
+ HandleUseAfterFree(C, S->getSourceRange(), Sym);
return true;
}
@@ -2852,17 +2896,17 @@ void MallocChecker::checkUseZeroAllocated(SymbolRef Sym, CheckerContext &C,
if (const RefState *RS = C.getState()->get<RegionState>(Sym)) {
if (RS->isAllocatedOfSizeZero())
- ReportUseZeroAllocated(C, RS->getStmt()->getSourceRange(), Sym);
+ HandleUseZeroAlloc(C, RS->getStmt()->getSourceRange(), Sym);
}
else if (C.getState()->contains<ReallocSizeZeroSymbols>(Sym)) {
- ReportUseZeroAllocated(C, S->getSourceRange(), Sym);
+ HandleUseZeroAlloc(C, S->getSourceRange(), Sym);
}
}
bool MallocChecker::checkDoubleDelete(SymbolRef Sym, CheckerContext &C) const {
if (isReleased(Sym, C)) {
- ReportDoubleDelete(C, Sym);
+ HandleDoubleDelete(C, Sym);
return true;
}
return false;
@@ -2994,11 +3038,9 @@ bool MallocChecker::mayFreeAnyEscapedMemoryOrIsModeledExplicitly(
if (!FD)
return true;
- ASTContext &ASTC = State->getStateManager().getContext();
-
// If it's one of the allocation functions we can reason about, we model
// its behavior explicitly.
- if (MemFunctionInfo.isMemFunction(FD, ASTC))
+ if (isMemCall(*Call))
return false;
// If it's not a system call, assume it frees memory.
@@ -3142,6 +3184,18 @@ ProgramStateRef MallocChecker::checkPointerEscapeAux(
return State;
}
+bool MallocChecker::isArgZERO_SIZE_PTR(ProgramStateRef State, CheckerContext &C,
+ SVal ArgVal) const {
+ if (!KernelZeroSizePtrValue)
+ KernelZeroSizePtrValue =
+ tryExpandAsInteger("ZERO_SIZE_PTR", C.getPreprocessor());
+
+ const llvm::APSInt *ArgValKnown =
+ C.getSValBuilder().getKnownValue(State, ArgVal);
+ return ArgValKnown && *KernelZeroSizePtrValue &&
+ ArgValKnown->getSExtValue() == **KernelZeroSizePtrValue;
+}
+
static SymbolRef findFailedReallocSymbol(ProgramStateRef currState,
ProgramStateRef prevState) {
ReallocPairsTy currMap = currState->get<ReallocPairs>();
@@ -3404,11 +3458,11 @@ void ento::registerInnerPointerCheckerAux(CheckerManager &mgr) {
void ento::registerDynamicMemoryModeling(CheckerManager &mgr) {
auto *checker = mgr.registerChecker<MallocChecker>();
- checker->MemFunctionInfo.ShouldIncludeOwnershipAnnotatedFunctions =
+ checker->ShouldIncludeOwnershipAnnotatedFunctions =
mgr.getAnalyzerOptions().getCheckerBooleanOption(checker, "Optimistic");
}
-bool ento::shouldRegisterDynamicMemoryModeling(const LangOptions &LO) {
+bool ento::shouldRegisterDynamicMemoryModeling(const CheckerManager &mgr) {
return true;
}
@@ -3420,7 +3474,7 @@ bool ento::shouldRegisterDynamicMemoryModeling(const LangOptions &LO) {
mgr.getCurrentCheckerName(); \
} \
\
- bool ento::shouldRegister##name(const LangOptions &LO) { return true; }
+ bool ento::shouldRegister##name(const CheckerManager &mgr) { return true; }
REGISTER_CHECKER(MallocChecker)
REGISTER_CHECKER(NewDeleteChecker)
diff --git a/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
index 4fd06f24c5bc..e31630f63b5a 100644
--- a/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
@@ -337,6 +337,6 @@ void ento::registerMallocOverflowSecurityChecker(CheckerManager &mgr) {
mgr.registerChecker<MallocOverflowSecurityChecker>();
}
-bool ento::shouldRegisterMallocOverflowSecurityChecker(const LangOptions &LO) {
+bool ento::shouldRegisterMallocOverflowSecurityChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
index b5881a9e6533..71f593cb2b56 100644
--- a/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
@@ -250,6 +250,6 @@ void ento::registerMallocSizeofChecker(CheckerManager &mgr) {
mgr.registerChecker<MallocSizeofChecker>();
}
-bool ento::shouldRegisterMallocSizeofChecker(const LangOptions &LO) {
+bool ento::shouldRegisterMallocSizeofChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/MismatchedIteratorChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MismatchedIteratorChecker.cpp
index 143910588959..1960873599f7 100644
--- a/clang/lib/StaticAnalyzer/Checkers/MismatchedIteratorChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/MismatchedIteratorChecker.cpp
@@ -28,7 +28,7 @@ using namespace iterator;
namespace {
class MismatchedIteratorChecker
- : public Checker<check::PreCall> {
+ : public Checker<check::PreCall, check::PreStmt<BinaryOperator>> {
std::unique_ptr<BugType> MismatchedBugType;
@@ -47,6 +47,7 @@ public:
MismatchedIteratorChecker();
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkPreStmt(const BinaryOperator *BO, CheckerContext &C) const;
};
@@ -141,7 +142,7 @@ void MismatchedIteratorChecker::checkPreCall(const CallEvent &Call,
// Example:
// template<typename I1, typename I2>
// void f(I1 first1, I1 last1, I2 first2, I2 last2);
- //
+ //
// In this case the first two arguments to f() must be iterators must belong
// to the same container and the last to also to the same container but
// not necessarily to the same as the first two.
@@ -188,6 +189,17 @@ void MismatchedIteratorChecker::checkPreCall(const CallEvent &Call,
}
}
+void MismatchedIteratorChecker::checkPreStmt(const BinaryOperator *BO,
+ CheckerContext &C) const {
+ if (!BO->isComparisonOp())
+ return;
+
+ ProgramStateRef State = C.getState();
+ SVal LVal = State->getSVal(BO->getLHS(), C.getLocationContext());
+ SVal RVal = State->getSVal(BO->getRHS(), C.getLocationContext());
+ verifyMatch(C, LVal, RVal);
+}
+
void MismatchedIteratorChecker::verifyMatch(CheckerContext &C, const SVal &Iter,
const MemRegion *Cont) const {
// Verify match between a container and the container of an iterator
@@ -290,6 +302,6 @@ void ento::registerMismatchedIteratorChecker(CheckerManager &mgr) {
mgr.registerChecker<MismatchedIteratorChecker>();
}
-bool ento::shouldRegisterMismatchedIteratorChecker(const LangOptions &LO) {
+bool ento::shouldRegisterMismatchedIteratorChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
index ceea62160545..5d63d6efd234 100644
--- a/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
@@ -88,6 +88,6 @@ void ento::registerMmapWriteExecChecker(CheckerManager &mgr) {
.getCheckerIntegerOption(Mwec, "MmapProtRead");
}
-bool ento::shouldRegisterMmapWriteExecChecker(const LangOptions &LO) {
+bool ento::shouldRegisterMmapWriteExecChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
index 40eb113e3f8e..7f0519c695b0 100644
--- a/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
@@ -757,6 +757,6 @@ void ento::registerMoveChecker(CheckerManager &mgr) {
mgr.getAnalyzerOptions().getCheckerStringOption(chk, "WarnOn"), mgr);
}
-bool ento::shouldRegisterMoveChecker(const LangOptions &LO) {
+bool ento::shouldRegisterMoveChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
index 41b7fe5e43b6..be17e401fb53 100644
--- a/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
@@ -80,6 +80,7 @@ void ento::registerNSAutoreleasePoolChecker(CheckerManager &mgr) {
mgr.registerChecker<NSAutoreleasePoolChecker>();
}
-bool ento::shouldRegisterNSAutoreleasePoolChecker(const LangOptions &LO) {
+bool ento::shouldRegisterNSAutoreleasePoolChecker(const CheckerManager &mgr) {
+ const LangOptions &LO = mgr.getLangOpts();
return LO.getGC() != LangOptions::NonGC;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
index 85370bf133cd..90c5583d8969 100644
--- a/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
@@ -95,6 +95,15 @@ public:
};
}
+static bool hasReservedReturnType(const FunctionDecl *D) {
+ if (isa<CXXConstructorDecl>(D))
+ return true;
+
+ // operators delete and delete[] are required to have 'void' return type
+ auto OperatorKind = D->getOverloadedOperator();
+ return OperatorKind == OO_Delete || OperatorKind == OO_Array_Delete;
+}
+
void CFErrorFunctionChecker::checkASTDecl(const FunctionDecl *D,
AnalysisManager &mgr,
BugReporter &BR) const {
@@ -102,6 +111,8 @@ void CFErrorFunctionChecker::checkASTDecl(const FunctionDecl *D,
return;
if (!D->getReturnType()->isVoidType())
return;
+ if (hasReservedReturnType(D))
+ return;
if (!II)
II = &D->getASTContext().Idents.get("CFErrorRef");
@@ -133,14 +144,14 @@ namespace {
class NSErrorDerefBug : public BugType {
public:
- NSErrorDerefBug(const CheckerBase *Checker)
+ NSErrorDerefBug(const CheckerNameRef Checker)
: BugType(Checker, "NSError** null dereference",
"Coding conventions (Apple)") {}
};
class CFErrorDerefBug : public BugType {
public:
- CFErrorDerefBug(const CheckerBase *Checker)
+ CFErrorDerefBug(const CheckerNameRef Checker)
: BugType(Checker, "CFErrorRef* null dereference",
"Coding conventions (Apple)") {}
};
@@ -155,9 +166,9 @@ class NSOrCFErrorDerefChecker
mutable std::unique_ptr<NSErrorDerefBug> NSBT;
mutable std::unique_ptr<CFErrorDerefBug> CFBT;
public:
- bool ShouldCheckNSError, ShouldCheckCFError;
- NSOrCFErrorDerefChecker() : NSErrorII(nullptr), CFErrorII(nullptr),
- ShouldCheckNSError(0), ShouldCheckCFError(0) { }
+ DefaultBool ShouldCheckNSError, ShouldCheckCFError;
+ CheckerNameRef NSErrorName, CFErrorName;
+ NSOrCFErrorDerefChecker() : NSErrorII(nullptr), CFErrorII(nullptr) {}
void checkLocation(SVal loc, bool isLoad, const Stmt *S,
CheckerContext &C) const;
@@ -265,12 +276,12 @@ void NSOrCFErrorDerefChecker::checkEvent(ImplicitNullDerefEvent event) const {
BugType *bug = nullptr;
if (isNSError) {
if (!NSBT)
- NSBT.reset(new NSErrorDerefBug(this));
+ NSBT.reset(new NSErrorDerefBug(NSErrorName));
bug = NSBT.get();
}
else {
if (!CFBT)
- CFBT.reset(new CFErrorDerefBug(this));
+ CFBT.reset(new CFErrorDerefBug(CFErrorName));
bug = CFBT.get();
}
BR.emitReport(
@@ -312,7 +323,7 @@ void ento::registerNSOrCFErrorDerefChecker(CheckerManager &mgr) {
mgr.registerChecker<NSOrCFErrorDerefChecker>();
}
-bool ento::shouldRegisterNSOrCFErrorDerefChecker(const LangOptions &LO) {
+bool ento::shouldRegisterNSOrCFErrorDerefChecker(const CheckerManager &mgr) {
return true;
}
@@ -320,9 +331,10 @@ void ento::registerNSErrorChecker(CheckerManager &mgr) {
mgr.registerChecker<NSErrorMethodChecker>();
NSOrCFErrorDerefChecker *checker = mgr.getChecker<NSOrCFErrorDerefChecker>();
checker->ShouldCheckNSError = true;
+ checker->NSErrorName = mgr.getCurrentCheckerName();
}
-bool ento::shouldRegisterNSErrorChecker(const LangOptions &LO) {
+bool ento::shouldRegisterNSErrorChecker(const CheckerManager &mgr) {
return true;
}
@@ -330,8 +342,9 @@ void ento::registerCFErrorChecker(CheckerManager &mgr) {
mgr.registerChecker<CFErrorFunctionChecker>();
NSOrCFErrorDerefChecker *checker = mgr.getChecker<NSOrCFErrorDerefChecker>();
checker->ShouldCheckCFError = true;
+ checker->CFErrorName = mgr.getCurrentCheckerName();
}
-bool ento::shouldRegisterCFErrorChecker(const LangOptions &LO) {
+bool ento::shouldRegisterCFErrorChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
index fc34255bf6c9..af208e867318 100644
--- a/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
@@ -143,6 +143,6 @@ void ento::registerNoReturnFunctionChecker(CheckerManager &mgr) {
mgr.registerChecker<NoReturnFunctionChecker>();
}
-bool ento::shouldRegisterNoReturnFunctionChecker(const LangOptions &LO) {
+bool ento::shouldRegisterNoReturnFunctionChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
index 6ffc89745365..534b5d68434f 100644
--- a/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
@@ -14,57 +14,97 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/Attr.h"
+#include "clang/Analysis/AnyCall.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/StringExtras.h"
using namespace clang;
using namespace ento;
namespace {
class NonNullParamChecker
- : public Checker< check::PreCall, EventDispatcher<ImplicitNullDerefEvent> > {
+ : public Checker<check::PreCall, check::BeginFunction,
+ EventDispatcher<ImplicitNullDerefEvent>> {
mutable std::unique_ptr<BugType> BTAttrNonNull;
mutable std::unique_ptr<BugType> BTNullRefArg;
public:
-
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkBeginFunction(CheckerContext &C) const;
std::unique_ptr<PathSensitiveBugReport>
- genReportNullAttrNonNull(const ExplodedNode *ErrorN,
- const Expr *ArgE,
+ genReportNullAttrNonNull(const ExplodedNode *ErrorN, const Expr *ArgE,
unsigned IdxOfArg) const;
std::unique_ptr<PathSensitiveBugReport>
genReportReferenceToNullPointer(const ExplodedNode *ErrorN,
const Expr *ArgE) const;
};
-} // end anonymous namespace
-/// \return Bitvector marking non-null attributes.
-static llvm::SmallBitVector getNonNullAttrs(const CallEvent &Call) {
+template <class CallType>
+void setBitsAccordingToFunctionAttributes(const CallType &Call,
+ llvm::SmallBitVector &AttrNonNull) {
const Decl *FD = Call.getDecl();
- unsigned NumArgs = Call.getNumArgs();
- llvm::SmallBitVector AttrNonNull(NumArgs);
+
for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
if (!NonNull->args_size()) {
- AttrNonNull.set(0, NumArgs);
+ // Lack of attribute parameters means that all of the parameters are
+ // implicitly marked as non-null.
+ AttrNonNull.set();
break;
}
+
for (const ParamIdx &Idx : NonNull->args()) {
+ // 'nonnull' attribute's parameters are 1-based and should be adjusted to
+ // match actual AST parameter/argument indices.
unsigned IdxAST = Idx.getASTIndex();
- if (IdxAST >= NumArgs)
+ if (IdxAST >= AttrNonNull.size())
continue;
AttrNonNull.set(IdxAST);
}
}
+}
+
+template <class CallType>
+void setBitsAccordingToParameterAttributes(const CallType &Call,
+ llvm::SmallBitVector &AttrNonNull) {
+ for (const ParmVarDecl *Parameter : Call.parameters()) {
+ unsigned ParameterIndex = Parameter->getFunctionScopeIndex();
+ if (ParameterIndex == AttrNonNull.size())
+ break;
+
+ if (Parameter->hasAttr<NonNullAttr>())
+ AttrNonNull.set(ParameterIndex);
+ }
+}
+
+template <class CallType>
+llvm::SmallBitVector getNonNullAttrsImpl(const CallType &Call,
+ unsigned ExpectedSize) {
+ llvm::SmallBitVector AttrNonNull(ExpectedSize);
+
+ setBitsAccordingToFunctionAttributes(Call, AttrNonNull);
+ setBitsAccordingToParameterAttributes(Call, AttrNonNull);
+
return AttrNonNull;
}
+/// \return Bitvector marking non-null attributes.
+llvm::SmallBitVector getNonNullAttrs(const CallEvent &Call) {
+ return getNonNullAttrsImpl(Call, Call.getNumArgs());
+}
+
+/// \return Bitvector marking non-null attributes.
+llvm::SmallBitVector getNonNullAttrs(const AnyCall &Call) {
+ return getNonNullAttrsImpl(Call, Call.param_size());
+}
+} // end anonymous namespace
+
void NonNullParamChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
if (!Call.getDecl())
@@ -74,7 +114,7 @@ void NonNullParamChecker::checkPreCall(const CallEvent &Call,
unsigned NumArgs = Call.getNumArgs();
ProgramStateRef state = C.getState();
- ArrayRef<ParmVarDecl*> parms = Call.parameters();
+ ArrayRef<ParmVarDecl *> parms = Call.parameters();
for (unsigned idx = 0; idx < NumArgs; ++idx) {
// For vararg functions, a corresponding parameter decl may not exist.
@@ -82,15 +122,11 @@ void NonNullParamChecker::checkPreCall(const CallEvent &Call,
// Check if the parameter is a reference. We want to report when reference
// to a null pointer is passed as a parameter.
- bool haveRefTypeParam =
+ bool HasRefTypeParam =
HasParam ? parms[idx]->getType()->isReferenceType() : false;
- bool haveAttrNonNull = AttrNonNull[idx];
+ bool ExpectedToBeNonNull = AttrNonNull.test(idx);
- // Check if the parameter is also marked 'nonnull'.
- if (!haveAttrNonNull && HasParam)
- haveAttrNonNull = parms[idx]->hasAttr<NonNullAttr>();
-
- if (!haveAttrNonNull && !haveRefTypeParam)
+ if (!ExpectedToBeNonNull && !HasRefTypeParam)
continue;
// If the value is unknown or undefined, we can't perform this check.
@@ -100,10 +136,10 @@ void NonNullParamChecker::checkPreCall(const CallEvent &Call,
if (!DV)
continue;
- assert(!haveRefTypeParam || DV->getAs<Loc>());
+ assert(!HasRefTypeParam || DV->getAs<Loc>());
// Process the case when the argument is not a location.
- if (haveAttrNonNull && !DV->getAs<Loc>()) {
+ if (ExpectedToBeNonNull && !DV->getAs<Loc>()) {
// If the argument is a union type, we want to handle a potential
// transparent_union GCC extension.
if (!ArgE)
@@ -144,9 +180,9 @@ void NonNullParamChecker::checkPreCall(const CallEvent &Call,
if (ExplodedNode *errorNode = C.generateErrorNode(stateNull)) {
std::unique_ptr<BugReport> R;
- if (haveAttrNonNull)
+ if (ExpectedToBeNonNull)
R = genReportNullAttrNonNull(errorNode, ArgE, idx + 1);
- else if (haveRefTypeParam)
+ else if (HasRefTypeParam)
R = genReportReferenceToNullPointer(errorNode, ArgE);
// Highlight the range of the argument that was null.
@@ -163,8 +199,8 @@ void NonNullParamChecker::checkPreCall(const CallEvent &Call,
if (stateNull) {
if (ExplodedNode *N = C.generateSink(stateNull, C.getPredecessor())) {
ImplicitNullDerefEvent event = {
- V, false, N, &C.getBugReporter(),
- /*IsDirectDereference=*/haveRefTypeParam};
+ V, false, N, &C.getBugReporter(),
+ /*IsDirectDereference=*/HasRefTypeParam};
dispatchEvent(event);
}
}
@@ -179,6 +215,65 @@ void NonNullParamChecker::checkPreCall(const CallEvent &Call,
C.addTransition(state);
}
+/// We want to trust developer annotations and consider all 'nonnull' parameters
+/// as non-null indeed. Each marked parameter will get a corresponding
+/// constraint.
+///
+/// This approach will not only help us to get rid of some false positives, but
+/// remove duplicates and shorten warning traces as well.
+///
+/// \code
+/// void foo(int *x) [[gnu::nonnull]] {
+/// // . . .
+/// *x = 42; // we don't want to consider this as an error...
+/// // . . .
+/// }
+///
+/// foo(nullptr); // ...and report here instead
+/// \endcode
+void NonNullParamChecker::checkBeginFunction(CheckerContext &Context) const {
+ // Planned assumption makes sense only for top-level functions.
+ // Inlined functions will get similar constraints as part of 'checkPreCall'.
+ if (!Context.inTopFrame())
+ return;
+
+ const LocationContext *LocContext = Context.getLocationContext();
+
+ const Decl *FD = LocContext->getDecl();
+ // AnyCall helps us here to avoid checking for FunctionDecl and ObjCMethodDecl
+ // separately and aggregates interfaces of these classes.
+ auto AbstractCall = AnyCall::forDecl(FD);
+ if (!AbstractCall)
+ return;
+
+ ProgramStateRef State = Context.getState();
+ llvm::SmallBitVector ParameterNonNullMarks = getNonNullAttrs(*AbstractCall);
+
+ for (const ParmVarDecl *Parameter : AbstractCall->parameters()) {
+ // 1. Check parameter if it is annotated as non-null
+ if (!ParameterNonNullMarks.test(Parameter->getFunctionScopeIndex()))
+ continue;
+
+ // 2. Check that parameter is a pointer.
+ // Nonnull attribute can be applied to non-pointers (by default
+ // __attribute__(nonnull) implies "all parameters").
+ if (!Parameter->getType()->isPointerType())
+ continue;
+
+ Loc ParameterLoc = State->getLValue(Parameter, LocContext);
+ // We never consider top-level function parameters undefined.
+ auto StoredVal =
+ State->getSVal(ParameterLoc).castAs<DefinedOrUnknownSVal>();
+
+ // 3. Assume that it is indeed non-null
+ if (ProgramStateRef NewState = State->assume(StoredVal, true)) {
+ State = NewState;
+ }
+ }
+
+ Context.addTransition(State);
+}
+
std::unique_ptr<PathSensitiveBugReport>
NonNullParamChecker::genReportNullAttrNonNull(const ExplodedNode *ErrorNode,
const Expr *ArgE,
@@ -226,6 +321,6 @@ void ento::registerNonNullParamChecker(CheckerManager &mgr) {
mgr.registerChecker<NonNullParamChecker>();
}
-bool ento::shouldRegisterNonNullParamChecker(const LangOptions &LO) {
+bool ento::shouldRegisterNonNullParamChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
index 6efba433eed2..80b705fb7392 100644
--- a/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
@@ -147,6 +147,6 @@ void ento::registerNonnullGlobalConstantsChecker(CheckerManager &Mgr) {
Mgr.registerChecker<NonnullGlobalConstantsChecker>();
}
-bool ento::shouldRegisterNonnullGlobalConstantsChecker(const LangOptions &LO) {
+bool ento::shouldRegisterNonnullGlobalConstantsChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
index 922048733c7c..bc7a8a3b12a1 100644
--- a/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
@@ -81,8 +81,7 @@ class NullabilityChecker
: public Checker<check::Bind, check::PreCall, check::PreStmt<ReturnStmt>,
check::PostCall, check::PostStmt<ExplicitCastExpr>,
check::PostObjCMessage, check::DeadSymbols,
- check::Event<ImplicitNullDerefEvent>> {
- mutable std::unique_ptr<BugType> BT;
+ check::Location, check::Event<ImplicitNullDerefEvent>> {
public:
// If true, the checker will not diagnose nullabilility issues for calls
@@ -101,25 +100,32 @@ public:
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
void checkEvent(ImplicitNullDerefEvent Event) const;
+ void checkLocation(SVal Location, bool IsLoad, const Stmt *S,
+ CheckerContext &C) const;
void printState(raw_ostream &Out, ProgramStateRef State, const char *NL,
const char *Sep) const override;
- struct NullabilityChecksFilter {
- DefaultBool CheckNullPassedToNonnull;
- DefaultBool CheckNullReturnedFromNonnull;
- DefaultBool CheckNullableDereferenced;
- DefaultBool CheckNullablePassedToNonnull;
- DefaultBool CheckNullableReturnedFromNonnull;
-
- CheckerNameRef CheckNameNullPassedToNonnull;
- CheckerNameRef CheckNameNullReturnedFromNonnull;
- CheckerNameRef CheckNameNullableDereferenced;
- CheckerNameRef CheckNameNullablePassedToNonnull;
- CheckerNameRef CheckNameNullableReturnedFromNonnull;
+ enum CheckKind {
+ CK_NullPassedToNonnull,
+ CK_NullReturnedFromNonnull,
+ CK_NullableDereferenced,
+ CK_NullablePassedToNonnull,
+ CK_NullableReturnedFromNonnull,
+ CK_NumCheckKinds
};
- NullabilityChecksFilter Filter;
+ DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ CheckerNameRef CheckNames[CK_NumCheckKinds];
+ mutable std::unique_ptr<BugType> BTs[CK_NumCheckKinds];
+
+ const std::unique_ptr<BugType> &getBugType(CheckKind Kind) const {
+ if (!BTs[Kind])
+ BTs[Kind].reset(new BugType(CheckNames[Kind], "Nullability",
+ categories::MemoryError));
+ return BTs[Kind];
+ }
+
// When set to false no nullability information will be tracked in
// NullabilityMap. It is possible to catch errors like passing a null pointer
// to a callee that expects nonnull argument without the information that is
@@ -151,18 +157,16 @@ private:
///
/// When \p SuppressPath is set to true, no more bugs will be reported on this
/// path by this checker.
- void reportBugIfInvariantHolds(StringRef Msg, ErrorKind Error,
+ void reportBugIfInvariantHolds(StringRef Msg, ErrorKind Error, CheckKind CK,
ExplodedNode *N, const MemRegion *Region,
CheckerContext &C,
const Stmt *ValueExpr = nullptr,
- bool SuppressPath = false) const;
+ bool SuppressPath = false) const;
- void reportBug(StringRef Msg, ErrorKind Error, ExplodedNode *N,
+ void reportBug(StringRef Msg, ErrorKind Error, CheckKind CK, ExplodedNode *N,
const MemRegion *Region, BugReporter &BR,
const Stmt *ValueExpr = nullptr) const {
- if (!BT)
- BT.reset(new BugType(this, "Nullability", categories::MemoryError));
-
+ const std::unique_ptr<BugType> &BT = getBugType(CK);
auto R = std::make_unique<PathSensitiveBugReport>(*BT, Msg, N);
if (Region) {
R->markInteresting(Region);
@@ -430,9 +434,10 @@ static bool checkInvariantViolation(ProgramStateRef State, ExplodedNode *N,
return false;
}
-void NullabilityChecker::reportBugIfInvariantHolds(StringRef Msg,
- ErrorKind Error, ExplodedNode *N, const MemRegion *Region,
- CheckerContext &C, const Stmt *ValueExpr, bool SuppressPath) const {
+void NullabilityChecker::reportBugIfInvariantHolds(
+ StringRef Msg, ErrorKind Error, CheckKind CK, ExplodedNode *N,
+ const MemRegion *Region, CheckerContext &C, const Stmt *ValueExpr,
+ bool SuppressPath) const {
ProgramStateRef OriginalState = N->getState();
if (checkInvariantViolation(OriginalState, N, C))
@@ -442,7 +447,7 @@ void NullabilityChecker::reportBugIfInvariantHolds(StringRef Msg,
N = C.addTransition(OriginalState, N);
}
- reportBug(Msg, Error, N, Region, C.getBugReporter(), ValueExpr);
+ reportBug(Msg, Error, CK, N, Region, C.getBugReporter(), ValueExpr);
}
/// Cleaning up the program state.
@@ -487,34 +492,76 @@ void NullabilityChecker::checkEvent(ImplicitNullDerefEvent Event) const {
if (!TrackedNullability)
return;
- if (Filter.CheckNullableDereferenced &&
+ if (ChecksEnabled[CK_NullableDereferenced] &&
TrackedNullability->getValue() == Nullability::Nullable) {
BugReporter &BR = *Event.BR;
// Do not suppress errors on defensive code paths, because dereferencing
// a nullable pointer is always an error.
if (Event.IsDirectDereference)
reportBug("Nullable pointer is dereferenced",
- ErrorKind::NullableDereferenced, Event.SinkNode, Region, BR);
+ ErrorKind::NullableDereferenced, CK_NullableDereferenced,
+ Event.SinkNode, Region, BR);
else {
reportBug("Nullable pointer is passed to a callee that requires a "
- "non-null", ErrorKind::NullablePassedToNonnull,
+ "non-null",
+ ErrorKind::NullablePassedToNonnull, CK_NullableDereferenced,
Event.SinkNode, Region, BR);
}
}
}
+// Whenever we see a load from a typed memory region that's been annotated as
+// 'nonnull', we want to trust the user on that and assume that it is is indeed
+// non-null.
+//
+// We do so even if the value is known to have been assigned to null.
+// The user should be warned on assigning the null value to a non-null pointer
+// as opposed to warning on the later dereference of this pointer.
+//
+// \code
+// int * _Nonnull var = 0; // we want to warn the user here...
+// // . . .
+// *var = 42; // ...and not here
+// \endcode
+void NullabilityChecker::checkLocation(SVal Location, bool IsLoad,
+ const Stmt *S,
+ CheckerContext &Context) const {
+ // We should care only about loads.
+ // The main idea is to add a constraint whenever we're loading a value from
+ // an annotated pointer type.
+ if (!IsLoad)
+ return;
+
+ // Annotations that we want to consider make sense only for types.
+ const auto *Region =
+ dyn_cast_or_null<TypedValueRegion>(Location.getAsRegion());
+ if (!Region)
+ return;
+
+ ProgramStateRef State = Context.getState();
+
+ auto StoredVal = State->getSVal(Region).getAs<loc::MemRegionVal>();
+ if (!StoredVal)
+ return;
+
+ Nullability NullabilityOfTheLoadedValue =
+ getNullabilityAnnotation(Region->getValueType());
+
+ if (NullabilityOfTheLoadedValue == Nullability::Nonnull) {
+ // It doesn't matter what we think about this particular pointer, it should
+ // be considered non-null as annotated by the developer.
+ if (ProgramStateRef NewState = State->assume(*StoredVal, true)) {
+ Context.addTransition(NewState);
+ }
+ }
+}
+
/// Find the outermost subexpression of E that is not an implicit cast.
/// This looks through the implicit casts to _Nonnull that ARC adds to
/// return expressions of ObjC types when the return type of the function or
/// method is non-null but the express is not.
static const Expr *lookThroughImplicitCasts(const Expr *E) {
- assert(E);
-
- while (auto *ICE = dyn_cast<ImplicitCastExpr>(E)) {
- E = ICE->getSubExpr();
- }
-
- return E;
+ return E->IgnoreImpCasts();
}
/// This method check when nullable pointer or null value is returned from a
@@ -572,11 +619,9 @@ void NullabilityChecker::checkPreStmt(const ReturnStmt *S,
bool NullReturnedFromNonNull = (RequiredNullability == Nullability::Nonnull &&
Nullness == NullConstraint::IsNull);
- if (Filter.CheckNullReturnedFromNonnull &&
- NullReturnedFromNonNull &&
+ if (ChecksEnabled[CK_NullReturnedFromNonnull] && NullReturnedFromNonNull &&
RetExprTypeLevelNullability != Nullability::Nonnull &&
- !InSuppressedMethodFamily &&
- C.getLocationContext()->inTopFrame()) {
+ !InSuppressedMethodFamily && C.getLocationContext()->inTopFrame()) {
static CheckerProgramPointTag Tag(this, "NullReturnedFromNonnull");
ExplodedNode *N = C.generateErrorNode(State, &Tag);
if (!N)
@@ -587,8 +632,8 @@ void NullabilityChecker::checkPreStmt(const ReturnStmt *S,
OS << (RetExpr->getType()->isObjCObjectPointerType() ? "nil" : "Null");
OS << " returned from a " << C.getDeclDescription(D) <<
" that is expected to return a non-null value";
- reportBugIfInvariantHolds(OS.str(),
- ErrorKind::NilReturnedToNonnull, N, nullptr, C,
+ reportBugIfInvariantHolds(OS.str(), ErrorKind::NilReturnedToNonnull,
+ CK_NullReturnedFromNonnull, N, nullptr, C,
RetExpr);
return;
}
@@ -609,7 +654,7 @@ void NullabilityChecker::checkPreStmt(const ReturnStmt *S,
State->get<NullabilityMap>(Region);
if (TrackedNullability) {
Nullability TrackedNullabValue = TrackedNullability->getValue();
- if (Filter.CheckNullableReturnedFromNonnull &&
+ if (ChecksEnabled[CK_NullableReturnedFromNonnull] &&
Nullness != NullConstraint::IsNotNull &&
TrackedNullabValue == Nullability::Nullable &&
RequiredNullability == Nullability::Nonnull) {
@@ -621,9 +666,8 @@ void NullabilityChecker::checkPreStmt(const ReturnStmt *S,
OS << "Nullable pointer is returned from a " << C.getDeclDescription(D) <<
" that is expected to return a non-null value";
- reportBugIfInvariantHolds(OS.str(),
- ErrorKind::NullableReturnedToNonnull, N,
- Region, C);
+ reportBugIfInvariantHolds(OS.str(), ErrorKind::NullableReturnedToNonnull,
+ CK_NullableReturnedFromNonnull, N, Region, C);
}
return;
}
@@ -674,7 +718,8 @@ void NullabilityChecker::checkPreCall(const CallEvent &Call,
unsigned ParamIdx = Param->getFunctionScopeIndex() + 1;
- if (Filter.CheckNullPassedToNonnull && Nullness == NullConstraint::IsNull &&
+ if (ChecksEnabled[CK_NullPassedToNonnull] &&
+ Nullness == NullConstraint::IsNull &&
ArgExprTypeLevelNullability != Nullability::Nonnull &&
RequiredNullability == Nullability::Nonnull &&
isDiagnosableCall(Call)) {
@@ -687,9 +732,9 @@ void NullabilityChecker::checkPreCall(const CallEvent &Call,
OS << (Param->getType()->isObjCObjectPointerType() ? "nil" : "Null");
OS << " passed to a callee that requires a non-null " << ParamIdx
<< llvm::getOrdinalSuffix(ParamIdx) << " parameter";
- reportBugIfInvariantHolds(OS.str(), ErrorKind::NilPassedToNonnull, N,
- nullptr, C,
- ArgExpr, /*SuppressPath=*/false);
+ reportBugIfInvariantHolds(OS.str(), ErrorKind::NilPassedToNonnull,
+ CK_NullPassedToNonnull, N, nullptr, C, ArgExpr,
+ /*SuppressPath=*/false);
return;
}
@@ -705,7 +750,7 @@ void NullabilityChecker::checkPreCall(const CallEvent &Call,
TrackedNullability->getValue() != Nullability::Nullable)
continue;
- if (Filter.CheckNullablePassedToNonnull &&
+ if (ChecksEnabled[CK_NullablePassedToNonnull] &&
RequiredNullability == Nullability::Nonnull &&
isDiagnosableCall(Call)) {
ExplodedNode *N = C.addTransition(State);
@@ -713,17 +758,18 @@ void NullabilityChecker::checkPreCall(const CallEvent &Call,
llvm::raw_svector_ostream OS(SBuf);
OS << "Nullable pointer is passed to a callee that requires a non-null "
<< ParamIdx << llvm::getOrdinalSuffix(ParamIdx) << " parameter";
- reportBugIfInvariantHolds(OS.str(),
- ErrorKind::NullablePassedToNonnull, N,
- Region, C, ArgExpr, /*SuppressPath=*/true);
+ reportBugIfInvariantHolds(OS.str(), ErrorKind::NullablePassedToNonnull,
+ CK_NullablePassedToNonnull, N, Region, C,
+ ArgExpr, /*SuppressPath=*/true);
return;
}
- if (Filter.CheckNullableDereferenced &&
+ if (ChecksEnabled[CK_NullableDereferenced] &&
Param->getType()->isReferenceType()) {
ExplodedNode *N = C.addTransition(State);
reportBugIfInvariantHolds("Nullable pointer is dereferenced",
- ErrorKind::NullableDereferenced, N, Region,
- C, ArgExpr, /*SuppressPath=*/true);
+ ErrorKind::NullableDereferenced,
+ CK_NullableDereferenced, N, Region, C,
+ ArgExpr, /*SuppressPath=*/true);
return;
}
continue;
@@ -1083,8 +1129,7 @@ void NullabilityChecker::checkBind(SVal L, SVal V, const Stmt *S,
bool NullAssignedToNonNull = (LocNullability == Nullability::Nonnull &&
RhsNullness == NullConstraint::IsNull);
- if (Filter.CheckNullPassedToNonnull &&
- NullAssignedToNonNull &&
+ if (ChecksEnabled[CK_NullPassedToNonnull] && NullAssignedToNonNull &&
ValNullability != Nullability::Nonnull &&
ValueExprTypeLevelNullability != Nullability::Nonnull &&
!isARCNilInitializedLocal(C, S)) {
@@ -1102,9 +1147,8 @@ void NullabilityChecker::checkBind(SVal L, SVal V, const Stmt *S,
llvm::raw_svector_ostream OS(SBuf);
OS << (LocType->isObjCObjectPointerType() ? "nil" : "Null");
OS << " assigned to a pointer which is expected to have non-null value";
- reportBugIfInvariantHolds(OS.str(),
- ErrorKind::NilAssignedToNonnull, N, nullptr, C,
- ValueStmt);
+ reportBugIfInvariantHolds(OS.str(), ErrorKind::NilAssignedToNonnull,
+ CK_NullPassedToNonnull, N, nullptr, C, ValueStmt);
return;
}
@@ -1130,14 +1174,14 @@ void NullabilityChecker::checkBind(SVal L, SVal V, const Stmt *S,
if (RhsNullness == NullConstraint::IsNotNull ||
TrackedNullability->getValue() != Nullability::Nullable)
return;
- if (Filter.CheckNullablePassedToNonnull &&
+ if (ChecksEnabled[CK_NullablePassedToNonnull] &&
LocNullability == Nullability::Nonnull) {
static CheckerProgramPointTag Tag(this, "NullablePassedToNonnull");
ExplodedNode *N = C.addTransition(State, C.getPredecessor(), &Tag);
reportBugIfInvariantHolds("Nullable pointer is assigned to a pointer "
"which is expected to have non-null value",
- ErrorKind::NullableAssignedToNonnull, N,
- ValueRegion, C);
+ ErrorKind::NullableAssignedToNonnull,
+ CK_NullablePassedToNonnull, N, ValueRegion, C);
}
return;
}
@@ -1188,15 +1232,16 @@ void ento::registerNullabilityBase(CheckerManager &mgr) {
mgr.registerChecker<NullabilityChecker>();
}
-bool ento::shouldRegisterNullabilityBase(const LangOptions &LO) {
+bool ento::shouldRegisterNullabilityBase(const CheckerManager &mgr) {
return true;
}
#define REGISTER_CHECKER(name, trackingRequired) \
void ento::register##name##Checker(CheckerManager &mgr) { \
NullabilityChecker *checker = mgr.getChecker<NullabilityChecker>(); \
- checker->Filter.Check##name = true; \
- checker->Filter.CheckName##name = mgr.getCurrentCheckerName(); \
+ checker->ChecksEnabled[NullabilityChecker::CK_##name] = true; \
+ checker->CheckNames[NullabilityChecker::CK_##name] = \
+ mgr.getCurrentCheckerName(); \
checker->NeedTracking = checker->NeedTracking || trackingRequired; \
checker->NoDiagnoseCallsToSystemHeaders = \
checker->NoDiagnoseCallsToSystemHeaders || \
@@ -1204,7 +1249,7 @@ bool ento::shouldRegisterNullabilityBase(const LangOptions &LO) {
checker, "NoDiagnoseCallsToSystemHeaders", true); \
} \
\
- bool ento::shouldRegister##name##Checker(const LangOptions &LO) { \
+ bool ento::shouldRegister##name##Checker(const CheckerManager &mgr) { \
return true; \
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
index 1053424ae6fa..df01cc760e7e 100644
--- a/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
@@ -57,7 +57,7 @@ public:
Callback(const NumberObjectConversionChecker *C,
BugReporter &BR, AnalysisDeclContext *ADC)
: C(C), BR(BR), ADC(ADC) {}
- virtual void run(const MatchFinder::MatchResult &Result);
+ void run(const MatchFinder::MatchResult &Result) override;
};
} // end of anonymous namespace
@@ -338,7 +338,7 @@ void NumberObjectConversionChecker::checkASTCodeBody(const Decl *D,
MatchFinder F;
Callback CB(this, BR, AM.getAnalysisDeclContext(D));
- F.addMatcher(stmt(forEachDescendant(FinalM)), &CB);
+ F.addMatcher(traverse(TK_AsIs, stmt(forEachDescendant(FinalM))), &CB);
F.match(*D->getBody(), AM.getASTContext());
}
@@ -349,6 +349,6 @@ void ento::registerNumberObjectConversionChecker(CheckerManager &Mgr) {
Mgr.getAnalyzerOptions().getCheckerBooleanOption(Chk, "Pedantic");
}
-bool ento::shouldRegisterNumberObjectConversionChecker(const LangOptions &LO) {
+bool ento::shouldRegisterNumberObjectConversionChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/OSObjectCStyleCast.cpp b/clang/lib/StaticAnalyzer/Checkers/OSObjectCStyleCast.cpp
index 5b9895c338d8..53ed0e187a4c 100644
--- a/clang/lib/StaticAnalyzer/Checkers/OSObjectCStyleCast.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/OSObjectCStyleCast.cpp
@@ -55,8 +55,7 @@ static void emitDiagnostics(const BoundNodes &Nodes,
CE->getSourceRange());
}
-static auto hasTypePointingTo(DeclarationMatcher DeclM)
- -> decltype(hasType(pointerType())) {
+static decltype(auto) hasTypePointingTo(DeclarationMatcher DeclM) {
return hasType(pointerType(pointee(hasDeclaration(DeclM))));
}
@@ -85,6 +84,6 @@ void ento::registerOSObjectCStyleCast(CheckerManager &Mgr) {
Mgr.registerChecker<OSObjectCStyleCastChecker>();
}
-bool ento::shouldRegisterOSObjectCStyleCast(const LangOptions &LO) {
+bool ento::shouldRegisterOSObjectCStyleCast(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
index 0e25817c8793..43af4bb14286 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
@@ -91,6 +91,7 @@ void ento::registerObjCAtSyncChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCAtSyncChecker>();
}
-bool ento::shouldRegisterObjCAtSyncChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCAtSyncChecker(const CheckerManager &mgr) {
+ const LangOptions &LO = mgr.getLangOpts();
return LO.ObjC;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
index d2371fe60d21..7fd6e2abef4c 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
@@ -30,6 +30,7 @@
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "llvm/ADT/Twine.h"
@@ -44,6 +45,7 @@ const char *ProblematicWriteBind = "problematicwrite";
const char *CapturedBind = "capturedbind";
const char *ParamBind = "parambind";
const char *IsMethodBind = "ismethodbind";
+const char *IsARPBind = "isautoreleasepoolbind";
class ObjCAutoreleaseWriteChecker : public Checker<check::ASTCodeBody> {
public:
@@ -100,8 +102,7 @@ static inline std::vector<llvm::StringRef> toRefs(std::vector<std::string> V) {
return std::vector<llvm::StringRef>(V.begin(), V.end());
}
-static auto callsNames(std::vector<std::string> FunctionNames)
- -> decltype(callee(functionDecl())) {
+static decltype(auto) callsNames(std::vector<std::string> FunctionNames) {
return callee(functionDecl(hasAnyName(toRefs(FunctionNames))));
}
@@ -129,21 +130,39 @@ static void emitDiagnostics(BoundNodes &Match, const Decl *D, BugReporter &BR,
SourceRange Range = MarkedStmt->getSourceRange();
PathDiagnosticLocation Location = PathDiagnosticLocation::createBegin(
MarkedStmt, BR.getSourceManager(), ADC);
+
bool IsMethod = Match.getNodeAs<ObjCMethodDecl>(IsMethodBind) != nullptr;
- const char *Name = IsMethod ? "method" : "function";
-
- BR.EmitBasicReport(
- ADC->getDecl(), Checker,
- /*Name=*/(llvm::Twine(ActionMsg)
- + " autoreleasing out parameter inside autorelease pool").str(),
- /*BugCategory=*/"Memory",
- (llvm::Twine(ActionMsg) + " autoreleasing out parameter " +
- (IsCapture ? "'" + PVD->getName() + "'" + " " : "") + "inside " +
- "autorelease pool that may exit before " + Name + " returns; consider "
- "writing first to a strong local variable declared outside of the block")
- .str(),
- Location,
- Range);
+ const char *FunctionDescription = IsMethod ? "method" : "function";
+ bool IsARP = Match.getNodeAs<ObjCAutoreleasePoolStmt>(IsARPBind) != nullptr;
+
+ llvm::SmallString<128> BugNameBuf;
+ llvm::raw_svector_ostream BugName(BugNameBuf);
+ BugName << ActionMsg
+ << " autoreleasing out parameter inside autorelease pool";
+
+ llvm::SmallString<128> BugMessageBuf;
+ llvm::raw_svector_ostream BugMessage(BugMessageBuf);
+ BugMessage << ActionMsg << " autoreleasing out parameter ";
+ if (IsCapture)
+ BugMessage << "'" + PVD->getName() + "' ";
+
+ BugMessage << "inside ";
+ if (IsARP)
+ BugMessage << "locally-scoped autorelease pool;";
+ else
+ BugMessage << "autorelease pool that may exit before "
+ << FunctionDescription << " returns;";
+
+ BugMessage << " consider writing first to a strong local variable"
+ " declared outside ";
+ if (IsARP)
+ BugMessage << "of the autorelease pool";
+ else
+ BugMessage << "of the block";
+
+ BR.EmitBasicReport(ADC->getDecl(), Checker, BugName.str(),
+ categories::MemoryRefCount, BugMessage.str(), Location,
+ Range);
}
void ObjCAutoreleaseWriteChecker::checkASTCodeBody(const Decl *D,
@@ -189,9 +208,16 @@ void ObjCAutoreleaseWriteChecker::checkASTCodeBody(const Decl *D,
WritesOrCapturesInBlockM))
));
- auto HasParamAndWritesInMarkedFuncM = allOf(
- hasAnyParameter(DoublePointerParamM),
- forEachDescendant(BlockPassedToMarkedFuncM));
+ // WritesIntoM happens inside an explicit @autoreleasepool.
+ auto WritesOrCapturesInPoolM =
+ autoreleasePoolStmt(
+ forEachDescendant(stmt(anyOf(WritesIntoM, CapturedInParamM))))
+ .bind(IsARPBind);
+
+ auto HasParamAndWritesInMarkedFuncM =
+ allOf(hasAnyParameter(DoublePointerParamM),
+ anyOf(forEachDescendant(BlockPassedToMarkedFuncM),
+ forEachDescendant(WritesOrCapturesInPoolM)));
auto MatcherM = decl(anyOf(
objcMethodDecl(HasParamAndWritesInMarkedFuncM).bind(IsMethodBind),
@@ -207,6 +233,6 @@ void ento::registerAutoreleaseWriteChecker(CheckerManager &Mgr) {
Mgr.registerChecker<ObjCAutoreleaseWriteChecker>();
}
-bool ento::shouldRegisterAutoreleaseWriteChecker(const LangOptions &LO) {
+bool ento::shouldRegisterAutoreleaseWriteChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
index 4450c464f89d..8428b2294ba6 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
@@ -172,6 +172,6 @@ void ento::registerObjCContainersASTChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCContainersASTChecker>();
}
-bool ento::shouldRegisterObjCContainersASTChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCContainersASTChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
index 8abb926d4862..8c2008a7ceb4 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
@@ -58,7 +58,7 @@ public:
PointerEscapeKind Kind) const;
void printState(raw_ostream &OS, ProgramStateRef State,
- const char *NL, const char *Sep) const;
+ const char *NL, const char *Sep) const override;
};
} // end anonymous namespace
@@ -188,6 +188,6 @@ void ento::registerObjCContainersChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCContainersChecker>();
}
-bool ento::shouldRegisterObjCContainersChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCContainersChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
index 1870c08432de..24e2a4dea922 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
@@ -221,7 +221,7 @@ void ento::registerObjCSuperCallChecker(CheckerManager &Mgr) {
Mgr.registerChecker<ObjCSuperCallChecker>();
}
-bool ento::shouldRegisterObjCSuperCallChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCSuperCallChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp
index 9a49200545e3..4636fd160511 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp
@@ -79,6 +79,6 @@ void ento::registerObjCPropertyChecker(CheckerManager &Mgr) {
Mgr.registerChecker<ObjCPropertyChecker>();
}
-bool ento::shouldRegisterObjCPropertyChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCPropertyChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
index 344285750f0e..17d3c042ac40 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
@@ -437,6 +437,6 @@ void ento::registerObjCSelfInitChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCSelfInitChecker>();
}
-bool ento::shouldRegisterObjCSelfInitChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCSelfInitChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
index 0575be845374..3547b7bb61a2 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
@@ -116,13 +116,14 @@ void ObjCSuperDeallocChecker::checkPostObjCMessage(const ObjCMethodCall &M,
return;
ProgramStateRef State = C.getState();
- SymbolRef ReceiverSymbol = M.getSelfSVal().getAsSymbol();
- assert(ReceiverSymbol && "No receiver symbol at call to [super dealloc]?");
+ const LocationContext *LC = C.getLocationContext();
+ SymbolRef SelfSymbol = State->getSelfSVal(LC).getAsSymbol();
+ assert(SelfSymbol && "No receiver symbol at call to [super dealloc]?");
// We add this transition in checkPostObjCMessage to avoid warning when
// we inline a call to [super dealloc] where the inlined call itself
// calls [super dealloc].
- State = State->add<CalledSuperDealloc>(ReceiverSymbol);
+ State = State->add<CalledSuperDealloc>(SelfSymbol);
C.addTransition(State);
}
@@ -284,6 +285,6 @@ void ento::registerObjCSuperDeallocChecker(CheckerManager &Mgr) {
Mgr.registerChecker<ObjCSuperDeallocChecker>();
}
-bool ento::shouldRegisterObjCSuperDeallocChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCSuperDeallocChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
index cb4770451572..c9828c36a06a 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
@@ -186,6 +186,6 @@ void ento::registerObjCUnusedIvarsChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCUnusedIvarsChecker>();
}
-bool ento::shouldRegisterObjCUnusedIvarsChecker(const LangOptions &LO) {
+bool ento::shouldRegisterObjCUnusedIvarsChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
index 4a3c2b8cd40e..0b00664c7c10 100644
--- a/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
@@ -353,6 +353,6 @@ void ento::registerPaddingChecker(CheckerManager &Mgr) {
Checker, "AllowedPad", "a non-negative value");
}
-bool ento::shouldRegisterPaddingChecker(const LangOptions &LO) {
+bool ento::shouldRegisterPaddingChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
index 259f23abdc95..d3e2849a0ce6 100644
--- a/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
@@ -343,6 +343,6 @@ void ento::registerPointerArithChecker(CheckerManager &mgr) {
mgr.registerChecker<PointerArithChecker>();
}
-bool ento::shouldRegisterPointerArithChecker(const LangOptions &LO) {
+bool ento::shouldRegisterPointerArithChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/PointerIterationChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/PointerIterationChecker.cpp
index 307e59b8eebc..8aca6d009cdb 100644
--- a/clang/lib/StaticAnalyzer/Checkers/PointerIterationChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/PointerIterationChecker.cpp
@@ -95,6 +95,7 @@ void ento::registerPointerIterationChecker(CheckerManager &Mgr) {
Mgr.registerChecker<PointerIterationChecker>();
}
-bool ento::shouldRegisterPointerIterationChecker(const LangOptions &LO) {
+bool ento::shouldRegisterPointerIterationChecker(const CheckerManager &mgr) {
+ const LangOptions &LO = mgr.getLangOpts();
return LO.CPlusPlus;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/PointerSortingChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/PointerSortingChecker.cpp
index 586d9d3af2a6..25d87f4acfc9 100644
--- a/clang/lib/StaticAnalyzer/Checkers/PointerSortingChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/PointerSortingChecker.cpp
@@ -54,7 +54,7 @@ static void emitDiagnostics(const BoundNodes &Match, const Decl *D,
OS.str(), Location, Range);
}
-auto callsName(const char *FunctionName) -> decltype(callee(functionDecl())) {
+decltype(auto) callsName(const char *FunctionName) {
return callee(functionDecl(hasName(FunctionName)));
}
@@ -86,8 +86,9 @@ auto matchSortWithPointers() -> decltype(decl()) {
)))
))));
- auto PointerSortM = stmt(callExpr(allOf(SortFuncM, IteratesPointerEltsM))
- ).bind(WarnAtNode);
+ auto PointerSortM = traverse(
+ TK_AsIs,
+ stmt(callExpr(allOf(SortFuncM, IteratesPointerEltsM))).bind(WarnAtNode));
return decl(forEachDescendant(PointerSortM));
}
@@ -108,6 +109,7 @@ void ento::registerPointerSortingChecker(CheckerManager &Mgr) {
Mgr.registerChecker<PointerSortingChecker>();
}
-bool ento::shouldRegisterPointerSortingChecker(const LangOptions &LO) {
+bool ento::shouldRegisterPointerSortingChecker(const CheckerManager &mgr) {
+ const LangOptions &LO = mgr.getLangOpts();
return LO.CPlusPlus;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
index 88d0eb2ae748..81c19d9a0940 100644
--- a/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
@@ -74,6 +74,6 @@ void ento::registerPointerSubChecker(CheckerManager &mgr) {
mgr.registerChecker<PointerSubChecker>();
}
-bool ento::shouldRegisterPointerSubChecker(const LangOptions &LO) {
+bool ento::shouldRegisterPointerSubChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
index 8649b8b96dd0..285d2da104f1 100644
--- a/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
@@ -6,8 +6,14 @@
//
//===----------------------------------------------------------------------===//
//
-// This defines PthreadLockChecker, a simple lock -> unlock checker.
-// Also handles XNU locks, which behave similarly enough to share code.
+// This file defines:
+// * PthreadLockChecker, a simple lock -> unlock checker.
+// Which also checks for XNU locks, which behave similarly enough to share
+// code.
+// * FuchsiaLocksChecker, which is also rather similar.
+// * C11LockChecker which also closely follows Pthread semantics.
+//
+// TODO: Path notes.
//
//===----------------------------------------------------------------------===//
@@ -15,8 +21,8 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
using namespace clang;
using namespace ento;
@@ -46,9 +52,7 @@ public:
return LockState(UnlockedAndPossiblyDestroyed);
}
- bool operator==(const LockState &X) const {
- return K == X.K;
- }
+ bool operator==(const LockState &X) const { return K == X.K; }
bool isLocked() const { return K == Locked; }
bool isUnlocked() const { return K == Unlocked; }
@@ -60,40 +64,182 @@ public:
return K == UnlockedAndPossiblyDestroyed;
}
- void Profile(llvm::FoldingSetNodeID &ID) const {
- ID.AddInteger(K);
- }
+ void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(K); }
};
-class PthreadLockChecker
- : public Checker<check::PostStmt<CallExpr>, check::DeadSymbols> {
- mutable std::unique_ptr<BugType> BT_doublelock;
- mutable std::unique_ptr<BugType> BT_doubleunlock;
- mutable std::unique_ptr<BugType> BT_destroylock;
- mutable std::unique_ptr<BugType> BT_initlock;
- mutable std::unique_ptr<BugType> BT_lor;
- enum LockingSemantics {
- NotApplicable = 0,
- PthreadSemantics,
- XNUSemantics
- };
+class PthreadLockChecker : public Checker<check::PostCall, check::DeadSymbols,
+ check::RegionChanges> {
public:
- void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
- void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
- void printState(raw_ostream &Out, ProgramStateRef State,
- const char *NL, const char *Sep) const override;
+ enum LockingSemantics { NotApplicable = 0, PthreadSemantics, XNUSemantics };
+ enum CheckerKind {
+ CK_PthreadLockChecker,
+ CK_FuchsiaLockChecker,
+ CK_C11LockChecker,
+ CK_NumCheckKinds
+ };
+ DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ CheckerNameRef CheckNames[CK_NumCheckKinds];
+
+private:
+ typedef void (PthreadLockChecker::*FnCheck)(const CallEvent &Call,
+ CheckerContext &C,
+ CheckerKind checkkind) const;
+ CallDescriptionMap<FnCheck> PThreadCallbacks = {
+ // Init.
+ {{"pthread_mutex_init", 2}, &PthreadLockChecker::InitAnyLock},
+ // TODO: pthread_rwlock_init(2 arguments).
+ // TODO: lck_mtx_init(3 arguments).
+ // TODO: lck_mtx_alloc_init(2 arguments) => returns the mutex.
+ // TODO: lck_rw_init(3 arguments).
+ // TODO: lck_rw_alloc_init(2 arguments) => returns the mutex.
+
+ // Acquire.
+ {{"pthread_mutex_lock", 1}, &PthreadLockChecker::AcquirePthreadLock},
+ {{"pthread_rwlock_rdlock", 1}, &PthreadLockChecker::AcquirePthreadLock},
+ {{"pthread_rwlock_wrlock", 1}, &PthreadLockChecker::AcquirePthreadLock},
+ {{"lck_mtx_lock", 1}, &PthreadLockChecker::AcquireXNULock},
+ {{"lck_rw_lock_exclusive", 1}, &PthreadLockChecker::AcquireXNULock},
+ {{"lck_rw_lock_shared", 1}, &PthreadLockChecker::AcquireXNULock},
+
+ // Try.
+ {{"pthread_mutex_trylock", 1}, &PthreadLockChecker::TryPthreadLock},
+ {{"pthread_rwlock_tryrdlock", 1}, &PthreadLockChecker::TryPthreadLock},
+ {{"pthread_rwlock_trywrlock", 1}, &PthreadLockChecker::TryPthreadLock},
+ {{"lck_mtx_try_lock", 1}, &PthreadLockChecker::TryXNULock},
+ {{"lck_rw_try_lock_exclusive", 1}, &PthreadLockChecker::TryXNULock},
+ {{"lck_rw_try_lock_shared", 1}, &PthreadLockChecker::TryXNULock},
+
+ // Release.
+ {{"pthread_mutex_unlock", 1}, &PthreadLockChecker::ReleaseAnyLock},
+ {{"pthread_rwlock_unlock", 1}, &PthreadLockChecker::ReleaseAnyLock},
+ {{"lck_mtx_unlock", 1}, &PthreadLockChecker::ReleaseAnyLock},
+ {{"lck_rw_unlock_exclusive", 1}, &PthreadLockChecker::ReleaseAnyLock},
+ {{"lck_rw_unlock_shared", 1}, &PthreadLockChecker::ReleaseAnyLock},
+ {{"lck_rw_done", 1}, &PthreadLockChecker::ReleaseAnyLock},
+
+ // Destroy.
+ {{"pthread_mutex_destroy", 1}, &PthreadLockChecker::DestroyPthreadLock},
+ {{"lck_mtx_destroy", 2}, &PthreadLockChecker::DestroyXNULock},
+ // TODO: pthread_rwlock_destroy(1 argument).
+ // TODO: lck_rw_destroy(2 arguments).
+ };
- void AcquireLock(CheckerContext &C, const CallExpr *CE, SVal lock,
- bool isTryLock, enum LockingSemantics semantics) const;
+ CallDescriptionMap<FnCheck> FuchsiaCallbacks = {
+ // Init.
+ {{"spin_lock_init", 1}, &PthreadLockChecker::InitAnyLock},
+
+ // Acquire.
+ {{"spin_lock", 1}, &PthreadLockChecker::AcquirePthreadLock},
+ {{"spin_lock_save", 3}, &PthreadLockChecker::AcquirePthreadLock},
+ {{"sync_mutex_lock", 1}, &PthreadLockChecker::AcquirePthreadLock},
+ {{"sync_mutex_lock_with_waiter", 1},
+ &PthreadLockChecker::AcquirePthreadLock},
+
+ // Try.
+ {{"spin_trylock", 1}, &PthreadLockChecker::TryFuchsiaLock},
+ {{"sync_mutex_trylock", 1}, &PthreadLockChecker::TryFuchsiaLock},
+ {{"sync_mutex_timedlock", 2}, &PthreadLockChecker::TryFuchsiaLock},
+
+ // Release.
+ {{"spin_unlock", 1}, &PthreadLockChecker::ReleaseAnyLock},
+ {{"spin_unlock_restore", 3}, &PthreadLockChecker::ReleaseAnyLock},
+ {{"sync_mutex_unlock", 1}, &PthreadLockChecker::ReleaseAnyLock},
+ };
+
+ CallDescriptionMap<FnCheck> C11Callbacks = {
+ // Init.
+ {{"mtx_init", 2}, &PthreadLockChecker::InitAnyLock},
+
+ // Acquire.
+ {{"mtx_lock", 1}, &PthreadLockChecker::AcquirePthreadLock},
+
+ // Try.
+ {{"mtx_trylock", 1}, &PthreadLockChecker::TryC11Lock},
+ {{"mtx_timedlock", 2}, &PthreadLockChecker::TryC11Lock},
+
+ // Release.
+ {{"mtx_unlock", 1}, &PthreadLockChecker::ReleaseAnyLock},
+
+ // Destroy
+ {{"mtx_destroy", 1}, &PthreadLockChecker::DestroyPthreadLock},
+ };
- void ReleaseLock(CheckerContext &C, const CallExpr *CE, SVal lock) const;
- void DestroyLock(CheckerContext &C, const CallExpr *CE, SVal Lock,
- enum LockingSemantics semantics) const;
- void InitLock(CheckerContext &C, const CallExpr *CE, SVal Lock) const;
- void reportUseDestroyedBug(CheckerContext &C, const CallExpr *CE) const;
ProgramStateRef resolvePossiblyDestroyedMutex(ProgramStateRef state,
const MemRegion *lockR,
const SymbolRef *sym) const;
+ void reportUseDestroyedBug(const CallEvent &Call, CheckerContext &C,
+ unsigned ArgNo, CheckerKind checkKind) const;
+
+ // Init.
+ void InitAnyLock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkkind) const;
+ void InitLockAux(const CallEvent &Call, CheckerContext &C, unsigned ArgNo,
+ SVal Lock, CheckerKind checkkind) const;
+
+ // Lock, Try-lock.
+ void AcquirePthreadLock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkkind) const;
+ void AcquireXNULock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkkind) const;
+ void TryPthreadLock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkkind) const;
+ void TryXNULock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkkind) const;
+ void TryFuchsiaLock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkkind) const;
+ void TryC11Lock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkkind) const;
+ void AcquireLockAux(const CallEvent &Call, CheckerContext &C, unsigned ArgNo,
+ SVal lock, bool isTryLock, LockingSemantics semantics,
+ CheckerKind checkkind) const;
+
+ // Release.
+ void ReleaseAnyLock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkkind) const;
+ void ReleaseLockAux(const CallEvent &Call, CheckerContext &C, unsigned ArgNo,
+ SVal lock, CheckerKind checkkind) const;
+
+ // Destroy.
+ void DestroyPthreadLock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkkind) const;
+ void DestroyXNULock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkkind) const;
+ void DestroyLockAux(const CallEvent &Call, CheckerContext &C, unsigned ArgNo,
+ SVal Lock, LockingSemantics semantics,
+ CheckerKind checkkind) const;
+
+public:
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
+ ProgramStateRef
+ checkRegionChanges(ProgramStateRef State, const InvalidatedSymbols *Symbols,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const LocationContext *LCtx, const CallEvent *Call) const;
+ void printState(raw_ostream &Out, ProgramStateRef State, const char *NL,
+ const char *Sep) const override;
+
+private:
+ mutable std::unique_ptr<BugType> BT_doublelock[CK_NumCheckKinds];
+ mutable std::unique_ptr<BugType> BT_doubleunlock[CK_NumCheckKinds];
+ mutable std::unique_ptr<BugType> BT_destroylock[CK_NumCheckKinds];
+ mutable std::unique_ptr<BugType> BT_initlock[CK_NumCheckKinds];
+ mutable std::unique_ptr<BugType> BT_lor[CK_NumCheckKinds];
+
+ void initBugType(CheckerKind checkKind) const {
+ if (BT_doublelock[checkKind])
+ return;
+ BT_doublelock[checkKind].reset(
+ new BugType{CheckNames[checkKind], "Double locking", "Lock checker"});
+ BT_doubleunlock[checkKind].reset(
+ new BugType{CheckNames[checkKind], "Double unlocking", "Lock checker"});
+ BT_destroylock[checkKind].reset(new BugType{
+ CheckNames[checkKind], "Use destroyed lock", "Lock checker"});
+ BT_initlock[checkKind].reset(new BugType{
+ CheckNames[checkKind], "Init invalid lock", "Lock checker"});
+ BT_lor[checkKind].reset(new BugType{CheckNames[checkKind],
+ "Lock order reversal", "Lock checker"});
+ }
};
} // end anonymous namespace
@@ -106,43 +252,23 @@ REGISTER_MAP_WITH_PROGRAMSTATE(LockMap, const MemRegion *, LockState)
// Return values for unresolved calls to pthread_mutex_destroy().
REGISTER_MAP_WITH_PROGRAMSTATE(DestroyRetVal, const MemRegion *, SymbolRef)
-void PthreadLockChecker::checkPostStmt(const CallExpr *CE,
+void PthreadLockChecker::checkPostCall(const CallEvent &Call,
CheckerContext &C) const {
- StringRef FName = C.getCalleeName(CE);
- if (FName.empty())
+ // An additional umbrella check that all functions modeled by this checker
+ // are global C functions.
+ // TODO: Maybe make this the default behavior of CallDescription
+ // with exactly one identifier?
+ // FIXME: Try to handle cases when the implementation was inlined rather
+ // than just giving up.
+ if (!Call.isGlobalCFunction() || C.wasInlined)
return;
- if (CE->getNumArgs() != 1 && CE->getNumArgs() != 2)
- return;
-
- if (FName == "pthread_mutex_lock" ||
- FName == "pthread_rwlock_rdlock" ||
- FName == "pthread_rwlock_wrlock")
- AcquireLock(C, CE, C.getSVal(CE->getArg(0)), false, PthreadSemantics);
- else if (FName == "lck_mtx_lock" ||
- FName == "lck_rw_lock_exclusive" ||
- FName == "lck_rw_lock_shared")
- AcquireLock(C, CE, C.getSVal(CE->getArg(0)), false, XNUSemantics);
- else if (FName == "pthread_mutex_trylock" ||
- FName == "pthread_rwlock_tryrdlock" ||
- FName == "pthread_rwlock_trywrlock")
- AcquireLock(C, CE, C.getSVal(CE->getArg(0)),
- true, PthreadSemantics);
- else if (FName == "lck_mtx_try_lock" ||
- FName == "lck_rw_try_lock_exclusive" ||
- FName == "lck_rw_try_lock_shared")
- AcquireLock(C, CE, C.getSVal(CE->getArg(0)), true, XNUSemantics);
- else if (FName == "pthread_mutex_unlock" ||
- FName == "pthread_rwlock_unlock" ||
- FName == "lck_mtx_unlock" ||
- FName == "lck_rw_done")
- ReleaseLock(C, CE, C.getSVal(CE->getArg(0)));
- else if (FName == "pthread_mutex_destroy")
- DestroyLock(C, CE, C.getSVal(CE->getArg(0)), PthreadSemantics);
- else if (FName == "lck_mtx_destroy")
- DestroyLock(C, CE, C.getSVal(CE->getArg(0)), XNUSemantics);
- else if (FName == "pthread_mutex_init")
- InitLock(C, CE, C.getSVal(CE->getArg(0)));
+ if (const FnCheck *Callback = PThreadCallbacks.lookup(Call))
+ (this->**Callback)(Call, C, CK_PthreadLockChecker);
+ else if (const FnCheck *Callback = FuchsiaCallbacks.lookup(Call))
+ (this->**Callback)(Call, C, CK_FuchsiaLockChecker);
+ else if (const FnCheck *Callback = C11Callbacks.lookup(Call))
+ (this->**Callback)(Call, C, CK_C11LockChecker);
}
// When a lock is destroyed, in some semantics(like PthreadSemantics) we are not
@@ -204,7 +330,7 @@ void PthreadLockChecker::printState(raw_ostream &Out, ProgramStateRef State,
LockSetTy LS = State->get<LockSet>();
if (!LS.isEmpty()) {
Out << Sep << "Mutex lock order:" << NL;
- for (auto I: LS) {
+ for (auto I : LS) {
I->dumpToStream(Out);
Out << NL;
}
@@ -213,9 +339,53 @@ void PthreadLockChecker::printState(raw_ostream &Out, ProgramStateRef State,
// TODO: Dump destroyed mutex symbols?
}
-void PthreadLockChecker::AcquireLock(CheckerContext &C, const CallExpr *CE,
- SVal lock, bool isTryLock,
- enum LockingSemantics semantics) const {
+void PthreadLockChecker::AcquirePthreadLock(const CallEvent &Call,
+ CheckerContext &C,
+ CheckerKind checkKind) const {
+ AcquireLockAux(Call, C, 0, Call.getArgSVal(0), false, PthreadSemantics,
+ checkKind);
+}
+
+void PthreadLockChecker::AcquireXNULock(const CallEvent &Call,
+ CheckerContext &C,
+ CheckerKind checkKind) const {
+ AcquireLockAux(Call, C, 0, Call.getArgSVal(0), false, XNUSemantics,
+ checkKind);
+}
+
+void PthreadLockChecker::TryPthreadLock(const CallEvent &Call,
+ CheckerContext &C,
+ CheckerKind checkKind) const {
+ AcquireLockAux(Call, C, 0, Call.getArgSVal(0), true, PthreadSemantics,
+ checkKind);
+}
+
+void PthreadLockChecker::TryXNULock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkKind) const {
+ AcquireLockAux(Call, C, 0, Call.getArgSVal(0), true, PthreadSemantics,
+ checkKind);
+}
+
+void PthreadLockChecker::TryFuchsiaLock(const CallEvent &Call,
+ CheckerContext &C,
+ CheckerKind checkKind) const {
+ AcquireLockAux(Call, C, 0, Call.getArgSVal(0), true, PthreadSemantics,
+ checkKind);
+}
+
+void PthreadLockChecker::TryC11Lock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkKind) const {
+ AcquireLockAux(Call, C, 0, Call.getArgSVal(0), true, PthreadSemantics,
+ checkKind);
+}
+
+void PthreadLockChecker::AcquireLockAux(const CallEvent &Call,
+ CheckerContext &C, unsigned ArgNo,
+ SVal lock, bool isTryLock,
+ enum LockingSemantics semantics,
+ CheckerKind checkKind) const {
+ if (!ChecksEnabled[checkKind])
+ return;
const MemRegion *lockR = lock.getAsRegion();
if (!lockR)
@@ -226,27 +396,19 @@ void PthreadLockChecker::AcquireLock(CheckerContext &C, const CallExpr *CE,
if (sym)
state = resolvePossiblyDestroyedMutex(state, lockR, sym);
- SVal X = C.getSVal(CE);
- if (X.isUnknownOrUndef())
- return;
-
- DefinedSVal retVal = X.castAs<DefinedSVal>();
-
if (const LockState *LState = state->get<LockMap>(lockR)) {
if (LState->isLocked()) {
- if (!BT_doublelock)
- BT_doublelock.reset(new BugType(this, "Double locking",
- "Lock checker"));
ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
+ initBugType(checkKind);
auto report = std::make_unique<PathSensitiveBugReport>(
- *BT_doublelock, "This lock has already been acquired", N);
- report->addRange(CE->getArg(0)->getSourceRange());
+ *BT_doublelock[checkKind], "This lock has already been acquired", N);
+ report->addRange(Call.getArgExpr(ArgNo)->getSourceRange());
C.emitReport(std::move(report));
return;
} else if (LState->isDestroyed()) {
- reportUseDestroyedBug(C, CE);
+ reportUseDestroyedBug(Call, C, ArgNo, checkKind);
return;
}
}
@@ -254,25 +416,35 @@ void PthreadLockChecker::AcquireLock(CheckerContext &C, const CallExpr *CE,
ProgramStateRef lockSucc = state;
if (isTryLock) {
// Bifurcate the state, and allow a mode where the lock acquisition fails.
- ProgramStateRef lockFail;
- switch (semantics) {
- case PthreadSemantics:
- std::tie(lockFail, lockSucc) = state->assume(retVal);
- break;
- case XNUSemantics:
- std::tie(lockSucc, lockFail) = state->assume(retVal);
- break;
- default:
- llvm_unreachable("Unknown tryLock locking semantics");
+ SVal RetVal = Call.getReturnValue();
+ if (auto DefinedRetVal = RetVal.getAs<DefinedSVal>()) {
+ ProgramStateRef lockFail;
+ switch (semantics) {
+ case PthreadSemantics:
+ std::tie(lockFail, lockSucc) = state->assume(*DefinedRetVal);
+ break;
+ case XNUSemantics:
+ std::tie(lockSucc, lockFail) = state->assume(*DefinedRetVal);
+ break;
+ default:
+ llvm_unreachable("Unknown tryLock locking semantics");
+ }
+ assert(lockFail && lockSucc);
+ C.addTransition(lockFail);
}
- assert(lockFail && lockSucc);
- C.addTransition(lockFail);
-
+ // We might want to handle the case when the mutex lock function was inlined
+ // and returned an Unknown or Undefined value.
} else if (semantics == PthreadSemantics) {
// Assume that the return value was 0.
- lockSucc = state->assume(retVal, false);
- assert(lockSucc);
-
+ SVal RetVal = Call.getReturnValue();
+ if (auto DefinedRetVal = RetVal.getAs<DefinedSVal>()) {
+ // FIXME: If the lock function was inlined and returned true,
+ // we need to behave sanely - at least generate sink.
+ lockSucc = state->assume(*DefinedRetVal, false);
+ assert(lockSucc);
+ }
+ // We might want to handle the case when the mutex lock function was inlined
+ // and returned an Unknown or Undefined value.
} else {
// XNU locking semantics return void on non-try locks
assert((semantics == XNUSemantics) && "Unknown locking semantics");
@@ -285,8 +457,18 @@ void PthreadLockChecker::AcquireLock(CheckerContext &C, const CallExpr *CE,
C.addTransition(lockSucc);
}
-void PthreadLockChecker::ReleaseLock(CheckerContext &C, const CallExpr *CE,
- SVal lock) const {
+void PthreadLockChecker::ReleaseAnyLock(const CallEvent &Call,
+ CheckerContext &C,
+ CheckerKind checkKind) const {
+ ReleaseLockAux(Call, C, 0, Call.getArgSVal(0), checkKind);
+}
+
+void PthreadLockChecker::ReleaseLockAux(const CallEvent &Call,
+ CheckerContext &C, unsigned ArgNo,
+ SVal lock,
+ CheckerKind checkKind) const {
+ if (!ChecksEnabled[checkKind])
+ return;
const MemRegion *lockR = lock.getAsRegion();
if (!lockR)
@@ -299,39 +481,37 @@ void PthreadLockChecker::ReleaseLock(CheckerContext &C, const CallExpr *CE,
if (const LockState *LState = state->get<LockMap>(lockR)) {
if (LState->isUnlocked()) {
- if (!BT_doubleunlock)
- BT_doubleunlock.reset(new BugType(this, "Double unlocking",
- "Lock checker"));
ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
+ initBugType(checkKind);
auto Report = std::make_unique<PathSensitiveBugReport>(
- *BT_doubleunlock, "This lock has already been unlocked", N);
- Report->addRange(CE->getArg(0)->getSourceRange());
+ *BT_doubleunlock[checkKind], "This lock has already been unlocked",
+ N);
+ Report->addRange(Call.getArgExpr(ArgNo)->getSourceRange());
C.emitReport(std::move(Report));
return;
} else if (LState->isDestroyed()) {
- reportUseDestroyedBug(C, CE);
+ reportUseDestroyedBug(Call, C, ArgNo, checkKind);
return;
}
}
LockSetTy LS = state->get<LockSet>();
- // FIXME: Better analysis requires IPA for wrappers.
-
if (!LS.isEmpty()) {
const MemRegion *firstLockR = LS.getHead();
if (firstLockR != lockR) {
- if (!BT_lor)
- BT_lor.reset(new BugType(this, "Lock order reversal", "Lock checker"));
ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
+ initBugType(checkKind);
auto report = std::make_unique<PathSensitiveBugReport>(
- *BT_lor, "This was not the most recently acquired lock. Possible "
- "lock order reversal", N);
- report->addRange(CE->getArg(0)->getSourceRange());
+ *BT_lor[checkKind],
+ "This was not the most recently acquired lock. Possible "
+ "lock order reversal",
+ N);
+ report->addRange(Call.getArgExpr(ArgNo)->getSourceRange());
C.emitReport(std::move(report));
return;
}
@@ -343,9 +523,25 @@ void PthreadLockChecker::ReleaseLock(CheckerContext &C, const CallExpr *CE,
C.addTransition(state);
}
-void PthreadLockChecker::DestroyLock(CheckerContext &C, const CallExpr *CE,
- SVal Lock,
- enum LockingSemantics semantics) const {
+void PthreadLockChecker::DestroyPthreadLock(const CallEvent &Call,
+ CheckerContext &C,
+ CheckerKind checkKind) const {
+ DestroyLockAux(Call, C, 0, Call.getArgSVal(0), PthreadSemantics, checkKind);
+}
+
+void PthreadLockChecker::DestroyXNULock(const CallEvent &Call,
+ CheckerContext &C,
+ CheckerKind checkKind) const {
+ DestroyLockAux(Call, C, 0, Call.getArgSVal(0), XNUSemantics, checkKind);
+}
+
+void PthreadLockChecker::DestroyLockAux(const CallEvent &Call,
+ CheckerContext &C, unsigned ArgNo,
+ SVal Lock,
+ enum LockingSemantics semantics,
+ CheckerKind checkKind) const {
+ if (!ChecksEnabled[checkKind])
+ return;
const MemRegion *LockR = Lock.getAsRegion();
if (!LockR)
@@ -362,7 +558,7 @@ void PthreadLockChecker::DestroyLock(CheckerContext &C, const CallExpr *CE,
// PthreadSemantics
if (semantics == PthreadSemantics) {
if (!LState || LState->isUnlocked()) {
- SymbolRef sym = C.getSVal(CE).getAsSymbol();
+ SymbolRef sym = Call.getReturnValue().getAsSymbol();
if (!sym) {
State = State->remove<LockMap>(LockR);
C.addTransition(State);
@@ -393,20 +589,26 @@ void PthreadLockChecker::DestroyLock(CheckerContext &C, const CallExpr *CE,
Message = "This lock has already been destroyed";
}
- if (!BT_destroylock)
- BT_destroylock.reset(new BugType(this, "Destroy invalid lock",
- "Lock checker"));
ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
- auto Report =
- std::make_unique<PathSensitiveBugReport>(*BT_destroylock, Message, N);
- Report->addRange(CE->getArg(0)->getSourceRange());
+ initBugType(checkKind);
+ auto Report = std::make_unique<PathSensitiveBugReport>(
+ *BT_destroylock[checkKind], Message, N);
+ Report->addRange(Call.getArgExpr(ArgNo)->getSourceRange());
C.emitReport(std::move(Report));
}
-void PthreadLockChecker::InitLock(CheckerContext &C, const CallExpr *CE,
- SVal Lock) const {
+void PthreadLockChecker::InitAnyLock(const CallEvent &Call, CheckerContext &C,
+ CheckerKind checkKind) const {
+ InitLockAux(Call, C, 0, Call.getArgSVal(0), checkKind);
+}
+
+void PthreadLockChecker::InitLockAux(const CallEvent &Call, CheckerContext &C,
+ unsigned ArgNo, SVal Lock,
+ CheckerKind checkKind) const {
+ if (!ChecksEnabled[checkKind])
+ return;
const MemRegion *LockR = Lock.getAsRegion();
if (!LockR)
@@ -433,29 +635,27 @@ void PthreadLockChecker::InitLock(CheckerContext &C, const CallExpr *CE,
Message = "This lock has already been initialized";
}
- if (!BT_initlock)
- BT_initlock.reset(new BugType(this, "Init invalid lock",
- "Lock checker"));
ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
- auto Report =
- std::make_unique<PathSensitiveBugReport>(*BT_initlock, Message, N);
- Report->addRange(CE->getArg(0)->getSourceRange());
+ initBugType(checkKind);
+ auto Report = std::make_unique<PathSensitiveBugReport>(
+ *BT_initlock[checkKind], Message, N);
+ Report->addRange(Call.getArgExpr(ArgNo)->getSourceRange());
C.emitReport(std::move(Report));
}
-void PthreadLockChecker::reportUseDestroyedBug(CheckerContext &C,
- const CallExpr *CE) const {
- if (!BT_destroylock)
- BT_destroylock.reset(new BugType(this, "Use destroyed lock",
- "Lock checker"));
+void PthreadLockChecker::reportUseDestroyedBug(const CallEvent &Call,
+ CheckerContext &C,
+ unsigned ArgNo,
+ CheckerKind checkKind) const {
ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
+ initBugType(checkKind);
auto Report = std::make_unique<PathSensitiveBugReport>(
- *BT_destroylock, "This lock has already been destroyed", N);
- Report->addRange(CE->getArg(0)->getSourceRange());
+ *BT_destroylock[checkKind], "This lock has already been destroyed", N);
+ Report->addRange(Call.getArgExpr(ArgNo)->getSourceRange());
C.emitReport(std::move(Report));
}
@@ -463,26 +663,80 @@ void PthreadLockChecker::checkDeadSymbols(SymbolReaper &SymReaper,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
- // TODO: Clean LockMap when a mutex region dies.
-
- DestroyRetValTy TrackedSymbols = State->get<DestroyRetVal>();
- for (DestroyRetValTy::iterator I = TrackedSymbols.begin(),
- E = TrackedSymbols.end();
- I != E; ++I) {
- const SymbolRef Sym = I->second;
- const MemRegion *lockR = I->first;
- bool IsSymDead = SymReaper.isDead(Sym);
- // Remove the dead symbol from the return value symbols map.
- if (IsSymDead)
- State = resolvePossiblyDestroyedMutex(State, lockR, &Sym);
+ for (auto I : State->get<DestroyRetVal>()) {
+ // Once the return value symbol dies, no more checks can be performed
+ // against it. See if the return value was checked before this point.
+ // This would remove the symbol from the map as well.
+ if (SymReaper.isDead(I.second))
+ State = resolvePossiblyDestroyedMutex(State, I.first, &I.second);
+ }
+
+ for (auto I : State->get<LockMap>()) {
+ // Stop tracking dead mutex regions as well.
+ if (!SymReaper.isLiveRegion(I.first))
+ State = State->remove<LockMap>(I.first);
}
+
+ // TODO: We probably need to clean up the lock stack as well.
+ // It is tricky though: even if the mutex cannot be unlocked anymore,
+ // it can still participate in lock order reversal resolution.
+
C.addTransition(State);
}
-void ento::registerPthreadLockChecker(CheckerManager &mgr) {
- mgr.registerChecker<PthreadLockChecker>();
+ProgramStateRef PthreadLockChecker::checkRegionChanges(
+ ProgramStateRef State, const InvalidatedSymbols *Symbols,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions, const LocationContext *LCtx,
+ const CallEvent *Call) const {
+
+ bool IsLibraryFunction = false;
+ if (Call && Call->isGlobalCFunction()) {
+ // Avoid invalidating mutex state when a known supported function is called.
+ if (PThreadCallbacks.lookup(*Call) || FuchsiaCallbacks.lookup(*Call) ||
+ C11Callbacks.lookup(*Call))
+ return State;
+
+ if (Call->isInSystemHeader())
+ IsLibraryFunction = true;
+ }
+
+ for (auto R : Regions) {
+ // We assume that system library function wouldn't touch the mutex unless
+ // it takes the mutex explicitly as an argument.
+ // FIXME: This is a bit quadratic.
+ if (IsLibraryFunction &&
+ std::find(ExplicitRegions.begin(), ExplicitRegions.end(), R) ==
+ ExplicitRegions.end())
+ continue;
+
+ State = State->remove<LockMap>(R);
+ State = State->remove<DestroyRetVal>(R);
+
+ // TODO: We need to invalidate the lock stack as well. This is tricky
+ // to implement correctly and efficiently though, because the effects
+ // of mutex escapes on lock order may be fairly varied.
+ }
+
+ return State;
}
-bool ento::shouldRegisterPthreadLockChecker(const LangOptions &LO) {
- return true;
+void ento::registerPthreadLockBase(CheckerManager &mgr) {
+ mgr.registerChecker<PthreadLockChecker>();
}
+
+bool ento::shouldRegisterPthreadLockBase(const CheckerManager &mgr) { return true; }
+
+#define REGISTER_CHECKER(name) \
+ void ento::register##name(CheckerManager &mgr) { \
+ PthreadLockChecker *checker = mgr.getChecker<PthreadLockChecker>(); \
+ checker->ChecksEnabled[PthreadLockChecker::CK_##name] = true; \
+ checker->CheckNames[PthreadLockChecker::CK_##name] = \
+ mgr.getCurrentCheckerName(); \
+ } \
+ \
+ bool ento::shouldRegister##name(const CheckerManager &mgr) { return true; }
+
+REGISTER_CHECKER(PthreadLockChecker)
+REGISTER_CHECKER(FuchsiaLockChecker)
+REGISTER_CHECKER(C11LockChecker)
diff --git a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
index 6f8cb1432bb1..3f3267ff9391 100644
--- a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
@@ -12,12 +12,12 @@
//===----------------------------------------------------------------------===//
#include "RetainCountChecker.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
using namespace clang;
using namespace ento;
using namespace retaincountchecker;
-using llvm::StrInStrNoCase;
REGISTER_MAP_WITH_PROGRAMSTATE(RefBindings, SymbolRef, RefVal)
@@ -701,7 +701,7 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ,
for (ProgramStateRef St : Out) {
if (DeallocSent) {
- C.addTransition(St, C.getPredecessor(), &DeallocSentTag);
+ C.addTransition(St, C.getPredecessor(), &getDeallocSentTag());
} else {
C.addTransition(St);
}
@@ -844,13 +844,13 @@ RetainCountChecker::errorKindToBugKind(RefVal::Kind ErrorKind,
SymbolRef Sym) const {
switch (ErrorKind) {
case RefVal::ErrorUseAfterRelease:
- return useAfterRelease;
+ return *UseAfterRelease;
case RefVal::ErrorReleaseNotOwned:
- return releaseNotOwned;
+ return *ReleaseNotOwned;
case RefVal::ErrorDeallocNotOwned:
if (Sym->getType()->getPointeeCXXRecordDecl())
- return freeNotOwned;
- return deallocNotOwned;
+ return *FreeNotOwned;
+ return *DeallocNotOwned;
default:
llvm_unreachable("Unhandled error.");
}
@@ -946,7 +946,7 @@ bool RetainCountChecker::evalCall(const CallEvent &Call,
// Assume that output is zero on the other branch.
NullOutputState = NullOutputState->BindExpr(
CE, LCtx, C.getSValBuilder().makeNull(), /*Invalidate=*/false);
- C.addTransition(NullOutputState, &CastFailTag);
+ C.addTransition(NullOutputState, &getCastFailTag());
// And on the original branch assume that both input and
// output are non-zero.
@@ -1095,7 +1095,7 @@ ExplodedNode * RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
if (N) {
const LangOptions &LOpts = C.getASTContext().getLangOpts();
auto R =
- std::make_unique<RefLeakReport>(leakAtReturn, LOpts, N, Sym, C);
+ std::make_unique<RefLeakReport>(*LeakAtReturn, LOpts, N, Sym, C);
C.emitReport(std::move(R));
}
return N;
@@ -1120,7 +1120,7 @@ ExplodedNode * RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
ExplodedNode *N = C.addTransition(state, Pred, &ReturnNotOwnedTag);
if (N) {
auto R = std::make_unique<RefCountReport>(
- returnNotOwnedForOwned, C.getASTContext().getLangOpts(), N, Sym);
+ *ReturnNotOwnedForOwned, C.getASTContext().getLangOpts(), N, Sym);
C.emitReport(std::move(R));
}
return N;
@@ -1273,8 +1273,8 @@ RetainCountChecker::handleAutoreleaseCounts(ProgramStateRef state,
os << "has a +" << V.getCount() << " retain count";
const LangOptions &LOpts = Ctx.getASTContext().getLangOpts();
- auto R = std::make_unique<RefCountReport>(overAutorelease, LOpts, N, Sym,
- os.str());
+ auto R = std::make_unique<RefCountReport>(*OverAutorelease, LOpts, N, Sym,
+ os.str());
Ctx.emitReport(std::move(R));
}
@@ -1320,7 +1320,7 @@ RetainCountChecker::processLeaks(ProgramStateRef state,
if (N) {
for (SymbolRef L : Leaked) {
- const RefCountBug &BT = Pred ? leakWithinFunction : leakAtReturn;
+ const RefCountBug &BT = Pred ? *LeakWithinFunction : *LeakAtReturn;
Ctx.emitReport(std::make_unique<RefLeakReport>(BT, LOpts, N, L, Ctx));
}
}
@@ -1473,48 +1473,73 @@ void RetainCountChecker::printState(raw_ostream &Out, ProgramStateRef State,
// Checker registration.
//===----------------------------------------------------------------------===//
+std::unique_ptr<CheckerProgramPointTag> RetainCountChecker::DeallocSentTag;
+std::unique_ptr<CheckerProgramPointTag> RetainCountChecker::CastFailTag;
+
void ento::registerRetainCountBase(CheckerManager &Mgr) {
- Mgr.registerChecker<RetainCountChecker>();
+ auto *Chk = Mgr.registerChecker<RetainCountChecker>();
+ Chk->DeallocSentTag =
+ std::make_unique<CheckerProgramPointTag>(Chk, "DeallocSent");
+ Chk->CastFailTag =
+ std::make_unique<CheckerProgramPointTag>(Chk, "DynamicCastFail");
}
-bool ento::shouldRegisterRetainCountBase(const LangOptions &LO) {
+bool ento::shouldRegisterRetainCountBase(const CheckerManager &mgr) {
return true;
}
-
-// FIXME: remove this, hack for backwards compatibility:
-// it should be possible to enable the NS/CF retain count checker as
-// osx.cocoa.RetainCount, and it should be possible to disable
-// osx.OSObjectRetainCount using osx.cocoa.RetainCount:CheckOSObject=false.
-static bool getOption(AnalyzerOptions &Options,
- StringRef Postfix,
- StringRef Value) {
- auto I = Options.Config.find(
- (StringRef("osx.cocoa.RetainCount:") + Postfix).str());
- if (I != Options.Config.end())
- return I->getValue() == Value;
- return false;
-}
-
void ento::registerRetainCountChecker(CheckerManager &Mgr) {
auto *Chk = Mgr.getChecker<RetainCountChecker>();
Chk->TrackObjCAndCFObjects = true;
- Chk->TrackNSCFStartParam = getOption(Mgr.getAnalyzerOptions(),
- "TrackNSCFStartParam",
- "true");
+ Chk->TrackNSCFStartParam = Mgr.getAnalyzerOptions().getCheckerBooleanOption(
+ Mgr.getCurrentCheckerName(), "TrackNSCFStartParam");
+
+#define INIT_BUGTYPE(KIND) \
+ Chk->KIND = std::make_unique<RefCountBug>(Mgr.getCurrentCheckerName(), \
+ RefCountBug::KIND);
+ // TODO: Ideally, we should have a checker for each of these bug types.
+ INIT_BUGTYPE(UseAfterRelease)
+ INIT_BUGTYPE(ReleaseNotOwned)
+ INIT_BUGTYPE(DeallocNotOwned)
+ INIT_BUGTYPE(FreeNotOwned)
+ INIT_BUGTYPE(OverAutorelease)
+ INIT_BUGTYPE(ReturnNotOwnedForOwned)
+ INIT_BUGTYPE(LeakWithinFunction)
+ INIT_BUGTYPE(LeakAtReturn)
+#undef INIT_BUGTYPE
}
-bool ento::shouldRegisterRetainCountChecker(const LangOptions &LO) {
+bool ento::shouldRegisterRetainCountChecker(const CheckerManager &mgr) {
return true;
}
void ento::registerOSObjectRetainCountChecker(CheckerManager &Mgr) {
auto *Chk = Mgr.getChecker<RetainCountChecker>();
- if (!getOption(Mgr.getAnalyzerOptions(),
- "CheckOSObject",
- "false"))
- Chk->TrackOSObjects = true;
+ Chk->TrackOSObjects = true;
+
+ // FIXME: We want bug reports to always have the same checker name associated
+ // with them, yet here, if RetainCountChecker is disabled but
+ // OSObjectRetainCountChecker is enabled, the checker names will be different.
+ // This hack will make it so that the checker name depends on which checker is
+ // enabled rather than on the registration order.
+ // For the most part, we want **non-hidden checkers** to be associated with
+ // diagnostics, and **hidden checker options** with the fine-tuning of
+ // modeling. Following this logic, OSObjectRetainCountChecker should be the
+ // latter, but we can't just remove it for backward compatibility reasons.
+#define LAZY_INIT_BUGTYPE(KIND) \
+ if (!Chk->KIND) \
+ Chk->KIND = std::make_unique<RefCountBug>(Mgr.getCurrentCheckerName(), \
+ RefCountBug::KIND);
+ LAZY_INIT_BUGTYPE(UseAfterRelease)
+ LAZY_INIT_BUGTYPE(ReleaseNotOwned)
+ LAZY_INIT_BUGTYPE(DeallocNotOwned)
+ LAZY_INIT_BUGTYPE(FreeNotOwned)
+ LAZY_INIT_BUGTYPE(OverAutorelease)
+ LAZY_INIT_BUGTYPE(ReturnNotOwnedForOwned)
+ LAZY_INIT_BUGTYPE(LeakWithinFunction)
+ LAZY_INIT_BUGTYPE(LeakAtReturn)
+#undef LAZY_INIT_BUGTYPE
}
-bool ento::shouldRegisterOSObjectRetainCountChecker(const LangOptions &LO) {
+bool ento::shouldRegisterOSObjectRetainCountChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h
index dd79bbef321c..223e28c2c5b8 100644
--- a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h
+++ b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h
@@ -251,20 +251,20 @@ class RetainCountChecker
eval::Assume,
eval::Call > {
- RefCountBug useAfterRelease{this, RefCountBug::UseAfterRelease};
- RefCountBug releaseNotOwned{this, RefCountBug::ReleaseNotOwned};
- RefCountBug deallocNotOwned{this, RefCountBug::DeallocNotOwned};
- RefCountBug freeNotOwned{this, RefCountBug::FreeNotOwned};
- RefCountBug overAutorelease{this, RefCountBug::OverAutorelease};
- RefCountBug returnNotOwnedForOwned{this, RefCountBug::ReturnNotOwnedForOwned};
- RefCountBug leakWithinFunction{this, RefCountBug::LeakWithinFunction};
- RefCountBug leakAtReturn{this, RefCountBug::LeakAtReturn};
-
- CheckerProgramPointTag DeallocSentTag{this, "DeallocSent"};
- CheckerProgramPointTag CastFailTag{this, "DynamicCastFail"};
+public:
+ std::unique_ptr<RefCountBug> UseAfterRelease;
+ std::unique_ptr<RefCountBug> ReleaseNotOwned;
+ std::unique_ptr<RefCountBug> DeallocNotOwned;
+ std::unique_ptr<RefCountBug> FreeNotOwned;
+ std::unique_ptr<RefCountBug> OverAutorelease;
+ std::unique_ptr<RefCountBug> ReturnNotOwnedForOwned;
+ std::unique_ptr<RefCountBug> LeakWithinFunction;
+ std::unique_ptr<RefCountBug> LeakAtReturn;
mutable std::unique_ptr<RetainSummaryManager> Summaries;
-public:
+
+ static std::unique_ptr<CheckerProgramPointTag> DeallocSentTag;
+ static std::unique_ptr<CheckerProgramPointTag> CastFailTag;
/// Track Objective-C and CoreFoundation objects.
bool TrackObjCAndCFObjects = false;
@@ -360,13 +360,11 @@ public:
CheckerContext &Ctx,
ExplodedNode *Pred = nullptr) const;
- const CheckerProgramPointTag &getDeallocSentTag() const {
- return DeallocSentTag;
+ static const CheckerProgramPointTag &getDeallocSentTag() {
+ return *DeallocSentTag;
}
- const CheckerProgramPointTag &getCastFailTag() const {
- return CastFailTag;
- }
+ static const CheckerProgramPointTag &getCastFailTag() { return *CastFailTag; }
private:
/// Perform the necessary checks and state adjustments at the end of the
diff --git a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
index 9853758f7f2c..1d8ed90f7590 100644
--- a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
@@ -18,7 +18,7 @@ using namespace clang;
using namespace ento;
using namespace retaincountchecker;
-StringRef RefCountBug::bugTypeToName(RefCountBug::RefCountBugType BT) {
+StringRef RefCountBug::bugTypeToName(RefCountBug::RefCountBugKind BT) {
switch (BT) {
case UseAfterRelease:
return "Use-after-release";
@@ -37,7 +37,7 @@ StringRef RefCountBug::bugTypeToName(RefCountBug::RefCountBugType BT) {
case LeakAtReturn:
return "Leak of returned object";
}
- llvm_unreachable("Unknown RefCountBugType");
+ llvm_unreachable("Unknown RefCountBugKind");
}
StringRef RefCountBug::getDescription() const {
@@ -60,13 +60,14 @@ StringRef RefCountBug::getDescription() const {
case LeakAtReturn:
return "";
}
- llvm_unreachable("Unknown RefCountBugType");
+ llvm_unreachable("Unknown RefCountBugKind");
}
-RefCountBug::RefCountBug(const CheckerBase *Checker, RefCountBugType BT)
+RefCountBug::RefCountBug(CheckerNameRef Checker, RefCountBugKind BT)
: BugType(Checker, bugTypeToName(BT), categories::MemoryRefCount,
- /*SuppressOnSink=*/BT == LeakWithinFunction || BT == LeakAtReturn),
- BT(BT), Checker(Checker) {}
+ /*SuppressOnSink=*/BT == LeakWithinFunction ||
+ BT == LeakAtReturn),
+ BT(BT) {}
static bool isNumericLiteralExpression(const Expr *E) {
// FIXME: This set of cases was copied from SemaExprObjC.
@@ -84,7 +85,7 @@ static std::string getPrettyTypeName(QualType QT) {
QualType PT = QT->getPointeeType();
if (!PT.isNull() && !QT->getAs<TypedefType>())
if (const auto *RD = PT->getAsCXXRecordDecl())
- return RD->getName();
+ return std::string(RD->getName());
return QT.getAsString();
}
@@ -453,8 +454,6 @@ RefCountReportVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
PathSensitiveBugReport &BR) {
const auto &BT = static_cast<const RefCountBug&>(BR.getBugType());
- const auto *Checker =
- static_cast<const RetainCountChecker *>(BT.getChecker());
bool IsFreeUnowned = BT.getBugType() == RefCountBug::FreeNotOwned ||
BT.getBugType() == RefCountBug::DeallocNotOwned;
@@ -545,11 +544,11 @@ RefCountReportVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
const ProgramPointTag *Tag = N->getLocation().getTag();
- if (Tag == &Checker->getCastFailTag()) {
+ if (Tag == &RetainCountChecker::getCastFailTag()) {
os << "Assuming dynamic cast returns null due to type mismatch";
}
- if (Tag == &Checker->getDeallocSentTag()) {
+ if (Tag == &RetainCountChecker::getDeallocSentTag()) {
// We only have summaries attached to nodes after evaluating CallExpr and
// ObjCMessageExprs.
const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
diff --git a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h
index e9e277754054..286a8ae2ef7d 100644
--- a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h
+++ b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h
@@ -26,7 +26,7 @@ namespace retaincountchecker {
class RefCountBug : public BugType {
public:
- enum RefCountBugType {
+ enum RefCountBugKind {
UseAfterRelease,
ReleaseNotOwned,
DeallocNotOwned,
@@ -36,21 +36,14 @@ public:
LeakWithinFunction,
LeakAtReturn,
};
- RefCountBug(const CheckerBase *checker, RefCountBugType BT);
+ RefCountBug(CheckerNameRef Checker, RefCountBugKind BT);
StringRef getDescription() const;
- RefCountBugType getBugType() const {
- return BT;
- }
-
- const CheckerBase *getChecker() const {
- return Checker;
- }
+ RefCountBugKind getBugType() const { return BT; }
private:
- RefCountBugType BT;
- const CheckerBase *Checker;
- static StringRef bugTypeToName(RefCountBugType BT);
+ RefCountBugKind BT;
+ static StringRef bugTypeToName(RefCountBugKind BT);
};
class RefCountReport : public PathSensitiveBugReport {
diff --git a/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
index abd1a074b487..599d4f306aa1 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
@@ -16,6 +16,7 @@
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
using namespace clang;
@@ -51,15 +52,14 @@ void ReturnPointerRangeChecker::checkPreStmt(const ReturnStmt *RS,
// pointer casts.
if (Idx.isZeroConstant())
return;
+
// FIXME: All of this out-of-bounds checking should eventually be refactored
// into a common place.
+ DefinedOrUnknownSVal ElementCount = getDynamicElementCount(
+ state, ER->getSuperRegion(), C.getSValBuilder(), ER->getValueType());
- DefinedOrUnknownSVal NumElements
- = C.getStoreManager().getSizeInElements(state, ER->getSuperRegion(),
- ER->getValueType());
-
- ProgramStateRef StInBound = state->assumeInBound(Idx, NumElements, true);
- ProgramStateRef StOutBound = state->assumeInBound(Idx, NumElements, false);
+ ProgramStateRef StInBound = state->assumeInBound(Idx, ElementCount, true);
+ ProgramStateRef StOutBound = state->assumeInBound(Idx, ElementCount, false);
if (StOutBound && !StInBound) {
ExplodedNode *N = C.generateErrorNode(StOutBound);
@@ -91,6 +91,6 @@ void ento::registerReturnPointerRangeChecker(CheckerManager &mgr) {
mgr.registerChecker<ReturnPointerRangeChecker>();
}
-bool ento::shouldRegisterReturnPointerRangeChecker(const LangOptions &LO) {
+bool ento::shouldRegisterReturnPointerRangeChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
index fbd15d864424..5266cbf86b44 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
@@ -122,6 +122,6 @@ void ento::registerReturnUndefChecker(CheckerManager &mgr) {
mgr.registerChecker<ReturnUndefChecker>();
}
-bool ento::shouldRegisterReturnUndefChecker(const LangOptions &LO) {
+bool ento::shouldRegisterReturnUndefChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp
index 103208d8b5a5..14ecede17083 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp
@@ -99,13 +99,13 @@ void ReturnValueChecker::checkPostCall(const CallEvent &Call,
std::string Name = getName(Call);
const NoteTag *CallTag = C.getNoteTag(
- [Name, ExpectedValue](BugReport &) -> std::string {
+ [Name, ExpectedValue](PathSensitiveBugReport &) -> std::string {
SmallString<128> Msg;
llvm::raw_svector_ostream Out(Msg);
Out << '\'' << Name << "' returns "
<< (ExpectedValue ? "true" : "false");
- return Out.str();
+ return std::string(Out.str());
},
/*IsPrunable=*/true);
@@ -154,7 +154,7 @@ void ReturnValueChecker::checkEndFunction(const ReturnStmt *RS,
Out << '\'' << Name << "' returns "
<< (ExpectedValue ? "false" : "true");
- return Out.str();
+ return std::string(Out.str());
},
/*IsPrunable=*/false);
@@ -165,6 +165,6 @@ void ento::registerReturnValueChecker(CheckerManager &Mgr) {
Mgr.registerChecker<ReturnValueChecker>();
}
-bool ento::shouldRegisterReturnValueChecker(const LangOptions &LO) {
+bool ento::shouldRegisterReturnValueChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp
index 5e305aa709b6..d9dc72ddaa21 100644
--- a/clang/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp
@@ -203,6 +203,6 @@ void ento::registerRunLoopAutoreleaseLeakChecker(CheckerManager &mgr) {
mgr.registerChecker<RunLoopAutoreleaseLeakChecker>();
}
-bool ento::shouldRegisterRunLoopAutoreleaseLeakChecker(const LangOptions &LO) {
+bool ento::shouldRegisterRunLoopAutoreleaseLeakChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp b/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp
new file mode 100644
index 000000000000..933e0146ff59
--- /dev/null
+++ b/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp
@@ -0,0 +1,180 @@
+//===-- STLAlgorithmModeling.cpp -----------------------------------*- C++ -*--//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Models STL algorithms.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+#include "Iterator.h"
+
+using namespace clang;
+using namespace ento;
+using namespace iterator;
+
+namespace {
+
+class STLAlgorithmModeling : public Checker<eval::Call> {
+ bool evalFind(CheckerContext &C, const CallExpr *CE) const;
+
+ void Find(CheckerContext &C, const CallExpr *CE, unsigned paramNum) const;
+
+ using FnCheck = bool (STLAlgorithmModeling::*)(CheckerContext &,
+ const CallExpr *) const;
+
+ const CallDescriptionMap<FnCheck> Callbacks = {
+ {{{"std", "find"}, 3}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "find"}, 4}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "find_if"}, 3}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "find_if"}, 4}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "find_if_not"}, 3}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "find_if_not"}, 4}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "find_first_of"}, 4}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "find_first_of"}, 5}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "find_first_of"}, 6}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "find_end"}, 4}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "find_end"}, 5}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "find_end"}, 6}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "lower_bound"}, 3}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "lower_bound"}, 4}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "upper_bound"}, 3}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "upper_bound"}, 4}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "search"}, 3}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "search"}, 4}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "search"}, 5}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "search"}, 6}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "search_n"}, 4}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "search_n"}, 5}, &STLAlgorithmModeling::evalFind},
+ {{{"std", "search_n"}, 6}, &STLAlgorithmModeling::evalFind},
+ };
+
+public:
+ STLAlgorithmModeling() = default;
+
+ bool AggressiveStdFindModeling;
+
+ bool evalCall(const CallEvent &Call, CheckerContext &C) const;
+}; //
+
+bool STLAlgorithmModeling::evalCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ const auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return false;
+
+ const FnCheck *Handler = Callbacks.lookup(Call);
+ if (!Handler)
+ return false;
+
+ return (this->**Handler)(C, CE);
+}
+
+bool STLAlgorithmModeling::evalFind(CheckerContext &C,
+ const CallExpr *CE) const {
+ // std::find()-like functions either take their primary range in the first
+ // two parameters, or if the first parameter is "execution policy" then in
+ // the second and third. This means that the second parameter must always be
+ // an iterator.
+ if (!isIteratorType(CE->getArg(1)->getType()))
+ return false;
+
+ // If no "execution policy" parameter is used then the first argument is the
+ // beginning of the range.
+ if (isIteratorType(CE->getArg(0)->getType())) {
+ Find(C, CE, 0);
+ return true;
+ }
+
+ // If "execution policy" parameter is used then the second argument is the
+ // beginning of the range.
+ if (isIteratorType(CE->getArg(2)->getType())) {
+ Find(C, CE, 1);
+ return true;
+ }
+
+ return false;
+}
+
+void STLAlgorithmModeling::Find(CheckerContext &C, const CallExpr *CE,
+ unsigned paramNum) const {
+ auto State = C.getState();
+ auto &SVB = C.getSValBuilder();
+ const auto *LCtx = C.getLocationContext();
+
+ SVal RetVal = SVB.conjureSymbolVal(nullptr, CE, LCtx, C.blockCount());
+ SVal Param = State->getSVal(CE->getArg(paramNum), LCtx);
+
+ auto StateFound = State->BindExpr(CE, LCtx, RetVal);
+
+ // If we have an iterator position for the range-begin argument then we can
+ // assume that in case of successful search the position of the found element
+ // is not ahead of it.
+ // FIXME: Reverse iterators
+ const auto *Pos = getIteratorPosition(State, Param);
+ if (Pos) {
+ StateFound = createIteratorPosition(StateFound, RetVal, Pos->getContainer(),
+ CE, LCtx, C.blockCount());
+ const auto *NewPos = getIteratorPosition(StateFound, RetVal);
+ assert(NewPos && "Failed to create new iterator position.");
+
+ SVal GreaterOrEqual = SVB.evalBinOp(StateFound, BO_GE,
+ nonloc::SymbolVal(NewPos->getOffset()),
+ nonloc::SymbolVal(Pos->getOffset()),
+ SVB.getConditionType());
+ assert(GreaterOrEqual.getAs<DefinedSVal>() &&
+ "Symbol comparison must be a `DefinedSVal`");
+ StateFound = StateFound->assume(GreaterOrEqual.castAs<DefinedSVal>(), true);
+ }
+
+ Param = State->getSVal(CE->getArg(paramNum + 1), LCtx);
+
+ // If we have an iterator position for the range-end argument then we can
+ // assume that in case of successful search the position of the found element
+ // is ahead of it.
+ // FIXME: Reverse iterators
+ Pos = getIteratorPosition(State, Param);
+ if (Pos) {
+ StateFound = createIteratorPosition(StateFound, RetVal, Pos->getContainer(),
+ CE, LCtx, C.blockCount());
+ const auto *NewPos = getIteratorPosition(StateFound, RetVal);
+ assert(NewPos && "Failed to create new iterator position.");
+
+ SVal Less = SVB.evalBinOp(StateFound, BO_LT,
+ nonloc::SymbolVal(NewPos->getOffset()),
+ nonloc::SymbolVal(Pos->getOffset()),
+ SVB.getConditionType());
+ assert(Less.getAs<DefinedSVal>() &&
+ "Symbol comparison must be a `DefinedSVal`");
+ StateFound = StateFound->assume(Less.castAs<DefinedSVal>(), true);
+ }
+
+ C.addTransition(StateFound);
+
+ if (AggressiveStdFindModeling) {
+ auto StateNotFound = State->BindExpr(CE, LCtx, Param);
+ C.addTransition(StateNotFound);
+ }
+}
+
+} // namespace
+
+void ento::registerSTLAlgorithmModeling(CheckerManager &Mgr) {
+ auto *Checker = Mgr.registerChecker<STLAlgorithmModeling>();
+ Checker->AggressiveStdFindModeling =
+ Mgr.getAnalyzerOptions().getCheckerBooleanOption(Checker,
+ "AggressiveStdFindModeling");
+}
+
+bool ento::shouldRegisterSTLAlgorithmModeling(const CheckerManager &mgr) {
+ return true;
+}
+
diff --git a/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
index 8193bcbef4cd..8d380ed1b93d 100644
--- a/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
@@ -271,6 +271,6 @@ void ento::registerSimpleStreamChecker(CheckerManager &mgr) {
}
// This checker should be enabled regardless of how language options are set.
-bool ento::shouldRegisterSimpleStreamChecker(const LangOptions &LO) {
+bool ento::shouldRegisterSimpleStreamChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h b/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h
new file mode 100644
index 000000000000..ec43a23e30a9
--- /dev/null
+++ b/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h
@@ -0,0 +1,33 @@
+//=== SmartPtr.h - Tracking smart pointer state. -------------------*- C++ -*-//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines inter-checker API for the smart pointer modeling. It allows
+// dependent checkers to figure out if a smart pointer is null or not.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_SMARTPTR_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_SMARTPTR_H
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+
+namespace clang {
+namespace ento {
+namespace smartptr {
+
+/// Returns true if the event call is on a smart pointer.
+bool isStdSmartPtrCall(const CallEvent &Call);
+
+/// Returns whether the smart pointer is null or not.
+bool isNullSmartPtr(const ProgramStateRef State, const MemRegion *ThisRegion);
+
+} // namespace smartptr
+} // namespace ento
+} // namespace clang
+
+#endif // LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_SMARTPTR_H
diff --git a/clang/lib/StaticAnalyzer/Checkers/SmartPtrChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/SmartPtrChecker.cpp
new file mode 100644
index 000000000000..7bb25f397d01
--- /dev/null
+++ b/clang/lib/StaticAnalyzer/Checkers/SmartPtrChecker.cpp
@@ -0,0 +1,80 @@
+//===-- SmartPtrChecker.cpp - Check for smart pointer dereference -*- C++ -*--//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a checker that checks for null dereferences of C++ smart
+// pointers.
+//
+//===----------------------------------------------------------------------===//
+#include "SmartPtr.h"
+
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/Type.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class SmartPtrChecker : public Checker<check::PreCall> {
+ BugType NullDereferenceBugType{this, "Null SmartPtr dereference",
+ "C++ Smart Pointer"};
+
+public:
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+
+private:
+ void reportBug(CheckerContext &C, const CallEvent &Call) const;
+};
+} // end of anonymous namespace
+
+void SmartPtrChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ if (!smartptr::isStdSmartPtrCall(Call))
+ return;
+ ProgramStateRef State = C.getState();
+ const auto *OC = dyn_cast<CXXMemberOperatorCall>(&Call);
+ if (!OC)
+ return;
+ const MemRegion *ThisRegion = OC->getCXXThisVal().getAsRegion();
+ if (!ThisRegion)
+ return;
+
+ OverloadedOperatorKind OOK = OC->getOverloadedOperator();
+ if (OOK == OO_Star || OOK == OO_Arrow) {
+ if (smartptr::isNullSmartPtr(State, ThisRegion))
+ reportBug(C, Call);
+ }
+}
+
+void SmartPtrChecker::reportBug(CheckerContext &C,
+ const CallEvent &Call) const {
+ ExplodedNode *ErrNode = C.generateErrorNode();
+ if (!ErrNode)
+ return;
+
+ auto R = std::make_unique<PathSensitiveBugReport>(
+ NullDereferenceBugType, "Dereference of null smart pointer", ErrNode);
+ C.emitReport(std::move(R));
+}
+
+void ento::registerSmartPtrChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<SmartPtrChecker>();
+}
+
+bool ento::shouldRegisterSmartPtrChecker(const CheckerManager &mgr) {
+ const LangOptions &LO = mgr.getLangOpts();
+ return LO.CPlusPlus;
+}
diff --git a/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp b/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
index fd372aafa50d..bcc7d4103c1c 100644
--- a/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
@@ -12,27 +12,81 @@
//===----------------------------------------------------------------------===//
#include "Move.h"
+#include "SmartPtr.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/Type.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
using namespace clang;
using namespace ento;
namespace {
-class SmartPtrModeling : public Checker<eval::Call> {
+class SmartPtrModeling : public Checker<eval::Call, check::DeadSymbols> {
+
bool isNullAfterMoveMethod(const CallEvent &Call) const;
public:
+ // Whether the checker should model for null dereferences of smart pointers.
+ DefaultBool ModelSmartPtrDereference;
bool evalCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
+
+private:
+ ProgramStateRef updateTrackedRegion(const CallEvent &Call, CheckerContext &C,
+ const MemRegion *ThisValRegion) const;
+ void handleReset(const CallEvent &Call, CheckerContext &C) const;
+ void handleRelease(const CallEvent &Call, CheckerContext &C) const;
+ void handleSwap(const CallEvent &Call, CheckerContext &C) const;
+
+ using SmartPtrMethodHandlerFn =
+ void (SmartPtrModeling::*)(const CallEvent &Call, CheckerContext &) const;
+ CallDescriptionMap<SmartPtrMethodHandlerFn> SmartPtrMethodHandlers{
+ {{"reset"}, &SmartPtrModeling::handleReset},
+ {{"release"}, &SmartPtrModeling::handleRelease},
+ {{"swap", 1}, &SmartPtrModeling::handleSwap}};
};
} // end of anonymous namespace
+REGISTER_MAP_WITH_PROGRAMSTATE(TrackedRegionMap, const MemRegion *, SVal)
+
+// Define the inter-checker API.
+namespace clang {
+namespace ento {
+namespace smartptr {
+bool isStdSmartPtrCall(const CallEvent &Call) {
+ const auto *MethodDecl = dyn_cast_or_null<CXXMethodDecl>(Call.getDecl());
+ if (!MethodDecl || !MethodDecl->getParent())
+ return false;
+
+ const auto *RecordDecl = MethodDecl->getParent();
+ if (!RecordDecl || !RecordDecl->getDeclContext()->isStdNamespace())
+ return false;
+
+ if (RecordDecl->getDeclName().isIdentifier()) {
+ StringRef Name = RecordDecl->getName();
+ return Name == "shared_ptr" || Name == "unique_ptr" || Name == "weak_ptr";
+ }
+ return false;
+}
+
+bool isNullSmartPtr(const ProgramStateRef State, const MemRegion *ThisRegion) {
+ const auto *InnerPointVal = State->get<TrackedRegionMap>(ThisRegion);
+ return InnerPointVal && InnerPointVal->isZeroConstant();
+}
+} // namespace smartptr
+} // namespace ento
+} // namespace clang
+
bool SmartPtrModeling::isNullAfterMoveMethod(const CallEvent &Call) const {
// TODO: Update CallDescription to support anonymous calls?
// TODO: Handle other methods, such as .get() or .release().
@@ -44,29 +98,136 @@ bool SmartPtrModeling::isNullAfterMoveMethod(const CallEvent &Call) const {
bool SmartPtrModeling::evalCall(const CallEvent &Call,
CheckerContext &C) const {
- if (!isNullAfterMoveMethod(Call))
+
+ if (!smartptr::isStdSmartPtrCall(Call))
return false;
- ProgramStateRef State = C.getState();
- const MemRegion *ThisR =
- cast<CXXInstanceCall>(&Call)->getCXXThisVal().getAsRegion();
+ if (isNullAfterMoveMethod(Call)) {
+ ProgramStateRef State = C.getState();
+ const MemRegion *ThisR =
+ cast<CXXInstanceCall>(&Call)->getCXXThisVal().getAsRegion();
+
+ if (!move::isMovedFrom(State, ThisR)) {
+ // TODO: Model this case as well. At least, avoid invalidation of globals.
+ return false;
+ }
+
+ // TODO: Add a note to bug reports describing this decision.
+ C.addTransition(
+ State->BindExpr(Call.getOriginExpr(), C.getLocationContext(),
+ C.getSValBuilder().makeZeroVal(Call.getResultType())));
+ return true;
+ }
- if (!move::isMovedFrom(State, ThisR)) {
- // TODO: Model this case as well. At least, avoid invalidation of globals.
+ if (!ModelSmartPtrDereference)
return false;
+
+ if (const auto *CC = dyn_cast<CXXConstructorCall>(&Call)) {
+ if (CC->getDecl()->isCopyOrMoveConstructor())
+ return false;
+
+ const MemRegion *ThisValRegion = CC->getCXXThisVal().getAsRegion();
+ if (!ThisValRegion)
+ return false;
+
+ auto State = updateTrackedRegion(Call, C, ThisValRegion);
+ C.addTransition(State);
+ return true;
+ }
+
+ const SmartPtrMethodHandlerFn *Handler = SmartPtrMethodHandlers.lookup(Call);
+ if (!Handler)
+ return false;
+ (this->**Handler)(Call, C);
+
+ return C.isDifferent();
+}
+
+void SmartPtrModeling::checkDeadSymbols(SymbolReaper &SymReaper,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ // Clean up dead regions from the region map.
+ TrackedRegionMapTy TrackedRegions = State->get<TrackedRegionMap>();
+ for (auto E : TrackedRegions) {
+ const MemRegion *Region = E.first;
+ bool IsRegDead = !SymReaper.isLiveRegion(Region);
+
+ if (IsRegDead)
+ State = State->remove<TrackedRegionMap>(Region);
+ }
+ C.addTransition(State);
+}
+
+void SmartPtrModeling::handleReset(const CallEvent &Call,
+ CheckerContext &C) const {
+ const auto *IC = dyn_cast<CXXInstanceCall>(&Call);
+ if (!IC)
+ return;
+
+ const MemRegion *ThisValRegion = IC->getCXXThisVal().getAsRegion();
+ if (!ThisValRegion)
+ return;
+ auto State = updateTrackedRegion(Call, C, ThisValRegion);
+ C.addTransition(State);
+ // TODO: Make sure to invalidate the region in the Store if we don't have
+ // time to model all methods.
+}
+
+void SmartPtrModeling::handleRelease(const CallEvent &Call,
+ CheckerContext &C) const {
+ const auto *IC = dyn_cast<CXXInstanceCall>(&Call);
+ if (!IC)
+ return;
+
+ const MemRegion *ThisValRegion = IC->getCXXThisVal().getAsRegion();
+ if (!ThisValRegion)
+ return;
+
+ auto State = updateTrackedRegion(Call, C, ThisValRegion);
+
+ const auto *InnerPointVal = State->get<TrackedRegionMap>(ThisValRegion);
+ if (InnerPointVal) {
+ State = State->BindExpr(Call.getOriginExpr(), C.getLocationContext(),
+ *InnerPointVal);
+ }
+ C.addTransition(State);
+ // TODO: Add support to enable MallocChecker to start tracking the raw
+ // pointer.
+}
+
+void SmartPtrModeling::handleSwap(const CallEvent &Call,
+ CheckerContext &C) const {
+ // TODO: Add support to handle swap method.
+}
+
+ProgramStateRef
+SmartPtrModeling::updateTrackedRegion(const CallEvent &Call, CheckerContext &C,
+ const MemRegion *ThisValRegion) const {
+ // TODO: Refactor and clean up handling too many things.
+ ProgramStateRef State = C.getState();
+ auto NumArgs = Call.getNumArgs();
+
+ if (NumArgs == 0) {
+ auto NullSVal = C.getSValBuilder().makeNull();
+ State = State->set<TrackedRegionMap>(ThisValRegion, NullSVal);
+ } else if (NumArgs == 1) {
+ auto ArgVal = Call.getArgSVal(0);
+ assert(Call.getArgExpr(0)->getType()->isPointerType() &&
+ "Adding a non pointer value to TrackedRegionMap");
+ State = State->set<TrackedRegionMap>(ThisValRegion, ArgVal);
}
- // TODO: Add a note to bug reports describing this decision.
- C.addTransition(
- State->BindExpr(Call.getOriginExpr(), C.getLocationContext(),
- C.getSValBuilder().makeZeroVal(Call.getResultType())));
- return true;
+ return State;
}
void ento::registerSmartPtrModeling(CheckerManager &Mgr) {
- Mgr.registerChecker<SmartPtrModeling>();
+ auto *Checker = Mgr.registerChecker<SmartPtrModeling>();
+ Checker->ModelSmartPtrDereference =
+ Mgr.getAnalyzerOptions().getCheckerBooleanOption(
+ Checker, "ModelSmartPtrDereference");
}
-bool ento::shouldRegisterSmartPtrModeling(const LangOptions &LO) {
+bool ento::shouldRegisterSmartPtrModeling(const CheckerManager &mgr) {
+ const LangOptions &LO = mgr.getLangOpts();
return LO.CPlusPlus;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
index 7285d27495a7..b5c9356322fc 100644
--- a/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
@@ -43,6 +43,7 @@ public:
};
DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ CheckerNameRef CheckNames[CK_NumCheckKinds];
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
@@ -156,7 +157,8 @@ void StackAddrEscapeChecker::EmitStackError(CheckerContext &C,
return;
if (!BT_returnstack)
BT_returnstack = std::make_unique<BuiltinBug>(
- this, "Return of address to stack-allocated memory");
+ CheckNames[CK_StackAddrEscapeChecker],
+ "Return of address to stack-allocated memory");
// Generate a report for this bug.
SmallString<128> buf;
llvm::raw_svector_ostream os(buf);
@@ -195,7 +197,8 @@ void StackAddrEscapeChecker::checkAsyncExecutedBlockCaptures(
continue;
if (!BT_capturedstackasync)
BT_capturedstackasync = std::make_unique<BuiltinBug>(
- this, "Address of stack-allocated memory is captured");
+ CheckNames[CK_StackAddrAsyncEscapeChecker],
+ "Address of stack-allocated memory is captured");
SmallString<128> Buf;
llvm::raw_svector_ostream Out(Buf);
SourceRange Range = genName(Out, Region, C.getASTContext());
@@ -218,7 +221,8 @@ void StackAddrEscapeChecker::checkReturnedBlockCaptures(
continue;
if (!BT_capturedstackret)
BT_capturedstackret = std::make_unique<BuiltinBug>(
- this, "Address of stack-allocated memory is captured");
+ CheckNames[CK_StackAddrEscapeChecker],
+ "Address of stack-allocated memory is captured");
SmallString<128> Buf;
llvm::raw_svector_ostream Out(Buf);
SourceRange Range = genName(Out, Region, C.getASTContext());
@@ -277,7 +281,7 @@ void StackAddrEscapeChecker::checkPreStmt(const ReturnStmt *RS,
// The CK_CopyAndAutoreleaseBlockObject cast causes the block to be copied
// so the stack address is not escaping here.
- if (auto *ICE = dyn_cast<ImplicitCastExpr>(RetE)) {
+ if (const auto *ICE = dyn_cast<ImplicitCastExpr>(RetE)) {
if (isa<BlockDataRegion>(R) &&
ICE->getCastKind() == CK_CopyAndAutoreleaseBlockObject) {
return;
@@ -333,7 +337,8 @@ void StackAddrEscapeChecker::checkEndFunction(const ReturnStmt *RS,
if (!BT_stackleak)
BT_stackleak = std::make_unique<BuiltinBug>(
- this, "Stack address stored into global variable",
+ CheckNames[CK_StackAddrEscapeChecker],
+ "Stack address stored into global variable",
"Stack address was saved into a global variable. "
"This is dangerous because the address will become "
"invalid after returning from the function");
@@ -365,20 +370,19 @@ void ento::registerStackAddrEscapeBase(CheckerManager &mgr) {
mgr.registerChecker<StackAddrEscapeChecker>();
}
-bool ento::shouldRegisterStackAddrEscapeBase(const LangOptions &LO) {
+bool ento::shouldRegisterStackAddrEscapeBase(const CheckerManager &mgr) {
return true;
}
#define REGISTER_CHECKER(name) \
void ento::register##name(CheckerManager &Mgr) { \
- StackAddrEscapeChecker *Chk = \
- Mgr.getChecker<StackAddrEscapeChecker>(); \
+ StackAddrEscapeChecker *Chk = Mgr.getChecker<StackAddrEscapeChecker>(); \
Chk->ChecksEnabled[StackAddrEscapeChecker::CK_##name] = true; \
+ Chk->CheckNames[StackAddrEscapeChecker::CK_##name] = \
+ Mgr.getCurrentCheckerName(); \
} \
\
- bool ento::shouldRegister##name(const LangOptions &LO) { \
- return true; \
- }
+ bool ento::shouldRegister##name(const CheckerManager &mgr) { return true; }
REGISTER_CHECKER(StackAddrEscapeChecker)
REGISTER_CHECKER(StackAddrAsyncEscapeChecker)
diff --git a/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
index 2cdee8da375e..8b575f4f4759 100644
--- a/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
@@ -7,9 +7,8 @@
//===----------------------------------------------------------------------===//
//
// This checker improves modeling of a few simple library functions.
-// It does not generate warnings.
//
-// This checker provides a specification format - `FunctionSummaryTy' - and
+// This checker provides a specification format - `Summary' - and
// contains descriptions of some library functions in this format. Each
// specification contains a list of branches for splitting the program state
// upon call, and range constraints on argument and return-value symbols that
@@ -21,7 +20,7 @@
// consider standard C function `ispunct(int x)', which returns a non-zero value
// iff `x' is a punctuation character, that is, when `x' is in range
// ['!', '/'] [':', '@'] U ['[', '\`'] U ['{', '~'].
-// `FunctionSummaryTy' provides only two branches for this function. However,
+// `Summary' provides only two branches for this function. However,
// any attempt to describe this range with if-statements in the body farm
// would result in many more branches. Because each branch needs to be analyzed
// independently, this significantly reduces performance. Additionally,
@@ -30,13 +29,13 @@
// which may lead to false positives because considering this particular path
// was not consciously intended, and therefore it might have been unreachable.
//
-// This checker uses eval::Call for modeling "pure" functions, for which
-// their `FunctionSummaryTy' is a precise model. This avoids unnecessary
-// invalidation passes. Conflicts with other checkers are unlikely because
-// if the function has no other effects, other checkers would probably never
-// want to improve upon the modeling done by this checker.
+// This checker uses eval::Call for modeling pure functions (functions without
+// side effects), for which their `Summary' is a precise model. This avoids
+// unnecessary invalidation passes. Conflicts with other checkers are unlikely
+// because if the function has no other effects, other checkers would probably
+// never want to improve upon the modeling done by this checker.
//
-// Non-"pure" functions, for which only partial improvement over the default
+// Non-pure functions, for which only partial improvement over the default
// behavior is expected, are modeled via check::PostCall, non-intrusively.
//
// The following standard C functions are currently supported:
@@ -51,203 +50,461 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
using namespace clang;
using namespace clang::ento;
namespace {
-class StdLibraryFunctionsChecker : public Checker<check::PostCall, eval::Call> {
- /// Below is a series of typedefs necessary to define function specs.
- /// We avoid nesting types here because each additional qualifier
- /// would need to be repeated in every function spec.
- struct FunctionSummaryTy;
+class StdLibraryFunctionsChecker
+ : public Checker<check::PreCall, check::PostCall, eval::Call> {
+
+ class Summary;
/// Specify how much the analyzer engine should entrust modeling this function
/// to us. If he doesn't, he performs additional invalidations.
- enum InvalidationKindTy { NoEvalCall, EvalCallAsPure };
-
- /// A pair of ValueRangeKindTy and IntRangeVectorTy would describe a range
- /// imposed on a particular argument or return value symbol.
- ///
- /// Given a range, should the argument stay inside or outside this range?
- /// The special `ComparesToArgument' value indicates that we should
- /// impose a constraint that involves other argument or return value symbols.
- enum ValueRangeKindTy { OutOfRange, WithinRange, ComparesToArgument };
+ enum InvalidationKind { NoEvalCall, EvalCallAsPure };
// The universal integral type to use in value range descriptions.
// Unsigned to make sure overflows are well-defined.
- typedef uint64_t RangeIntTy;
+ typedef uint64_t RangeInt;
/// Normally, describes a single range constraint, eg. {{0, 1}, {3, 4}} is
/// a non-negative integer, which less than 5 and not equal to 2. For
/// `ComparesToArgument', holds information about how exactly to compare to
/// the argument.
- typedef std::vector<std::pair<RangeIntTy, RangeIntTy>> IntRangeVectorTy;
+ typedef std::vector<std::pair<RangeInt, RangeInt>> IntRangeVector;
/// A reference to an argument or return value by its number.
/// ArgNo in CallExpr and CallEvent is defined as Unsigned, but
/// obviously uint32_t should be enough for all practical purposes.
- typedef uint32_t ArgNoTy;
- static const ArgNoTy Ret = std::numeric_limits<ArgNoTy>::max();
-
- /// Incapsulates a single range on a single symbol within a branch.
- class ValueRange {
- ArgNoTy ArgNo; // Argument to which we apply the range.
- ValueRangeKindTy Kind; // Kind of range definition.
- IntRangeVectorTy Args; // Polymorphic arguments.
-
+ typedef uint32_t ArgNo;
+ static const ArgNo Ret;
+
+ class ValueConstraint;
+
+ // Pointer to the ValueConstraint. We need a copyable, polymorphic and
+ // default initialize able type (vector needs that). A raw pointer was good,
+ // however, we cannot default initialize that. unique_ptr makes the Summary
+ // class non-copyable, therefore not an option. Releasing the copyability
+ // requirement would render the initialization of the Summary map infeasible.
+ using ValueConstraintPtr = std::shared_ptr<ValueConstraint>;
+
+ /// Polymorphic base class that represents a constraint on a given argument
+ /// (or return value) of a function. Derived classes implement different kind
+ /// of constraints, e.g range constraints or correlation between two
+ /// arguments.
+ class ValueConstraint {
public:
- ValueRange(ArgNoTy ArgNo, ValueRangeKindTy Kind,
- const IntRangeVectorTy &Args)
- : ArgNo(ArgNo), Kind(Kind), Args(Args) {}
-
- ArgNoTy getArgNo() const { return ArgNo; }
- ValueRangeKindTy getKind() const { return Kind; }
-
- BinaryOperator::Opcode getOpcode() const {
- assert(Kind == ComparesToArgument);
- assert(Args.size() == 1);
- BinaryOperator::Opcode Op =
- static_cast<BinaryOperator::Opcode>(Args[0].first);
- assert(BinaryOperator::isComparisonOp(Op) &&
- "Only comparison ops are supported for ComparesToArgument");
- return Op;
+ ValueConstraint(ArgNo ArgN) : ArgN(ArgN) {}
+ virtual ~ValueConstraint() {}
+ /// Apply the effects of the constraint on the given program state. If null
+ /// is returned then the constraint is not feasible.
+ virtual ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
+ const Summary &Summary,
+ CheckerContext &C) const = 0;
+ virtual ValueConstraintPtr negate() const {
+ llvm_unreachable("Not implemented");
+ };
+
+ // Check whether the constraint is malformed or not. It is malformed if the
+ // specified argument has a mismatch with the given FunctionDecl (e.g. the
+ // arg number is out-of-range of the function's argument list).
+ bool checkValidity(const FunctionDecl *FD) const {
+ const bool ValidArg = ArgN == Ret || ArgN < FD->getNumParams();
+ assert(ValidArg && "Arg out of range!");
+ if (!ValidArg)
+ return false;
+ // Subclasses may further refine the validation.
+ return checkSpecificValidity(FD);
}
+ ArgNo getArgNo() const { return ArgN; }
- ArgNoTy getOtherArgNo() const {
- assert(Kind == ComparesToArgument);
- assert(Args.size() == 1);
- return static_cast<ArgNoTy>(Args[0].second);
+ protected:
+ ArgNo ArgN; // Argument to which we apply the constraint.
+
+ /// Do polymorphic sanity check on the constraint.
+ virtual bool checkSpecificValidity(const FunctionDecl *FD) const {
+ return true;
}
+ };
+
+ /// Given a range, should the argument stay inside or outside this range?
+ enum RangeKind { OutOfRange, WithinRange };
+
+ /// Encapsulates a single range on a single symbol within a branch.
+ class RangeConstraint : public ValueConstraint {
+ RangeKind Kind; // Kind of range definition.
+ IntRangeVector Args; // Polymorphic arguments.
- const IntRangeVectorTy &getRanges() const {
- assert(Kind != ComparesToArgument);
+ public:
+ RangeConstraint(ArgNo ArgN, RangeKind Kind, const IntRangeVector &Args)
+ : ValueConstraint(ArgN), Kind(Kind), Args(Args) {}
+
+ const IntRangeVector &getRanges() const {
return Args;
}
- // We avoid creating a virtual apply() method because
- // it makes initializer lists harder to write.
private:
- ProgramStateRef
- applyAsOutOfRange(ProgramStateRef State, const CallEvent &Call,
- const FunctionSummaryTy &Summary) const;
- ProgramStateRef
- applyAsWithinRange(ProgramStateRef State, const CallEvent &Call,
- const FunctionSummaryTy &Summary) const;
- ProgramStateRef
- applyAsComparesToArgument(ProgramStateRef State, const CallEvent &Call,
- const FunctionSummaryTy &Summary) const;
-
+ ProgramStateRef applyAsOutOfRange(ProgramStateRef State,
+ const CallEvent &Call,
+ const Summary &Summary) const;
+ ProgramStateRef applyAsWithinRange(ProgramStateRef State,
+ const CallEvent &Call,
+ const Summary &Summary) const;
public:
ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
- const FunctionSummaryTy &Summary) const {
+ const Summary &Summary,
+ CheckerContext &C) const override {
switch (Kind) {
case OutOfRange:
return applyAsOutOfRange(State, Call, Summary);
case WithinRange:
return applyAsWithinRange(State, Call, Summary);
- case ComparesToArgument:
- return applyAsComparesToArgument(State, Call, Summary);
}
- llvm_unreachable("Unknown ValueRange kind!");
+ llvm_unreachable("Unknown range kind!");
+ }
+
+ ValueConstraintPtr negate() const override {
+ RangeConstraint Tmp(*this);
+ switch (Kind) {
+ case OutOfRange:
+ Tmp.Kind = WithinRange;
+ break;
+ case WithinRange:
+ Tmp.Kind = OutOfRange;
+ break;
+ }
+ return std::make_shared<RangeConstraint>(Tmp);
+ }
+
+ bool checkSpecificValidity(const FunctionDecl *FD) const override {
+ const bool ValidArg =
+ getArgType(FD, ArgN)->isIntegralType(FD->getASTContext());
+ assert(ValidArg &&
+ "This constraint should be applied on an integral type");
+ return ValidArg;
}
};
- /// The complete list of ranges that defines a single branch.
- typedef std::vector<ValueRange> ValueRangeSet;
+ class ComparisonConstraint : public ValueConstraint {
+ BinaryOperator::Opcode Opcode;
+ ArgNo OtherArgN;
+
+ public:
+ ComparisonConstraint(ArgNo ArgN, BinaryOperator::Opcode Opcode,
+ ArgNo OtherArgN)
+ : ValueConstraint(ArgN), Opcode(Opcode), OtherArgN(OtherArgN) {}
+ ArgNo getOtherArgNo() const { return OtherArgN; }
+ BinaryOperator::Opcode getOpcode() const { return Opcode; }
+ ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
+ const Summary &Summary,
+ CheckerContext &C) const override;
+ };
+
+ class NotNullConstraint : public ValueConstraint {
+ using ValueConstraint::ValueConstraint;
+ // This variable has a role when we negate the constraint.
+ bool CannotBeNull = true;
- /// Includes information about function prototype (which is necessary to
- /// ensure we're modeling the right function and casting values properly),
- /// approach to invalidation, and a list of branches - essentially, a list
- /// of list of ranges - essentially, a list of lists of lists of segments.
- struct FunctionSummaryTy {
- const std::vector<QualType> ArgTypes;
- const QualType RetType;
- const InvalidationKindTy InvalidationKind;
- const std::vector<ValueRangeSet> Ranges;
+ public:
+ ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
+ const Summary &Summary,
+ CheckerContext &C) const override {
+ SVal V = getArgSVal(Call, getArgNo());
+ if (V.isUndef())
+ return State;
+
+ DefinedOrUnknownSVal L = V.castAs<DefinedOrUnknownSVal>();
+ if (!L.getAs<Loc>())
+ return State;
+
+ return State->assume(L, CannotBeNull);
+ }
+
+ ValueConstraintPtr negate() const override {
+ NotNullConstraint Tmp(*this);
+ Tmp.CannotBeNull = !this->CannotBeNull;
+ return std::make_shared<NotNullConstraint>(Tmp);
+ }
+
+ bool checkSpecificValidity(const FunctionDecl *FD) const override {
+ const bool ValidArg = getArgType(FD, ArgN)->isPointerType();
+ assert(ValidArg &&
+ "This constraint should be applied only on a pointer type");
+ return ValidArg;
+ }
+ };
+
+ // Represents a buffer argument with an additional size argument.
+ // E.g. the first two arguments here:
+ // ctime_s(char *buffer, rsize_t bufsz, const time_t *time);
+ // Another example:
+ // size_t fread(void *ptr, size_t size, size_t nmemb, FILE *stream);
+ // // Here, ptr is the buffer, and its minimum size is `size * nmemb`.
+ class BufferSizeConstraint : public ValueConstraint {
+ // The argument which holds the size of the buffer.
+ ArgNo SizeArgN;
+ // The argument which is a multiplier to size. This is set in case of
+ // `fread` like functions where the size is computed as a multiplication of
+ // two arguments.
+ llvm::Optional<ArgNo> SizeMultiplierArgN;
+ // The operator we use in apply. This is negated in negate().
+ BinaryOperator::Opcode Op = BO_LE;
+
+ public:
+ BufferSizeConstraint(ArgNo Buffer, ArgNo BufSize)
+ : ValueConstraint(Buffer), SizeArgN(BufSize) {}
+
+ BufferSizeConstraint(ArgNo Buffer, ArgNo BufSize, ArgNo BufSizeMultiplier)
+ : ValueConstraint(Buffer), SizeArgN(BufSize),
+ SizeMultiplierArgN(BufSizeMultiplier) {}
+
+ ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
+ const Summary &Summary,
+ CheckerContext &C) const override {
+ SValBuilder &SvalBuilder = C.getSValBuilder();
+ // The buffer argument.
+ SVal BufV = getArgSVal(Call, getArgNo());
+ // The size argument.
+ SVal SizeV = getArgSVal(Call, SizeArgN);
+ // Multiply with another argument if given.
+ if (SizeMultiplierArgN) {
+ SVal SizeMulV = getArgSVal(Call, *SizeMultiplierArgN);
+ SizeV = SvalBuilder.evalBinOp(State, BO_Mul, SizeV, SizeMulV,
+ Summary.getArgType(SizeArgN));
+ }
+ // The dynamic size of the buffer argument, got from the analyzer engine.
+ SVal BufDynSize = getDynamicSizeWithOffset(State, BufV);
+
+ SVal Feasible = SvalBuilder.evalBinOp(State, Op, SizeV, BufDynSize,
+ SvalBuilder.getContext().BoolTy);
+ if (auto F = Feasible.getAs<DefinedOrUnknownSVal>())
+ return State->assume(*F, true);
+
+ // We can get here only if the size argument or the dynamic size is
+ // undefined. But the dynamic size should never be undefined, only
+ // unknown. So, here, the size of the argument is undefined, i.e. we
+ // cannot apply the constraint. Actually, other checkers like
+ // CallAndMessage should catch this situation earlier, because we call a
+ // function with an uninitialized argument.
+ llvm_unreachable("Size argument or the dynamic size is Undefined");
+ }
+
+ ValueConstraintPtr negate() const override {
+ BufferSizeConstraint Tmp(*this);
+ Tmp.Op = BinaryOperator::negateComparisonOp(Op);
+ return std::make_shared<BufferSizeConstraint>(Tmp);
+ }
+ };
+
+ /// The complete list of constraints that defines a single branch.
+ typedef std::vector<ValueConstraintPtr> ConstraintSet;
+
+ using ArgTypes = std::vector<QualType>;
+
+ // A placeholder type, we use it whenever we do not care about the concrete
+ // type in a Signature.
+ const QualType Irrelevant{};
+ bool static isIrrelevant(QualType T) { return T.isNull(); }
+
+ // The signature of a function we want to describe with a summary. This is a
+ // concessive signature, meaning there may be irrelevant types in the
+ // signature which we do not check against a function with concrete types.
+ struct Signature {
+ const ArgTypes ArgTys;
+ const QualType RetTy;
+ Signature(ArgTypes ArgTys, QualType RetTy) : ArgTys(ArgTys), RetTy(RetTy) {
+ assertRetTypeSuitableForSignature(RetTy);
+ for (size_t I = 0, E = ArgTys.size(); I != E; ++I) {
+ QualType ArgTy = ArgTys[I];
+ assertArgTypeSuitableForSignature(ArgTy);
+ }
+ }
+ bool matches(const FunctionDecl *FD) const;
private:
- static void assertTypeSuitableForSummary(QualType T) {
- assert(!T->isVoidType() &&
- "We should have had no significant void types in the spec");
- assert(T.isCanonical() &&
+ static void assertArgTypeSuitableForSignature(QualType T) {
+ assert((T.isNull() || !T->isVoidType()) &&
+ "We should have no void types in the spec");
+ assert((T.isNull() || T.isCanonical()) &&
+ "We should only have canonical types in the spec");
+ }
+ static void assertRetTypeSuitableForSignature(QualType T) {
+ assert((T.isNull() || T.isCanonical()) &&
"We should only have canonical types in the spec");
- // FIXME: lift this assert (but not the ones above!)
- assert(T->isIntegralOrEnumerationType() &&
- "We only support integral ranges in the spec");
}
+ };
+
+ static QualType getArgType(const FunctionDecl *FD, ArgNo ArgN) {
+ assert(FD && "Function must be set");
+ QualType T = (ArgN == Ret)
+ ? FD->getReturnType().getCanonicalType()
+ : FD->getParamDecl(ArgN)->getType().getCanonicalType();
+ return T;
+ }
+
+ using Cases = std::vector<ConstraintSet>;
+
+ /// A summary includes information about
+ /// * function prototype (signature)
+ /// * approach to invalidation,
+ /// * a list of branches - a list of list of ranges -
+ /// A branch represents a path in the exploded graph of a function (which
+ /// is a tree). So, a branch is a series of assumptions. In other words,
+ /// branches represent split states and additional assumptions on top of
+ /// the splitting assumption.
+ /// For example, consider the branches in `isalpha(x)`
+ /// Branch 1)
+ /// x is in range ['A', 'Z'] or in ['a', 'z']
+ /// then the return value is not 0. (I.e. out-of-range [0, 0])
+ /// Branch 2)
+ /// x is out-of-range ['A', 'Z'] and out-of-range ['a', 'z']
+ /// then the return value is 0.
+ /// * a list of argument constraints, that must be true on every branch.
+ /// If these constraints are not satisfied that means a fatal error
+ /// usually resulting in undefined behaviour.
+ ///
+ /// Application of a summary:
+ /// The signature and argument constraints together contain information
+ /// about which functions are handled by the summary. The signature can use
+ /// "wildcards", i.e. Irrelevant types. Irrelevant type of a parameter in
+ /// a signature means that type is not compared to the type of the parameter
+ /// in the found FunctionDecl. Argument constraints may specify additional
+ /// rules for the given parameter's type, those rules are checked once the
+ /// signature is matched.
+ class Summary {
+ const Signature Sign;
+ const InvalidationKind InvalidationKd;
+ Cases CaseConstraints;
+ ConstraintSet ArgConstraints;
+
+ // The function to which the summary applies. This is set after lookup and
+ // match to the signature.
+ const FunctionDecl *FD = nullptr;
public:
- QualType getArgType(ArgNoTy ArgNo) const {
- QualType T = (ArgNo == Ret) ? RetType : ArgTypes[ArgNo];
- assertTypeSuitableForSummary(T);
- return T;
+ Summary(ArgTypes ArgTys, QualType RetTy, InvalidationKind InvalidationKd)
+ : Sign(ArgTys, RetTy), InvalidationKd(InvalidationKd) {}
+
+ Summary &Case(ConstraintSet&& CS) {
+ CaseConstraints.push_back(std::move(CS));
+ return *this;
+ }
+ Summary &ArgConstraint(ValueConstraintPtr VC) {
+ ArgConstraints.push_back(VC);
+ return *this;
}
- /// Try our best to figure out if the call expression is the call of
- /// *the* library function to which this specification applies.
- bool matchesCall(const CallExpr *CE) const;
- };
+ InvalidationKind getInvalidationKd() const { return InvalidationKd; }
+ const Cases &getCaseConstraints() const { return CaseConstraints; }
+ const ConstraintSet &getArgConstraints() const { return ArgConstraints; }
- // The same function (as in, function identifier) may have different
- // summaries assigned to it, with different argument and return value types.
- // We call these "variants" of the function. This can be useful for handling
- // C++ function overloads, and also it can be used when the same function
- // may have different definitions on different platforms.
- typedef std::vector<FunctionSummaryTy> FunctionVariantsTy;
+ QualType getArgType(ArgNo ArgN) const {
+ return StdLibraryFunctionsChecker::getArgType(FD, ArgN);
+ }
+
+ // Returns true if the summary should be applied to the given function.
+ // And if yes then store the function declaration.
+ bool matchesAndSet(const FunctionDecl *FD) {
+ bool Result = Sign.matches(FD) && validateByConstraints(FD);
+ if (Result) {
+ assert(!this->FD && "FD must not be set more than once");
+ this->FD = FD;
+ }
+ return Result;
+ }
+
+ private:
+ // Once we know the exact type of the function then do sanity check on all
+ // the given constraints.
+ bool validateByConstraints(const FunctionDecl *FD) const {
+ for (const ConstraintSet &Case : CaseConstraints)
+ for (const ValueConstraintPtr &Constraint : Case)
+ if (!Constraint->checkValidity(FD))
+ return false;
+ for (const ValueConstraintPtr &Constraint : ArgConstraints)
+ if (!Constraint->checkValidity(FD))
+ return false;
+ return true;
+ }
+ };
// The map of all functions supported by the checker. It is initialized
// lazily, and it doesn't change after initialization.
- typedef llvm::StringMap<FunctionVariantsTy> FunctionSummaryMapTy;
- mutable FunctionSummaryMapTy FunctionSummaryMap;
+ using FunctionSummaryMapType = llvm::DenseMap<const FunctionDecl *, Summary>;
+ mutable FunctionSummaryMapType FunctionSummaryMap;
- // Auxiliary functions to support ArgNoTy within all structures
- // in a unified manner.
- static QualType getArgType(const FunctionSummaryTy &Summary, ArgNoTy ArgNo) {
- return Summary.getArgType(ArgNo);
- }
- static QualType getArgType(const CallEvent &Call, ArgNoTy ArgNo) {
- return ArgNo == Ret ? Call.getResultType().getCanonicalType()
- : Call.getArgExpr(ArgNo)->getType().getCanonicalType();
- }
- static QualType getArgType(const CallExpr *CE, ArgNoTy ArgNo) {
- return ArgNo == Ret ? CE->getType().getCanonicalType()
- : CE->getArg(ArgNo)->getType().getCanonicalType();
- }
- static SVal getArgSVal(const CallEvent &Call, ArgNoTy ArgNo) {
- return ArgNo == Ret ? Call.getReturnValue() : Call.getArgSVal(ArgNo);
+ mutable std::unique_ptr<BugType> BT_InvalidArg;
+
+ static SVal getArgSVal(const CallEvent &Call, ArgNo ArgN) {
+ return ArgN == Ret ? Call.getReturnValue() : Call.getArgSVal(ArgN);
}
public:
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
bool evalCall(const CallEvent &Call, CheckerContext &C) const;
-private:
- Optional<FunctionSummaryTy> findFunctionSummary(const FunctionDecl *FD,
- const CallExpr *CE,
- CheckerContext &C) const;
+ enum CheckKind {
+ CK_StdCLibraryFunctionArgsChecker,
+ CK_StdCLibraryFunctionsTesterChecker,
+ CK_NumCheckKinds
+ };
+ DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ CheckerNameRef CheckNames[CK_NumCheckKinds];
+
+ bool DisplayLoadedSummaries = false;
+ bool ModelPOSIX = false;
- void initFunctionSummaries(BasicValueFactory &BVF) const;
+private:
+ Optional<Summary> findFunctionSummary(const FunctionDecl *FD,
+ CheckerContext &C) const;
+ Optional<Summary> findFunctionSummary(const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void initFunctionSummaries(CheckerContext &C) const;
+
+ void reportBug(const CallEvent &Call, ExplodedNode *N,
+ CheckerContext &C) const {
+ if (!ChecksEnabled[CK_StdCLibraryFunctionArgsChecker])
+ return;
+ // TODO Add detailed diagnostic.
+ StringRef Msg = "Function argument constraint is not satisfied";
+ if (!BT_InvalidArg)
+ BT_InvalidArg = std::make_unique<BugType>(
+ CheckNames[CK_StdCLibraryFunctionArgsChecker],
+ "Unsatisfied argument constraints", categories::LogicError);
+ auto R = std::make_unique<PathSensitiveBugReport>(*BT_InvalidArg, Msg, N);
+ bugreporter::trackExpressionValue(N, Call.getArgExpr(0), *R);
+ C.emitReport(std::move(R));
+ }
};
+
+const StdLibraryFunctionsChecker::ArgNo StdLibraryFunctionsChecker::Ret =
+ std::numeric_limits<ArgNo>::max();
+
} // end of anonymous namespace
-ProgramStateRef StdLibraryFunctionsChecker::ValueRange::applyAsOutOfRange(
+ProgramStateRef StdLibraryFunctionsChecker::RangeConstraint::applyAsOutOfRange(
ProgramStateRef State, const CallEvent &Call,
- const FunctionSummaryTy &Summary) const {
+ const Summary &Summary) const {
ProgramStateManager &Mgr = State->getStateManager();
SValBuilder &SVB = Mgr.getSValBuilder();
BasicValueFactory &BVF = SVB.getBasicValueFactory();
ConstraintManager &CM = Mgr.getConstraintManager();
- QualType T = getArgType(Summary, getArgNo());
+ QualType T = Summary.getArgType(getArgNo());
SVal V = getArgSVal(Call, getArgNo());
if (auto N = V.getAs<NonLoc>()) {
- const IntRangeVectorTy &R = getRanges();
+ const IntRangeVector &R = getRanges();
size_t E = R.size();
for (size_t I = 0; I != E; ++I) {
const llvm::APSInt &Min = BVF.getValue(R[I].first, T);
@@ -262,23 +519,28 @@ ProgramStateRef StdLibraryFunctionsChecker::ValueRange::applyAsOutOfRange(
return State;
}
-ProgramStateRef
-StdLibraryFunctionsChecker::ValueRange::applyAsWithinRange(
+ProgramStateRef StdLibraryFunctionsChecker::RangeConstraint::applyAsWithinRange(
ProgramStateRef State, const CallEvent &Call,
- const FunctionSummaryTy &Summary) const {
+ const Summary &Summary) const {
ProgramStateManager &Mgr = State->getStateManager();
SValBuilder &SVB = Mgr.getSValBuilder();
BasicValueFactory &BVF = SVB.getBasicValueFactory();
ConstraintManager &CM = Mgr.getConstraintManager();
- QualType T = getArgType(Summary, getArgNo());
+ QualType T = Summary.getArgType(getArgNo());
SVal V = getArgSVal(Call, getArgNo());
// "WithinRange R" is treated as "outside [T_MIN, T_MAX] \ R".
// We cut off [T_MIN, min(R) - 1] and [max(R) + 1, T_MAX] if necessary,
// and then cut away all holes in R one by one.
+ //
+ // E.g. consider a range list R as [A, B] and [C, D]
+ // -------+--------+------------------+------------+----------->
+ // A B C D
+ // Then we assume that the value is not in [-inf, A - 1],
+ // then not in [D + 1, +inf], then not in [B + 1, C - 1]
if (auto N = V.getAs<NonLoc>()) {
- const IntRangeVectorTy &R = getRanges();
+ const IntRangeVector &R = getRanges();
size_t E = R.size();
const llvm::APSInt &MinusInf = BVF.getMinValue(T);
@@ -303,31 +565,31 @@ StdLibraryFunctionsChecker::ValueRange::applyAsWithinRange(
for (size_t I = 1; I != E; ++I) {
const llvm::APSInt &Min = BVF.getValue(R[I - 1].second + 1ULL, T);
const llvm::APSInt &Max = BVF.getValue(R[I].first - 1ULL, T);
- assert(Min <= Max);
- State = CM.assumeInclusiveRange(State, *N, Min, Max, false);
- if (!State)
- return nullptr;
+ if (Min <= Max) {
+ State = CM.assumeInclusiveRange(State, *N, Min, Max, false);
+ if (!State)
+ return nullptr;
+ }
}
}
return State;
}
-ProgramStateRef
-StdLibraryFunctionsChecker::ValueRange::applyAsComparesToArgument(
- ProgramStateRef State, const CallEvent &Call,
- const FunctionSummaryTy &Summary) const {
+ProgramStateRef StdLibraryFunctionsChecker::ComparisonConstraint::apply(
+ ProgramStateRef State, const CallEvent &Call, const Summary &Summary,
+ CheckerContext &C) const {
ProgramStateManager &Mgr = State->getStateManager();
SValBuilder &SVB = Mgr.getSValBuilder();
QualType CondT = SVB.getConditionType();
- QualType T = getArgType(Summary, getArgNo());
+ QualType T = Summary.getArgType(getArgNo());
SVal V = getArgSVal(Call, getArgNo());
BinaryOperator::Opcode Op = getOpcode();
- ArgNoTy OtherArg = getOtherArgNo();
+ ArgNo OtherArg = getOtherArgNo();
SVal OtherV = getArgSVal(Call, OtherArg);
- QualType OtherT = getArgType(Call, OtherArg);
+ QualType OtherT = Summary.getArgType(OtherArg);
// Note: we avoid integral promotion for comparison.
OtherV = SVB.evalCast(OtherV, T, OtherT);
if (auto CompV = SVB.evalBinOp(State, Op, V, OtherV, CondT)
@@ -336,28 +598,53 @@ StdLibraryFunctionsChecker::ValueRange::applyAsComparesToArgument(
return State;
}
-void StdLibraryFunctionsChecker::checkPostCall(const CallEvent &Call,
- CheckerContext &C) const {
- const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
- if (!FD)
+void StdLibraryFunctionsChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ Optional<Summary> FoundSummary = findFunctionSummary(Call, C);
+ if (!FoundSummary)
return;
- const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
- if (!CE)
- return;
+ const Summary &Summary = *FoundSummary;
+ ProgramStateRef State = C.getState();
+
+ ProgramStateRef NewState = State;
+ for (const ValueConstraintPtr &Constraint : Summary.getArgConstraints()) {
+ ProgramStateRef SuccessSt = Constraint->apply(NewState, Call, Summary, C);
+ ProgramStateRef FailureSt =
+ Constraint->negate()->apply(NewState, Call, Summary, C);
+ // The argument constraint is not satisfied.
+ if (FailureSt && !SuccessSt) {
+ if (ExplodedNode *N = C.generateErrorNode(NewState))
+ reportBug(Call, N, C);
+ break;
+ } else {
+ // We will apply the constraint even if we cannot reason about the
+ // argument. This means both SuccessSt and FailureSt can be true. If we
+ // weren't applying the constraint that would mean that symbolic
+ // execution continues on a code whose behaviour is undefined.
+ assert(SuccessSt);
+ NewState = SuccessSt;
+ }
+ }
+ if (NewState && NewState != State)
+ C.addTransition(NewState);
+}
- Optional<FunctionSummaryTy> FoundSummary = findFunctionSummary(FD, CE, C);
+void StdLibraryFunctionsChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ Optional<Summary> FoundSummary = findFunctionSummary(Call, C);
if (!FoundSummary)
return;
- // Now apply ranges.
- const FunctionSummaryTy &Summary = *FoundSummary;
+ // Now apply the constraints.
+ const Summary &Summary = *FoundSummary;
ProgramStateRef State = C.getState();
- for (const auto &VRS: Summary.Ranges) {
+ // Apply case/branch specifications.
+ for (const ConstraintSet &Case : Summary.getCaseConstraints()) {
ProgramStateRef NewState = State;
- for (const auto &VR: VRS) {
- NewState = VR.apply(NewState, Call, Summary);
+ for (const ValueConstraintPtr &Constraint : Case) {
+ NewState = Constraint->apply(NewState, Call, Summary, C);
if (!NewState)
break;
}
@@ -369,23 +656,16 @@ void StdLibraryFunctionsChecker::checkPostCall(const CallEvent &Call,
bool StdLibraryFunctionsChecker::evalCall(const CallEvent &Call,
CheckerContext &C) const {
- const auto *FD = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
- if (!FD)
- return false;
-
- const auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
- if (!CE)
- return false;
-
- Optional<FunctionSummaryTy> FoundSummary = findFunctionSummary(FD, CE, C);
+ Optional<Summary> FoundSummary = findFunctionSummary(Call, C);
if (!FoundSummary)
return false;
- const FunctionSummaryTy &Summary = *FoundSummary;
- switch (Summary.InvalidationKind) {
+ const Summary &Summary = *FoundSummary;
+ switch (Summary.getInvalidationKd()) {
case EvalCallAsPure: {
ProgramStateRef State = C.getState();
const LocationContext *LC = C.getLocationContext();
+ const auto *CE = cast_or_null<CallExpr>(Call.getOriginExpr());
SVal V = C.getSValBuilder().conjureSymbolVal(
CE, LC, CE->getType().getCanonicalType(), C.blockCount());
State = State->BindExpr(CE, LC, V);
@@ -400,79 +680,86 @@ bool StdLibraryFunctionsChecker::evalCall(const CallEvent &Call,
llvm_unreachable("Unknown invalidation kind!");
}
-bool StdLibraryFunctionsChecker::FunctionSummaryTy::matchesCall(
- const CallExpr *CE) const {
+bool StdLibraryFunctionsChecker::Signature::matches(
+ const FunctionDecl *FD) const {
// Check number of arguments:
- if (CE->getNumArgs() != ArgTypes.size())
+ if (FD->param_size() != ArgTys.size())
return false;
- // Check return type if relevant:
- if (!RetType.isNull() && RetType != CE->getType().getCanonicalType())
- return false;
+ // Check return type.
+ if (!isIrrelevant(RetTy))
+ if (RetTy != FD->getReturnType().getCanonicalType())
+ return false;
- // Check argument types when relevant:
- for (size_t I = 0, E = ArgTypes.size(); I != E; ++I) {
- QualType FormalT = ArgTypes[I];
- // Null type marks irrelevant arguments.
- if (FormalT.isNull())
+ // Check argument types.
+ for (size_t I = 0, E = ArgTys.size(); I != E; ++I) {
+ QualType ArgTy = ArgTys[I];
+ if (isIrrelevant(ArgTy))
continue;
-
- assertTypeSuitableForSummary(FormalT);
-
- QualType ActualT = StdLibraryFunctionsChecker::getArgType(CE, I);
- assert(ActualT.isCanonical());
- if (ActualT != FormalT)
+ if (ArgTy != FD->getParamDecl(I)->getType().getCanonicalType())
return false;
}
return true;
}
-Optional<StdLibraryFunctionsChecker::FunctionSummaryTy>
+Optional<StdLibraryFunctionsChecker::Summary>
StdLibraryFunctionsChecker::findFunctionSummary(const FunctionDecl *FD,
- const CallExpr *CE,
CheckerContext &C) const {
- // Note: we cannot always obtain FD from CE
- // (eg. virtual call, or call by pointer).
- assert(CE);
-
if (!FD)
return None;
- SValBuilder &SVB = C.getSValBuilder();
- BasicValueFactory &BVF = SVB.getBasicValueFactory();
- initFunctionSummaries(BVF);
+ initFunctionSummaries(C);
- IdentifierInfo *II = FD->getIdentifier();
- if (!II)
- return None;
- StringRef Name = II->getName();
- if (Name.empty() || !C.isCLibraryFunction(FD, Name))
+ auto FSMI = FunctionSummaryMap.find(FD->getCanonicalDecl());
+ if (FSMI == FunctionSummaryMap.end())
return None;
+ return FSMI->second;
+}
- auto FSMI = FunctionSummaryMap.find(Name);
- if (FSMI == FunctionSummaryMap.end())
+Optional<StdLibraryFunctionsChecker::Summary>
+StdLibraryFunctionsChecker::findFunctionSummary(const CallEvent &Call,
+ CheckerContext &C) const {
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
+ if (!FD)
return None;
+ return findFunctionSummary(FD, C);
+}
- // Verify that function signature matches the spec in advance.
- // Otherwise we might be modeling the wrong function.
- // Strict checking is important because we will be conducting
- // very integral-type-sensitive operations on arguments and
- // return values.
- const FunctionVariantsTy &SpecVariants = FSMI->second;
- for (const FunctionSummaryTy &Spec : SpecVariants)
- if (Spec.matchesCall(CE))
- return Spec;
+static llvm::Optional<QualType> lookupType(StringRef Name,
+ const ASTContext &ACtx) {
+ IdentifierInfo &II = ACtx.Idents.get(Name);
+ auto LookupRes = ACtx.getTranslationUnitDecl()->lookup(&II);
+ if (LookupRes.size() == 0)
+ return None;
+ // Prioritze typedef declarations.
+ // This is needed in case of C struct typedefs. E.g.:
+ // typedef struct FILE FILE;
+ // In this case, we have a RecordDecl 'struct FILE' with the name 'FILE' and
+ // we have a TypedefDecl with the name 'FILE'.
+ for (Decl *D : LookupRes)
+ if (auto *TD = dyn_cast<TypedefNameDecl>(D))
+ return ACtx.getTypeDeclType(TD).getCanonicalType();
+
+ // Find the first TypeDecl.
+ // There maybe cases when a function has the same name as a struct.
+ // E.g. in POSIX: `struct stat` and the function `stat()`:
+ // int stat(const char *restrict path, struct stat *restrict buf);
+ for (Decl *D : LookupRes)
+ if (auto *TD = dyn_cast<TypeDecl>(D))
+ return ACtx.getTypeDeclType(TD).getCanonicalType();
return None;
}
void StdLibraryFunctionsChecker::initFunctionSummaries(
- BasicValueFactory &BVF) const {
+ CheckerContext &C) const {
if (!FunctionSummaryMap.empty())
return;
- ASTContext &ACtx = BVF.getContext();
+ SValBuilder &SVB = C.getSValBuilder();
+ BasicValueFactory &BVF = SVB.getBasicValueFactory();
+ const ASTContext &ACtx = BVF.getContext();
// These types are useful for writing specifications quickly,
// New specifications should probably introduce more types.
@@ -481,15 +768,105 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
// of function summary for common cases (eg. ssize_t could be int or long
// or long long, so three summary variants would be enough).
// Of course, function variants are also useful for C++ overloads.
- QualType Irrelevant; // A placeholder, whenever we do not care about the type.
- QualType IntTy = ACtx.IntTy;
- QualType LongTy = ACtx.LongTy;
- QualType LongLongTy = ACtx.LongLongTy;
- QualType SizeTy = ACtx.getSizeType();
+ const QualType VoidTy = ACtx.VoidTy;
+ const QualType IntTy = ACtx.IntTy;
+ const QualType UnsignedIntTy = ACtx.UnsignedIntTy;
+ const QualType LongTy = ACtx.LongTy;
+ const QualType LongLongTy = ACtx.LongLongTy;
+ const QualType SizeTy = ACtx.getSizeType();
+
+ const QualType VoidPtrTy = ACtx.VoidPtrTy; // void *
+ const QualType IntPtrTy = ACtx.getPointerType(IntTy); // int *
+ const QualType UnsignedIntPtrTy =
+ ACtx.getPointerType(UnsignedIntTy); // unsigned int *
+ const QualType VoidPtrRestrictTy =
+ ACtx.getLangOpts().C99 ? ACtx.getRestrictType(VoidPtrTy) // void *restrict
+ : VoidPtrTy;
+ const QualType ConstVoidPtrTy =
+ ACtx.getPointerType(ACtx.VoidTy.withConst()); // const void *
+ const QualType CharPtrTy = ACtx.getPointerType(ACtx.CharTy); // char *
+ const QualType CharPtrRestrictTy =
+ ACtx.getLangOpts().C99 ? ACtx.getRestrictType(CharPtrTy) // char *restrict
+ : CharPtrTy;
+ const QualType ConstCharPtrTy =
+ ACtx.getPointerType(ACtx.CharTy.withConst()); // const char *
+ const QualType ConstCharPtrRestrictTy =
+ ACtx.getLangOpts().C99
+ ? ACtx.getRestrictType(ConstCharPtrTy) // const char *restrict
+ : ConstCharPtrTy;
+ const QualType Wchar_tPtrTy = ACtx.getPointerType(ACtx.WCharTy); // wchar_t *
+ const QualType ConstWchar_tPtrTy =
+ ACtx.getPointerType(ACtx.WCharTy.withConst()); // const wchar_t *
+ const QualType ConstVoidPtrRestrictTy =
+ ACtx.getLangOpts().C99
+ ? ACtx.getRestrictType(ConstVoidPtrTy) // const void *restrict
+ : ConstVoidPtrTy;
+
+ const RangeInt IntMax = BVF.getMaxValue(IntTy).getLimitedValue();
+ const RangeInt UnsignedIntMax =
+ BVF.getMaxValue(UnsignedIntTy).getLimitedValue();
+ const RangeInt LongMax = BVF.getMaxValue(LongTy).getLimitedValue();
+ const RangeInt LongLongMax = BVF.getMaxValue(LongLongTy).getLimitedValue();
+ const RangeInt SizeMax = BVF.getMaxValue(SizeTy).getLimitedValue();
+
+ // Set UCharRangeMax to min of int or uchar maximum value.
+ // The C standard states that the arguments of functions like isalpha must
+ // be representable as an unsigned char. Their type is 'int', so the max
+ // value of the argument should be min(UCharMax, IntMax). This just happen
+ // to be true for commonly used and well tested instruction set
+ // architectures, but not for others.
+ const RangeInt UCharRangeMax =
+ std::min(BVF.getMaxValue(ACtx.UnsignedCharTy).getLimitedValue(), IntMax);
+
+ // The platform dependent value of EOF.
+ // Try our best to parse this from the Preprocessor, otherwise fallback to -1.
+ const auto EOFv = [&C]() -> RangeInt {
+ if (const llvm::Optional<int> OptInt =
+ tryExpandAsInteger("EOF", C.getPreprocessor()))
+ return *OptInt;
+ return -1;
+ }();
+
+ // Auxiliary class to aid adding summaries to the summary map.
+ struct AddToFunctionSummaryMap {
+ const ASTContext &ACtx;
+ FunctionSummaryMapType &Map;
+ bool DisplayLoadedSummaries;
+ AddToFunctionSummaryMap(const ASTContext &ACtx, FunctionSummaryMapType &FSM,
+ bool DisplayLoadedSummaries)
+ : ACtx(ACtx), Map(FSM), DisplayLoadedSummaries(DisplayLoadedSummaries) {
+ }
- RangeIntTy IntMax = BVF.getMaxValue(IntTy).getLimitedValue();
- RangeIntTy LongMax = BVF.getMaxValue(LongTy).getLimitedValue();
- RangeIntTy LongLongMax = BVF.getMaxValue(LongLongTy).getLimitedValue();
+ // Add a summary to a FunctionDecl found by lookup. The lookup is performed
+ // by the given Name, and in the global scope. The summary will be attached
+ // to the found FunctionDecl only if the signatures match.
+ void operator()(StringRef Name, Summary S) {
+ IdentifierInfo &II = ACtx.Idents.get(Name);
+ auto LookupRes = ACtx.getTranslationUnitDecl()->lookup(&II);
+ if (LookupRes.size() == 0)
+ return;
+ for (Decl *D : LookupRes) {
+ if (auto *FD = dyn_cast<FunctionDecl>(D)) {
+ if (S.matchesAndSet(FD)) {
+ auto Res = Map.insert({FD->getCanonicalDecl(), S});
+ assert(Res.second && "Function already has a summary set!");
+ (void)Res;
+ if (DisplayLoadedSummaries) {
+ llvm::errs() << "Loaded summary for: ";
+ FD->print(llvm::errs());
+ llvm::errs() << "\n";
+ }
+ return;
+ }
+ }
+ }
+ }
+ // Add several summaries for the given name.
+ void operator()(StringRef Name, const std::vector<Summary> &Summaries) {
+ for (const Summary &S : Summaries)
+ operator()(Name, S);
+ }
+ } addToFunctionSummaryMap(ACtx, FunctionSummaryMap, DisplayLoadedSummaries);
// We are finally ready to define specifications for all supported functions.
//
@@ -516,550 +893,876 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
// return value, however the correct range is [-1, 10].
//
// Please update the list of functions in the header after editing!
- //
- // The format is as follows:
- //
- //{ "function name",
- // { spec:
- // { argument types list, ... },
- // return type, purity, { range set list:
- // { range list:
- // { argument index, within or out of, {{from, to}, ...} },
- // { argument index, compares to argument, {{how, which}} },
- // ...
- // }
- // }
- // }
- //}
-
-#define SUMMARY_WITH_VARIANTS(identifier) {#identifier, {
-#define END_SUMMARY_WITH_VARIANTS }},
-#define VARIANT(argument_types, return_type, invalidation_approach) \
- { argument_types, return_type, invalidation_approach, {
-#define END_VARIANT } },
-#define SUMMARY(identifier, argument_types, return_type, \
- invalidation_approach) \
- { #identifier, { { argument_types, return_type, invalidation_approach, {
-#define END_SUMMARY } } } },
-#define ARGUMENT_TYPES(...) { __VA_ARGS__ }
-#define RETURN_TYPE(x) x
-#define INVALIDATION_APPROACH(x) x
-#define CASE {
-#define END_CASE },
-#define ARGUMENT_CONDITION(argument_number, condition_kind) \
- { argument_number, condition_kind, {
-#define END_ARGUMENT_CONDITION }},
-#define RETURN_VALUE_CONDITION(condition_kind) \
- { Ret, condition_kind, {
-#define END_RETURN_VALUE_CONDITION }},
-#define ARG_NO(x) x##U
-#define RANGE(x, y) { x, y },
-#define SINGLE_VALUE(x) RANGE(x, x)
-#define IS_LESS_THAN(arg) { BO_LE, arg }
-
- FunctionSummaryMap = {
- // The isascii() family of functions.
- SUMMARY(isalnum, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE // Boils down to isupper() or islower() or isdigit()
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE('0', '9')
- RANGE('A', 'Z')
- RANGE('a', 'z')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE // The locale-specific range.
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE(128, 255)
- END_ARGUMENT_CONDITION
- // No post-condition. We are completely unaware of
- // locale-specific return values.
- END_CASE
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE('0', '9')
- RANGE('A', 'Z')
- RANGE('a', 'z')
- RANGE(128, 255)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(isalpha, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE // isupper() or islower(). Note that 'Z' is less than 'a'.
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE('A', 'Z')
- RANGE('a', 'z')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE // The locale-specific range.
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE(128, 255)
- END_ARGUMENT_CONDITION
- END_CASE
- CASE // Other.
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE('A', 'Z')
- RANGE('a', 'z')
- RANGE(128, 255)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(isascii, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE // Is ASCII.
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE(0, 127)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE(0, 127)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(isblank, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- SINGLE_VALUE('\t')
- SINGLE_VALUE(' ')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- SINGLE_VALUE('\t')
- SINGLE_VALUE(' ')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(iscntrl, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE // 0..31 or 127
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE(0, 32)
- SINGLE_VALUE(127)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE(0, 32)
- SINGLE_VALUE(127)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(isdigit, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE // Is a digit.
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE('0', '9')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE('0', '9')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(isgraph, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE(33, 126)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE(33, 126)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(islower, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE // Is certainly lowercase.
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE('a', 'z')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE // Is ascii but not lowercase.
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE(0, 127)
- END_ARGUMENT_CONDITION
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE('a', 'z')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE // The locale-specific range.
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE(128, 255)
- END_ARGUMENT_CONDITION
- END_CASE
- CASE // Is not an unsigned char.
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE(0, 255)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(isprint, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE(32, 126)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE(32, 126)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(ispunct, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE('!', '/')
- RANGE(':', '@')
- RANGE('[', '`')
- RANGE('{', '~')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE('!', '/')
- RANGE(':', '@')
- RANGE('[', '`')
- RANGE('{', '~')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(isspace, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE // Space, '\f', '\n', '\r', '\t', '\v'.
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE(9, 13)
- SINGLE_VALUE(' ')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE // The locale-specific range.
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE(128, 255)
- END_ARGUMENT_CONDITION
- END_CASE
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE(9, 13)
- SINGLE_VALUE(' ')
- RANGE(128, 255)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(isupper, ARGUMENT_TYPES(IntTy), RETURN_TYPE (IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE // Is certainly uppercase.
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE('A', 'Z')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE // The locale-specific range.
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE(128, 255)
- END_ARGUMENT_CONDITION
- END_CASE
- CASE // Other.
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE('A', 'Z') RANGE(128, 255)
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(isxdigit, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(EvalCallAsPure))
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
- RANGE('0', '9')
- RANGE('A', 'F')
- RANGE('a', 'f')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(OutOfRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- CASE
- ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
- RANGE('0', '9')
- RANGE('A', 'F')
- RANGE('a', 'f')
- END_ARGUMENT_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(0)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
-
- // The getc() family of functions that returns either a char or an EOF.
- SUMMARY(getc, ARGUMENT_TYPES(Irrelevant), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(NoEvalCall))
- CASE // FIXME: EOF is assumed to be defined as -1.
- RETURN_VALUE_CONDITION(WithinRange)
- RANGE(-1, 255)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(fgetc, ARGUMENT_TYPES(Irrelevant), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(NoEvalCall))
- CASE // FIXME: EOF is assumed to be defined as -1.
- RETURN_VALUE_CONDITION(WithinRange)
- RANGE(-1, 255)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(getchar, ARGUMENT_TYPES(), RETURN_TYPE(IntTy),
- INVALIDATION_APPROACH(NoEvalCall))
- CASE // FIXME: EOF is assumed to be defined as -1.
- RETURN_VALUE_CONDITION(WithinRange)
- RANGE(-1, 255)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
-
- // read()-like functions that never return more than buffer size.
- // We are not sure how ssize_t is defined on every platform, so we provide
- // three variants that should cover common cases.
- SUMMARY_WITH_VARIANTS(read)
- VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, SizeTy),
- RETURN_TYPE(IntTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(ComparesToArgument)
- IS_LESS_THAN(ARG_NO(2))
- END_RETURN_VALUE_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- RANGE(-1, IntMax)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_VARIANT
- VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, SizeTy),
- RETURN_TYPE(LongTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(ComparesToArgument)
- IS_LESS_THAN(ARG_NO(2))
- END_RETURN_VALUE_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- RANGE(-1, LongMax)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_VARIANT
- VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, SizeTy),
- RETURN_TYPE(LongLongTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(ComparesToArgument)
- IS_LESS_THAN(ARG_NO(2))
- END_RETURN_VALUE_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- RANGE(-1, LongLongMax)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_VARIANT
- END_SUMMARY_WITH_VARIANTS
- SUMMARY_WITH_VARIANTS(write)
- // Again, due to elusive nature of ssize_t, we have duplicate
- // our summaries to cover different variants.
- VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, SizeTy),
- RETURN_TYPE(IntTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(ComparesToArgument)
- IS_LESS_THAN(ARG_NO(2))
- END_RETURN_VALUE_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- RANGE(-1, IntMax)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_VARIANT
- VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, SizeTy),
- RETURN_TYPE(LongTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(ComparesToArgument)
- IS_LESS_THAN(ARG_NO(2))
- END_RETURN_VALUE_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- RANGE(-1, LongMax)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_VARIANT
- VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, SizeTy),
- RETURN_TYPE(LongLongTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(ComparesToArgument)
- IS_LESS_THAN(ARG_NO(2))
- END_RETURN_VALUE_CONDITION
- RETURN_VALUE_CONDITION(WithinRange)
- RANGE(-1, LongLongMax)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_VARIANT
- END_SUMMARY_WITH_VARIANTS
- SUMMARY(fread,
- ARGUMENT_TYPES(Irrelevant, Irrelevant, SizeTy, Irrelevant),
- RETURN_TYPE(SizeTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(ComparesToArgument)
- IS_LESS_THAN(ARG_NO(2))
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
- SUMMARY(fwrite,
- ARGUMENT_TYPES(Irrelevant, Irrelevant, SizeTy, Irrelevant),
- RETURN_TYPE(SizeTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(ComparesToArgument)
- IS_LESS_THAN(ARG_NO(2))
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_SUMMARY
-
- // getline()-like functions either fail or read at least the delimiter.
- SUMMARY_WITH_VARIANTS(getline)
- VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, Irrelevant),
- RETURN_TYPE(IntTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(-1)
- RANGE(1, IntMax)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_VARIANT
- VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, Irrelevant),
- RETURN_TYPE(LongTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(-1)
- RANGE(1, LongMax)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_VARIANT
- VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, Irrelevant),
- RETURN_TYPE(LongLongTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(-1)
- RANGE(1, LongLongMax)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_VARIANT
- END_SUMMARY_WITH_VARIANTS
- SUMMARY_WITH_VARIANTS(getdelim)
- VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, Irrelevant, Irrelevant),
- RETURN_TYPE(IntTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(-1)
- RANGE(1, IntMax)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_VARIANT
- VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, Irrelevant, Irrelevant),
- RETURN_TYPE(LongTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(-1)
- RANGE(1, LongMax)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_VARIANT
- VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, Irrelevant, Irrelevant),
- RETURN_TYPE(LongLongTy), INVALIDATION_APPROACH(NoEvalCall))
- CASE
- RETURN_VALUE_CONDITION(WithinRange)
- SINGLE_VALUE(-1)
- RANGE(1, LongLongMax)
- END_RETURN_VALUE_CONDITION
- END_CASE
- END_VARIANT
- END_SUMMARY_WITH_VARIANTS
+
+ // Below are helpers functions to create the summaries.
+ auto ArgumentCondition = [](ArgNo ArgN, RangeKind Kind,
+ IntRangeVector Ranges) {
+ return std::make_shared<RangeConstraint>(ArgN, Kind, Ranges);
+ };
+ auto BufferSize = [](auto... Args) {
+ return std::make_shared<BufferSizeConstraint>(Args...);
+ };
+ struct {
+ auto operator()(RangeKind Kind, IntRangeVector Ranges) {
+ return std::make_shared<RangeConstraint>(Ret, Kind, Ranges);
+ }
+ auto operator()(BinaryOperator::Opcode Op, ArgNo OtherArgN) {
+ return std::make_shared<ComparisonConstraint>(Ret, Op, OtherArgN);
+ }
+ } ReturnValueCondition;
+ auto Range = [](RangeInt b, RangeInt e) {
+ return IntRangeVector{std::pair<RangeInt, RangeInt>{b, e}};
+ };
+ auto SingleValue = [](RangeInt v) {
+ return IntRangeVector{std::pair<RangeInt, RangeInt>{v, v}};
};
+ auto LessThanOrEq = BO_LE;
+ auto NotNull = [&](ArgNo ArgN) {
+ return std::make_shared<NotNullConstraint>(ArgN);
+ };
+
+ Optional<QualType> FileTy = lookupType("FILE", ACtx);
+ Optional<QualType> FilePtrTy, FilePtrRestrictTy;
+ if (FileTy) {
+ // FILE *
+ FilePtrTy = ACtx.getPointerType(*FileTy);
+ // FILE *restrict
+ FilePtrRestrictTy =
+ ACtx.getLangOpts().C99 ? ACtx.getRestrictType(*FilePtrTy) : *FilePtrTy;
+ }
+
+ using RetType = QualType;
+ // Templates for summaries that are reused by many functions.
+ auto Getc = [&]() {
+ return Summary(ArgTypes{*FilePtrTy}, RetType{IntTy}, NoEvalCall)
+ .Case({ReturnValueCondition(WithinRange,
+ {{EOFv, EOFv}, {0, UCharRangeMax}})});
+ };
+ auto Read = [&](RetType R, RangeInt Max) {
+ return Summary(ArgTypes{Irrelevant, Irrelevant, SizeTy}, RetType{R},
+ NoEvalCall)
+ .Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
+ ReturnValueCondition(WithinRange, Range(-1, Max))});
+ };
+ auto Fread = [&]() {
+ return Summary(
+ ArgTypes{VoidPtrRestrictTy, SizeTy, SizeTy, *FilePtrRestrictTy},
+ RetType{SizeTy}, NoEvalCall)
+ .Case({
+ ReturnValueCondition(LessThanOrEq, ArgNo(2)),
+ })
+ .ArgConstraint(NotNull(ArgNo(0)));
+ };
+ auto Fwrite = [&]() {
+ return Summary(ArgTypes{ConstVoidPtrRestrictTy, SizeTy, SizeTy,
+ *FilePtrRestrictTy},
+ RetType{SizeTy}, NoEvalCall)
+ .Case({
+ ReturnValueCondition(LessThanOrEq, ArgNo(2)),
+ })
+ .ArgConstraint(NotNull(ArgNo(0)));
+ };
+ auto Getline = [&](RetType R, RangeInt Max) {
+ return Summary(ArgTypes{Irrelevant, Irrelevant, Irrelevant}, RetType{R},
+ NoEvalCall)
+ .Case({ReturnValueCondition(WithinRange, {{-1, -1}, {1, Max}})});
+ };
+
+ // The isascii() family of functions.
+ // The behavior is undefined if the value of the argument is not
+ // representable as unsigned char or is not equal to EOF. See e.g. C99
+ // 7.4.1.2 The isalpha function (p: 181-182).
+ addToFunctionSummaryMap(
+ "isalnum",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ // Boils down to isupper() or islower() or isdigit().
+ .Case({ArgumentCondition(0U, WithinRange,
+ {{'0', '9'}, {'A', 'Z'}, {'a', 'z'}}),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ // The locale-specific range.
+ // No post-condition. We are completely unaware of
+ // locale-specific return values.
+ .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})})
+ .Case(
+ {ArgumentCondition(
+ 0U, OutOfRange,
+ {{'0', '9'}, {'A', 'Z'}, {'a', 'z'}, {128, UCharRangeMax}}),
+ ReturnValueCondition(WithinRange, SingleValue(0))})
+ .ArgConstraint(ArgumentCondition(
+ 0U, WithinRange, {{EOFv, EOFv}, {0, UCharRangeMax}})));
+ addToFunctionSummaryMap(
+ "isalpha",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ .Case({ArgumentCondition(0U, WithinRange, {{'A', 'Z'}, {'a', 'z'}}),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ // The locale-specific range.
+ .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})})
+ .Case({ArgumentCondition(
+ 0U, OutOfRange,
+ {{'A', 'Z'}, {'a', 'z'}, {128, UCharRangeMax}}),
+ ReturnValueCondition(WithinRange, SingleValue(0))}));
+ addToFunctionSummaryMap(
+ "isascii",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ .Case({ArgumentCondition(0U, WithinRange, Range(0, 127)),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ .Case({ArgumentCondition(0U, OutOfRange, Range(0, 127)),
+ ReturnValueCondition(WithinRange, SingleValue(0))}));
+ addToFunctionSummaryMap(
+ "isblank",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ .Case({ArgumentCondition(0U, WithinRange, {{'\t', '\t'}, {' ', ' '}}),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ .Case({ArgumentCondition(0U, OutOfRange, {{'\t', '\t'}, {' ', ' '}}),
+ ReturnValueCondition(WithinRange, SingleValue(0))}));
+ addToFunctionSummaryMap(
+ "iscntrl",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ .Case({ArgumentCondition(0U, WithinRange, {{0, 32}, {127, 127}}),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ .Case({ArgumentCondition(0U, OutOfRange, {{0, 32}, {127, 127}}),
+ ReturnValueCondition(WithinRange, SingleValue(0))}));
+ addToFunctionSummaryMap(
+ "isdigit",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ .Case({ArgumentCondition(0U, WithinRange, Range('0', '9')),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ .Case({ArgumentCondition(0U, OutOfRange, Range('0', '9')),
+ ReturnValueCondition(WithinRange, SingleValue(0))}));
+ addToFunctionSummaryMap(
+ "isgraph",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ .Case({ArgumentCondition(0U, WithinRange, Range(33, 126)),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ .Case({ArgumentCondition(0U, OutOfRange, Range(33, 126)),
+ ReturnValueCondition(WithinRange, SingleValue(0))}));
+ addToFunctionSummaryMap(
+ "islower",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ // Is certainly lowercase.
+ .Case({ArgumentCondition(0U, WithinRange, Range('a', 'z')),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ // Is ascii but not lowercase.
+ .Case({ArgumentCondition(0U, WithinRange, Range(0, 127)),
+ ArgumentCondition(0U, OutOfRange, Range('a', 'z')),
+ ReturnValueCondition(WithinRange, SingleValue(0))})
+ // The locale-specific range.
+ .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})})
+ // Is not an unsigned char.
+ .Case({ArgumentCondition(0U, OutOfRange, Range(0, UCharRangeMax)),
+ ReturnValueCondition(WithinRange, SingleValue(0))}));
+ addToFunctionSummaryMap(
+ "isprint",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ .Case({ArgumentCondition(0U, WithinRange, Range(32, 126)),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ .Case({ArgumentCondition(0U, OutOfRange, Range(32, 126)),
+ ReturnValueCondition(WithinRange, SingleValue(0))}));
+ addToFunctionSummaryMap(
+ "ispunct",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ .Case({ArgumentCondition(
+ 0U, WithinRange,
+ {{'!', '/'}, {':', '@'}, {'[', '`'}, {'{', '~'}}),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ .Case({ArgumentCondition(
+ 0U, OutOfRange,
+ {{'!', '/'}, {':', '@'}, {'[', '`'}, {'{', '~'}}),
+ ReturnValueCondition(WithinRange, SingleValue(0))}));
+ addToFunctionSummaryMap(
+ "isspace",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ // Space, '\f', '\n', '\r', '\t', '\v'.
+ .Case({ArgumentCondition(0U, WithinRange, {{9, 13}, {' ', ' '}}),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ // The locale-specific range.
+ .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})})
+ .Case({ArgumentCondition(0U, OutOfRange,
+ {{9, 13}, {' ', ' '}, {128, UCharRangeMax}}),
+ ReturnValueCondition(WithinRange, SingleValue(0))}));
+ addToFunctionSummaryMap(
+ "isupper",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ // Is certainly uppercase.
+ .Case({ArgumentCondition(0U, WithinRange, Range('A', 'Z')),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ // The locale-specific range.
+ .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})})
+ // Other.
+ .Case({ArgumentCondition(0U, OutOfRange,
+ {{'A', 'Z'}, {128, UCharRangeMax}}),
+ ReturnValueCondition(WithinRange, SingleValue(0))}));
+ addToFunctionSummaryMap(
+ "isxdigit",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ .Case({ArgumentCondition(0U, WithinRange,
+ {{'0', '9'}, {'A', 'F'}, {'a', 'f'}}),
+ ReturnValueCondition(OutOfRange, SingleValue(0))})
+ .Case({ArgumentCondition(0U, OutOfRange,
+ {{'0', '9'}, {'A', 'F'}, {'a', 'f'}}),
+ ReturnValueCondition(WithinRange, SingleValue(0))}));
+
+ // The getc() family of functions that returns either a char or an EOF.
+ if (FilePtrTy) {
+ addToFunctionSummaryMap("getc", Getc());
+ addToFunctionSummaryMap("fgetc", Getc());
+ }
+ addToFunctionSummaryMap(
+ "getchar", Summary(ArgTypes{}, RetType{IntTy}, NoEvalCall)
+ .Case({ReturnValueCondition(
+ WithinRange, {{EOFv, EOFv}, {0, UCharRangeMax}})}));
+
+ // read()-like functions that never return more than buffer size.
+ if (FilePtrRestrictTy) {
+ addToFunctionSummaryMap("fread", Fread());
+ addToFunctionSummaryMap("fwrite", Fwrite());
+ }
+
+ // We are not sure how ssize_t is defined on every platform, so we
+ // provide three variants that should cover common cases.
+ // FIXME these are actually defined by POSIX and not by the C standard, we
+ // should handle them together with the rest of the POSIX functions.
+ addToFunctionSummaryMap("read", {Read(IntTy, IntMax), Read(LongTy, LongMax),
+ Read(LongLongTy, LongLongMax)});
+ addToFunctionSummaryMap("write", {Read(IntTy, IntMax), Read(LongTy, LongMax),
+ Read(LongLongTy, LongLongMax)});
+
+ // getline()-like functions either fail or read at least the delimiter.
+ // FIXME these are actually defined by POSIX and not by the C standard, we
+ // should handle them together with the rest of the POSIX functions.
+ addToFunctionSummaryMap("getline",
+ {Getline(IntTy, IntMax), Getline(LongTy, LongMax),
+ Getline(LongLongTy, LongLongMax)});
+ addToFunctionSummaryMap("getdelim",
+ {Getline(IntTy, IntMax), Getline(LongTy, LongMax),
+ Getline(LongLongTy, LongLongMax)});
+
+ if (ModelPOSIX) {
+
+ // long a64l(const char *str64);
+ addToFunctionSummaryMap(
+ "a64l", Summary(ArgTypes{ConstCharPtrTy}, RetType{LongTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // char *l64a(long value);
+ addToFunctionSummaryMap(
+ "l64a", Summary(ArgTypes{LongTy}, RetType{CharPtrTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, LongMax))));
+
+ // int access(const char *pathname, int amode);
+ addToFunctionSummaryMap("access", Summary(ArgTypes{ConstCharPtrTy, IntTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int faccessat(int dirfd, const char *pathname, int mode, int flags);
+ addToFunctionSummaryMap(
+ "faccessat", Summary(ArgTypes{IntTy, ConstCharPtrTy, IntTy, IntTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // int dup(int fildes);
+ addToFunctionSummaryMap(
+ "dup", Summary(ArgTypes{IntTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+
+ // int dup2(int fildes1, int filedes2);
+ addToFunctionSummaryMap(
+ "dup2",
+ Summary(ArgTypes{IntTy, IntTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(
+ ArgumentCondition(1, WithinRange, Range(0, IntMax))));
+
+ // int fdatasync(int fildes);
+ addToFunctionSummaryMap(
+ "fdatasync", Summary(ArgTypes{IntTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(ArgumentCondition(0, WithinRange,
+ Range(0, IntMax))));
+
+ // int fnmatch(const char *pattern, const char *string, int flags);
+ addToFunctionSummaryMap(
+ "fnmatch", Summary(ArgTypes{ConstCharPtrTy, ConstCharPtrTy, IntTy},
+ RetType{IntTy}, EvalCallAsPure)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // int fsync(int fildes);
+ addToFunctionSummaryMap(
+ "fsync", Summary(ArgTypes{IntTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+
+ Optional<QualType> Off_tTy = lookupType("off_t", ACtx);
+
+ if (Off_tTy)
+ // int truncate(const char *path, off_t length);
+ addToFunctionSummaryMap("truncate",
+ Summary(ArgTypes{ConstCharPtrTy, *Off_tTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int symlink(const char *oldpath, const char *newpath);
+ addToFunctionSummaryMap("symlink",
+ Summary(ArgTypes{ConstCharPtrTy, ConstCharPtrTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // int symlinkat(const char *oldpath, int newdirfd, const char *newpath);
+ addToFunctionSummaryMap(
+ "symlinkat",
+ Summary(ArgTypes{ConstCharPtrTy, IntTy, ConstCharPtrTy}, RetType{IntTy},
+ NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(ArgumentCondition(1, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(2))));
+
+ if (Off_tTy)
+ // int lockf(int fd, int cmd, off_t len);
+ addToFunctionSummaryMap(
+ "lockf",
+ Summary(ArgTypes{IntTy, IntTy, *Off_tTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+
+ Optional<QualType> Mode_tTy = lookupType("mode_t", ACtx);
+
+ if (Mode_tTy)
+ // int creat(const char *pathname, mode_t mode);
+ addToFunctionSummaryMap("creat",
+ Summary(ArgTypes{ConstCharPtrTy, *Mode_tTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // unsigned int sleep(unsigned int seconds);
+ addToFunctionSummaryMap(
+ "sleep",
+ Summary(ArgTypes{UnsignedIntTy}, RetType{UnsignedIntTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, UnsignedIntMax))));
+
+ Optional<QualType> DirTy = lookupType("DIR", ACtx);
+ Optional<QualType> DirPtrTy;
+ if (DirTy)
+ DirPtrTy = ACtx.getPointerType(*DirTy);
+
+ if (DirPtrTy)
+ // int dirfd(DIR *dirp);
+ addToFunctionSummaryMap(
+ "dirfd", Summary(ArgTypes{*DirPtrTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // unsigned int alarm(unsigned int seconds);
+ addToFunctionSummaryMap(
+ "alarm",
+ Summary(ArgTypes{UnsignedIntTy}, RetType{UnsignedIntTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, UnsignedIntMax))));
+
+ if (DirPtrTy)
+ // int closedir(DIR *dir);
+ addToFunctionSummaryMap(
+ "closedir", Summary(ArgTypes{*DirPtrTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // char *strdup(const char *s);
+ addToFunctionSummaryMap("strdup", Summary(ArgTypes{ConstCharPtrTy},
+ RetType{CharPtrTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // char *strndup(const char *s, size_t n);
+ addToFunctionSummaryMap(
+ "strndup", Summary(ArgTypes{ConstCharPtrTy, SizeTy}, RetType{CharPtrTy},
+ NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(ArgumentCondition(1, WithinRange,
+ Range(0, SizeMax))));
+
+ // wchar_t *wcsdup(const wchar_t *s);
+ addToFunctionSummaryMap("wcsdup", Summary(ArgTypes{ConstWchar_tPtrTy},
+ RetType{Wchar_tPtrTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int mkstemp(char *template);
+ addToFunctionSummaryMap(
+ "mkstemp", Summary(ArgTypes{CharPtrTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // char *mkdtemp(char *template);
+ addToFunctionSummaryMap(
+ "mkdtemp", Summary(ArgTypes{CharPtrTy}, RetType{CharPtrTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // char *getcwd(char *buf, size_t size);
+ addToFunctionSummaryMap(
+ "getcwd",
+ Summary(ArgTypes{CharPtrTy, SizeTy}, RetType{CharPtrTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(1, WithinRange, Range(0, SizeMax))));
+
+ if (Mode_tTy) {
+ // int mkdir(const char *pathname, mode_t mode);
+ addToFunctionSummaryMap("mkdir",
+ Summary(ArgTypes{ConstCharPtrTy, *Mode_tTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int mkdirat(int dirfd, const char *pathname, mode_t mode);
+ addToFunctionSummaryMap(
+ "mkdirat", Summary(ArgTypes{IntTy, ConstCharPtrTy, *Mode_tTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(1))));
+ }
+
+ Optional<QualType> Dev_tTy = lookupType("dev_t", ACtx);
+
+ if (Mode_tTy && Dev_tTy) {
+ // int mknod(const char *pathname, mode_t mode, dev_t dev);
+ addToFunctionSummaryMap(
+ "mknod", Summary(ArgTypes{ConstCharPtrTy, *Mode_tTy, *Dev_tTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int mknodat(int dirfd, const char *pathname, mode_t mode, dev_t dev);
+ addToFunctionSummaryMap("mknodat", Summary(ArgTypes{IntTy, ConstCharPtrTy,
+ *Mode_tTy, *Dev_tTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(1))));
+ }
+
+ if (Mode_tTy) {
+ // int chmod(const char *path, mode_t mode);
+ addToFunctionSummaryMap("chmod",
+ Summary(ArgTypes{ConstCharPtrTy, *Mode_tTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int fchmodat(int dirfd, const char *pathname, mode_t mode, int flags);
+ addToFunctionSummaryMap(
+ "fchmodat", Summary(ArgTypes{IntTy, ConstCharPtrTy, *Mode_tTy, IntTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(ArgumentCondition(0, WithinRange,
+ Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // int fchmod(int fildes, mode_t mode);
+ addToFunctionSummaryMap(
+ "fchmod",
+ Summary(ArgTypes{IntTy, *Mode_tTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+ }
+
+ Optional<QualType> Uid_tTy = lookupType("uid_t", ACtx);
+ Optional<QualType> Gid_tTy = lookupType("gid_t", ACtx);
+
+ if (Uid_tTy && Gid_tTy) {
+ // int fchownat(int dirfd, const char *pathname, uid_t owner, gid_t group,
+ // int flags);
+ addToFunctionSummaryMap(
+ "fchownat",
+ Summary(ArgTypes{IntTy, ConstCharPtrTy, *Uid_tTy, *Gid_tTy, IntTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // int chown(const char *path, uid_t owner, gid_t group);
+ addToFunctionSummaryMap(
+ "chown", Summary(ArgTypes{ConstCharPtrTy, *Uid_tTy, *Gid_tTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int lchown(const char *path, uid_t owner, gid_t group);
+ addToFunctionSummaryMap(
+ "lchown", Summary(ArgTypes{ConstCharPtrTy, *Uid_tTy, *Gid_tTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int fchown(int fildes, uid_t owner, gid_t group);
+ addToFunctionSummaryMap(
+ "fchown", Summary(ArgTypes{IntTy, *Uid_tTy, *Gid_tTy}, RetType{IntTy},
+ NoEvalCall)
+ .ArgConstraint(ArgumentCondition(0, WithinRange,
+ Range(0, IntMax))));
+ }
+
+ // int rmdir(const char *pathname);
+ addToFunctionSummaryMap(
+ "rmdir", Summary(ArgTypes{ConstCharPtrTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int chdir(const char *path);
+ addToFunctionSummaryMap(
+ "chdir", Summary(ArgTypes{ConstCharPtrTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int link(const char *oldpath, const char *newpath);
+ addToFunctionSummaryMap("link",
+ Summary(ArgTypes{ConstCharPtrTy, ConstCharPtrTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // int linkat(int fd1, const char *path1, int fd2, const char *path2,
+ // int flag);
+ addToFunctionSummaryMap(
+ "linkat",
+ Summary(ArgTypes{IntTy, ConstCharPtrTy, IntTy, ConstCharPtrTy, IntTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(ArgumentCondition(2, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(3))));
+
+ // int unlink(const char *pathname);
+ addToFunctionSummaryMap(
+ "unlink", Summary(ArgTypes{ConstCharPtrTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int unlinkat(int fd, const char *path, int flag);
+ addToFunctionSummaryMap(
+ "unlinkat",
+ Summary(ArgTypes{IntTy, ConstCharPtrTy, IntTy}, RetType{IntTy},
+ NoEvalCall)
+ .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ Optional<QualType> StructStatTy = lookupType("stat", ACtx);
+ Optional<QualType> StructStatPtrTy, StructStatPtrRestrictTy;
+ if (StructStatTy) {
+ StructStatPtrTy = ACtx.getPointerType(*StructStatTy);
+ StructStatPtrRestrictTy = ACtx.getLangOpts().C99
+ ? ACtx.getRestrictType(*StructStatPtrTy)
+ : *StructStatPtrTy;
+ }
+
+ if (StructStatPtrTy)
+ // int fstat(int fd, struct stat *statbuf);
+ addToFunctionSummaryMap(
+ "fstat",
+ Summary(ArgTypes{IntTy, *StructStatPtrTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ if (StructStatPtrRestrictTy) {
+ // int stat(const char *restrict path, struct stat *restrict buf);
+ addToFunctionSummaryMap(
+ "stat",
+ Summary(ArgTypes{ConstCharPtrRestrictTy, *StructStatPtrRestrictTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // int lstat(const char *restrict path, struct stat *restrict buf);
+ addToFunctionSummaryMap(
+ "lstat",
+ Summary(ArgTypes{ConstCharPtrRestrictTy, *StructStatPtrRestrictTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // int fstatat(int fd, const char *restrict path,
+ // struct stat *restrict buf, int flag);
+ addToFunctionSummaryMap(
+ "fstatat", Summary(ArgTypes{IntTy, ConstCharPtrRestrictTy,
+ *StructStatPtrRestrictTy, IntTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(ArgumentCondition(0, WithinRange,
+ Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(NotNull(ArgNo(2))));
+ }
+
+ if (DirPtrTy) {
+ // DIR *opendir(const char *name);
+ addToFunctionSummaryMap("opendir", Summary(ArgTypes{ConstCharPtrTy},
+ RetType{*DirPtrTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // DIR *fdopendir(int fd);
+ addToFunctionSummaryMap(
+ "fdopendir", Summary(ArgTypes{IntTy}, RetType{*DirPtrTy}, NoEvalCall)
+ .ArgConstraint(ArgumentCondition(0, WithinRange,
+ Range(0, IntMax))));
+ }
+
+ // int isatty(int fildes);
+ addToFunctionSummaryMap(
+ "isatty", Summary(ArgTypes{IntTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+
+ if (FilePtrTy) {
+ // FILE *popen(const char *command, const char *type);
+ addToFunctionSummaryMap("popen",
+ Summary(ArgTypes{ConstCharPtrTy, ConstCharPtrTy},
+ RetType{*FilePtrTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // int pclose(FILE *stream);
+ addToFunctionSummaryMap(
+ "pclose", Summary(ArgTypes{*FilePtrTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+ }
+
+ // int close(int fildes);
+ addToFunctionSummaryMap(
+ "close", Summary(ArgTypes{IntTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+
+ // long fpathconf(int fildes, int name);
+ addToFunctionSummaryMap(
+ "fpathconf",
+ Summary(ArgTypes{IntTy, IntTy}, RetType{LongTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+
+ // long pathconf(const char *path, int name);
+ addToFunctionSummaryMap("pathconf", Summary(ArgTypes{ConstCharPtrTy, IntTy},
+ RetType{LongTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ if (FilePtrTy)
+ // FILE *fdopen(int fd, const char *mode);
+ addToFunctionSummaryMap(
+ "fdopen", Summary(ArgTypes{IntTy, ConstCharPtrTy},
+ RetType{*FilePtrTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ if (DirPtrTy) {
+ // void rewinddir(DIR *dir);
+ addToFunctionSummaryMap(
+ "rewinddir", Summary(ArgTypes{*DirPtrTy}, RetType{VoidTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // void seekdir(DIR *dirp, long loc);
+ addToFunctionSummaryMap("seekdir", Summary(ArgTypes{*DirPtrTy, LongTy},
+ RetType{VoidTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+ }
+
+ // int rand_r(unsigned int *seedp);
+ addToFunctionSummaryMap("rand_r", Summary(ArgTypes{UnsignedIntPtrTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int strcasecmp(const char *s1, const char *s2);
+ addToFunctionSummaryMap("strcasecmp",
+ Summary(ArgTypes{ConstCharPtrTy, ConstCharPtrTy},
+ RetType{IntTy}, EvalCallAsPure)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // int strncasecmp(const char *s1, const char *s2, size_t n);
+ addToFunctionSummaryMap(
+ "strncasecmp", Summary(ArgTypes{ConstCharPtrTy, ConstCharPtrTy, SizeTy},
+ RetType{IntTy}, EvalCallAsPure)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(ArgumentCondition(
+ 2, WithinRange, Range(0, SizeMax))));
+
+ if (FilePtrTy && Off_tTy) {
+
+ // int fileno(FILE *stream);
+ addToFunctionSummaryMap(
+ "fileno", Summary(ArgTypes{*FilePtrTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int fseeko(FILE *stream, off_t offset, int whence);
+ addToFunctionSummaryMap("fseeko",
+ Summary(ArgTypes{*FilePtrTy, *Off_tTy, IntTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // off_t ftello(FILE *stream);
+ addToFunctionSummaryMap(
+ "ftello", Summary(ArgTypes{*FilePtrTy}, RetType{*Off_tTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+ }
+
+ if (Off_tTy) {
+ Optional<RangeInt> Off_tMax = BVF.getMaxValue(*Off_tTy).getLimitedValue();
+
+ // void *mmap(void *addr, size_t length, int prot, int flags, int fd,
+ // off_t offset);
+ addToFunctionSummaryMap(
+ "mmap",
+ Summary(ArgTypes{VoidPtrTy, SizeTy, IntTy, IntTy, IntTy, *Off_tTy},
+ RetType{VoidPtrTy}, NoEvalCall)
+ .ArgConstraint(
+ ArgumentCondition(1, WithinRange, Range(1, SizeMax)))
+ .ArgConstraint(
+ ArgumentCondition(4, WithinRange, Range(0, *Off_tMax))));
+ }
+
+    // void *mmap64(void *addr, size_t length, int prot, int flags, int fd,
+    //              off64_t offset);
+    // Model mmap64 only on platforms that define off64_t (LFS interfaces).
+    Optional<QualType> Off64_tTy = lookupType("off64_t", ACtx);
+    Optional<RangeInt> Off64_tMax;
+    if (Off64_tTy) {
+      // Fix: derive the maximum from off64_t, not off_t. Besides being the
+      // wrong type, dereferencing Off_tTy here was unsafe: off_t may fail to
+      // resolve while off64_t is present, making *Off_tTy a dereference of an
+      // empty Optional.
+      Off64_tMax = BVF.getMaxValue(*Off64_tTy).getLimitedValue();
+      addToFunctionSummaryMap(
+          "mmap64",
+          Summary(ArgTypes{VoidPtrTy, SizeTy, IntTy, IntTy, IntTy, *Off64_tTy},
+                  RetType{VoidPtrTy}, NoEvalCall)
+              // A zero-length mapping is invalid.
+              .ArgConstraint(
+                  ArgumentCondition(1, WithinRange, Range(1, SizeMax)))
+              // Arg 4 is the file descriptor; it must be non-negative.
+              // NOTE(review): the upper bound Off64_tMax mirrors the mmap
+              // summary above, though IntMax would arguably be tighter for an
+              // fd — confirm intent before narrowing.
+              .ArgConstraint(
+                  ArgumentCondition(4, WithinRange, Range(0, *Off64_tMax))));
+    }
+
+ // int pipe(int fildes[2]);
+ addToFunctionSummaryMap(
+ "pipe", Summary(ArgTypes{IntPtrTy}, RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ if (Off_tTy)
+ // off_t lseek(int fildes, off_t offset, int whence);
+ addToFunctionSummaryMap(
+ "lseek", Summary(ArgTypes{IntTy, *Off_tTy, IntTy}, RetType{*Off_tTy},
+ NoEvalCall)
+ .ArgConstraint(ArgumentCondition(0, WithinRange,
+ Range(0, IntMax))));
+
+ Optional<QualType> Ssize_tTy = lookupType("ssize_t", ACtx);
+
+ if (Ssize_tTy) {
+ // ssize_t readlink(const char *restrict path, char *restrict buf,
+ // size_t bufsize);
+ addToFunctionSummaryMap(
+ "readlink",
+ Summary(ArgTypes{ConstCharPtrRestrictTy, CharPtrRestrictTy, SizeTy},
+ RetType{*Ssize_tTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(BufferSize(/*Buffer=*/ArgNo(1),
+ /*BufSize=*/ArgNo(2)))
+ .ArgConstraint(
+ ArgumentCondition(2, WithinRange, Range(0, SizeMax))));
+
+ // ssize_t readlinkat(int fd, const char *restrict path,
+ // char *restrict buf, size_t bufsize);
+ addToFunctionSummaryMap(
+ "readlinkat", Summary(ArgTypes{IntTy, ConstCharPtrRestrictTy,
+ CharPtrRestrictTy, SizeTy},
+ RetType{*Ssize_tTy}, NoEvalCall)
+ .ArgConstraint(ArgumentCondition(0, WithinRange,
+ Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(NotNull(ArgNo(2)))
+ .ArgConstraint(BufferSize(/*Buffer=*/ArgNo(2),
+ /*BufSize=*/ArgNo(3)))
+ .ArgConstraint(ArgumentCondition(
+ 3, WithinRange, Range(0, SizeMax))));
+ }
+
+ // int renameat(int olddirfd, const char *oldpath, int newdirfd, const char
+ // *newpath);
+ addToFunctionSummaryMap("renameat", Summary(ArgTypes{IntTy, ConstCharPtrTy,
+ IntTy, ConstCharPtrTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(NotNull(ArgNo(3))));
+
+ // char *realpath(const char *restrict file_name,
+ // char *restrict resolved_name);
+ addToFunctionSummaryMap(
+ "realpath", Summary(ArgTypes{ConstCharPtrRestrictTy, CharPtrRestrictTy},
+ RetType{CharPtrTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ QualType CharPtrConstPtr = ACtx.getPointerType(CharPtrTy.withConst());
+
+ // int execv(const char *path, char *const argv[]);
+ addToFunctionSummaryMap("execv",
+ Summary(ArgTypes{ConstCharPtrTy, CharPtrConstPtr},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int execvp(const char *file, char *const argv[]);
+ addToFunctionSummaryMap("execvp",
+ Summary(ArgTypes{ConstCharPtrTy, CharPtrConstPtr},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int getopt(int argc, char * const argv[], const char *optstring);
+ addToFunctionSummaryMap(
+ "getopt",
+ Summary(ArgTypes{IntTy, CharPtrConstPtr, ConstCharPtrTy},
+ RetType{IntTy}, NoEvalCall)
+ .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(NotNull(ArgNo(2))));
+ }
+
+ // Functions for testing.
+ if (ChecksEnabled[CK_StdCLibraryFunctionsTesterChecker]) {
+ addToFunctionSummaryMap(
+ "__two_constrained_args",
+ Summary(ArgTypes{IntTy, IntTy}, RetType{IntTy}, EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, WithinRange, SingleValue(1)))
+ .ArgConstraint(ArgumentCondition(1U, WithinRange, SingleValue(1))));
+ addToFunctionSummaryMap(
+ "__arg_constrained_twice",
+ Summary(ArgTypes{IntTy}, RetType{IntTy}, EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, OutOfRange, SingleValue(1)))
+ .ArgConstraint(ArgumentCondition(0U, OutOfRange, SingleValue(2))));
+ addToFunctionSummaryMap(
+ "__defaultparam",
+ Summary(ArgTypes{Irrelevant, IntTy}, RetType{IntTy}, EvalCallAsPure)
+ .ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap("__variadic",
+ Summary(ArgTypes{VoidPtrTy, ConstCharPtrTy},
+ RetType{IntTy}, EvalCallAsPure)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+ addToFunctionSummaryMap(
+ "__buf_size_arg_constraint",
+ Summary(ArgTypes{ConstVoidPtrTy, SizeTy}, RetType{IntTy},
+ EvalCallAsPure)
+ .ArgConstraint(
+ BufferSize(/*Buffer=*/ArgNo(0), /*BufSize=*/ArgNo(1))));
+ addToFunctionSummaryMap(
+ "__buf_size_arg_constraint_mul",
+ Summary(ArgTypes{ConstVoidPtrTy, SizeTy, SizeTy}, RetType{IntTy},
+ EvalCallAsPure)
+ .ArgConstraint(BufferSize(/*Buffer=*/ArgNo(0), /*BufSize=*/ArgNo(1),
+ /*BufSizeMultiplier=*/ArgNo(2))));
+ }
}
void ento::registerStdCLibraryFunctionsChecker(CheckerManager &mgr) {
-  // If this checker grows large enough to support C++, Objective-C, or other
-  // standard libraries, we could use multiple register...Checker() functions,
-  // which would register various checkers with the help of the same Checker
-  // class, turning on different function summaries.
-  mgr.registerChecker<StdLibraryFunctionsChecker>();
+  auto *Checker = mgr.registerChecker<StdLibraryFunctionsChecker>();
+  // Read the checker's boolean options from the analyzer configuration so
+  // users can toggle summary dumping and POSIX modeling per invocation.
+  Checker->DisplayLoadedSummaries =
+      mgr.getAnalyzerOptions().getCheckerBooleanOption(
+          Checker, "DisplayLoadedSummaries");
+  Checker->ModelPOSIX =
+      mgr.getAnalyzerOptions().getCheckerBooleanOption(Checker, "ModelPOSIX");
 }
-bool ento::shouldRegisterStdCLibraryFunctionsChecker(const LangOptions &LO) {
+bool ento::shouldRegisterStdCLibraryFunctionsChecker(const CheckerManager &mgr) {
+  // Unconditionally available; no language- or target-specific prerequisite.
   return true;
 }
+
+// Defines the registration entry points for a sub-checker. Each sub-checker
+// reuses the single StdLibraryFunctionsChecker instance obtained via
+// getChecker<>(): registering one only sets the corresponding entry in
+// ChecksEnabled and records the name under which its reports are emitted.
+#define REGISTER_CHECKER(name)                                                 \
+  void ento::register##name(CheckerManager &mgr) {                             \
+    StdLibraryFunctionsChecker *checker =                                      \
+        mgr.getChecker<StdLibraryFunctionsChecker>();                          \
+    checker->ChecksEnabled[StdLibraryFunctionsChecker::CK_##name] = true;      \
+    checker->CheckNames[StdLibraryFunctionsChecker::CK_##name] =               \
+        mgr.getCurrentCheckerName();                                           \
+  }                                                                            \
+                                                                               \
+  bool ento::shouldRegister##name(const CheckerManager &mgr) { return true; }
+
+REGISTER_CHECKER(StdCLibraryFunctionArgsChecker)
+REGISTER_CHECKER(StdCLibraryFunctionsTesterChecker)
diff --git a/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
index 47099f2afb6a..f6abbe4f8f03 100644
--- a/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -27,142 +27,453 @@ using namespace std::placeholders;
namespace {
+struct FnDescription;
+
+/// State of the stream error flags.
+/// Sometimes it is not known to the checker what error flags are set.
+/// This is indicated by setting more than one flag to true.
+/// This is an optimization to avoid state splits.
+/// A stream can either be in FEOF or FERROR but not both at the same time.
+/// Multiple flags are set to handle the corresponding states together.
+struct StreamErrorState {
+  /// The stream can be in state where none of the error flags set.
+  bool NoError = true;
+  /// The stream can be in state where the EOF indicator is set.
+  bool FEof = false;
+  /// The stream can be in state where the error indicator is set.
+  bool FError = false;
+
+  // Each predicate below is true only when that state is known *exactly*
+  // (the corresponding flag set and both others cleared).
+  bool isNoError() const { return NoError && !FEof && !FError; }
+  bool isFEof() const { return !NoError && FEof && !FError; }
+  bool isFError() const { return !NoError && !FEof && FError; }
+
+  bool operator==(const StreamErrorState &ES) const {
+    return NoError == ES.NoError && FEof == ES.FEof && FError == ES.FError;
+  }
+
+  bool operator!=(const StreamErrorState &ES) const { return !(*this == ES); }
+
+  // Union of possible states: the result admits any state either side admits.
+  StreamErrorState operator|(const StreamErrorState &E) const {
+    return {NoError || E.NoError, FEof || E.FEof, FError || E.FError};
+  }
+
+  // Intersection of possible states: only states admitted by both sides.
+  StreamErrorState operator&(const StreamErrorState &E) const {
+    return {NoError && E.NoError, FEof && E.FEof, FError && E.FError};
+  }
+
+  // Complement: admits exactly the states this one does not.
+  StreamErrorState operator~() const { return {!NoError, !FEof, !FError}; }
+
+  /// Returns if the StreamErrorState is a valid object.
+  // At least one state must be possible; all-false means an empty (invalid)
+  // state set.
+  operator bool() const { return NoError || FEof || FError; }
+
+  // Fold all three flags into the FoldingSet ID so distinct error states map
+  // to distinct program-state nodes.
+  void Profile(llvm::FoldingSetNodeID &ID) const {
+    ID.AddBoolean(NoError);
+    ID.AddBoolean(FEof);
+    ID.AddBoolean(FError);
+  }
+};
+
+// Convenience constants for the three exactly-known error states.
+const StreamErrorState ErrorNone{true, false, false};
+const StreamErrorState ErrorFEof{false, true, false};
+const StreamErrorState ErrorFError{false, false, true};
+
+/// Full state information about a stream pointer.
struct StreamState {
- enum Kind { Opened, Closed, OpenFailed, Escaped } K;
+ /// The last file operation called on the stream.
+ const FnDescription *LastOperation;
- StreamState(Kind k) : K(k) {}
+ /// State of a stream symbol.
+ /// FIXME: We need maybe an "escaped" state later.
+ enum KindTy {
+ Opened, /// Stream is opened.
+ Closed, /// Closed stream (an invalid stream pointer after it was closed).
+ OpenFailed /// The last open operation has failed.
+ } State;
- bool isOpened() const { return K == Opened; }
- bool isClosed() const { return K == Closed; }
- //bool isOpenFailed() const { return K == OpenFailed; }
- //bool isEscaped() const { return K == Escaped; }
+ /// State of the error flags.
+ /// Ignored in non-opened stream state but must be NoError.
+ StreamErrorState const ErrorState;
- bool operator==(const StreamState &X) const { return K == X.K; }
+ /// Indicate if the file has an "indeterminate file position indicator".
+ /// This can be set at a failing read or write or seek operation.
+ /// If it is set no more read or write is allowed.
+ /// This value is not dependent on the stream error flags:
+ /// The error flag may be cleared with `clearerr` but the file position
+ /// remains still indeterminate.
+ /// This value applies to all error states in ErrorState except FEOF.
+ /// An EOF+indeterminate state is the same as EOF state.
+ bool const FilePositionIndeterminate = false;
- static StreamState getOpened() { return StreamState(Opened); }
- static StreamState getClosed() { return StreamState(Closed); }
- static StreamState getOpenFailed() { return StreamState(OpenFailed); }
- static StreamState getEscaped() { return StreamState(Escaped); }
+ StreamState(const FnDescription *L, KindTy S, const StreamErrorState &ES,
+ bool IsFilePositionIndeterminate)
+ : LastOperation(L), State(S), ErrorState(ES),
+ FilePositionIndeterminate(IsFilePositionIndeterminate) {
+ assert((!ES.isFEof() || !IsFilePositionIndeterminate) &&
+ "FilePositionIndeterminate should be false in FEof case.");
+ assert((State == Opened || ErrorState.isNoError()) &&
+ "ErrorState should be None in non-opened stream state.");
+ }
+
+ bool isOpened() const { return State == Opened; }
+ bool isClosed() const { return State == Closed; }
+ bool isOpenFailed() const { return State == OpenFailed; }
+
+ bool operator==(const StreamState &X) const {
+ // In a non-opened state the error state should always be NoError, so the
+ // comparison here is not a problem.
+ return LastOperation == X.LastOperation && State == X.State &&
+ ErrorState == X.ErrorState &&
+ FilePositionIndeterminate == X.FilePositionIndeterminate;
+ }
+
+ static StreamState getOpened(const FnDescription *L,
+ const StreamErrorState &ES = ErrorNone,
+ bool IsFilePositionIndeterminate = false) {
+ return StreamState{L, Opened, ES, IsFilePositionIndeterminate};
+ }
+ static StreamState getClosed(const FnDescription *L) {
+ return StreamState{L, Closed, {}, false};
+ }
+ static StreamState getOpenFailed(const FnDescription *L) {
+ return StreamState{L, OpenFailed, {}, false};
+ }
void Profile(llvm::FoldingSetNodeID &ID) const {
- ID.AddInteger(K);
+ ID.AddPointer(LastOperation);
+ ID.AddInteger(State);
+ ID.AddInteger(ErrorState);
+ ID.AddBoolean(FilePositionIndeterminate);
}
};
-class StreamChecker : public Checker<eval::Call,
- check::DeadSymbols > {
- mutable std::unique_ptr<BuiltinBug> BT_nullfp, BT_illegalwhence,
- BT_doubleclose, BT_ResourceLeak;
+class StreamChecker;
+using FnCheck = std::function<void(const StreamChecker *, const FnDescription *,
+ const CallEvent &, CheckerContext &)>;
+
+using ArgNoTy = unsigned int;
+static const ArgNoTy ArgNone = std::numeric_limits<ArgNoTy>::max();
+
+struct FnDescription {
+ FnCheck PreFn;
+ FnCheck EvalFn;
+ ArgNoTy StreamArgNo;
+};
+
+/// Get the value of the stream argument out of the passed call event.
+/// The call should contain a function that is described by Desc.
+SVal getStreamArg(const FnDescription *Desc, const CallEvent &Call) {
+ assert(Desc && Desc->StreamArgNo != ArgNone &&
+ "Try to get a non-existing stream argument.");
+ return Call.getArgSVal(Desc->StreamArgNo);
+}
+
+/// Create a conjured symbol return value for a call expression.
+DefinedSVal makeRetVal(CheckerContext &C, const CallExpr *CE) {
+ assert(CE && "Expecting a call expression.");
+
+ const LocationContext *LCtx = C.getLocationContext();
+ return C.getSValBuilder()
+ .conjureSymbolVal(nullptr, CE, LCtx, C.blockCount())
+ .castAs<DefinedSVal>();
+}
+
+ProgramStateRef bindAndAssumeTrue(ProgramStateRef State, CheckerContext &C,
+ const CallExpr *CE) {
+ DefinedSVal RetVal = makeRetVal(C, CE);
+ State = State->BindExpr(CE, C.getLocationContext(), RetVal);
+ State = State->assume(RetVal, true);
+ assert(State && "Assumption on new value should not fail.");
+ return State;
+}
+
+ProgramStateRef bindInt(uint64_t Value, ProgramStateRef State,
+ CheckerContext &C, const CallExpr *CE) {
+ State = State->BindExpr(CE, C.getLocationContext(),
+ C.getSValBuilder().makeIntVal(Value, false));
+ return State;
+}
+
+class StreamChecker : public Checker<check::PreCall, eval::Call,
+ check::DeadSymbols, check::PointerEscape> {
+ BugType BT_FileNull{this, "NULL stream pointer", "Stream handling error"};
+ BugType BT_UseAfterClose{this, "Closed stream", "Stream handling error"};
+ BugType BT_UseAfterOpenFailed{this, "Invalid stream",
+ "Stream handling error"};
+ BugType BT_IndeterminatePosition{this, "Invalid stream state",
+ "Stream handling error"};
+ BugType BT_IllegalWhence{this, "Illegal whence argument",
+ "Stream handling error"};
+ BugType BT_StreamEof{this, "Stream already in EOF", "Stream handling error"};
+ BugType BT_ResourceLeak{this, "Resource leak", "Stream handling error"};
public:
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
bool evalCall(const CallEvent &Call, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
+ ProgramStateRef checkPointerEscape(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call,
+ PointerEscapeKind Kind) const;
+
+ /// If true, evaluate special testing stream functions.
+ bool TestMode = false;
private:
- using FnCheck = std::function<void(const StreamChecker *, const CallEvent &,
- CheckerContext &)>;
-
- CallDescriptionMap<FnCheck> Callbacks = {
- {{"fopen"}, &StreamChecker::evalFopen},
- {{"freopen", 3}, &StreamChecker::evalFreopen},
- {{"tmpfile"}, &StreamChecker::evalFopen},
- {{"fclose", 1}, &StreamChecker::evalFclose},
+ CallDescriptionMap<FnDescription> FnDescriptions = {
+ {{"fopen"}, {nullptr, &StreamChecker::evalFopen, ArgNone}},
+ {{"freopen", 3},
+ {&StreamChecker::preFreopen, &StreamChecker::evalFreopen, 2}},
+ {{"tmpfile"}, {nullptr, &StreamChecker::evalFopen, ArgNone}},
+ {{"fclose", 1},
+ {&StreamChecker::preDefault, &StreamChecker::evalFclose, 0}},
{{"fread", 4},
- std::bind(&StreamChecker::checkArgNullStream, _1, _2, _3, 3)},
+ {&StreamChecker::preFread,
+ std::bind(&StreamChecker::evalFreadFwrite, _1, _2, _3, _4, true), 3}},
{{"fwrite", 4},
- std::bind(&StreamChecker::checkArgNullStream, _1, _2, _3, 3)},
- {{"fseek", 3}, &StreamChecker::evalFseek},
- {{"ftell", 1},
- std::bind(&StreamChecker::checkArgNullStream, _1, _2, _3, 0)},
- {{"rewind", 1},
- std::bind(&StreamChecker::checkArgNullStream, _1, _2, _3, 0)},
- {{"fgetpos", 2},
- std::bind(&StreamChecker::checkArgNullStream, _1, _2, _3, 0)},
- {{"fsetpos", 2},
- std::bind(&StreamChecker::checkArgNullStream, _1, _2, _3, 0)},
+ {&StreamChecker::preFwrite,
+ std::bind(&StreamChecker::evalFreadFwrite, _1, _2, _3, _4, false), 3}},
+ {{"fseek", 3}, {&StreamChecker::preFseek, &StreamChecker::evalFseek, 0}},
+ {{"ftell", 1}, {&StreamChecker::preDefault, nullptr, 0}},
+ {{"rewind", 1}, {&StreamChecker::preDefault, nullptr, 0}},
+ {{"fgetpos", 2}, {&StreamChecker::preDefault, nullptr, 0}},
+ {{"fsetpos", 2}, {&StreamChecker::preDefault, nullptr, 0}},
{{"clearerr", 1},
- std::bind(&StreamChecker::checkArgNullStream, _1, _2, _3, 0)},
+ {&StreamChecker::preDefault, &StreamChecker::evalClearerr, 0}},
{{"feof", 1},
- std::bind(&StreamChecker::checkArgNullStream, _1, _2, _3, 0)},
+ {&StreamChecker::preDefault,
+ std::bind(&StreamChecker::evalFeofFerror, _1, _2, _3, _4, ErrorFEof),
+ 0}},
{{"ferror", 1},
- std::bind(&StreamChecker::checkArgNullStream, _1, _2, _3, 0)},
- {{"fileno", 1},
- std::bind(&StreamChecker::checkArgNullStream, _1, _2, _3, 0)},
+ {&StreamChecker::preDefault,
+ std::bind(&StreamChecker::evalFeofFerror, _1, _2, _3, _4, ErrorFError),
+ 0}},
+ {{"fileno", 1}, {&StreamChecker::preDefault, nullptr, 0}},
+ };
+
+ CallDescriptionMap<FnDescription> FnTestDescriptions = {
+ {{"StreamTesterChecker_make_feof_stream", 1},
+ {nullptr,
+ std::bind(&StreamChecker::evalSetFeofFerror, _1, _2, _3, _4, ErrorFEof),
+ 0}},
+ {{"StreamTesterChecker_make_ferror_stream", 1},
+ {nullptr,
+ std::bind(&StreamChecker::evalSetFeofFerror, _1, _2, _3, _4,
+ ErrorFError),
+ 0}},
+ };
+
+ void evalFopen(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void preFreopen(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+ void evalFreopen(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void evalFclose(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void preFread(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void preFwrite(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void evalFreadFwrite(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C, bool IsFread) const;
+
+ void preFseek(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+ void evalFseek(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void preDefault(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void evalClearerr(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void evalFeofFerror(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C,
+ const StreamErrorState &ErrorKind) const;
+
+ void evalSetFeofFerror(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C,
+ const StreamErrorState &ErrorKind) const;
+
+ /// Check that the stream (in StreamVal) is not NULL.
+ /// If it can only be NULL a fatal error is emitted and nullptr returned.
+ /// Otherwise the return value is a new state where the stream is constrained
+ /// to be non-null.
+ ProgramStateRef ensureStreamNonNull(SVal StreamVal, CheckerContext &C,
+ ProgramStateRef State) const;
+
+ /// Check that the stream is the opened state.
+ /// If the stream is known to be not opened an error is generated
+ /// and nullptr returned, otherwise the original state is returned.
+ ProgramStateRef ensureStreamOpened(SVal StreamVal, CheckerContext &C,
+ ProgramStateRef State) const;
+
+ /// Check that the stream does not have an invalid ("indeterminate") file
+ /// position; generate a warning for it if it does.
+ /// (EOF is not an invalid position.)
+ /// The returned state can be nullptr if a fatal error was generated.
+ /// It can return a non-null state if the stream does not have an invalid
+ /// position or there is an execution path with a non-invalid position.
+ ProgramStateRef
+ ensureNoFilePositionIndeterminate(SVal StreamVal, CheckerContext &C,
+ ProgramStateRef State) const;
+
+ /// Check the legality of the 'whence' argument of 'fseek'.
+ /// Generate error and return nullptr if it is found to be illegal.
+ /// Otherwise returns the state.
+ /// (State is not changed here because the "whence" value is already known.)
+ ProgramStateRef ensureFseekWhenceCorrect(SVal WhenceVal, CheckerContext &C,
+ ProgramStateRef State) const;
+
+ /// Generate warning about stream in EOF state.
+ /// There will always be a state transition into the passed State,
+ /// by the new non-fatal error node or (if failed) a normal transition,
+ /// to ensure uniform handling.
+ void reportFEofWarning(CheckerContext &C, ProgramStateRef State) const;
+
+ /// Find the description data of the function called by a call event.
+ /// Returns nullptr if no function is recognized.
+ const FnDescription *lookupFn(const CallEvent &Call) const {
+ // Recognize "global C functions" with only integral or pointer arguments
+ // (and matching name) as stream functions.
+ if (!Call.isGlobalCFunction())
+ return nullptr;
+ for (auto P : Call.parameters()) {
+ QualType T = P->getType();
+ if (!T->isIntegralOrEnumerationType() && !T->isPointerType())
+ return nullptr;
+ }
+
+ return FnDescriptions.lookup(Call);
+ }
+
+ /// Generate a message for BugReporterVisitor if the stored symbol is
+ /// marked as interesting by the actual bug report.
+ struct NoteFn {
+ const CheckerNameRef CheckerName;
+ SymbolRef StreamSym;
+ std::string Message;
+
+ std::string operator()(PathSensitiveBugReport &BR) const {
+ if (BR.isInteresting(StreamSym) &&
+ CheckerName == BR.getBugType().getCheckerName())
+ return Message;
+
+ return "";
+ }
};
- void evalFopen(const CallEvent &Call, CheckerContext &C) const;
- void evalFreopen(const CallEvent &Call, CheckerContext &C) const;
- void evalFclose(const CallEvent &Call, CheckerContext &C) const;
- void evalFseek(const CallEvent &Call, CheckerContext &C) const;
-
- void checkArgNullStream(const CallEvent &Call, CheckerContext &C,
- unsigned ArgI) const;
- bool checkNullStream(SVal SV, CheckerContext &C,
- ProgramStateRef &State) const;
- void checkFseekWhence(SVal SV, CheckerContext &C,
- ProgramStateRef &State) const;
- bool checkDoubleClose(const CallEvent &Call, CheckerContext &C,
- ProgramStateRef &State) const;
+ const NoteTag *constructNoteTag(CheckerContext &C, SymbolRef StreamSym,
+ const std::string &Message) const {
+ return C.getNoteTag(NoteFn{getCheckerName(), StreamSym, Message});
+ }
+
+ /// Searches for the ExplodedNode where the file descriptor was acquired for
+ /// StreamSym.
+ static const ExplodedNode *getAcquisitionSite(const ExplodedNode *N,
+ SymbolRef StreamSym,
+ CheckerContext &C);
};
} // end anonymous namespace
REGISTER_MAP_WITH_PROGRAMSTATE(StreamMap, SymbolRef, StreamState)
+inline void assertStreamStateOpened(const StreamState *SS) {
+ assert(SS->isOpened() &&
+ "Previous create of error node for non-opened stream failed?");
+}
-bool StreamChecker::evalCall(const CallEvent &Call, CheckerContext &C) const {
- const auto *FD = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
- if (!FD || FD->getKind() != Decl::Function)
- return false;
+const ExplodedNode *StreamChecker::getAcquisitionSite(const ExplodedNode *N,
+ SymbolRef StreamSym,
+ CheckerContext &C) {
+ ProgramStateRef State = N->getState();
+ // When the bug type is resource leak, the exploded node N may not have
+ // state info for the leaked file descriptor, but its predecessor should.
+ if (!State->get<StreamMap>(StreamSym))
+ N = N->getFirstPred();
- // Recognize "global C functions" with only integral or pointer arguments
- // (and matching name) as stream functions.
- if (!Call.isGlobalCFunction())
- return false;
- for (auto P : Call.parameters()) {
- QualType T = P->getType();
- if (!T->isIntegralOrEnumerationType() && !T->isPointerType())
- return false;
+ const ExplodedNode *Pred = N;
+ while (N) {
+ State = N->getState();
+ if (!State->get<StreamMap>(StreamSym))
+ return Pred;
+ Pred = N;
+ N = N->getFirstPred();
}
- const FnCheck *Callback = Callbacks.lookup(Call);
- if (!Callback)
+ return nullptr;
+}
+
+void StreamChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ const FnDescription *Desc = lookupFn(Call);
+ if (!Desc || !Desc->PreFn)
+ return;
+
+ Desc->PreFn(this, Desc, Call, C);
+}
+
+bool StreamChecker::evalCall(const CallEvent &Call, CheckerContext &C) const {
+ const FnDescription *Desc = lookupFn(Call);
+ if (!Desc && TestMode)
+ Desc = FnTestDescriptions.lookup(Call);
+ if (!Desc || !Desc->EvalFn)
return false;
- (*Callback)(this, Call, C);
+ Desc->EvalFn(this, Desc, Call, C);
return C.isDifferent();
}
-void StreamChecker::evalFopen(const CallEvent &Call, CheckerContext &C) const {
- ProgramStateRef state = C.getState();
- SValBuilder &svalBuilder = C.getSValBuilder();
- const LocationContext *LCtx = C.getPredecessor()->getLocationContext();
- auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+void StreamChecker::evalFopen(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
if (!CE)
return;
- DefinedSVal RetVal =
- svalBuilder.conjureSymbolVal(nullptr, CE, LCtx, C.blockCount())
- .castAs<DefinedSVal>();
- state = state->BindExpr(CE, C.getLocationContext(), RetVal);
+ DefinedSVal RetVal = makeRetVal(C, CE);
+ SymbolRef RetSym = RetVal.getAsSymbol();
+ assert(RetSym && "RetVal must be a symbol here.");
+
+ State = State->BindExpr(CE, C.getLocationContext(), RetVal);
- ConstraintManager &CM = C.getConstraintManager();
// Bifurcate the state into two: one with a valid FILE* pointer, the other
// with a NULL.
- ProgramStateRef stateNotNull, stateNull;
- std::tie(stateNotNull, stateNull) = CM.assumeDual(state, RetVal);
+ ProgramStateRef StateNotNull, StateNull;
+ std::tie(StateNotNull, StateNull) =
+ C.getConstraintManager().assumeDual(State, RetVal);
+
+ StateNotNull =
+ StateNotNull->set<StreamMap>(RetSym, StreamState::getOpened(Desc));
+ StateNull =
+ StateNull->set<StreamMap>(RetSym, StreamState::getOpenFailed(Desc));
+
+ C.addTransition(StateNotNull,
+ constructNoteTag(C, RetSym, "Stream opened here"));
+ C.addTransition(StateNull);
+}
- SymbolRef Sym = RetVal.getAsSymbol();
- assert(Sym && "RetVal must be a symbol here.");
- stateNotNull = stateNotNull->set<StreamMap>(Sym, StreamState::getOpened());
- stateNull = stateNull->set<StreamMap>(Sym, StreamState::getOpenFailed());
+void StreamChecker::preFreopen(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
+ // Do not allow NULL as passed stream pointer but allow a closed stream.
+ ProgramStateRef State = C.getState();
+ State = ensureStreamNonNull(getStreamArg(Desc, Call), C, State);
+ if (!State)
+ return;
- C.addTransition(stateNotNull);
- C.addTransition(stateNull);
+ C.addTransition(State);
}
-void StreamChecker::evalFreopen(const CallEvent &Call,
+void StreamChecker::evalFreopen(const FnDescription *Desc,
+ const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
@@ -170,21 +481,21 @@ void StreamChecker::evalFreopen(const CallEvent &Call,
if (!CE)
return;
- Optional<DefinedSVal> StreamVal = Call.getArgSVal(2).getAs<DefinedSVal>();
+ Optional<DefinedSVal> StreamVal =
+ getStreamArg(Desc, Call).getAs<DefinedSVal>();
if (!StreamVal)
return;
- // Do not allow NULL as passed stream pointer.
- // This is not specified in the man page but may crash on some system.
- checkNullStream(*StreamVal, C, State);
- // Check if error was generated.
- if (C.isDifferent())
- return;
SymbolRef StreamSym = StreamVal->getAsSymbol();
- // Do not care about special values for stream ("(FILE *)0x12345"?).
+ // Do not care about concrete values for stream ("(FILE *)0x12345"?).
+ // FIXME: Can such values be stdin, stdout, or stderr?
if (!StreamSym)
return;
+ // Do not handle untracked stream. It is probably escaped.
+ if (!State->get<StreamMap>(StreamSym))
+ return;
+
// Generate state for non-failed case.
// Return value is the passed stream pointer.
// According to the documentations, the stream is closed first
@@ -197,129 +508,452 @@ void StreamChecker::evalFreopen(const CallEvent &Call,
C.getSValBuilder().makeNull());
StateRetNotNull =
- StateRetNotNull->set<StreamMap>(StreamSym, StreamState::getOpened());
+ StateRetNotNull->set<StreamMap>(StreamSym, StreamState::getOpened(Desc));
StateRetNull =
- StateRetNull->set<StreamMap>(StreamSym, StreamState::getOpenFailed());
+ StateRetNull->set<StreamMap>(StreamSym, StreamState::getOpenFailed(Desc));
- C.addTransition(StateRetNotNull);
+ C.addTransition(StateRetNotNull,
+ constructNoteTag(C, StreamSym, "Stream reopened here"));
C.addTransition(StateRetNull);
}
-void StreamChecker::evalFclose(const CallEvent &Call, CheckerContext &C) const {
+void StreamChecker::evalFclose(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
ProgramStateRef State = C.getState();
- if (checkDoubleClose(Call, C, State))
+ SymbolRef Sym = getStreamArg(Desc, Call).getAsSymbol();
+ if (!Sym)
+ return;
+
+ const StreamState *SS = State->get<StreamMap>(Sym);
+ if (!SS)
+ return;
+
+ assertStreamStateOpened(SS);
+
+ // Close the File Descriptor.
+ // Regardless if the close fails or not, stream becomes "closed"
+ // and can not be used any more.
+ State = State->set<StreamMap>(Sym, StreamState::getClosed(Desc));
+
+ C.addTransition(State);
+}
+
+void StreamChecker::preFread(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SVal StreamVal = getStreamArg(Desc, Call);
+ State = ensureStreamNonNull(StreamVal, C, State);
+ if (!State)
+ return;
+ State = ensureStreamOpened(StreamVal, C, State);
+ if (!State)
+ return;
+ State = ensureNoFilePositionIndeterminate(StreamVal, C, State);
+ if (!State)
+ return;
+
+ SymbolRef Sym = StreamVal.getAsSymbol();
+ if (Sym && State->get<StreamMap>(Sym)) {
+ const StreamState *SS = State->get<StreamMap>(Sym);
+ if (SS->ErrorState & ErrorFEof)
+ reportFEofWarning(C, State);
+ } else {
C.addTransition(State);
+ }
}
-void StreamChecker::evalFseek(const CallEvent &Call, CheckerContext &C) const {
- const Expr *AE2 = Call.getArgExpr(2);
- if (!AE2)
+void StreamChecker::preFwrite(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SVal StreamVal = getStreamArg(Desc, Call);
+ State = ensureStreamNonNull(StreamVal, C, State);
+ if (!State)
return;
+ State = ensureStreamOpened(StreamVal, C, State);
+ if (!State)
+ return;
+ State = ensureNoFilePositionIndeterminate(StreamVal, C, State);
+ if (!State)
+ return;
+
+ C.addTransition(State);
+}
+void StreamChecker::evalFreadFwrite(const FnDescription *Desc,
+ const CallEvent &Call, CheckerContext &C,
+ bool IsFread) const {
ProgramStateRef State = C.getState();
+ SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
+ if (!StreamSym)
+ return;
+
+ const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
+
+ Optional<NonLoc> SizeVal = Call.getArgSVal(1).getAs<NonLoc>();
+ if (!SizeVal)
+ return;
+ Optional<NonLoc> NMembVal = Call.getArgSVal(2).getAs<NonLoc>();
+ if (!NMembVal)
+ return;
- bool StateChanged = checkNullStream(Call.getArgSVal(0), C, State);
- // Check if error was generated.
- if (C.isDifferent())
+ const StreamState *SS = State->get<StreamMap>(StreamSym);
+ if (!SS)
return;
- // Check the legality of the 'whence' argument of 'fseek'.
- checkFseekWhence(State->getSVal(AE2, C.getLocationContext()), C, State);
+ assertStreamStateOpened(SS);
+
+ // C99 standard, §7.19.8.1.3, the return value of fread:
+ // The fread function returns the number of elements successfully read, which
+ // may be less than nmemb if a read error or end-of-file is encountered. If
+ // size or nmemb is zero, fread returns zero and the contents of the array and
+ // the state of the stream remain unchanged.
- if (!C.isDifferent() && StateChanged)
+ if (State->isNull(*SizeVal).isConstrainedTrue() ||
+ State->isNull(*NMembVal).isConstrainedTrue()) {
+ // This is the "size or nmemb is zero" case.
+ // Just return 0, do nothing more (not clear the error flags).
+ State = bindInt(0, State, C, CE);
C.addTransition(State);
+ return;
+ }
- return;
+ // Generate a transition for the success state.
+ // If we know the state to be FEOF at fread, do not add a success state.
+ if (!IsFread || (SS->ErrorState != ErrorFEof)) {
+ ProgramStateRef StateNotFailed =
+ State->BindExpr(CE, C.getLocationContext(), *NMembVal);
+ if (StateNotFailed) {
+ StateNotFailed = StateNotFailed->set<StreamMap>(
+ StreamSym, StreamState::getOpened(Desc));
+ C.addTransition(StateNotFailed);
+ }
+ }
+
+ // Add transition for the failed state.
+ Optional<NonLoc> RetVal = makeRetVal(C, CE).castAs<NonLoc>();
+ assert(RetVal && "Value should be NonLoc.");
+ ProgramStateRef StateFailed =
+ State->BindExpr(CE, C.getLocationContext(), *RetVal);
+ if (!StateFailed)
+ return;
+ auto Cond = C.getSValBuilder()
+ .evalBinOpNN(State, BO_LT, *RetVal, *NMembVal,
+ C.getASTContext().IntTy)
+ .getAs<DefinedOrUnknownSVal>();
+ if (!Cond)
+ return;
+ StateFailed = StateFailed->assume(*Cond, true);
+ if (!StateFailed)
+ return;
+
+ StreamErrorState NewES;
+ if (IsFread)
+ NewES = (SS->ErrorState == ErrorFEof) ? ErrorFEof : ErrorFEof | ErrorFError;
+ else
+ NewES = ErrorFError;
+ // If a (non-EOF) error occurs, the resulting value of the file position
+ // indicator for the stream is indeterminate.
+ StreamState NewState = StreamState::getOpened(Desc, NewES, !NewES.isFEof());
+ StateFailed = StateFailed->set<StreamMap>(StreamSym, NewState);
+ C.addTransition(StateFailed);
}
-void StreamChecker::checkArgNullStream(const CallEvent &Call, CheckerContext &C,
- unsigned ArgI) const {
+void StreamChecker::preFseek(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
ProgramStateRef State = C.getState();
- if (checkNullStream(Call.getArgSVal(ArgI), C, State))
- C.addTransition(State);
+ SVal StreamVal = getStreamArg(Desc, Call);
+ State = ensureStreamNonNull(StreamVal, C, State);
+ if (!State)
+ return;
+ State = ensureStreamOpened(StreamVal, C, State);
+ if (!State)
+ return;
+ State = ensureFseekWhenceCorrect(Call.getArgSVal(2), C, State);
+ if (!State)
+ return;
+
+ C.addTransition(State);
}
-bool StreamChecker::checkNullStream(SVal SV, CheckerContext &C,
- ProgramStateRef &State) const {
- Optional<DefinedSVal> DV = SV.getAs<DefinedSVal>();
- if (!DV)
- return false;
+void StreamChecker::evalFseek(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
+ if (!StreamSym)
+ return;
+
+ const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
+
+ // Ignore the call if the stream is not tracked.
+ if (!State->get<StreamMap>(StreamSym))
+ return;
+
+ DefinedSVal RetVal = makeRetVal(C, CE);
+
+ // Make expression result.
+ State = State->BindExpr(CE, C.getLocationContext(), RetVal);
+
+ // Bifurcate the state into failed and non-failed.
+ // Return zero on success, nonzero on error.
+ ProgramStateRef StateNotFailed, StateFailed;
+ std::tie(StateFailed, StateNotFailed) =
+ C.getConstraintManager().assumeDual(State, RetVal);
+
+ // Reset the state to opened with no error.
+ StateNotFailed =
+ StateNotFailed->set<StreamMap>(StreamSym, StreamState::getOpened(Desc));
+ // We get error.
+ // It is possible that fseek fails but sets none of the error flags.
+ // If fseek failed, assume that the file position becomes indeterminate in any
+ // case.
+ StateFailed = StateFailed->set<StreamMap>(
+ StreamSym,
+ StreamState::getOpened(Desc, ErrorNone | ErrorFEof | ErrorFError, true));
+
+ C.addTransition(StateNotFailed);
+ C.addTransition(StateFailed);
+}
+
+void StreamChecker::evalClearerr(const FnDescription *Desc,
+ const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
+ if (!StreamSym)
+ return;
+
+ const StreamState *SS = State->get<StreamMap>(StreamSym);
+ if (!SS)
+ return;
+
+ assertStreamStateOpened(SS);
+
+ // FilePositionIndeterminate is not cleared.
+ State = State->set<StreamMap>(
+ StreamSym,
+ StreamState::getOpened(Desc, ErrorNone, SS->FilePositionIndeterminate));
+ C.addTransition(State);
+}
+
+void StreamChecker::evalFeofFerror(const FnDescription *Desc,
+ const CallEvent &Call, CheckerContext &C,
+ const StreamErrorState &ErrorKind) const {
+ ProgramStateRef State = C.getState();
+ SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
+ if (!StreamSym)
+ return;
+
+ const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
+
+ const StreamState *SS = State->get<StreamMap>(StreamSym);
+ if (!SS)
+ return;
+
+ assertStreamStateOpened(SS);
+
+ if (SS->ErrorState & ErrorKind) {
+ // Execution path with error of ErrorKind.
+ // Function returns true.
+ // From now on it is the only one error state.
+ ProgramStateRef TrueState = bindAndAssumeTrue(State, C, CE);
+ C.addTransition(TrueState->set<StreamMap>(
+ StreamSym, StreamState::getOpened(Desc, ErrorKind,
+ SS->FilePositionIndeterminate &&
+ !ErrorKind.isFEof())));
+ }
+ if (StreamErrorState NewES = SS->ErrorState & (~ErrorKind)) {
+ // Execution path(s) with ErrorKind not set.
+ // Function returns false.
+ // New error state is everything before minus ErrorKind.
+ ProgramStateRef FalseState = bindInt(0, State, C, CE);
+ C.addTransition(FalseState->set<StreamMap>(
+ StreamSym,
+ StreamState::getOpened(
+ Desc, NewES, SS->FilePositionIndeterminate && !NewES.isFEof())));
+ }
+}
+
+void StreamChecker::preDefault(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SVal StreamVal = getStreamArg(Desc, Call);
+ State = ensureStreamNonNull(StreamVal, C, State);
+ if (!State)
+ return;
+ State = ensureStreamOpened(StreamVal, C, State);
+ if (!State)
+ return;
+
+ C.addTransition(State);
+}
+
+void StreamChecker::evalSetFeofFerror(const FnDescription *Desc,
+ const CallEvent &Call, CheckerContext &C,
+ const StreamErrorState &ErrorKind) const {
+ ProgramStateRef State = C.getState();
+ SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
+ assert(StreamSym && "Operation not permitted on non-symbolic stream value.");
+ const StreamState *SS = State->get<StreamMap>(StreamSym);
+ assert(SS && "Stream should be tracked by the checker.");
+ State = State->set<StreamMap>(
+ StreamSym, StreamState::getOpened(SS->LastOperation, ErrorKind));
+ C.addTransition(State);
+}
+
+ProgramStateRef
+StreamChecker::ensureStreamNonNull(SVal StreamVal, CheckerContext &C,
+ ProgramStateRef State) const {
+ auto Stream = StreamVal.getAs<DefinedSVal>();
+ if (!Stream)
+ return State;
ConstraintManager &CM = C.getConstraintManager();
+
ProgramStateRef StateNotNull, StateNull;
- std::tie(StateNotNull, StateNull) = CM.assumeDual(C.getState(), *DV);
+ std::tie(StateNotNull, StateNull) = CM.assumeDual(C.getState(), *Stream);
if (!StateNotNull && StateNull) {
if (ExplodedNode *N = C.generateErrorNode(StateNull)) {
- if (!BT_nullfp)
- BT_nullfp.reset(new BuiltinBug(this, "NULL stream pointer",
- "Stream pointer might be NULL."));
C.emitReport(std::make_unique<PathSensitiveBugReport>(
- *BT_nullfp, BT_nullfp->getDescription(), N));
+ BT_FileNull, "Stream pointer might be NULL.", N));
}
- return false;
- }
-
- if (StateNotNull) {
- State = StateNotNull;
- return true;
+ return nullptr;
}
- return false;
+ return StateNotNull;
}
-void StreamChecker::checkFseekWhence(SVal SV, CheckerContext &C,
- ProgramStateRef &State) const {
- Optional<nonloc::ConcreteInt> CI = SV.getAs<nonloc::ConcreteInt>();
- if (!CI)
- return;
+ProgramStateRef StreamChecker::ensureStreamOpened(SVal StreamVal,
+ CheckerContext &C,
+ ProgramStateRef State) const {
+ SymbolRef Sym = StreamVal.getAsSymbol();
+ if (!Sym)
+ return State;
- int64_t X = CI->getValue().getSExtValue();
- if (X >= 0 && X <= 2)
- return;
+ const StreamState *SS = State->get<StreamMap>(Sym);
+ if (!SS)
+ return State;
- if (ExplodedNode *N = C.generateNonFatalErrorNode(State)) {
- if (!BT_illegalwhence)
- BT_illegalwhence.reset(
- new BuiltinBug(this, "Illegal whence argument",
- "The whence argument to fseek() should be "
- "SEEK_SET, SEEK_END, or SEEK_CUR."));
- C.emitReport(std::make_unique<PathSensitiveBugReport>(
- *BT_illegalwhence, BT_illegalwhence->getDescription(), N));
+ if (SS->isClosed()) {
+ // Using a stream pointer after 'fclose' causes undefined behavior
+ // according to cppreference.com.
+ ExplodedNode *N = C.generateErrorNode();
+ if (N) {
+ C.emitReport(std::make_unique<PathSensitiveBugReport>(
+ BT_UseAfterClose,
+ "Stream might be already closed. Causes undefined behaviour.", N));
+ return nullptr;
+ }
+
+ return State;
}
+
+ if (SS->isOpenFailed()) {
+ // Using a stream that has failed to open is likely to cause problems.
+ // This should usually not occur because stream pointer is NULL.
+ // But freopen can cause a state when stream pointer remains non-null but
+ // failed to open.
+ ExplodedNode *N = C.generateErrorNode();
+ if (N) {
+ C.emitReport(std::make_unique<PathSensitiveBugReport>(
+ BT_UseAfterOpenFailed,
+ "Stream might be invalid after "
+ "(re-)opening it has failed. "
+ "Can cause undefined behaviour.",
+ N));
+ return nullptr;
+ }
+ return State;
+ }
+
+ return State;
}
-bool StreamChecker::checkDoubleClose(const CallEvent &Call, CheckerContext &C,
- ProgramStateRef &State) const {
- SymbolRef Sym = Call.getArgSVal(0).getAsSymbol();
+ProgramStateRef StreamChecker::ensureNoFilePositionIndeterminate(
+ SVal StreamVal, CheckerContext &C, ProgramStateRef State) const {
+ static const char *BugMessage =
+ "File position of the stream might be 'indeterminate' "
+ "after a failed operation. "
+ "Can cause undefined behavior.";
+
+ SymbolRef Sym = StreamVal.getAsSymbol();
if (!Sym)
- return false;
+ return State;
const StreamState *SS = State->get<StreamMap>(Sym);
-
- // If the file stream is not tracked, return.
if (!SS)
- return false;
+ return State;
+
+ assert(SS->isOpened() && "First ensure that stream is opened.");
+
+ if (SS->FilePositionIndeterminate) {
+ if (SS->ErrorState & ErrorFEof) {
+ // The error is unknown but may be FEOF.
+ // Continue analysis with the FEOF error state.
+ // Report a warning because of the other possible error states.
+ ExplodedNode *N = C.generateNonFatalErrorNode(State);
+ if (!N)
+ return nullptr;
- // Check: Double close a File Descriptor could cause undefined behaviour.
- // Conforming to man-pages
- if (SS->isClosed()) {
- ExplodedNode *N = C.generateErrorNode();
- if (N) {
- if (!BT_doubleclose)
- BT_doubleclose.reset(new BuiltinBug(
- this, "Double fclose", "Try to close a file Descriptor already"
- " closed. Cause undefined behaviour."));
C.emitReport(std::make_unique<PathSensitiveBugReport>(
- *BT_doubleclose, BT_doubleclose->getDescription(), N));
+ BT_IndeterminatePosition, BugMessage, N));
+ return State->set<StreamMap>(
+ Sym, StreamState::getOpened(SS->LastOperation, ErrorFEof, false));
}
- return false;
+
+ // Known or unknown error state without FEOF possible.
+ // Stop analysis, report error.
+ ExplodedNode *N = C.generateErrorNode(State);
+ if (N)
+ C.emitReport(std::make_unique<PathSensitiveBugReport>(
+ BT_IndeterminatePosition, BugMessage, N));
+
+ return nullptr;
}
- // Close the File Descriptor.
- State = State->set<StreamMap>(Sym, StreamState::getClosed());
+ return State;
+}
- return true;
+ProgramStateRef
+StreamChecker::ensureFseekWhenceCorrect(SVal WhenceVal, CheckerContext &C,
+ ProgramStateRef State) const {
+ Optional<nonloc::ConcreteInt> CI = WhenceVal.getAs<nonloc::ConcreteInt>();
+ if (!CI)
+ return State;
+
+ int64_t X = CI->getValue().getSExtValue();
+ if (X >= 0 && X <= 2)
+ return State;
+
+ if (ExplodedNode *N = C.generateNonFatalErrorNode(State)) {
+ C.emitReport(std::make_unique<PathSensitiveBugReport>(
+ BT_IllegalWhence,
+ "The whence argument to fseek() should be "
+ "SEEK_SET, SEEK_END, or SEEK_CUR.",
+ N));
+ return nullptr;
+ }
+
+ return State;
+}
+
+void StreamChecker::reportFEofWarning(CheckerContext &C,
+ ProgramStateRef State) const {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode(State)) {
+ C.emitReport(std::make_unique<PathSensitiveBugReport>(
+ BT_StreamEof,
+ "Read function called when stream is in EOF state. "
+ "Function has no effect.",
+ N));
+ return;
+ }
+ C.addTransition(State);
}
void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
@@ -328,7 +962,7 @@ void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
// TODO: Clean up the state.
const StreamMapTy &Map = State->get<StreamMap>();
- for (const auto &I: Map) {
+ for (const auto &I : Map) {
SymbolRef Sym = I.first;
const StreamState &SS = I.second;
if (!SymReaper.isDead(Sym) || !SS.isOpened())
@@ -338,19 +972,77 @@ void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
if (!N)
continue;
- if (!BT_ResourceLeak)
- BT_ResourceLeak.reset(
- new BuiltinBug(this, "Resource Leak",
- "Opened File never closed. Potential Resource leak."));
- C.emitReport(std::make_unique<PathSensitiveBugReport>(
- *BT_ResourceLeak, BT_ResourceLeak->getDescription(), N));
+ // Do not warn for non-closed stream at program exit.
+ ExplodedNode *Pred = C.getPredecessor();
+ if (Pred && Pred->getCFGBlock() &&
+ Pred->getCFGBlock()->hasNoReturnElement())
+ continue;
+
+ // Resource leaks can result in multiple warnings that describe the same kind
+ // of programming error:
+ // void f() {
+ // FILE *F = fopen("a.txt");
+ // if (rand()) // state split
+ // return; // warning
+ // } // warning
+ // While this isn't necessarily true (leaking the same stream could result
+ // from different kinds of errors), the reduction in redundant reports
+ // makes this a worthwhile heuristic.
+ // FIXME: Add a checker option to turn this uniqueing feature off.
+
+ const ExplodedNode *StreamOpenNode = getAcquisitionSite(N, Sym, C);
+ assert(StreamOpenNode && "Could not find place of stream opening.");
+ PathDiagnosticLocation LocUsedForUniqueing =
+ PathDiagnosticLocation::createBegin(
+ StreamOpenNode->getStmtForDiagnostics(), C.getSourceManager(),
+ StreamOpenNode->getLocationContext());
+
+ std::unique_ptr<PathSensitiveBugReport> R =
+ std::make_unique<PathSensitiveBugReport>(
+ BT_ResourceLeak,
+ "Opened stream never closed. Potential resource leak.", N,
+ LocUsedForUniqueing,
+ StreamOpenNode->getLocationContext()->getDecl());
+ R->markInteresting(Sym);
+ C.emitReport(std::move(R));
+ }
+}
+
+ProgramStateRef StreamChecker::checkPointerEscape(
+ ProgramStateRef State, const InvalidatedSymbols &Escaped,
+ const CallEvent *Call, PointerEscapeKind Kind) const {
+ // Check for file-handling system call that is not handled by the checker.
+ // FIXME: The checker should be updated to handle all system calls that take
+ // 'FILE*' argument. These are now ignored.
+ if (Kind == PSK_DirectEscapeOnCall && Call->isInSystemHeader())
+ return State;
+
+ for (SymbolRef Sym : Escaped) {
+ // The symbol escaped.
+ // From now the stream can be manipulated in unknown way to the checker,
+ // it is not possible to handle it any more.
+ // Optimistically, assume that the corresponding file handle will be closed
+ // somewhere else.
+ // Remove symbol from state so the following stream calls on this symbol are
+ // not handled by the checker.
+ State = State->remove<StreamMap>(Sym);
}
+ return State;
+}
+
+void ento::registerStreamChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<StreamChecker>();
+}
+
+bool ento::shouldRegisterStreamChecker(const CheckerManager &Mgr) {
+ return true;
}
-void ento::registerStreamChecker(CheckerManager &mgr) {
- mgr.registerChecker<StreamChecker>();
+void ento::registerStreamTesterChecker(CheckerManager &Mgr) {
+ auto *Checker = Mgr.getChecker<StreamChecker>();
+ Checker->TestMode = true;
}
-bool ento::shouldRegisterStreamChecker(const LangOptions &LO) {
+bool ento::shouldRegisterStreamTesterChecker(const CheckerManager &Mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
index f81705304f3a..916977c10c0c 100644
--- a/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
@@ -63,6 +63,6 @@ void ento::registerTaintTesterChecker(CheckerManager &mgr) {
mgr.registerChecker<TaintTesterChecker>();
}
-bool ento::shouldRegisterTaintTesterChecker(const LangOptions &LO) {
+bool ento::shouldRegisterTaintTesterChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
index 3663b0963692..eeec807ccee4 100644
--- a/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
@@ -261,6 +261,6 @@ void ento::registerTestAfterDivZeroChecker(CheckerManager &mgr) {
mgr.registerChecker<TestAfterDivZeroChecker>();
}
-bool ento::shouldRegisterTestAfterDivZeroChecker(const LangOptions &LO) {
+bool ento::shouldRegisterTestAfterDivZeroChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp
index 73183aa468f6..2f316bd3b20d 100644
--- a/clang/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp
@@ -64,7 +64,7 @@ void ento::registerTraversalDumper(CheckerManager &mgr) {
mgr.registerChecker<TraversalDumper>();
}
-bool ento::shouldRegisterTraversalDumper(const LangOptions &LO) {
+bool ento::shouldRegisterTraversalDumper(const CheckerManager &mgr) {
return true;
}
@@ -116,6 +116,6 @@ void ento::registerCallDumper(CheckerManager &mgr) {
mgr.registerChecker<CallDumper>();
}
-bool ento::shouldRegisterCallDumper(const LangOptions &LO) {
+bool ento::shouldRegisterCallDumper(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp
index 62a4c2ab0209..5cc713172527 100644
--- a/clang/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp
@@ -252,6 +252,6 @@ void ento::registerTrustNonnullChecker(CheckerManager &Mgr) {
Mgr.registerChecker<TrustNonnullChecker>(Mgr.getASTContext());
}
-bool ento::shouldRegisterTrustNonnullChecker(const LangOptions &LO) {
+bool ento::shouldRegisterTrustNonnullChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
index 247cba7dc933..3e0caaf79ca0 100644
--- a/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
@@ -110,6 +110,6 @@ void ento::registerUndefBranchChecker(CheckerManager &mgr) {
mgr.registerChecker<UndefBranchChecker>();
}
-bool ento::shouldRegisterUndefBranchChecker(const LangOptions &LO) {
+bool ento::shouldRegisterUndefBranchChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
index 7b581bef3900..e457513d8de4 100644
--- a/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
@@ -101,6 +101,6 @@ void ento::registerUndefCapturedBlockVarChecker(CheckerManager &mgr) {
mgr.registerChecker<UndefCapturedBlockVarChecker>();
}
-bool ento::shouldRegisterUndefCapturedBlockVarChecker(const LangOptions &LO) {
+bool ento::shouldRegisterUndefCapturedBlockVarChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
index a2f3e0da13fb..392da4818098 100644
--- a/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
@@ -16,6 +16,7 @@
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
@@ -50,10 +51,10 @@ static bool isArrayIndexOutOfBounds(CheckerContext &C, const Expr *Ex) {
return false;
DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
- DefinedOrUnknownSVal NumElements = C.getStoreManager().getSizeInElements(
- state, ER->getSuperRegion(), ER->getValueType());
- ProgramStateRef StInBound = state->assumeInBound(Idx, NumElements, true);
- ProgramStateRef StOutBound = state->assumeInBound(Idx, NumElements, false);
+ DefinedOrUnknownSVal ElementCount = getDynamicElementCount(
+ state, ER->getSuperRegion(), C.getSValBuilder(), ER->getValueType());
+ ProgramStateRef StInBound = state->assumeInBound(Idx, ElementCount, true);
+ ProgramStateRef StOutBound = state->assumeInBound(Idx, ElementCount, false);
return StOutBound && !StInBound;
}
@@ -186,6 +187,6 @@ void ento::registerUndefResultChecker(CheckerManager &mgr) {
mgr.registerChecker<UndefResultChecker>();
}
-bool ento::shouldRegisterUndefResultChecker(const LangOptions &LO) {
+bool ento::shouldRegisterUndefResultChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
index 2f075eaeb03b..fdefe75e8201 100644
--- a/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
@@ -62,6 +62,6 @@ void ento::registerUndefinedArraySubscriptChecker(CheckerManager &mgr) {
mgr.registerChecker<UndefinedArraySubscriptChecker>();
}
-bool ento::shouldRegisterUndefinedArraySubscriptChecker(const LangOptions &LO) {
+bool ento::shouldRegisterUndefinedArraySubscriptChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
index 277a8a143328..05f8f6084c0b 100644
--- a/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
@@ -120,6 +120,6 @@ void ento::registerUndefinedAssignmentChecker(CheckerManager &mgr) {
mgr.registerChecker<UndefinedAssignmentChecker>();
}
-bool ento::shouldRegisterUndefinedAssignmentChecker(const LangOptions &LO) {
+bool ento::shouldRegisterUndefinedAssignmentChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp
index 020df8a1bb8c..4182b51c02b0 100644
--- a/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp
@@ -541,14 +541,11 @@ static bool hasUnguardedAccess(const FieldDecl *FD, ProgramStateRef State) {
auto FieldAccessM = memberExpr(hasDeclaration(equalsNode(FD))).bind("access");
auto AssertLikeM = callExpr(callee(functionDecl(
- anyOf(hasName("exit"), hasName("panic"), hasName("error"),
- hasName("Assert"), hasName("assert"), hasName("ziperr"),
- hasName("assfail"), hasName("db_error"), hasName("__assert"),
- hasName("__assert2"), hasName("_wassert"), hasName("__assert_rtn"),
- hasName("__assert_fail"), hasName("dtrace_assfail"),
- hasName("yy_fatal_error"), hasName("_XCAssertionFailureHandler"),
- hasName("_DTAssertionFailureHandler"),
- hasName("_TSAssertionFailureHandler")))));
+ hasAnyName("exit", "panic", "error", "Assert", "assert", "ziperr",
+ "assfail", "db_error", "__assert", "__assert2", "_wassert",
+ "__assert_rtn", "__assert_fail", "dtrace_assfail",
+ "yy_fatal_error", "_XCAssertionFailureHandler",
+ "_DTAssertionFailureHandler", "_TSAssertionFailureHandler"))));
auto NoReturnFuncM = callExpr(callee(functionDecl(isNoReturn())));
@@ -602,13 +599,13 @@ std::string clang::ento::getVariableName(const FieldDecl *Field) {
llvm_unreachable("No other capture type is expected!");
}
- return Field->getName();
+ return std::string(Field->getName());
}
void ento::registerUninitializedObjectChecker(CheckerManager &Mgr) {
auto Chk = Mgr.registerChecker<UninitializedObjectChecker>();
- AnalyzerOptions &AnOpts = Mgr.getAnalyzerOptions();
+ const AnalyzerOptions &AnOpts = Mgr.getAnalyzerOptions();
UninitObjCheckerOptions &ChOpts = Chk->Opts;
ChOpts.IsPedantic = AnOpts.getCheckerBooleanOption(Chk, "Pedantic");
@@ -617,7 +614,7 @@ void ento::registerUninitializedObjectChecker(CheckerManager &Mgr) {
ChOpts.CheckPointeeInitialization = AnOpts.getCheckerBooleanOption(
Chk, "CheckPointeeInitialization");
ChOpts.IgnoredRecordsWithFieldPattern =
- AnOpts.getCheckerStringOption(Chk, "IgnoreRecordsWithField");
+ std::string(AnOpts.getCheckerStringOption(Chk, "IgnoreRecordsWithField"));
ChOpts.IgnoreGuardedFields =
AnOpts.getCheckerBooleanOption(Chk, "IgnoreGuardedFields");
@@ -628,6 +625,6 @@ void ento::registerUninitializedObjectChecker(CheckerManager &Mgr) {
"\"" + ErrorMsg + "\"");
}
-bool ento::shouldRegisterUninitializedObjectChecker(const LangOptions &LO) {
+bool ento::shouldRegisterUninitializedObjectChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
index f4e225d836f3..381334de068e 100644
--- a/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
@@ -20,6 +20,7 @@
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -503,7 +504,7 @@ void UnixAPIPortabilityChecker::checkPreStmt(const CallExpr *CE,
mgr.registerChecker<CHECKERNAME>(); \
} \
\
- bool ento::shouldRegister##CHECKERNAME(const LangOptions &LO) { \
+ bool ento::shouldRegister##CHECKERNAME(const CheckerManager &mgr) { \
return true; \
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
index 65dd82675df9..74eec81ffb3e 100644
--- a/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
@@ -257,6 +257,6 @@ void ento::registerUnreachableCodeChecker(CheckerManager &mgr) {
mgr.registerChecker<UnreachableCodeChecker>();
}
-bool ento::shouldRegisterUnreachableCodeChecker(const LangOptions &LO) {
+bool ento::shouldRegisterUnreachableCodeChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
index b92757312dc6..d76b2a06aba5 100644
--- a/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
@@ -14,12 +14,13 @@
//===----------------------------------------------------------------------===//
#include "Taint.h"
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/CharUnits.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
@@ -29,9 +30,28 @@ using namespace ento;
using namespace taint;
namespace {
-class VLASizeChecker : public Checker< check::PreStmt<DeclStmt> > {
+class VLASizeChecker
+ : public Checker<check::PreStmt<DeclStmt>,
+ check::PreStmt<UnaryExprOrTypeTraitExpr>> {
mutable std::unique_ptr<BugType> BT;
- enum VLASize_Kind { VLA_Garbage, VLA_Zero, VLA_Tainted, VLA_Negative };
+ enum VLASize_Kind {
+ VLA_Garbage,
+ VLA_Zero,
+ VLA_Tainted,
+ VLA_Negative,
+ VLA_Overflow
+ };
+
+ /// Check a VLA for validity.
+ /// Every dimension of the array and the total size is checked for validity.
+ /// Returns null or a new state where the size is validated.
+ /// 'ArraySize' will contain SVal that refers to the total size (in char)
+ /// of the array.
+ ProgramStateRef checkVLA(CheckerContext &C, ProgramStateRef State,
+ const VariableArrayType *VLA, SVal &ArraySize) const;
+ /// Check a single VLA index size expression for validity.
+ ProgramStateRef checkVLAIndexSize(CheckerContext &C, ProgramStateRef State,
+ const Expr *SizeE) const;
void reportBug(VLASize_Kind Kind, const Expr *SizeE, ProgramStateRef State,
CheckerContext &C,
@@ -39,9 +59,155 @@ class VLASizeChecker : public Checker< check::PreStmt<DeclStmt> > {
public:
void checkPreStmt(const DeclStmt *DS, CheckerContext &C) const;
+ void checkPreStmt(const UnaryExprOrTypeTraitExpr *UETTE,
+ CheckerContext &C) const;
};
} // end anonymous namespace
+ProgramStateRef VLASizeChecker::checkVLA(CheckerContext &C,
+ ProgramStateRef State,
+ const VariableArrayType *VLA,
+ SVal &ArraySize) const {
+ assert(VLA && "Function should be called with non-null VLA argument.");
+
+ const VariableArrayType *VLALast = nullptr;
+ llvm::SmallVector<const Expr *, 2> VLASizes;
+
+ // Walk over the VLAs for every dimension until a non-VLA is found.
+ // There is a VariableArrayType for every dimension (fixed or variable) until
+ // the innermost array that is variably modified.
+ // Dimension sizes are collected into 'VLASizes'. 'VLALast' is set to the
+ // innermost VLA that was encountered.
+ // In "int vla[x][2][y][3]" this will be the array for index "y" (with type
+ // int[3]). 'VLASizes' contains 'x', '2', and 'y'.
+ while (VLA) {
+ const Expr *SizeE = VLA->getSizeExpr();
+ State = checkVLAIndexSize(C, State, SizeE);
+ if (!State)
+ return nullptr;
+ VLASizes.push_back(SizeE);
+ VLALast = VLA;
+ VLA = C.getASTContext().getAsVariableArrayType(VLA->getElementType());
+ };
+ assert(VLALast &&
+ "Array should have at least one variably-modified dimension.");
+
+ ASTContext &Ctx = C.getASTContext();
+ SValBuilder &SVB = C.getSValBuilder();
+ CanQualType SizeTy = Ctx.getSizeType();
+ uint64_t SizeMax =
+ SVB.getBasicValueFactory().getMaxValue(SizeTy).getZExtValue();
+
+ // Get the element size.
+ CharUnits EleSize = Ctx.getTypeSizeInChars(VLALast->getElementType());
+ NonLoc ArrSize =
+ SVB.makeIntVal(EleSize.getQuantity(), SizeTy).castAs<NonLoc>();
+
+ // Try to calculate the known real size of the array in KnownSize.
+ uint64_t KnownSize = 0;
+ if (const llvm::APSInt *KV = SVB.getKnownValue(State, ArrSize))
+ KnownSize = KV->getZExtValue();
+
+ for (const Expr *SizeE : VLASizes) {
+ auto SizeD = C.getSVal(SizeE).castAs<DefinedSVal>();
+ // Convert the array length to size_t.
+ NonLoc IndexLength =
+ SVB.evalCast(SizeD, SizeTy, SizeE->getType()).castAs<NonLoc>();
+ // Multiply the array length by the element size.
+ SVal Mul = SVB.evalBinOpNN(State, BO_Mul, ArrSize, IndexLength, SizeTy);
+ if (auto MulNonLoc = Mul.getAs<NonLoc>())
+ ArrSize = *MulNonLoc;
+ else
+ // Extent could not be determined.
+ return State;
+
+ if (const llvm::APSInt *IndexLVal = SVB.getKnownValue(State, IndexLength)) {
+ // Check if the array size will overflow.
+ // Size overflow check does not work with symbolic expressions because an
+ // overflow situation cannot be detected easily.
+ uint64_t IndexL = IndexLVal->getZExtValue();
+ // FIXME: See https://reviews.llvm.org/D80903 for discussion of
+ // some difference in assume and getKnownValue that leads to
+ // unexpected behavior. Just bail on IndexL == 0 at this point.
+ if (IndexL == 0)
+ return nullptr;
+
+ if (KnownSize <= SizeMax / IndexL) {
+ KnownSize *= IndexL;
+ } else {
+ // Array size does not fit into size_t.
+ reportBug(VLA_Overflow, SizeE, State, C);
+ return nullptr;
+ }
+ } else {
+ KnownSize = 0;
+ }
+ }
+
+ ArraySize = ArrSize;
+
+ return State;
+}
+
+ProgramStateRef VLASizeChecker::checkVLAIndexSize(CheckerContext &C,
+ ProgramStateRef State,
+ const Expr *SizeE) const {
+ SVal SizeV = C.getSVal(SizeE);
+
+ if (SizeV.isUndef()) {
+ reportBug(VLA_Garbage, SizeE, State, C);
+ return nullptr;
+ }
+
+ // See if the size value is known. It can't be undefined because we would have
+ // warned about that already.
+ if (SizeV.isUnknown())
+ return nullptr;
+
+ // Check if the size is tainted.
+ if (isTainted(State, SizeV)) {
+ reportBug(VLA_Tainted, SizeE, nullptr, C,
+ std::make_unique<TaintBugVisitor>(SizeV));
+ return nullptr;
+ }
+
+ // Check if the size is zero.
+ DefinedSVal SizeD = SizeV.castAs<DefinedSVal>();
+
+ ProgramStateRef StateNotZero, StateZero;
+ std::tie(StateNotZero, StateZero) = State->assume(SizeD);
+
+ if (StateZero && !StateNotZero) {
+ reportBug(VLA_Zero, SizeE, StateZero, C);
+ return nullptr;
+ }
+
+ // From this point on, assume that the size is not zero.
+ State = StateNotZero;
+
+ // Check if the size is negative.
+ SValBuilder &SVB = C.getSValBuilder();
+
+ QualType SizeTy = SizeE->getType();
+ DefinedOrUnknownSVal Zero = SVB.makeZeroVal(SizeTy);
+
+ SVal LessThanZeroVal = SVB.evalBinOp(State, BO_LT, SizeD, Zero, SizeTy);
+ if (Optional<DefinedSVal> LessThanZeroDVal =
+ LessThanZeroVal.getAs<DefinedSVal>()) {
+ ConstraintManager &CM = C.getConstraintManager();
+ ProgramStateRef StatePos, StateNeg;
+
+ std::tie(StateNeg, StatePos) = CM.assumeDual(State, *LessThanZeroDVal);
+ if (StateNeg && !StatePos) {
+ reportBug(VLA_Negative, SizeE, State, C);
+ return nullptr;
+ }
+ State = StatePos;
+ }
+
+ return State;
+}
+
void VLASizeChecker::reportBug(
VLASize_Kind Kind, const Expr *SizeE, ProgramStateRef State,
CheckerContext &C, std::unique_ptr<BugReporterVisitor> Visitor) const {
@@ -70,6 +236,9 @@ void VLASizeChecker::reportBug(
case VLA_Negative:
os << "has negative size";
break;
+ case VLA_Overflow:
+ os << "has too large size";
+ break;
}
auto report = std::make_unique<PathSensitiveBugReport>(*BT, os.str(), N);
@@ -83,108 +252,89 @@ void VLASizeChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
if (!DS->isSingleDecl())
return;
- const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl());
- if (!VD)
- return;
-
ASTContext &Ctx = C.getASTContext();
- const VariableArrayType *VLA = Ctx.getAsVariableArrayType(VD->getType());
- if (!VLA)
- return;
+ SValBuilder &SVB = C.getSValBuilder();
+ ProgramStateRef State = C.getState();
+ QualType TypeToCheck;
- // FIXME: Handle multi-dimensional VLAs.
- const Expr *SE = VLA->getSizeExpr();
- ProgramStateRef state = C.getState();
- SVal sizeV = C.getSVal(SE);
+ const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl());
- if (sizeV.isUndef()) {
- reportBug(VLA_Garbage, SE, state, C);
+ if (VD)
+ TypeToCheck = VD->getType().getCanonicalType();
+ else if (const auto *TND = dyn_cast<TypedefNameDecl>(DS->getSingleDecl()))
+ TypeToCheck = TND->getUnderlyingType().getCanonicalType();
+ else
return;
- }
- // See if the size value is known. It can't be undefined because we would have
- // warned about that already.
- if (sizeV.isUnknown())
+ const VariableArrayType *VLA = Ctx.getAsVariableArrayType(TypeToCheck);
+ if (!VLA)
return;
- // Check if the size is tainted.
- if (isTainted(state, sizeV)) {
- reportBug(VLA_Tainted, SE, nullptr, C,
- std::make_unique<TaintBugVisitor>(sizeV));
- return;
- }
+ // Check the VLA sizes for validity.
- // Check if the size is zero.
- DefinedSVal sizeD = sizeV.castAs<DefinedSVal>();
+ SVal ArraySize;
- ProgramStateRef stateNotZero, stateZero;
- std::tie(stateNotZero, stateZero) = state->assume(sizeD);
+ State = checkVLA(C, State, VLA, ArraySize);
+ if (!State)
+ return;
- if (stateZero && !stateNotZero) {
- reportBug(VLA_Zero, SE, stateZero, C);
+ auto ArraySizeNL = ArraySize.getAs<NonLoc>();
+ if (!ArraySizeNL) {
+ // Array size could not be determined but state may contain new assumptions.
+ C.addTransition(State);
return;
}
- // From this point on, assume that the size is not zero.
- state = stateNotZero;
-
// VLASizeChecker is responsible for defining the extent of the array being
// declared. We do this by multiplying the array length by the element size,
// then matching that with the array region's extent symbol.
- // Check if the size is negative.
- SValBuilder &svalBuilder = C.getSValBuilder();
+ if (VD) {
+ // Assume that the array's size matches the region size.
+ const LocationContext *LC = C.getLocationContext();
+ DefinedOrUnknownSVal DynSize =
+ getDynamicSize(State, State->getRegion(VD, LC), SVB);
- QualType Ty = SE->getType();
- DefinedOrUnknownSVal Zero = svalBuilder.makeZeroVal(Ty);
+ DefinedOrUnknownSVal SizeIsKnown = SVB.evalEQ(State, DynSize, *ArraySizeNL);
+ State = State->assume(SizeIsKnown, true);
- SVal LessThanZeroVal = svalBuilder.evalBinOp(state, BO_LT, sizeD, Zero, Ty);
- if (Optional<DefinedSVal> LessThanZeroDVal =
- LessThanZeroVal.getAs<DefinedSVal>()) {
- ConstraintManager &CM = C.getConstraintManager();
- ProgramStateRef StatePos, StateNeg;
-
- std::tie(StateNeg, StatePos) = CM.assumeDual(state, *LessThanZeroDVal);
- if (StateNeg && !StatePos) {
- reportBug(VLA_Negative, SE, state, C);
- return;
- }
- state = StatePos;
+ // Assume should not fail at this point.
+ assert(State);
}
- // Convert the array length to size_t.
- QualType SizeTy = Ctx.getSizeType();
- NonLoc ArrayLength =
- svalBuilder.evalCast(sizeD, SizeTy, SE->getType()).castAs<NonLoc>();
+ // Remember our assumptions!
+ C.addTransition(State);
+}
- // Get the element size.
- CharUnits EleSize = Ctx.getTypeSizeInChars(VLA->getElementType());
- SVal EleSizeVal = svalBuilder.makeIntVal(EleSize.getQuantity(), SizeTy);
+void VLASizeChecker::checkPreStmt(const UnaryExprOrTypeTraitExpr *UETTE,
+ CheckerContext &C) const {
+ // Want to check for sizeof.
+ if (UETTE->getKind() != UETT_SizeOf)
+ return;
- // Multiply the array length by the element size.
- SVal ArraySizeVal = svalBuilder.evalBinOpNN(
- state, BO_Mul, ArrayLength, EleSizeVal.castAs<NonLoc>(), SizeTy);
+ // Ensure a type argument.
+ if (!UETTE->isArgumentType())
+ return;
- // Finally, assume that the array's extent matches the given size.
- const LocationContext *LC = C.getLocationContext();
- DefinedOrUnknownSVal Extent =
- state->getRegion(VD, LC)->getExtent(svalBuilder);
- DefinedOrUnknownSVal ArraySize = ArraySizeVal.castAs<DefinedOrUnknownSVal>();
- DefinedOrUnknownSVal sizeIsKnown =
- svalBuilder.evalEQ(state, Extent, ArraySize);
- state = state->assume(sizeIsKnown, true);
+ const VariableArrayType *VLA = C.getASTContext().getAsVariableArrayType(
+ UETTE->getTypeOfArgument().getCanonicalType());
+ // Ensure that the type is a VLA.
+ if (!VLA)
+ return;
- // Assume should not fail at this point.
- assert(state);
+ ProgramStateRef State = C.getState();
+ SVal ArraySize;
+ State = checkVLA(C, State, VLA, ArraySize);
+ if (!State)
+ return;
- // Remember our assumptions!
- C.addTransition(state);
+ C.addTransition(State);
}
void ento::registerVLASizeChecker(CheckerManager &mgr) {
mgr.registerChecker<VLASizeChecker>();
}
-bool ento::shouldRegisterVLASizeChecker(const LangOptions &LO) {
+bool ento::shouldRegisterVLASizeChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
index a3610514a924..dde5912b6d6e 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
@@ -404,7 +404,7 @@ void ento::registerValistBase(CheckerManager &mgr) {
mgr.registerChecker<ValistChecker>();
}
-bool ento::shouldRegisterValistBase(const LangOptions &LO) {
+bool ento::shouldRegisterValistBase(const CheckerManager &mgr) {
return true;
}
@@ -416,7 +416,7 @@ bool ento::shouldRegisterValistBase(const LangOptions &LO) {
mgr.getCurrentCheckerName(); \
} \
\
- bool ento::shouldRegister##name##Checker(const LangOptions &LO) { \
+ bool ento::shouldRegister##name##Checker(const CheckerManager &mgr) { \
return true; \
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
index 6724eead5072..8f147026ae19 100644
--- a/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
@@ -98,12 +98,13 @@ bool VforkChecker::isCallWhitelisted(const IdentifierInfo *II,
if (VforkWhitelist.empty()) {
// According to manpage.
const char *ids[] = {
- "_exit",
"_Exit",
+ "_exit",
"execl",
- "execlp",
"execle",
+ "execlp",
"execv",
+ "execve",
"execvp",
"execvpe",
nullptr
@@ -216,6 +217,6 @@ void ento::registerVforkChecker(CheckerManager &mgr) {
mgr.registerChecker<VforkChecker>();
}
-bool ento::shouldRegisterVforkChecker(const LangOptions &LO) {
+bool ento::shouldRegisterVforkChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
index fd93fc33115f..f49ee5fa5ad3 100644
--- a/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
@@ -224,14 +224,17 @@ void ento::registerVirtualCallChecker(CheckerManager &Mgr) {
}
}
-bool ento::shouldRegisterVirtualCallModeling(const LangOptions &LO) {
+bool ento::shouldRegisterVirtualCallModeling(const CheckerManager &mgr) {
+ const LangOptions &LO = mgr.getLangOpts();
return LO.CPlusPlus;
}
-bool ento::shouldRegisterPureVirtualCallChecker(const LangOptions &LO) {
+bool ento::shouldRegisterPureVirtualCallChecker(const CheckerManager &mgr) {
+ const LangOptions &LO = mgr.getLangOpts();
return LO.CPlusPlus;
}
-bool ento::shouldRegisterVirtualCallChecker(const LangOptions &LO) {
+bool ento::shouldRegisterVirtualCallChecker(const CheckerManager &mgr) {
+ const LangOptions &LO = mgr.getLangOpts();
return LO.CPlusPlus;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp
new file mode 100644
index 000000000000..34c072ac2241
--- /dev/null
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp
@@ -0,0 +1,93 @@
+//=======- ASTUtils.cpp ------------------------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ASTUtils.h"
+#include "PtrTypesSemantics.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprCXX.h"
+
+using llvm::Optional;
+namespace clang {
+
+std::pair<const Expr *, bool>
+tryToFindPtrOrigin(const Expr *E, bool StopAtFirstRefCountedObj) {
+ while (E) {
+ if (auto *cast = dyn_cast<CastExpr>(E)) {
+ if (StopAtFirstRefCountedObj) {
+ if (auto *ConversionFunc =
+ dyn_cast_or_null<FunctionDecl>(cast->getConversionFunction())) {
+ if (isCtorOfRefCounted(ConversionFunc))
+ return {E, true};
+ }
+ }
+ // FIXME: This can give false "origin" that would lead to false negatives
+ // in checkers. See https://reviews.llvm.org/D37023 for reference.
+ E = cast->getSubExpr();
+ continue;
+ }
+ if (auto *call = dyn_cast<CallExpr>(E)) {
+ if (auto *memberCall = dyn_cast<CXXMemberCallExpr>(call)) {
+ if (isGetterOfRefCounted(memberCall->getMethodDecl())) {
+ E = memberCall->getImplicitObjectArgument();
+ if (StopAtFirstRefCountedObj) {
+ return {E, true};
+ }
+ continue;
+ }
+ }
+
+ if (auto *operatorCall = dyn_cast<CXXOperatorCallExpr>(E)) {
+ if (operatorCall->getNumArgs() == 1) {
+ E = operatorCall->getArg(0);
+ continue;
+ }
+ }
+
+ if (auto *callee = call->getDirectCallee()) {
+ if (isCtorOfRefCounted(callee)) {
+ if (StopAtFirstRefCountedObj)
+ return {E, true};
+
+ E = call->getArg(0);
+ continue;
+ }
+
+ if (isPtrConversion(callee)) {
+ E = call->getArg(0);
+ continue;
+ }
+ }
+ }
+ if (auto *unaryOp = dyn_cast<UnaryOperator>(E)) {
+ // FIXME: Currently accepts ANY unary operator. Is it OK?
+ E = unaryOp->getSubExpr();
+ continue;
+ }
+
+ break;
+ }
+ // Some other expression.
+ return {E, false};
+}
+
+bool isASafeCallArg(const Expr *E) {
+ assert(E);
+ if (auto *Ref = dyn_cast<DeclRefExpr>(E)) {
+ if (auto *D = dyn_cast_or_null<VarDecl>(Ref->getFoundDecl())) {
+ if (isa<ParmVarDecl>(D) || D->isLocalVarDecl())
+ return true;
+ }
+ }
+
+ // TODO: checker for method calls on non-refcounted objects
+ return isa<CXXThisExpr>(E);
+}
+
+} // namespace clang
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.h b/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.h
new file mode 100644
index 000000000000..ed4577755457
--- /dev/null
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.h
@@ -0,0 +1,84 @@
+//=======- ASTUtils.h --------------------------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYZER_WEBKIT_ASTUTILS_H
+#define LLVM_CLANG_ANALYZER_WEBKIT_ASTUTILS_H
+
+#include "clang/AST/Decl.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/Support/Casting.h"
+
+#include <string>
+#include <utility>
+
+namespace clang {
+class CXXRecordDecl;
+class CXXBaseSpecifier;
+class FunctionDecl;
+class CXXMethodDecl;
+class Expr;
+
+/// This function de-facto defines a set of transformations that we consider
+/// safe (in a heuristic sense). These transformations, if passed a safe value
+/// as an input, should provide a safe value (or an object that provides safe
+/// values).
+///
+/// For more context see Static Analyzer checkers documentation - specifically
+/// webkit.UncountedCallArgsChecker checker. Whitelist of transformations:
+/// - constructors of ref-counted types (including factory methods)
+/// - getters of ref-counted types
+/// - member overloaded operators
+/// - casts
+/// - unary operators like ``&`` or ``*``
+///
+/// If passed expression is of type uncounted pointer/reference we try to find
+/// the "origin" of the pointer value.
+/// Origin can be for example a local variable, nullptr, constant or
+/// this-pointer.
+///
+/// Certain subexpression nodes represent transformations that don't affect
+/// where the memory address originates from. We try to traverse such
+/// subexpressions to get to the relevant child nodes. Whenever we encounter a
+/// subexpression that either can't be ignored, we don't model its semantics or
+/// that has multiple children we stop.
+///
+/// \p E is an expression of uncounted pointer/reference type.
+/// If \p StopAtFirstRefCountedObj is true and we encounter a subexpression that
+/// represents ref-counted object during the traversal we return relevant
+/// sub-expression and true.
+///
+/// \returns subexpression that we traversed to and if \p
+/// StopAtFirstRefCountedObj is true we also return whether we stopped early.
+std::pair<const clang::Expr *, bool>
+tryToFindPtrOrigin(const clang::Expr *E, bool StopAtFirstRefCountedObj);
+
+/// For \p E referring to a ref-countable/-counted pointer/reference we return
+/// whether it's a safe call argument. Examples: function parameter or
+/// this-pointer. The logic relies on the set of recursive rules we enforce for
+/// WebKit codebase.
+///
+/// \returns Whether \p E is a safe call argument.
+bool isASafeCallArg(const clang::Expr *E);
+
+/// \returns name of AST node or empty string.
+template <typename T> std::string safeGetName(const T *ASTNode) {
+ const auto *const ND = llvm::dyn_cast_or_null<clang::NamedDecl>(ASTNode);
+ if (!ND)
+ return "";
+
+ // In case F is for example "operator|" the getName() method below would
+ // assert.
+ if (!ND->getDeclName().isIdentifier())
+ return "";
+
+ return ND->getName().str();
+}
+
+} // namespace clang
+
+#endif
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/DiagOutputUtils.h b/clang/lib/StaticAnalyzer/Checkers/WebKit/DiagOutputUtils.h
new file mode 100644
index 000000000000..781a8d746001
--- /dev/null
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/DiagOutputUtils.h
@@ -0,0 +1,36 @@
+//=======- DiagOutputUtils.h -------------------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYZER_WEBKIT_DIAGPRINTUTILS_H
+#define LLVM_CLANG_ANALYZER_WEBKIT_DIAGPRINTUTILS_H
+
+#include "clang/AST/Decl.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+
+template <typename NamedDeclDerivedT>
+void printQuotedQualifiedName(llvm::raw_ostream &Os,
+ const NamedDeclDerivedT &D) {
+ Os << "'";
+ D->getNameForDiagnostic(Os, D->getASTContext().getPrintingPolicy(),
+ /*Qualified=*/true);
+ Os << "'";
+}
+
+template <typename NamedDeclDerivedT>
+void printQuotedName(llvm::raw_ostream &Os, const NamedDeclDerivedT &D) {
+ Os << "'";
+ D->getNameForDiagnostic(Os, D->getASTContext().getPrintingPolicy(),
+ /*Qualified=*/false);
+ Os << "'";
+}
+
+} // namespace clang
+
+#endif
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/NoUncountedMembersChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/NoUncountedMembersChecker.cpp
new file mode 100644
index 000000000000..3956db933b35
--- /dev/null
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/NoUncountedMembersChecker.cpp
@@ -0,0 +1,155 @@
+//=======- NoUncountedMembersChecker.cpp -------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ASTUtils.h"
+#include "DiagOutputUtils.h"
+#include "PtrTypesSemantics.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/Support/Casting.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class NoUncountedMemberChecker
+ : public Checker<check::ASTDecl<TranslationUnitDecl>> {
+private:
+ BugType Bug;
+ mutable BugReporter *BR;
+
+public:
+ NoUncountedMemberChecker()
+ : Bug(this,
+ "Member variable is a raw-pointer/reference to reference-countable "
+ "type",
+ "WebKit coding guidelines") {}
+
+ void checkASTDecl(const TranslationUnitDecl *TUD, AnalysisManager &MGR,
+ BugReporter &BRArg) const {
+ BR = &BRArg;
+
+ // The calls to checkAST* from AnalysisConsumer don't
+ // visit template instantiations or lambda classes. We
+ // want to visit those, so we make our own RecursiveASTVisitor.
+ struct LocalVisitor : public RecursiveASTVisitor<LocalVisitor> {
+ const NoUncountedMemberChecker *Checker;
+ explicit LocalVisitor(const NoUncountedMemberChecker *Checker)
+ : Checker(Checker) {
+ assert(Checker);
+ }
+
+ bool shouldVisitTemplateInstantiations() const { return true; }
+ bool shouldVisitImplicitCode() const { return false; }
+
+ bool VisitRecordDecl(const RecordDecl *RD) {
+ Checker->visitRecordDecl(RD);
+ return true;
+ }
+ };
+
+ LocalVisitor visitor(this);
+ visitor.TraverseDecl(const_cast<TranslationUnitDecl *>(TUD));
+ }
+
+ void visitRecordDecl(const RecordDecl *RD) const {
+ if (shouldSkipDecl(RD))
+ return;
+
+ for (auto Member : RD->fields()) {
+ const Type *MemberType = Member->getType().getTypePtrOrNull();
+ if (!MemberType)
+ continue;
+
+ if (auto *MemberCXXRD = MemberType->getPointeeCXXRecordDecl()) {
+ // If we don't see the definition we just don't know.
+ if (MemberCXXRD->hasDefinition() && isRefCountable(MemberCXXRD))
+ reportBug(Member, MemberType, MemberCXXRD, RD);
+ }
+ }
+ }
+
+ bool shouldSkipDecl(const RecordDecl *RD) const {
+ if (!RD->isThisDeclarationADefinition())
+ return true;
+
+ if (RD->isImplicit())
+ return true;
+
+ if (RD->isLambda())
+ return true;
+
+ // If the construct doesn't have a source file, then it's not something
+ // we want to diagnose.
+ const auto RDLocation = RD->getLocation();
+ if (!RDLocation.isValid())
+ return true;
+
+ const auto Kind = RD->getTagKind();
+ // FIXME: Should we check union members too?
+ if (Kind != TTK_Struct && Kind != TTK_Class)
+ return true;
+
+ // Ignore CXXRecords that come from system headers.
+ if (BR->getSourceManager().isInSystemHeader(RDLocation))
+ return true;
+
+ // Ref-counted smartpointers actually have raw-pointer to uncounted type as
+ // a member but we trust them to handle it correctly.
+ auto CXXRD = llvm::dyn_cast_or_null<CXXRecordDecl>(RD);
+ if (CXXRD)
+ return isRefCounted(CXXRD);
+
+ return false;
+ }
+
+ void reportBug(const FieldDecl *Member, const Type *MemberType,
+ const CXXRecordDecl *MemberCXXRD,
+ const RecordDecl *ClassCXXRD) const {
+ assert(Member);
+ assert(MemberType);
+ assert(MemberCXXRD);
+
+ SmallString<100> Buf;
+ llvm::raw_svector_ostream Os(Buf);
+
+ Os << "Member variable ";
+ printQuotedName(Os, Member);
+ Os << " in ";
+ printQuotedQualifiedName(Os, ClassCXXRD);
+ Os << " is a "
+ << (isa<PointerType>(MemberType) ? "raw pointer" : "reference")
+ << " to ref-countable type ";
+ printQuotedQualifiedName(Os, MemberCXXRD);
+ Os << "; member variables must be ref-counted.";
+
+ PathDiagnosticLocation BSLoc(Member->getSourceRange().getBegin(),
+ BR->getSourceManager());
+ auto Report = std::make_unique<BasicBugReport>(Bug, Os.str(), BSLoc);
+ Report->addRange(Member->getSourceRange());
+ BR->emitReport(std::move(Report));
+ }
+};
+} // namespace
+
+void ento::registerNoUncountedMemberChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<NoUncountedMemberChecker>();
+}
+
+bool ento::shouldRegisterNoUncountedMemberChecker(
+ const CheckerManager &Mgr) {
+ return true;
+}
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
new file mode 100644
index 000000000000..168cfd511170
--- /dev/null
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
@@ -0,0 +1,172 @@
+//=======- PtrTypesSemantics.cpp ---------------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "PtrTypesSemantics.h"
+#include "ASTUtils.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprCXX.h"
+
+using llvm::Optional;
+using namespace clang;
+
+namespace {
+
+bool hasPublicRefAndDeref(const CXXRecordDecl *R) {
+ assert(R);
+
+ bool hasRef = false;
+ bool hasDeref = false;
+ for (const CXXMethodDecl *MD : R->methods()) {
+ const auto MethodName = safeGetName(MD);
+
+ if (MethodName == "ref" && MD->getAccess() == AS_public) {
+ if (hasDeref)
+ return true;
+ hasRef = true;
+ } else if (MethodName == "deref" && MD->getAccess() == AS_public) {
+ if (hasRef)
+ return true;
+ hasDeref = true;
+ }
+ }
+ return false;
+}
+
+} // namespace
+
+namespace clang {
+
+const CXXRecordDecl *isRefCountable(const CXXBaseSpecifier *Base) {
+ assert(Base);
+
+ const Type *T = Base->getType().getTypePtrOrNull();
+ if (!T)
+ return nullptr;
+
+ const CXXRecordDecl *R = T->getAsCXXRecordDecl();
+ if (!R)
+ return nullptr;
+
+ return hasPublicRefAndDeref(R) ? R : nullptr;
+}
+
+bool isRefCountable(const CXXRecordDecl *R) {
+ assert(R);
+
+ R = R->getDefinition();
+ assert(R);
+
+ if (hasPublicRefAndDeref(R))
+ return true;
+
+ CXXBasePaths Paths;
+ Paths.setOrigin(const_cast<CXXRecordDecl *>(R));
+
+ const auto isRefCountableBase = [](const CXXBaseSpecifier *Base,
+ CXXBasePath &) {
+ return clang::isRefCountable(Base);
+ };
+
+ return R->lookupInBases(isRefCountableBase, Paths,
+ /*LookupInDependent =*/true);
+}
+
+bool isCtorOfRefCounted(const clang::FunctionDecl *F) {
+ assert(F);
+ const auto &FunctionName = safeGetName(F);
+
+ return FunctionName == "Ref" || FunctionName == "makeRef"
+
+ || FunctionName == "RefPtr" || FunctionName == "makeRefPtr"
+
+ || FunctionName == "UniqueRef" || FunctionName == "makeUniqueRef" ||
+ FunctionName == "makeUniqueRefWithoutFastMallocCheck"
+
+ || FunctionName == "String" || FunctionName == "AtomString" ||
+ FunctionName == "UniqueString"
+ // FIXME: Implement as attribute.
+ || FunctionName == "Identifier";
+}
+
+bool isUncounted(const CXXRecordDecl *Class) {
+ // Keep isRefCounted first as it's cheaper.
+ return !isRefCounted(Class) && isRefCountable(Class);
+}
+
+bool isUncountedPtr(const Type *T) {
+ assert(T);
+
+ if (T->isPointerType() || T->isReferenceType()) {
+ if (auto *CXXRD = T->getPointeeCXXRecordDecl()) {
+ return isUncounted(CXXRD);
+ }
+ }
+ return false;
+}
+
+bool isGetterOfRefCounted(const CXXMethodDecl *M) {
+ assert(M);
+
+ if (isa<CXXMethodDecl>(M)) {
+ const CXXRecordDecl *calleeMethodsClass = M->getParent();
+ auto className = safeGetName(calleeMethodsClass);
+ auto methodName = safeGetName(M);
+
+ if (((className == "Ref" || className == "RefPtr") &&
+ methodName == "get") ||
+ ((className == "String" || className == "AtomString" ||
+ className == "AtomStringImpl" || className == "UniqueString" ||
+ className == "UniqueStringImpl" || className == "Identifier") &&
+ methodName == "impl"))
+ return true;
+
+ // Ref<T> -> T conversion
+ // FIXME: Currently allowing any Ref<T> -> whatever cast.
+ if (className == "Ref" || className == "RefPtr") {
+ if (auto *maybeRefToRawOperator = dyn_cast<CXXConversionDecl>(M)) {
+ if (auto *targetConversionType =
+ maybeRefToRawOperator->getConversionType().getTypePtrOrNull()) {
+ if (isUncountedPtr(targetConversionType)) {
+ return true;
+ }
+ }
+ }
+ }
+ }
+ return false;
+}
+
+bool isRefCounted(const CXXRecordDecl *R) {
+ assert(R);
+ if (auto *TmplR = R->getTemplateInstantiationPattern()) {
+ // FIXME: String/AtomString/UniqueString
+ const auto &ClassName = safeGetName(TmplR);
+ return ClassName == "RefPtr" || ClassName == "Ref";
+ }
+ return false;
+}
+
+bool isPtrConversion(const FunctionDecl *F) {
+ assert(F);
+ if (isCtorOfRefCounted(F))
+ return true;
+
+ // FIXME: check # of params == 1
+ const auto FunctionName = safeGetName(F);
+ if (FunctionName == "getPtr" || FunctionName == "WeakPtr" ||
+ FunctionName == "makeWeakPtr"
+
+ || FunctionName == "downcast" || FunctionName == "bitwise_cast")
+ return true;
+
+ return false;
+}
+
+} // namespace clang
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h b/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h
new file mode 100644
index 000000000000..83d9c0bcc13b
--- /dev/null
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h
@@ -0,0 +1,59 @@
+//=======- PtrTypesSemantics.h -----------------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYZER_WEBKIT_PTRTYPESEMANTICS_H
+#define LLVM_CLANG_ANALYZER_WEBKIT_PTRTYPESEMANTICS_H
+
+namespace clang {
+class CXXBaseSpecifier;
+class CXXMethodDecl;
+class CXXRecordDecl;
+class Expr;
+class FunctionDecl;
+class Type;
+
+// Ref-countability of a type is implicitly defined by Ref<T> and RefPtr<T>
+// implementation. It can be modeled as: type T having public methods ref() and
+// deref()
+
+// In WebKit there are two ref-counted templated smart pointers: RefPtr<T> and
+// Ref<T>.
+
+/// \returns CXXRecordDecl of the base if the type is ref-countable, nullptr if
+/// not.
+const clang::CXXRecordDecl *isRefCountable(const clang::CXXBaseSpecifier *Base);
+
+/// \returns true if \p Class is ref-countable, false if not.
+/// Asserts that \p Class IS a definition.
+bool isRefCountable(const clang::CXXRecordDecl *Class);
+
+/// \returns true if \p Class is ref-counted, false if not.
+bool isRefCounted(const clang::CXXRecordDecl *Class);
+
+/// \returns true if \p Class is ref-countable AND not ref-counted, false if
+/// not. Asserts that \p Class IS a definition.
+bool isUncounted(const clang::CXXRecordDecl *Class);
+
+/// \returns true if \p T is either a raw pointer or reference to an uncounted
+/// class, false if not.
+bool isUncountedPtr(const clang::Type *T);
+
+/// \returns true if \p F creates ref-countable object from uncounted parameter,
+/// false if not.
+bool isCtorOfRefCounted(const clang::FunctionDecl *F);
+
+/// \returns true if \p M is getter of a ref-counted class, false if not.
+bool isGetterOfRefCounted(const clang::CXXMethodDecl *Method);
+
+/// \returns true if \p F is a conversion between ref-countable or ref-counted
+/// pointer types.
+bool isPtrConversion(const FunctionDecl *F);
+
+} // namespace clang
+
+#endif
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp
new file mode 100644
index 000000000000..81ce284c2dc7
--- /dev/null
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp
@@ -0,0 +1,167 @@
+//=======- RefCntblBaseVirtualDtorChecker.cpp --------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "DiagOutputUtils.h"
+#include "PtrTypesSemantics.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class RefCntblBaseVirtualDtorChecker
+ : public Checker<check::ASTDecl<TranslationUnitDecl>> {
+private:
+ BugType Bug;
+ mutable BugReporter *BR;
+
+public:
+ RefCntblBaseVirtualDtorChecker()
+ : Bug(this,
+ "Reference-countable base class doesn't have virtual destructor",
+ "WebKit coding guidelines") {}
+
+ void checkASTDecl(const TranslationUnitDecl *TUD, AnalysisManager &MGR,
+ BugReporter &BRArg) const {
+ BR = &BRArg;
+
+ // The calls to checkAST* from AnalysisConsumer don't
+ // visit template instantiations or lambda classes. We
+ // want to visit those, so we make our own RecursiveASTVisitor.
+ struct LocalVisitor : public RecursiveASTVisitor<LocalVisitor> {
+ const RefCntblBaseVirtualDtorChecker *Checker;
+ explicit LocalVisitor(const RefCntblBaseVirtualDtorChecker *Checker)
+ : Checker(Checker) {
+ assert(Checker);
+ }
+
+ bool shouldVisitTemplateInstantiations() const { return true; }
+ bool shouldVisitImplicitCode() const { return false; }
+
+ bool VisitCXXRecordDecl(const CXXRecordDecl *RD) {
+ Checker->visitCXXRecordDecl(RD);
+ return true;
+ }
+ };
+
+ LocalVisitor visitor(this);
+ visitor.TraverseDecl(const_cast<TranslationUnitDecl *>(TUD));
+ }
+
+ void visitCXXRecordDecl(const CXXRecordDecl *RD) const {
+ if (shouldSkipDecl(RD))
+ return;
+
+ CXXBasePaths Paths;
+ Paths.setOrigin(RD);
+
+ const CXXBaseSpecifier *ProblematicBaseSpecifier = nullptr;
+ const CXXRecordDecl *ProblematicBaseClass = nullptr;
+
+ const auto IsPublicBaseRefCntblWOVirtualDtor =
+ [RD, &ProblematicBaseSpecifier,
+ &ProblematicBaseClass](const CXXBaseSpecifier *Base, CXXBasePath &) {
+ const auto AccSpec = Base->getAccessSpecifier();
+ if (AccSpec == AS_protected || AccSpec == AS_private ||
+ (AccSpec == AS_none && RD->isClass()))
+ return false;
+
+ llvm::Optional<const clang::CXXRecordDecl *> MaybeRefCntblBaseRD =
+ isRefCountable(Base);
+ if (!MaybeRefCntblBaseRD.hasValue())
+ return false;
+
+ const CXXRecordDecl *RefCntblBaseRD = MaybeRefCntblBaseRD.getValue();
+ if (!RefCntblBaseRD)
+ return false;
+
+ const auto *Dtor = RefCntblBaseRD->getDestructor();
+ if (!Dtor || !Dtor->isVirtual()) {
+ ProblematicBaseSpecifier = Base;
+ ProblematicBaseClass = RefCntblBaseRD;
+ return true;
+ }
+
+ return false;
+ };
+
+ if (RD->lookupInBases(IsPublicBaseRefCntblWOVirtualDtor, Paths,
+ /*LookupInDependent =*/true)) {
+ reportBug(RD, ProblematicBaseSpecifier, ProblematicBaseClass);
+ }
+ }
+
+ bool shouldSkipDecl(const CXXRecordDecl *RD) const {
+ if (!RD->isThisDeclarationADefinition())
+ return true;
+
+ if (RD->isImplicit())
+ return true;
+
+ if (RD->isLambda())
+ return true;
+
+ // If the construct doesn't have a source file, then it's not something
+ // we want to diagnose.
+ const auto RDLocation = RD->getLocation();
+ if (!RDLocation.isValid())
+ return true;
+
+ const auto Kind = RD->getTagKind();
+ if (Kind != TTK_Struct && Kind != TTK_Class)
+ return true;
+
+ // Ignore CXXRecords that come from system headers.
+ if (BR->getSourceManager().getFileCharacteristic(RDLocation) !=
+ SrcMgr::C_User)
+ return true;
+
+ return false;
+ }
+
+ void reportBug(const CXXRecordDecl *DerivedClass,
+ const CXXBaseSpecifier *BaseSpec,
+ const CXXRecordDecl *ProblematicBaseClass) const {
+ assert(DerivedClass);
+ assert(BaseSpec);
+ assert(ProblematicBaseClass);
+
+ SmallString<100> Buf;
+ llvm::raw_svector_ostream Os(Buf);
+
+ Os << (ProblematicBaseClass->isClass() ? "Class" : "Struct") << " ";
+ printQuotedQualifiedName(Os, ProblematicBaseClass);
+
+ Os << " is used as a base of "
+ << (DerivedClass->isClass() ? "class" : "struct") << " ";
+ printQuotedQualifiedName(Os, DerivedClass);
+
+ Os << " but doesn't have virtual destructor";
+
+ PathDiagnosticLocation BSLoc(BaseSpec->getSourceRange().getBegin(),
+ BR->getSourceManager());
+ auto Report = std::make_unique<BasicBugReport>(Bug, Os.str(), BSLoc);
+ Report->addRange(BaseSpec->getSourceRange());
+ BR->emitReport(std::move(Report));
+ }
+};
+} // namespace
+
+void ento::registerRefCntblBaseVirtualDtorChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<RefCntblBaseVirtualDtorChecker>();
+}
+
+bool ento::shouldRegisterRefCntblBaseVirtualDtorChecker(
+ const CheckerManager &mgr) {
+ return true;
+}
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp
new file mode 100644
index 000000000000..940a1f349831
--- /dev/null
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp
@@ -0,0 +1,195 @@
+//=======- UncountedCallArgsChecker.cpp --------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ASTUtils.h"
+#include "DiagOutputUtils.h"
+#include "PtrTypesSemantics.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "llvm/ADT/DenseSet.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class UncountedCallArgsChecker
+ : public Checker<check::ASTDecl<TranslationUnitDecl>> {
+ BugType Bug{this,
+ "Uncounted call argument for a raw pointer/reference parameter",
+ "WebKit coding guidelines"};
+ mutable BugReporter *BR;
+
+public:
+
+ void checkASTDecl(const TranslationUnitDecl *TUD, AnalysisManager &MGR,
+ BugReporter &BRArg) const {
+ BR = &BRArg;
+
+ // The calls to checkAST* from AnalysisConsumer don't
+ // visit template instantiations or lambda classes. We
+ // want to visit those, so we make our own RecursiveASTVisitor.
+ struct LocalVisitor : public RecursiveASTVisitor<LocalVisitor> {
+ const UncountedCallArgsChecker *Checker;
+ explicit LocalVisitor(const UncountedCallArgsChecker *Checker)
+ : Checker(Checker) {
+ assert(Checker);
+ }
+
+ bool shouldVisitTemplateInstantiations() const { return true; }
+ bool shouldVisitImplicitCode() const { return false; }
+
+ bool VisitCallExpr(const CallExpr *CE) {
+ Checker->visitCallExpr(CE);
+ return true;
+ }
+ };
+
+ LocalVisitor visitor(this);
+ visitor.TraverseDecl(const_cast<TranslationUnitDecl *>(TUD));
+ }
+
+ void visitCallExpr(const CallExpr *CE) const {
+ if (shouldSkipCall(CE))
+ return;
+
+ if (auto *F = CE->getDirectCallee()) {
+ // Skip the first argument for overloaded member operators (e. g. lambda
+ // or std::function call operator).
+ unsigned ArgIdx =
+ isa<CXXOperatorCallExpr>(CE) && dyn_cast_or_null<CXXMethodDecl>(F);
+
+ for (auto P = F->param_begin();
+ // FIXME: Also check variadic function parameters.
+ // FIXME: Also check default function arguments. Probably a different
+ // checker. In case there are default arguments the call can have
+ // fewer arguments than the callee has parameters.
+ P < F->param_end() && ArgIdx < CE->getNumArgs(); ++P, ++ArgIdx) {
+ // TODO: attributes.
+ // if ((*P)->hasAttr<SafeRefCntblRawPtrAttr>())
+ // continue;
+
+ const auto *ArgType = (*P)->getType().getTypePtrOrNull();
+ if (!ArgType)
+ continue; // FIXME? Should we bail?
+
+ // FIXME: more complex types (arrays, references to raw pointers, etc)
+ if (!isUncountedPtr(ArgType))
+ continue;
+
+ const auto *Arg = CE->getArg(ArgIdx);
+
+ std::pair<const clang::Expr *, bool> ArgOrigin =
+ tryToFindPtrOrigin(Arg, true);
+
+ // Temporary ref-counted object created as part of the call argument
+ // would outlive the call.
+ if (ArgOrigin.second)
+ continue;
+
+ if (isa<CXXNullPtrLiteralExpr>(ArgOrigin.first)) {
+ // foo(nullptr)
+ continue;
+ }
+ if (isa<IntegerLiteral>(ArgOrigin.first)) {
+ // FIXME: Check the value.
+ // foo(NULL)
+ continue;
+ }
+
+ if (isASafeCallArg(ArgOrigin.first))
+ continue;
+
+ reportBug(Arg, *P);
+ }
+ }
+ }
+
+ bool shouldSkipCall(const CallExpr *CE) const {
+ if (CE->getNumArgs() == 0)
+ return false;
+
+ // If an assignment is problematic we should warn about the sole existence
+ // of object on LHS.
+ if (auto *MemberOp = dyn_cast<CXXOperatorCallExpr>(CE)) {
+ // Note: assignment to built-in type isn't derived from CallExpr.
+ if (MemberOp->isAssignmentOp())
+ return false;
+ }
+
+ const auto *Callee = CE->getDirectCallee();
+ if (!Callee)
+ return false;
+
+ auto overloadedOperatorType = Callee->getOverloadedOperator();
+ if (overloadedOperatorType == OO_EqualEqual ||
+ overloadedOperatorType == OO_ExclaimEqual ||
+ overloadedOperatorType == OO_LessEqual ||
+ overloadedOperatorType == OO_GreaterEqual ||
+ overloadedOperatorType == OO_Spaceship ||
+ overloadedOperatorType == OO_AmpAmp ||
+ overloadedOperatorType == OO_PipePipe)
+ return true;
+
+ if (isCtorOfRefCounted(Callee))
+ return true;
+
+ auto name = safeGetName(Callee);
+ if (name == "adoptRef" || name == "getPtr" || name == "WeakPtr" ||
+ name == "makeWeakPtr" || name == "downcast" || name == "bitwise_cast" ||
+ name == "is" || name == "equal" || name == "hash" ||
+ name == "isType"
+ // FIXME: Most/all of these should be implemented via attributes.
+ || name == "equalIgnoringASCIICase" ||
+ name == "equalIgnoringASCIICaseCommon" ||
+ name == "equalIgnoringNullity")
+ return true;
+
+ return false;
+ }
+
+ void reportBug(const Expr *CallArg, const ParmVarDecl *Param) const {
+ assert(CallArg);
+
+ SmallString<100> Buf;
+ llvm::raw_svector_ostream Os(Buf);
+
+ const std::string paramName = safeGetName(Param);
+ Os << "Call argument";
+ if (!paramName.empty()) {
+ Os << " for parameter ";
+ printQuotedQualifiedName(Os, Param);
+ }
+ Os << " is uncounted and unsafe.";
+
+ const SourceLocation SrcLocToReport =
+ isa<CXXDefaultArgExpr>(CallArg) ? Param->getDefaultArg()->getExprLoc()
+ : CallArg->getSourceRange().getBegin();
+
+ PathDiagnosticLocation BSLoc(SrcLocToReport, BR->getSourceManager());
+ auto Report = std::make_unique<BasicBugReport>(Bug, Os.str(), BSLoc);
+ Report->addRange(CallArg->getSourceRange());
+ BR->emitReport(std::move(Report));
+ }
+};
+} // namespace
+
+void ento::registerUncountedCallArgsChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<UncountedCallArgsChecker>();
+}
+
+bool ento::shouldRegisterUncountedCallArgsChecker(const CheckerManager &) {
+ return true;
+}
diff --git a/clang/lib/StaticAnalyzer/Checkers/Yaml.h b/clang/lib/StaticAnalyzer/Checkers/Yaml.h
index 968c50e33f6d..ec612dde3b8b 100755
--- a/clang/lib/StaticAnalyzer/Checkers/Yaml.h
+++ b/clang/lib/StaticAnalyzer/Checkers/Yaml.h
@@ -15,6 +15,7 @@
#define LLVM_CLANG_LIB_STATICANALYZER_CHECKER_YAML_H
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/YAMLTraits.h"
namespace clang {
diff --git a/clang/lib/StaticAnalyzer/Checkers/cert/PutenvWithAutoChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/cert/PutenvWithAutoChecker.cpp
new file mode 100644
index 000000000000..1c67bbd77ec8
--- /dev/null
+++ b/clang/lib/StaticAnalyzer/Checkers/cert/PutenvWithAutoChecker.cpp
@@ -0,0 +1,66 @@
+//== PutenvWithAutoChecker.cpp --------------------------------- -*- C++ -*--=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines PutenvWithAutoChecker, which finds calls of the
+// ``putenv`` function with an automatic variable as the argument.
+// https://wiki.sei.cmu.edu/confluence/x/6NYxBQ
+//
+//===----------------------------------------------------------------------===//
+
+#include "../AllocationState.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class PutenvWithAutoChecker : public Checker<check::PostCall> {
+private:
+ BugType BT{this, "'putenv' function should not be called with auto variables",
+ categories::SecurityError};
+ const CallDescription Putenv{"putenv", 1};
+
+public:
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+};
+} // namespace
+
+void PutenvWithAutoChecker::checkPostCall(const CallEvent &Call,
+                                          CheckerContext &C) const {
+  if (!Call.isCalled(Putenv))
+    return;
+
+  SVal ArgV = Call.getArgSVal(0);
+  const Expr *ArgExpr = Call.getArgExpr(0);
+  // getAsRegion() may return null; warn only for stack (automatic) regions.
+  const MemRegion *MR = ArgV.getAsRegion();
+  if (!MR || !isa<StackSpaceRegion>(MR->getMemorySpace()))
+    return;
+
+  // generateErrorNode() returns null if this path was already a sink.
+  ExplodedNode *N = C.generateErrorNode();
+  if (!N)
+    return;
+  StringRef ErrorMsg = "The 'putenv' function should not be called with "
+                       "arguments that have automatic storage";
+  auto Report = std::make_unique<PathSensitiveBugReport>(BT, ErrorMsg, N);
+  bugreporter::trackExpressionValue(Report->getErrorNode(), ArgExpr, *Report);
+  C.emitReport(std::move(Report));
+}
+
+void ento::registerPutenvWithAuto(CheckerManager &Mgr) {
+ Mgr.registerChecker<PutenvWithAutoChecker>();
+}
+
+bool ento::shouldRegisterPutenvWithAuto(const CheckerManager &) { return true; }
diff --git a/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp b/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
index fdd03c75920d..ecfc7106560e 100644
--- a/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
+++ b/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
@@ -13,7 +13,7 @@ using namespace ento;
void AnalysisManager::anchor() { }
-AnalysisManager::AnalysisManager(ASTContext &ASTCtx,
+AnalysisManager::AnalysisManager(ASTContext &ASTCtx, Preprocessor &PP,
const PathDiagnosticConsumers &PDC,
StoreManagerCreator storemgr,
ConstraintManagerCreator constraintmgr,
@@ -38,7 +38,7 @@ AnalysisManager::AnalysisManager(ASTContext &ASTCtx,
Options.ShouldElideConstructors,
/*addVirtualBaseBranches=*/true,
injector),
- Ctx(ASTCtx), LangOpts(ASTCtx.getLangOpts()),
+ Ctx(ASTCtx), PP(PP), LangOpts(ASTCtx.getLangOpts()),
PathConsumers(PDC), CreateStoreMgr(storemgr),
CreateConstraintMgr(constraintmgr), CheckerMgr(checkerMgr),
options(Options) {
diff --git a/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp b/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
index 7cd48bf44374..73f057f09550 100644
--- a/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
+++ b/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
@@ -239,7 +239,7 @@ BasicValueFactory::evalAPSInt(BinaryOperator::Opcode Op,
if (Amt >= V1.getBitWidth())
return nullptr;
- if (!Ctx.getLangOpts().CPlusPlus2a) {
+ if (!Ctx.getLangOpts().CPlusPlus20) {
if (V1.isSigned() && V1.isNegative())
return nullptr;
diff --git a/clang/lib/StaticAnalyzer/Core/BugReporter.cpp b/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
index 1864bcef9b87..72be4e81c83d 100644
--- a/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
+++ b/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -34,10 +34,12 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/CheckerRegistryData.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "llvm/ADT/ArrayRef.h"
@@ -51,6 +53,7 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
@@ -2105,6 +2108,53 @@ void BuiltinBug::anchor() {}
// Methods for BugReport and subclasses.
//===----------------------------------------------------------------------===//
+LLVM_ATTRIBUTE_USED static bool
+isDependency(const CheckerRegistryData &Registry, StringRef CheckerName) {
+ for (const std::pair<StringRef, StringRef> &Pair : Registry.Dependencies) {
+ if (Pair.second == CheckerName)
+ return true;
+ }
+ return false;
+}
+
+LLVM_ATTRIBUTE_USED static bool isHidden(const CheckerRegistryData &Registry,
+ StringRef CheckerName) {
+ for (const CheckerInfo &Checker : Registry.Checkers) {
+ if (Checker.FullName == CheckerName)
+ return Checker.IsHidden;
+ }
+ llvm_unreachable(
+ "Checker name not found in CheckerRegistry -- did you retrieve it "
+ "correctly from CheckerManager::getCurrentCheckerName?");
+}
+
+PathSensitiveBugReport::PathSensitiveBugReport(
+ const BugType &bt, StringRef shortDesc, StringRef desc,
+ const ExplodedNode *errorNode, PathDiagnosticLocation LocationToUnique,
+ const Decl *DeclToUnique)
+ : BugReport(Kind::PathSensitive, bt, shortDesc, desc), ErrorNode(errorNode),
+ ErrorNodeRange(getStmt() ? getStmt()->getSourceRange() : SourceRange()),
+ UniqueingLocation(LocationToUnique), UniqueingDecl(DeclToUnique) {
+ assert(!isDependency(ErrorNode->getState()
+ ->getAnalysisManager()
+ .getCheckerManager()
+ ->getCheckerRegistryData(),
+ bt.getCheckerName()) &&
+ "Some checkers depend on this one! We don't allow dependency "
+ "checkers to emit warnings, because checkers should depend on "
+ "*modeling*, not *diagnostics*.");
+
+ assert(
+ (bt.getCheckerName().startswith("debug") ||
+ !isHidden(ErrorNode->getState()
+ ->getAnalysisManager()
+ .getCheckerManager()
+ ->getCheckerRegistryData(),
+ bt.getCheckerName())) &&
+ "Hidden checkers musn't emit diagnostics as they are by definition "
+ "non-user facing!");
+}
+
void PathSensitiveBugReport::addVisitor(
std::unique_ptr<BugReporterVisitor> visitor) {
if (!visitor)
@@ -2193,12 +2243,12 @@ static void insertToInterestingnessMap(
return;
case bugreporter::TrackingKind::Condition:
return;
- }
+ }
- llvm_unreachable(
- "BugReport::markInteresting currently can only handle 2 different "
- "tracking kinds! Please define what tracking kind should this entitiy"
- "have, if it was already marked as interesting with a different kind!");
+ llvm_unreachable(
+ "BugReport::markInteresting currently can only handle 2 different "
+ "tracking kinds! Please define what tracking kind should this entitiy"
+ "have, if it was already marked as interesting with a different kind!");
}
void PathSensitiveBugReport::markInteresting(SymbolRef sym,
@@ -2389,6 +2439,7 @@ ProgramStateManager &PathSensitiveBugReporter::getStateManager() const {
return Eng.getStateManager();
}
+BugReporter::BugReporter(BugReporterData &d) : D(d) {}
BugReporter::~BugReporter() {
// Make sure reports are flushed.
assert(StrBugTypes.empty() &&
@@ -2409,7 +2460,7 @@ void BugReporter::FlushReports() {
// EmitBasicReport.
// FIXME: There are leaks from checkers that assume that the BugTypes they
// create will be destroyed by the BugReporter.
- llvm::DeleteContainerSeconds(StrBugTypes);
+ StrBugTypes.clear();
}
//===----------------------------------------------------------------------===//
@@ -2781,7 +2832,7 @@ Optional<PathDiagnosticBuilder> PathDiagnosticBuilder::findValidReport(
R->clearVisitors();
R->addVisitor(std::make_unique<FalsePositiveRefutationBRVisitor>());
- // We don't overrite the notes inserted by other visitors because the
+ // We don't overwrite the notes inserted by other visitors because the
// refutation manager does not add any new note to the path
generateVisitorsDiagnostics(R, BugPath->ErrorNode, BRC);
}
@@ -3262,8 +3313,8 @@ BugType *BugReporter::getBugTypeForName(CheckerNameRef CheckName,
SmallString<136> fullDesc;
llvm::raw_svector_ostream(fullDesc) << CheckName.getName() << ":" << name
<< ":" << category;
- BugType *&BT = StrBugTypes[fullDesc];
+ std::unique_ptr<BugType> &BT = StrBugTypes[fullDesc];
if (!BT)
- BT = new BugType(CheckName, name, category);
- return BT;
+ BT = std::make_unique<BugType>(CheckName, name, category);
+ return BT.get();
}
diff --git a/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index 0525b5c41e34..ef4d38ff498f 100644
--- a/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -45,7 +45,6 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
@@ -358,7 +357,7 @@ class NoStoreFuncVisitor final : public BugReporterVisitor {
public:
NoStoreFuncVisitor(const SubRegion *R, bugreporter::TrackingKind TKind)
- : RegionOfInterest(R), MmrMgr(*R->getMemRegionManager()),
+ : RegionOfInterest(R), MmrMgr(R->getMemRegionManager()),
SM(MmrMgr.getContext().getSourceManager()),
PP(MmrMgr.getContext().getPrintingPolicy()), TKind(TKind) {}
@@ -813,7 +812,7 @@ public:
const SourceManager &SMgr = BRC.getSourceManager();
if (auto Loc = matchAssignment(N)) {
if (isFunctionMacroExpansion(*Loc, SMgr)) {
- std::string MacroName = getMacroName(*Loc, BRC);
+ std::string MacroName = std::string(getMacroName(*Loc, BRC));
SourceLocation BugLoc = BugPoint->getStmt()->getBeginLoc();
if (!BugLoc.isMacroID() || getMacroName(BugLoc, BRC) != MacroName)
BR.markInvalid(getTag(), MacroName.c_str());
@@ -1735,10 +1734,9 @@ constructDebugPieceForTrackedCondition(const Expr *Cond,
!BRC.getAnalyzerOptions().ShouldTrackConditionsDebug)
return nullptr;
- std::string ConditionText = Lexer::getSourceText(
+ std::string ConditionText = std::string(Lexer::getSourceText(
CharSourceRange::getTokenRange(Cond->getSourceRange()),
- BRC.getSourceManager(),
- BRC.getASTContext().getLangOpts());
+ BRC.getSourceManager(), BRC.getASTContext().getLangOpts()));
return std::make_shared<PathDiagnosticEventPiece>(
PathDiagnosticLocation::createBegin(
@@ -2494,7 +2492,7 @@ PathDiagnosticPieceRef ConditionBRVisitor::VisitTrueTest(
Out << WillBeUsedForACondition;
// Convert 'field ...' to 'Field ...' if it is a MemberExpr.
- std::string Message = Out.str();
+ std::string Message = std::string(Out.str());
Message[0] = toupper(Message[0]);
// If we know the value create a pop-up note to the value part of 'BExpr'.
@@ -2821,7 +2819,7 @@ void FalsePositiveRefutationBRVisitor::finalizeVisitor(
BugReporterContext &BRC, const ExplodedNode *EndPathNode,
PathSensitiveBugReport &BR) {
// Collect new constraints
- VisitNode(EndPathNode, BRC, BR);
+ addConstraints(EndPathNode, /*OverwriteConstraintsOnExistingSyms=*/true);
// Create a refutation manager
llvm::SMTSolverRef RefutationSolver = llvm::CreateZ3Solver();
@@ -2832,30 +2830,30 @@ void FalsePositiveRefutationBRVisitor::finalizeVisitor(
const SymbolRef Sym = I.first;
auto RangeIt = I.second.begin();
- llvm::SMTExprRef Constraints = SMTConv::getRangeExpr(
+ llvm::SMTExprRef SMTConstraints = SMTConv::getRangeExpr(
RefutationSolver, Ctx, Sym, RangeIt->From(), RangeIt->To(),
/*InRange=*/true);
while ((++RangeIt) != I.second.end()) {
- Constraints = RefutationSolver->mkOr(
- Constraints, SMTConv::getRangeExpr(RefutationSolver, Ctx, Sym,
- RangeIt->From(), RangeIt->To(),
- /*InRange=*/true));
+ SMTConstraints = RefutationSolver->mkOr(
+ SMTConstraints, SMTConv::getRangeExpr(RefutationSolver, Ctx, Sym,
+ RangeIt->From(), RangeIt->To(),
+ /*InRange=*/true));
}
- RefutationSolver->addConstraint(Constraints);
+ RefutationSolver->addConstraint(SMTConstraints);
}
// And check for satisfiability
- Optional<bool> isSat = RefutationSolver->check();
- if (!isSat.hasValue())
+ Optional<bool> IsSAT = RefutationSolver->check();
+ if (!IsSAT.hasValue())
return;
- if (!isSat.getValue())
+ if (!IsSAT.getValue())
BR.markInvalid("Infeasible constraints", EndPathNode->getLocationContext());
}
-PathDiagnosticPieceRef FalsePositiveRefutationBRVisitor::VisitNode(
- const ExplodedNode *N, BugReporterContext &, PathSensitiveBugReport &) {
+void FalsePositiveRefutationBRVisitor::addConstraints(
+ const ExplodedNode *N, bool OverwriteConstraintsOnExistingSyms) {
// Collect new constraints
const ConstraintRangeTy &NewCs = N->getState()->get<ConstraintRange>();
ConstraintRangeTy::Factory &CF =
@@ -2865,10 +2863,19 @@ PathDiagnosticPieceRef FalsePositiveRefutationBRVisitor::VisitNode(
for (auto const &C : NewCs) {
const SymbolRef &Sym = C.first;
if (!Constraints.contains(Sym)) {
+ // This symbol is new, just add the constraint.
+ Constraints = CF.add(Constraints, Sym, C.second);
+ } else if (OverwriteConstraintsOnExistingSyms) {
+ // Overwrite the associated constraint of the Symbol.
+ Constraints = CF.remove(Constraints, Sym);
Constraints = CF.add(Constraints, Sym, C.second);
}
}
+}
+PathDiagnosticPieceRef FalsePositiveRefutationBRVisitor::VisitNode(
+ const ExplodedNode *N, BugReporterContext &, PathSensitiveBugReport &) {
+ addConstraints(N, /*OverwriteConstraintsOnExistingSyms=*/false);
return nullptr;
}
diff --git a/clang/lib/StaticAnalyzer/Core/CallEvent.cpp b/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
index 168d6fe6ec48..78d13ddfb773 100644
--- a/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -172,23 +172,9 @@ AnalysisDeclContext *CallEvent::getCalleeAnalysisDeclContext() const {
if (!D)
return nullptr;
- // TODO: For now we skip functions without definitions, even if we have
- // our own getDecl(), because it's hard to find out which re-declaration
- // is going to be used, and usually clients don't really care about this
- // situation because there's a loss of precision anyway because we cannot
- // inline the call.
- RuntimeDefinition RD = getRuntimeDefinition();
- if (!RD.getDecl())
- return nullptr;
-
AnalysisDeclContext *ADC =
LCtx->getAnalysisDeclContext()->getManager()->getContext(D);
- // TODO: For now we skip virtual functions, because this also rises
- // the problem of which decl to use, but now it's across different classes.
- if (RD.mayHaveOtherDefinitions() || RD.getDecl() != ADC->getDecl())
- return nullptr;
-
return ADC;
}
@@ -222,39 +208,17 @@ CallEvent::getCalleeStackFrame(unsigned BlockCount) const {
return ADC->getManager()->getStackFrame(ADC, LCtx, E, B, BlockCount, Idx);
}
-const VarRegion *CallEvent::getParameterLocation(unsigned Index,
- unsigned BlockCount) const {
+const ParamVarRegion
+*CallEvent::getParameterLocation(unsigned Index, unsigned BlockCount) const {
const StackFrameContext *SFC = getCalleeStackFrame(BlockCount);
// We cannot construct a VarRegion without a stack frame.
if (!SFC)
return nullptr;
- // Retrieve parameters of the definition, which are different from
- // CallEvent's parameters() because getDecl() isn't necessarily
- // the definition. SFC contains the definition that would be used
- // during analysis.
- const Decl *D = SFC->getDecl();
-
- // TODO: Refactor into a virtual method of CallEvent, like parameters().
- const ParmVarDecl *PVD = nullptr;
- if (const auto *FD = dyn_cast<FunctionDecl>(D))
- PVD = FD->parameters()[Index];
- else if (const auto *BD = dyn_cast<BlockDecl>(D))
- PVD = BD->parameters()[Index];
- else if (const auto *MD = dyn_cast<ObjCMethodDecl>(D))
- PVD = MD->parameters()[Index];
- else if (const auto *CD = dyn_cast<CXXConstructorDecl>(D))
- PVD = CD->parameters()[Index];
- assert(PVD && "Unexpected Decl kind!");
-
- const VarRegion *VR =
- State->getStateManager().getRegionManager().getVarRegion(PVD, SFC);
-
- // This sanity check would fail if our parameter declaration doesn't
- // correspond to the stack frame's function declaration.
- assert(VR->getStackFrame() == SFC);
-
- return VR;
+ const ParamVarRegion *PVR =
+ State->getStateManager().getRegionManager().getParamVarRegion(
+ getOriginExpr(), Index, SFC);
+ return PVR;
}
/// Returns true if a type is a pointer-to-const or reference-to-const
@@ -325,8 +289,9 @@ ProgramStateRef CallEvent::invalidateRegions(unsigned BlockCount,
if (getKind() != CE_CXXAllocator)
if (isArgumentConstructedDirectly(Idx))
if (auto AdjIdx = getAdjustedParameterIndex(Idx))
- if (const VarRegion *VR = getParameterLocation(*AdjIdx, BlockCount))
- ValuesToInvalidate.push_back(loc::MemRegionVal(VR));
+ if (const TypedValueRegion *TVR =
+ getParameterLocation(*AdjIdx, BlockCount))
+ ValuesToInvalidate.push_back(loc::MemRegionVal(TVR));
}
// Invalidate designated regions using the batch invalidation API.
@@ -450,8 +415,7 @@ void CallEvent::dump(raw_ostream &Out) const {
return;
}
- // FIXME: a string representation of the kind would be nice.
- Out << "Unknown call (type " << getKind() << ")";
+ Out << "Unknown call (type " << getKindAsString() << ")";
}
bool CallEvent::isCallStmt(const Stmt *S) {
@@ -515,8 +479,7 @@ static void addParameterValuesToBindings(const StackFrameContext *CalleeCtx,
unsigned Idx = 0;
ArrayRef<ParmVarDecl*>::iterator I = parameters.begin(), E = parameters.end();
for (; I != E && Idx < NumArgs; ++I, ++Idx) {
- const ParmVarDecl *ParamDecl = *I;
- assert(ParamDecl && "Formal parameter has no decl?");
+ assert(*I && "Formal parameter has no decl?");
// TODO: Support allocator calls.
if (Call.getKind() != CE_CXXAllocator)
@@ -528,7 +491,8 @@ static void addParameterValuesToBindings(const StackFrameContext *CalleeCtx,
// which makes getArgSVal() fail and return UnknownVal.
SVal ArgVal = Call.getArgSVal(Idx);
if (!ArgVal.isUnknown()) {
- Loc ParamLoc = SVB.makeLoc(MRMgr.getVarRegion(ParamDecl, CalleeCtx));
+ Loc ParamLoc = SVB.makeLoc(
+ MRMgr.getParamVarRegion(Call.getOriginExpr(), Idx, CalleeCtx));
Bindings.push_back(std::make_pair(ParamLoc, ArgVal));
}
}
@@ -536,6 +500,37 @@ static void addParameterValuesToBindings(const StackFrameContext *CalleeCtx,
// FIXME: Variadic arguments are not handled at all right now.
}
+const ConstructionContext *CallEvent::getConstructionContext() const {
+ const StackFrameContext *StackFrame = getCalleeStackFrame(0);
+ if (!StackFrame)
+ return nullptr;
+
+ const CFGElement Element = StackFrame->getCallSiteCFGElement();
+ if (const auto Ctor = Element.getAs<CFGConstructor>()) {
+ return Ctor->getConstructionContext();
+ }
+
+ if (const auto RecCall = Element.getAs<CFGCXXRecordTypedCall>()) {
+ return RecCall->getConstructionContext();
+ }
+
+ return nullptr;
+}
+
+Optional<SVal>
+CallEvent::getReturnValueUnderConstruction() const {
+ const auto *CC = getConstructionContext();
+ if (!CC)
+ return None;
+
+ EvalCallOptions CallOpts;
+ ExprEngine &Engine = getState()->getStateManager().getOwningEngine();
+ SVal RetVal =
+ Engine.computeObjectUnderConstruction(getOriginExpr(), getState(),
+ getLocationContext(), CC, CallOpts);
+ return RetVal;
+}
+
ArrayRef<ParmVarDecl*> AnyFunctionCall::parameters() const {
const FunctionDecl *D = getDecl();
if (!D)
@@ -565,7 +560,7 @@ RuntimeDefinition AnyFunctionCall::getRuntimeDefinition() const {
return RuntimeDefinition(Decl);
}
- SubEngine &Engine = getState()->getStateManager().getOwningEngine();
+ ExprEngine &Engine = getState()->getStateManager().getOwningEngine();
AnalyzerOptions &Opts = Engine.getAnalysisManager().options;
// Try to get CTU definition only if CTUDir is provided.
@@ -889,24 +884,22 @@ void BlockCall::getInitialStackFrameContents(const StackFrameContext *CalleeCtx,
Params);
}
-SVal CXXConstructorCall::getCXXThisVal() const {
+SVal AnyCXXConstructorCall::getCXXThisVal() const {
if (Data)
return loc::MemRegionVal(static_cast<const MemRegion *>(Data));
return UnknownVal();
}
-void CXXConstructorCall::getExtraInvalidatedValues(ValueList &Values,
+void AnyCXXConstructorCall::getExtraInvalidatedValues(ValueList &Values,
RegionAndSymbolInvalidationTraits *ETraits) const {
- if (Data) {
- loc::MemRegionVal MV(static_cast<const MemRegion *>(Data));
- if (SymbolRef Sym = MV.getAsSymbol(true))
- ETraits->setTrait(Sym,
- RegionAndSymbolInvalidationTraits::TK_SuppressEscape);
- Values.push_back(MV);
- }
+ SVal V = getCXXThisVal();
+ if (SymbolRef Sym = V.getAsSymbol(true))
+ ETraits->setTrait(Sym,
+ RegionAndSymbolInvalidationTraits::TK_SuppressEscape);
+ Values.push_back(V);
}
-void CXXConstructorCall::getInitialStackFrameContents(
+void AnyCXXConstructorCall::getInitialStackFrameContents(
const StackFrameContext *CalleeCtx,
BindingsTy &Bindings) const {
AnyFunctionCall::getInitialStackFrameContents(CalleeCtx, Bindings);
@@ -920,6 +913,14 @@ void CXXConstructorCall::getInitialStackFrameContents(
}
}
+const StackFrameContext *
+CXXInheritedConstructorCall::getInheritingStackFrame() const {
+ const StackFrameContext *SFC = getLocationContext()->getStackFrame();
+ while (isa<CXXInheritedCtorInitExpr>(SFC->getCallSite()))
+ SFC = SFC->getParent()->getStackFrame();
+ return SFC;
+}
+
SVal CXXDestructorCall::getCXXThisVal() const {
if (Data)
return loc::MemRegionVal(DtorDataTy::getFromOpaqueValue(Data).getPointer());
@@ -967,14 +968,6 @@ void ObjCMethodCall::getExtraInvalidatedValues(
Values.push_back(getReceiverSVal());
}
-SVal ObjCMethodCall::getSelfSVal() const {
- const LocationContext *LCtx = getLocationContext();
- const ImplicitParamDecl *SelfDecl = LCtx->getSelfDecl();
- if (!SelfDecl)
- return SVal();
- return getState()->getSVal(getState()->getRegion(SelfDecl, LCtx));
-}
-
SVal ObjCMethodCall::getReceiverSVal() const {
// FIXME: Is this the best way to handle class receivers?
if (!isInstanceMessage())
@@ -986,7 +979,7 @@ SVal ObjCMethodCall::getReceiverSVal() const {
// An instance message with no expression means we are sending to super.
// In this case the object reference is the same as 'self'.
assert(getOriginExpr()->getReceiverKind() == ObjCMessageExpr::SuperInstance);
- SVal SelfVal = getSelfSVal();
+ SVal SelfVal = getState()->getSelfSVal(getLocationContext());
assert(SelfVal.isValid() && "Calling super but not in ObjC method");
return SelfVal;
}
@@ -1000,8 +993,9 @@ bool ObjCMethodCall::isReceiverSelfOrSuper() const {
return false;
SVal RecVal = getSVal(getOriginExpr()->getInstanceReceiver());
+ SVal SelfVal = getState()->getSelfSVal(getLocationContext());
- return (RecVal == getSelfSVal());
+ return (RecVal == SelfVal);
}
SourceRange ObjCMethodCall::getSourceRange() const {
@@ -1168,23 +1162,77 @@ static const ObjCMethodDecl *findDefiningRedecl(const ObjCMethodDecl *MD) {
return MD;
}
-static bool isCallToSelfClass(const ObjCMessageExpr *ME) {
- const Expr* InstRec = ME->getInstanceReceiver();
- if (!InstRec)
- return false;
- const auto *InstRecIg = dyn_cast<DeclRefExpr>(InstRec->IgnoreParenImpCasts());
+struct PrivateMethodKey {
+ const ObjCInterfaceDecl *Interface;
+ Selector LookupSelector;
+ bool IsClassMethod;
+};
- // Check that receiver is called 'self'.
- if (!InstRecIg || !InstRecIg->getFoundDecl() ||
- !InstRecIg->getFoundDecl()->getName().equals("self"))
- return false;
+namespace llvm {
+template <> struct DenseMapInfo<PrivateMethodKey> {
+ using InterfaceInfo = DenseMapInfo<const ObjCInterfaceDecl *>;
+ using SelectorInfo = DenseMapInfo<Selector>;
- // Check that the method name is 'class'.
- if (ME->getSelector().getNumArgs() != 0 ||
- !ME->getSelector().getNameForSlot(0).equals("class"))
- return false;
+ static inline PrivateMethodKey getEmptyKey() {
+ return {InterfaceInfo::getEmptyKey(), SelectorInfo::getEmptyKey(), false};
+ }
- return true;
+ static inline PrivateMethodKey getTombstoneKey() {
+ return {InterfaceInfo::getTombstoneKey(), SelectorInfo::getTombstoneKey(),
+ true};
+ }
+
+ static unsigned getHashValue(const PrivateMethodKey &Key) {
+ return llvm::hash_combine(
+ llvm::hash_code(InterfaceInfo::getHashValue(Key.Interface)),
+ llvm::hash_code(SelectorInfo::getHashValue(Key.LookupSelector)),
+ Key.IsClassMethod);
+ }
+
+ static bool isEqual(const PrivateMethodKey &LHS,
+ const PrivateMethodKey &RHS) {
+ return InterfaceInfo::isEqual(LHS.Interface, RHS.Interface) &&
+ SelectorInfo::isEqual(LHS.LookupSelector, RHS.LookupSelector) &&
+ LHS.IsClassMethod == RHS.IsClassMethod;
+ }
+};
+} // end namespace llvm
+
+static const ObjCMethodDecl *
+lookupRuntimeDefinition(const ObjCInterfaceDecl *Interface,
+ Selector LookupSelector, bool InstanceMethod) {
+ // Repeatedly calling lookupPrivateMethod() is expensive, especially
+ // when in many cases it returns null. We cache the results so
+  // that repeated queries on the same ObjCInterfaceDecl and Selector
+ // don't incur the same cost. On some test cases, we can see the
+ // same query being issued thousands of times.
+ //
+ // NOTE: This cache is essentially a "global" variable, but it
+ // only gets lazily created when we get here. The value of the
+ // cache probably comes from it being global across ExprEngines,
+ // where the same queries may get issued. If we are worried about
+ // concurrency, or possibly loading/unloading ASTs, etc., we may
+ // need to revisit this someday. In terms of memory, this table
+ // stays around until clang quits, which also may be bad if we
+ // need to release memory.
+ using PrivateMethodCache =
+ llvm::DenseMap<PrivateMethodKey, Optional<const ObjCMethodDecl *>>;
+
+ static PrivateMethodCache PMC;
+ Optional<const ObjCMethodDecl *> &Val =
+ PMC[{Interface, LookupSelector, InstanceMethod}];
+
+ // Query lookupPrivateMethod() if the cache does not hit.
+ if (!Val.hasValue()) {
+ Val = Interface->lookupPrivateMethod(LookupSelector, InstanceMethod);
+
+ if (!*Val) {
+ // Query 'lookupMethod' as a backup.
+ Val = Interface->lookupMethod(LookupSelector, InstanceMethod);
+ }
+ }
+
+ return Val.getValue();
}
RuntimeDefinition ObjCMethodCall::getRuntimeDefinition() const {
@@ -1194,8 +1242,9 @@ RuntimeDefinition ObjCMethodCall::getRuntimeDefinition() const {
if (E->isInstanceMessage()) {
// Find the receiver type.
- const ObjCObjectPointerType *ReceiverT = nullptr;
+ const ObjCObjectType *ReceiverT = nullptr;
bool CanBeSubClassed = false;
+ bool LookingForInstanceMethod = true;
QualType SupersType = E->getSuperType();
const MemRegion *Receiver = nullptr;
@@ -1203,7 +1252,7 @@ RuntimeDefinition ObjCMethodCall::getRuntimeDefinition() const {
// The receiver is guaranteed to be 'super' in this case.
// Super always means the type of immediate predecessor to the method
// where the call occurs.
- ReceiverT = cast<ObjCObjectPointerType>(SupersType);
+ ReceiverT = cast<ObjCObjectPointerType>(SupersType)->getObjectType();
} else {
Receiver = getReceiverSVal().getAsRegion();
if (!Receiver)
@@ -1218,100 +1267,59 @@ RuntimeDefinition ObjCMethodCall::getRuntimeDefinition() const {
QualType DynType = DTI.getType();
CanBeSubClassed = DTI.canBeASubClass();
- ReceiverT = dyn_cast<ObjCObjectPointerType>(DynType.getCanonicalType());
- if (ReceiverT && CanBeSubClassed)
- if (ObjCInterfaceDecl *IDecl = ReceiverT->getInterfaceDecl())
- if (!canBeOverridenInSubclass(IDecl, Sel))
- CanBeSubClassed = false;
- }
+ const auto *ReceiverDynT =
+ dyn_cast<ObjCObjectPointerType>(DynType.getCanonicalType());
+
+ if (ReceiverDynT) {
+ ReceiverT = ReceiverDynT->getObjectType();
- // Handle special cases of '[self classMethod]' and
- // '[[self class] classMethod]', which are treated by the compiler as
- // instance (not class) messages. We will statically dispatch to those.
- if (auto *PT = dyn_cast_or_null<ObjCObjectPointerType>(ReceiverT)) {
- // For [self classMethod], return the compiler visible declaration.
- if (PT->getObjectType()->isObjCClass() &&
- Receiver == getSelfSVal().getAsRegion())
- return RuntimeDefinition(findDefiningRedecl(E->getMethodDecl()));
-
- // Similarly, handle [[self class] classMethod].
- // TODO: We are currently doing a syntactic match for this pattern with is
- // limiting as the test cases in Analysis/inlining/InlineObjCClassMethod.m
- // shows. A better way would be to associate the meta type with the symbol
- // using the dynamic type info tracking and use it here. We can add a new
- // SVal for ObjC 'Class' values that know what interface declaration they
- // come from. Then 'self' in a class method would be filled in with
- // something meaningful in ObjCMethodCall::getReceiverSVal() and we could
- // do proper dynamic dispatch for class methods just like we do for
- // instance methods now.
- if (E->getInstanceReceiver())
- if (const auto *M = dyn_cast<ObjCMessageExpr>(E->getInstanceReceiver()))
- if (isCallToSelfClass(M))
+        // These can actually be class methods called with a Class object as
+        // the receiver. Messages of this kind are treated by the compiler as
+        // instance (not class) messages.
+ if (ReceiverT->isObjCClass()) {
+
+ SVal SelfVal = getState()->getSelfSVal(getLocationContext());
+ // For [self classMethod], return compiler visible declaration.
+ if (Receiver == SelfVal.getAsRegion()) {
return RuntimeDefinition(findDefiningRedecl(E->getMethodDecl()));
+ }
+
+ // Otherwise, let's check if we know something about the type
+ // inside of this class object.
+ if (SymbolRef ReceiverSym = getReceiverSVal().getAsSymbol()) {
+ DynamicTypeInfo DTI =
+ getClassObjectDynamicTypeInfo(getState(), ReceiverSym);
+ if (DTI.isValid()) {
+ // Let's use this type for lookup.
+ ReceiverT =
+ cast<ObjCObjectType>(DTI.getType().getCanonicalType());
+
+ CanBeSubClassed = DTI.canBeASubClass();
+ // And it should be a class method instead.
+ LookingForInstanceMethod = false;
+ }
+ }
+ }
+
+ if (CanBeSubClassed)
+ if (ObjCInterfaceDecl *IDecl = ReceiverT->getInterface())
+          // Even if `DynamicTypeInfo` told us the receiver may not be this
+          // exact type but one of its descendants, we still want to check
+          // whether this selector can actually be overridden.
+ CanBeSubClassed = canBeOverridenInSubclass(IDecl, Sel);
+ }
}
// Lookup the instance method implementation.
if (ReceiverT)
- if (ObjCInterfaceDecl *IDecl = ReceiverT->getInterfaceDecl()) {
- // Repeatedly calling lookupPrivateMethod() is expensive, especially
- // when in many cases it returns null. We cache the results so
- // that repeated queries on the same ObjCIntefaceDecl and Selector
- // don't incur the same cost. On some test cases, we can see the
- // same query being issued thousands of times.
- //
- // NOTE: This cache is essentially a "global" variable, but it
- // only gets lazily created when we get here. The value of the
- // cache probably comes from it being global across ExprEngines,
- // where the same queries may get issued. If we are worried about
- // concurrency, or possibly loading/unloading ASTs, etc., we may
- // need to revisit this someday. In terms of memory, this table
- // stays around until clang quits, which also may be bad if we
- // need to release memory.
- using PrivateMethodKey = std::pair<const ObjCInterfaceDecl *, Selector>;
- using PrivateMethodCache =
- llvm::DenseMap<PrivateMethodKey, Optional<const ObjCMethodDecl *>>;
-
- static PrivateMethodCache PMC;
- Optional<const ObjCMethodDecl *> &Val = PMC[std::make_pair(IDecl, Sel)];
-
- // Query lookupPrivateMethod() if the cache does not hit.
- if (!Val.hasValue()) {
- Val = IDecl->lookupPrivateMethod(Sel);
-
- // If the method is a property accessor, we should try to "inline" it
- // even if we don't actually have an implementation.
- if (!*Val)
- if (const ObjCMethodDecl *CompileTimeMD = E->getMethodDecl())
- if (CompileTimeMD->isPropertyAccessor()) {
- if (!CompileTimeMD->getSelfDecl() &&
- isa<ObjCCategoryDecl>(CompileTimeMD->getDeclContext())) {
- // If the method is an accessor in a category, and it doesn't
- // have a self declaration, first
- // try to find the method in a class extension. This
- // works around a bug in Sema where multiple accessors
- // are synthesized for properties in class
- // extensions that are redeclared in a category and the
- // the implicit parameters are not filled in for
- // the method on the category.
- // This ensures we find the accessor in the extension, which
- // has the implicit parameters filled in.
- auto *ID = CompileTimeMD->getClassInterface();
- for (auto *CatDecl : ID->visible_extensions()) {
- Val = CatDecl->getMethod(Sel,
- CompileTimeMD->isInstanceMethod());
- if (*Val)
- break;
- }
- }
- if (!*Val)
- Val = IDecl->lookupInstanceMethod(Sel);
- }
- }
+ if (ObjCInterfaceDecl *IDecl = ReceiverT->getInterface()) {
+ const ObjCMethodDecl *MD =
+ lookupRuntimeDefinition(IDecl, Sel, LookingForInstanceMethod);
- const ObjCMethodDecl *MD = Val.getValue();
if (MD && !MD->hasBody())
MD = MD->getCanonicalDecl();
+
if (CanBeSubClassed)
return RuntimeDefinition(MD, Receiver);
else
@@ -1392,17 +1400,20 @@ CallEventManager::getCaller(const StackFrameContext *CalleeCtx,
if (CallEventRef<> Out = getCall(CallSite, State, CallerCtx))
return Out;
- // All other cases are handled by getCall.
- assert(isa<CXXConstructExpr>(CallSite) &&
- "This is not an inlineable statement");
-
SValBuilder &SVB = State->getStateManager().getSValBuilder();
const auto *Ctor = cast<CXXMethodDecl>(CalleeCtx->getDecl());
Loc ThisPtr = SVB.getCXXThis(Ctor, CalleeCtx);
SVal ThisVal = State->getSVal(ThisPtr);
- return getCXXConstructorCall(cast<CXXConstructExpr>(CallSite),
- ThisVal.getAsRegion(), State, CallerCtx);
+ if (const auto *CE = dyn_cast<CXXConstructExpr>(CallSite))
+ return getCXXConstructorCall(CE, ThisVal.getAsRegion(), State, CallerCtx);
+ else if (const auto *CIE = dyn_cast<CXXInheritedCtorInitExpr>(CallSite))
+ return getCXXInheritedConstructorCall(CIE, ThisVal.getAsRegion(), State,
+ CallerCtx);
+ else {
+ // All other cases are handled by getCall.
+ llvm_unreachable("This is not an inlineable statement");
+ }
}
// Fall back to the CFG. The only thing we haven't handled yet is
diff --git a/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp b/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
index 11693132de68..cae728815b41 100644
--- a/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
@@ -13,6 +13,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
+#include "clang/Lex/Preprocessor.h"
namespace clang {
@@ -109,6 +110,43 @@ Nullability getNullabilityAnnotation(QualType Type) {
return Nullability::Unspecified;
}
+llvm::Optional<int> tryExpandAsInteger(StringRef Macro,
+ const Preprocessor &PP) {
+ const auto *MacroII = PP.getIdentifierInfo(Macro);
+ if (!MacroII)
+ return llvm::None;
+ const MacroInfo *MI = PP.getMacroInfo(MacroII);
+ if (!MI)
+ return llvm::None;
+
+ // Filter out parens.
+ std::vector<Token> FilteredTokens;
+ FilteredTokens.reserve(MI->tokens().size());
+ for (auto &T : MI->tokens())
+ if (!T.isOneOf(tok::l_paren, tok::r_paren))
+ FilteredTokens.push_back(T);
+
+ // Parse an integer at the end of the macro definition.
+ const Token &T = FilteredTokens.back();
+  // FIXME: An EOF macro token coming from a PCH file on macOS, while marked
+  // as a literal, doesn't contain any literal data.
+ if (!T.isLiteral() || !T.getLiteralData())
+ return llvm::None;
+ StringRef ValueStr = StringRef(T.getLiteralData(), T.getLength());
+ llvm::APInt IntValue;
+ constexpr unsigned AutoSenseRadix = 0;
+ if (ValueStr.getAsInteger(AutoSenseRadix, IntValue))
+ return llvm::None;
+
+ // Parse an optional minus sign.
+ size_t Size = FilteredTokens.size();
+ if (Size >= 2) {
+ if (FilteredTokens[Size - 2].is(tok::minus))
+ IntValue = -IntValue;
+ }
+
+ return IntValue.getSExtValue();
+}
-} // end namespace ento
-} // end namespace clang
+} // namespace ento
+} // namespace clang
diff --git a/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp b/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
index a9361837cf68..86cecf6524f0 100644
--- a/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -61,12 +61,12 @@ void CheckerManager::finishedCheckerRegistration() {
}
void CheckerManager::reportInvalidCheckerOptionValue(
- const CheckerBase *C, StringRef OptionName, StringRef ExpectedValueDesc) {
+ const CheckerBase *C, StringRef OptionName,
+ StringRef ExpectedValueDesc) const {
- Context.getDiagnostics()
- .Report(diag::err_analyzer_checker_option_invalid_input)
- << (llvm::Twine() + C->getTagDescription() + ":" + OptionName).str()
- << ExpectedValueDesc;
+ getDiagnostics().Report(diag::err_analyzer_checker_option_invalid_input)
+ << (llvm::Twine() + C->getTagDescription() + ":" + OptionName).str()
+ << ExpectedValueDesc;
}
//===----------------------------------------------------------------------===//
@@ -243,13 +243,13 @@ void CheckerManager::runCheckersForObjCMessage(ObjCMessageVisitKind visitKind,
const ObjCMethodCall &msg,
ExprEngine &Eng,
bool WasInlined) {
- auto &checkers = getObjCMessageCheckers(visitKind);
+ const auto &checkers = getObjCMessageCheckers(visitKind);
CheckObjCMessageContext C(visitKind, checkers, msg, Eng, WasInlined);
expandGraphWithCheckers(C, Dst, Src);
}
const std::vector<CheckerManager::CheckObjCMessageFunc> &
-CheckerManager::getObjCMessageCheckers(ObjCMessageVisitKind Kind) {
+CheckerManager::getObjCMessageCheckers(ObjCMessageVisitKind Kind) const {
switch (Kind) {
case ObjCMessageVisitKind::Pre:
return PreObjCMessageCheckers;
@@ -507,35 +507,38 @@ namespace {
using CheckersTy = std::vector<CheckerManager::CheckNewAllocatorFunc>;
const CheckersTy &Checkers;
- const CXXNewExpr *NE;
- SVal Target;
+ const CXXAllocatorCall &Call;
bool WasInlined;
ExprEngine &Eng;
- CheckNewAllocatorContext(const CheckersTy &Checkers, const CXXNewExpr *NE,
- SVal Target, bool WasInlined, ExprEngine &Eng)
- : Checkers(Checkers), NE(NE), Target(Target), WasInlined(WasInlined),
- Eng(Eng) {}
+ CheckNewAllocatorContext(const CheckersTy &Checkers,
+ const CXXAllocatorCall &Call, bool WasInlined,
+ ExprEngine &Eng)
+ : Checkers(Checkers), Call(Call), WasInlined(WasInlined), Eng(Eng) {}
CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
void runChecker(CheckerManager::CheckNewAllocatorFunc checkFn,
NodeBuilder &Bldr, ExplodedNode *Pred) {
- ProgramPoint L = PostAllocatorCall(NE, Pred->getLocationContext());
+ ProgramPoint L =
+ PostAllocatorCall(Call.getOriginExpr(), Pred->getLocationContext());
CheckerContext C(Bldr, Eng, Pred, L, WasInlined);
- checkFn(NE, Target, C);
+ checkFn(cast<CXXAllocatorCall>(*Call.cloneWithState(Pred->getState())),
+ C);
}
};
} // namespace
-void CheckerManager::runCheckersForNewAllocator(
- const CXXNewExpr *NE, SVal Target, ExplodedNodeSet &Dst, ExplodedNode *Pred,
- ExprEngine &Eng, bool WasInlined) {
+void CheckerManager::runCheckersForNewAllocator(const CXXAllocatorCall &Call,
+ ExplodedNodeSet &Dst,
+ ExplodedNode *Pred,
+ ExprEngine &Eng,
+ bool WasInlined) {
ExplodedNodeSet Src;
Src.insert(Pred);
- CheckNewAllocatorContext C(NewAllocatorCheckers, NE, Target, WasInlined, Eng);
+ CheckNewAllocatorContext C(NewAllocatorCheckers, Call, WasInlined, Eng);
expandGraphWithCheckers(C, Dst, Src);
}
@@ -650,8 +653,9 @@ CheckerManager::runCheckersForEvalAssume(ProgramStateRef state,
void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
const CallEvent &Call,
- ExprEngine &Eng) {
- for (const auto Pred : Src) {
+ ExprEngine &Eng,
+ const EvalCallOptions &CallOpts) {
+ for (auto *const Pred : Src) {
bool anyEvaluated = false;
ExplodedNodeSet checkDst;
@@ -662,10 +666,8 @@ void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
// TODO: Support the situation when the call doesn't correspond
// to any Expr.
ProgramPoint L = ProgramPoint::getProgramPoint(
- cast<CallExpr>(Call.getOriginExpr()),
- ProgramPoint::PostStmtKind,
- Pred->getLocationContext(),
- EvalCallChecker.Checker);
+ Call.getOriginExpr(), ProgramPoint::PostStmtKind,
+ Pred->getLocationContext(), EvalCallChecker.Checker);
bool evaluated = false;
{ // CheckerContext generates transitions(populates checkDest) on
// destruction, so introduce the scope to make sure it gets properly
@@ -687,7 +689,7 @@ void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
// If none of the checkers evaluated the call, ask ExprEngine to handle it.
if (!anyEvaluated) {
NodeBuilder B(Pred, Dst, Eng.getBuilderContext());
- Eng.defaultEvalCall(B, Pred, Call);
+ Eng.defaultEvalCall(B, Pred, Call, CallOpts);
}
}
}
@@ -902,8 +904,3 @@ CheckerManager::getCachedStmtCheckersFor(const Stmt *S, bool isPreVisit) {
Checkers.push_back(Info.CheckFn);
return Checkers;
}
-
-CheckerManager::~CheckerManager() {
- for (const auto &CheckerDtor : CheckerDtors)
- CheckerDtor();
-}
diff --git a/clang/lib/StaticAnalyzer/Core/CheckerRegistryData.cpp b/clang/lib/StaticAnalyzer/Core/CheckerRegistryData.cpp
new file mode 100644
index 000000000000..1b3e8b11549d
--- /dev/null
+++ b/clang/lib/StaticAnalyzer/Core/CheckerRegistryData.cpp
@@ -0,0 +1,241 @@
+//===- CheckerRegistryData.cpp - Maintains all available checkers -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/CheckerRegistryData.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+#include "llvm/ADT/Twine.h"
+#include <map>
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// Methods of CmdLineOption, PackageInfo and CheckerInfo.
+//===----------------------------------------------------------------------===//
+
+LLVM_DUMP_METHOD void CmdLineOption::dump() const {
+ dumpToStream(llvm::errs());
+}
+
+LLVM_DUMP_METHOD void
+CmdLineOption::dumpToStream(llvm::raw_ostream &Out) const {
+  // The description can simply be checked in Checkers.inc; the point here is
+  // to debug whether we succeeded in parsing it.
+ Out << OptionName << " (" << OptionType << ", "
+ << (IsHidden ? "hidden, " : "") << DevelopmentStatus << ") default: \""
+ << DefaultValStr;
+}
+
+static StringRef toString(StateFromCmdLine Kind) {
+ switch (Kind) {
+ case StateFromCmdLine::State_Disabled:
+ return "Disabled";
+ case StateFromCmdLine::State_Enabled:
+ return "Enabled";
+ case StateFromCmdLine::State_Unspecified:
+ return "Unspecified";
+ }
+ llvm_unreachable("Unhandled StateFromCmdLine enum");
+}
+
+LLVM_DUMP_METHOD void CheckerInfo::dump() const { dumpToStream(llvm::errs()); }
+
+LLVM_DUMP_METHOD void CheckerInfo::dumpToStream(llvm::raw_ostream &Out) const {
+  // The description can simply be checked in Checkers.inc; the point here is
+  // to debug whether we succeeded in parsing it. Same with the documentation
+  // uri.
+ Out << FullName << " (" << toString(State) << (IsHidden ? ", hidden" : "")
+ << ")\n";
+ Out << " Options:\n";
+ for (const CmdLineOption &Option : CmdLineOptions) {
+ Out << " ";
+ Option.dumpToStream(Out);
+ Out << '\n';
+ }
+ Out << " Dependencies:\n";
+ for (const CheckerInfo *Dependency : Dependencies) {
+ Out << " " << Dependency->FullName << '\n';
+ }
+ Out << " Weak dependencies:\n";
+ for (const CheckerInfo *Dependency : WeakDependencies) {
+ Out << " " << Dependency->FullName << '\n';
+ }
+}
+
+LLVM_DUMP_METHOD void PackageInfo::dump() const { dumpToStream(llvm::errs()); }
+
+LLVM_DUMP_METHOD void PackageInfo::dumpToStream(llvm::raw_ostream &Out) const {
+ Out << FullName << "\n";
+ Out << " Options:\n";
+ for (const CmdLineOption &Option : CmdLineOptions) {
+ Out << " ";
+ Option.dumpToStream(Out);
+ Out << '\n';
+ }
+}
+
+static constexpr char PackageSeparator = '.';
+
+static bool isInPackage(const CheckerInfo &Checker, StringRef PackageName) {
+ // Does the checker's full name have the package as a prefix?
+ if (!Checker.FullName.startswith(PackageName))
+ return false;
+
+ // Is the package actually just the name of a specific checker?
+ if (Checker.FullName.size() == PackageName.size())
+ return true;
+
+ // Is the checker in the package (or a subpackage)?
+ if (Checker.FullName[PackageName.size()] == PackageSeparator)
+ return true;
+
+ return false;
+}
+
+CheckerInfoListRange
+CheckerRegistryData::getMutableCheckersForCmdLineArg(StringRef CmdLineArg) {
+ auto It = checker_registry::binaryFind(Checkers, CmdLineArg);
+
+ if (!isInPackage(*It, CmdLineArg))
+ return {Checkers.end(), Checkers.end()};
+
+ // See how large the package is.
+ // If the package doesn't exist, assume the option refers to a single
+ // checker.
+ size_t Size = 1;
+ llvm::StringMap<size_t>::const_iterator PackageSize =
+ PackageSizes.find(CmdLineArg);
+
+ if (PackageSize != PackageSizes.end())
+ Size = PackageSize->getValue();
+
+ return {It, It + Size};
+}
+//===----------------------------------------------------------------------===//
+// Printing functions.
+//===----------------------------------------------------------------------===//
+
+void CheckerRegistryData::printCheckerWithDescList(
+ const AnalyzerOptions &AnOpts, raw_ostream &Out,
+ size_t MaxNameChars) const {
+ // FIXME: Print available packages.
+
+ Out << "CHECKERS:\n";
+
+ // Find the maximum option length.
+ size_t OptionFieldWidth = 0;
+ for (const auto &Checker : Checkers) {
+ // Limit the amount of padding we are willing to give up for alignment.
+ // Package.Name Description [Hidden]
+ size_t NameLength = Checker.FullName.size();
+ if (NameLength <= MaxNameChars)
+ OptionFieldWidth = std::max(OptionFieldWidth, NameLength);
+ }
+
+ const size_t InitialPad = 2;
+
+ auto Print = [=](llvm::raw_ostream &Out, const CheckerInfo &Checker,
+ StringRef Description) {
+ AnalyzerOptions::printFormattedEntry(Out, {Checker.FullName, Description},
+ InitialPad, OptionFieldWidth);
+ Out << '\n';
+ };
+
+ for (const auto &Checker : Checkers) {
+    // The order of these if branches is significant; we wouldn't want to
+    // display
+ // developer checkers even in the alpha output. For example,
+ // alpha.cplusplus.IteratorModeling is a modeling checker, hence it's hidden
+ // by default, and users (even when the user is a developer of an alpha
+ // checker) shouldn't normally tinker with whether they should be enabled.
+
+ if (Checker.IsHidden) {
+ if (AnOpts.ShowCheckerHelpDeveloper)
+ Print(Out, Checker, Checker.Desc);
+ continue;
+ }
+
+ if (Checker.FullName.startswith("alpha")) {
+ if (AnOpts.ShowCheckerHelpAlpha)
+ Print(Out, Checker,
+ ("(Enable only for development!) " + Checker.Desc).str());
+ continue;
+ }
+
+ if (AnOpts.ShowCheckerHelp)
+ Print(Out, Checker, Checker.Desc);
+ }
+}
+
+void CheckerRegistryData::printEnabledCheckerList(raw_ostream &Out) const {
+ for (const auto *i : EnabledCheckers)
+ Out << i->FullName << '\n';
+}
+
+void CheckerRegistryData::printCheckerOptionList(const AnalyzerOptions &AnOpts,
+ raw_ostream &Out) const {
+ Out << "OVERVIEW: Clang Static Analyzer Checker and Package Option List\n\n";
+ Out << "USAGE: -analyzer-config <OPTION1=VALUE,OPTION2=VALUE,...>\n\n";
+ Out << " -analyzer-config OPTION1=VALUE, -analyzer-config "
+ "OPTION2=VALUE, ...\n\n";
+ Out << "OPTIONS:\n\n";
+
+ // It's usually ill-advised to use multimap, but clang will terminate after
+ // this function.
+ std::multimap<StringRef, const CmdLineOption &> OptionMap;
+
+ for (const CheckerInfo &Checker : Checkers) {
+ for (const CmdLineOption &Option : Checker.CmdLineOptions) {
+ OptionMap.insert({Checker.FullName, Option});
+ }
+ }
+
+ for (const PackageInfo &Package : Packages) {
+ for (const CmdLineOption &Option : Package.CmdLineOptions) {
+ OptionMap.insert({Package.FullName, Option});
+ }
+ }
+
+ auto Print = [](llvm::raw_ostream &Out, StringRef FullOption,
+ StringRef Desc) {
+ AnalyzerOptions::printFormattedEntry(Out, {FullOption, Desc},
+ /*InitialPad*/ 2,
+ /*EntryWidth*/ 50,
+ /*MinLineWidth*/ 90);
+ Out << "\n\n";
+ };
+ for (const std::pair<const StringRef, const CmdLineOption &> &Entry :
+ OptionMap) {
+ const CmdLineOption &Option = Entry.second;
+ std::string FullOption = (Entry.first + ":" + Option.OptionName).str();
+
+ std::string Desc =
+ ("(" + Option.OptionType + ") " + Option.Description + " (default: " +
+ (Option.DefaultValStr.empty() ? "\"\"" : Option.DefaultValStr) + ")")
+ .str();
+
+    // The order of these if branches is significant; we wouldn't want to
+ // display hidden alpha checker options for
+ // -analyzer-checker-option-help-alpha.
+
+ if (Option.IsHidden) {
+ if (AnOpts.ShowCheckerOptionDeveloperList)
+ Print(Out, FullOption, Desc);
+ continue;
+ }
+
+ if (Option.DevelopmentStatus == "alpha" ||
+ Entry.first.startswith("alpha")) {
+ if (AnOpts.ShowCheckerOptionAlphaList)
+ Print(Out, FullOption,
+ llvm::Twine("(Enable only for development!) " + Desc).str());
+ continue;
+ }
+
+ if (AnOpts.ShowCheckerOptionList)
+ Print(Out, FullOption, Desc);
+ }
+}
diff --git a/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp b/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp
index bdae3e605eff..a601370775b4 100644
--- a/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp
@@ -9,13 +9,18 @@
#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
// Common strings used for the "category" of many static analyzer issues.
-namespace clang { namespace ento { namespace categories {
+namespace clang {
+namespace ento {
+namespace categories {
-const char * const CoreFoundationObjectiveC = "Core Foundation/Objective-C";
-const char * const LogicError = "Logic error";
-const char * const MemoryRefCount =
- "Memory (Core Foundation/Objective-C/OSObject)";
-const char * const MemoryError = "Memory error";
-const char * const UnixAPI = "Unix API";
-const char * const CXXObjectLifecycle = "C++ object lifecycle";
-}}}
+const char *const CoreFoundationObjectiveC = "Core Foundation/Objective-C";
+const char *const LogicError = "Logic error";
+const char *const MemoryRefCount =
+ "Memory (Core Foundation/Objective-C/OSObject)";
+const char *const MemoryError = "Memory error";
+const char *const UnixAPI = "Unix API";
+const char *const CXXObjectLifecycle = "C++ object lifecycle";
+const char *const SecurityError = "Security error";
+} // namespace categories
+} // namespace ento
+} // namespace clang
diff --git a/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp b/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
index 94cf74de8293..70deb13a8e1a 100644
--- a/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
@@ -23,8 +23,8 @@
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/BlockCounter.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/WorkList.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
@@ -52,8 +52,7 @@ STATISTIC(NumPathsExplored,
// Core analysis engine.
//===----------------------------------------------------------------------===//
-static std::unique_ptr<WorkList> generateWorkList(AnalyzerOptions &Opts,
- SubEngine &subengine) {
+static std::unique_ptr<WorkList> generateWorkList(AnalyzerOptions &Opts) {
switch (Opts.getExplorationStrategy()) {
case ExplorationStrategyKind::DFS:
return WorkList::makeDFS();
@@ -71,9 +70,9 @@ static std::unique_ptr<WorkList> generateWorkList(AnalyzerOptions &Opts,
llvm_unreachable("Unknown AnalyzerOptions::ExplorationStrategyKind");
}
-CoreEngine::CoreEngine(SubEngine &subengine, FunctionSummariesTy *FS,
+CoreEngine::CoreEngine(ExprEngine &exprengine, FunctionSummariesTy *FS,
AnalyzerOptions &Opts)
- : SubEng(subengine), WList(generateWorkList(Opts, subengine)),
+ : ExprEng(exprengine), WList(generateWorkList(Opts)),
BCounterFactory(G.getAllocator()), FunctionSummaries(FS) {}
/// ExecuteWorkList - Run the worklist algorithm for a maximum number of steps.
@@ -104,7 +103,7 @@ bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
WList->setBlockCounter(BCounterFactory.GetEmptyCounter());
if (!InitState)
- InitState = SubEng.getInitialState(L);
+ InitState = ExprEng.getInitialState(L);
bool IsNew;
ExplodedNode *Node = G.getNode(StartLoc, InitState, false, &IsNew);
@@ -113,7 +112,7 @@ bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
NodeBuilderContext BuilderCtx(*this, StartLoc.getDst(), Node);
ExplodedNodeSet DstBegin;
- SubEng.processBeginOfFunction(BuilderCtx, Node, DstBegin, StartLoc);
+ ExprEng.processBeginOfFunction(BuilderCtx, Node, DstBegin, StartLoc);
enqueue(DstBegin);
}
@@ -147,7 +146,7 @@ bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
dispatchWorkItem(Node, Node->getLocation(), WU);
}
- SubEng.processEndWorklist();
+ ExprEng.processEndWorklist();
return WList->hasWork();
}
@@ -172,7 +171,7 @@ void CoreEngine::dispatchWorkItem(ExplodedNode* Pred, ProgramPoint Loc,
break;
case ProgramPoint::CallExitBeginKind:
- SubEng.processCallExit(Pred);
+ ExprEng.processCallExit(Pred);
break;
case ProgramPoint::EpsilonKind: {
@@ -221,7 +220,7 @@ void CoreEngine::HandleBlockEdge(const BlockEdge &L, ExplodedNode *Pred) {
if (L.getSrc()->getTerminator().isVirtualBaseBranch() &&
L.getDst() == *L.getSrc()->succ_begin()) {
ProgramPoint P = L.withTag(getNoteTags().makeNoteTag(
- [](BugReporterContext &, BugReport &) -> std::string {
+ [](BugReporterContext &, PathSensitiveBugReport &) -> std::string {
// TODO: Just call out the name of the most derived class
// when we know it.
return "Virtual base initialization skipped because "
@@ -253,17 +252,17 @@ void CoreEngine::HandleBlockEdge(const BlockEdge &L, ExplodedNode *Pred) {
}
// Process the final state transition.
- SubEng.processEndOfFunction(BuilderCtx, Pred, RS);
+ ExprEng.processEndOfFunction(BuilderCtx, Pred, RS);
// This path is done. Don't enqueue any more nodes.
return;
}
- // Call into the SubEngine to process entering the CFGBlock.
+ // Call into the ExprEngine to process entering the CFGBlock.
ExplodedNodeSet dstNodes;
BlockEntrance BE(Blk, Pred->getLocationContext());
NodeBuilderWithSinks nodeBuilder(Pred, dstNodes, BuilderCtx, BE);
- SubEng.processCFGBlockEntrance(L, nodeBuilder, Pred);
+ ExprEng.processCFGBlockEntrance(L, nodeBuilder, Pred);
// Auto-generate a node.
if (!nodeBuilder.hasGeneratedNodes()) {
@@ -287,7 +286,7 @@ void CoreEngine::HandleBlockEntrance(const BlockEntrance &L,
// Process the entrance of the block.
if (Optional<CFGElement> E = L.getFirstElement()) {
NodeBuilderContext Ctx(*this, L.getBlock(), Pred);
- SubEng.processCFGElement(*E, Pred, 0, &Ctx);
+ ExprEng.processCFGElement(*E, Pred, 0, &Ctx);
}
else
HandleBlockExit(L.getBlock(), Pred);
@@ -367,7 +366,7 @@ void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
builder(Pred, B, cast<IndirectGotoStmt>(Term)->getTarget(),
*(B->succ_begin()), this);
- SubEng.processIndirectGoto(builder);
+ ExprEng.processIndirectGoto(builder);
return;
}
@@ -378,7 +377,7 @@ void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
// 'element' variable to a value.
// (2) in a terminator, which represents the branch.
//
- // For (1), subengines will bind a value (i.e., 0 or 1) indicating
+ // For (1), ExprEngine will bind a value (i.e., 0 or 1) indicating
// whether or not collection contains any more elements. We cannot
// just test to see if the element is nil because a container can
// contain nil elements.
@@ -389,7 +388,7 @@ void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
SwitchNodeBuilder builder(Pred, B, cast<SwitchStmt>(Term)->getCond(),
this);
- SubEng.processSwitch(builder);
+ ExprEng.processSwitch(builder);
return;
}
@@ -418,7 +417,7 @@ void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
void CoreEngine::HandleCallEnter(const CallEnter &CE, ExplodedNode *Pred) {
NodeBuilderContext BuilderCtx(*this, CE.getEntry(), Pred);
- SubEng.processCallEnter(BuilderCtx, CE, Pred);
+ ExprEng.processCallEnter(BuilderCtx, CE, Pred);
}
void CoreEngine::HandleBranch(const Stmt *Cond, const Stmt *Term,
@@ -426,7 +425,7 @@ void CoreEngine::HandleBranch(const Stmt *Cond, const Stmt *Term,
assert(B->succ_size() == 2);
NodeBuilderContext Ctx(*this, B, Pred);
ExplodedNodeSet Dst;
- SubEng.processBranch(Cond, Ctx, Pred, Dst, *(B->succ_begin()),
+ ExprEng.processBranch(Cond, Ctx, Pred, Dst, *(B->succ_begin()),
*(B->succ_begin() + 1));
// Enqueue the new frontier onto the worklist.
enqueue(Dst);
@@ -438,7 +437,7 @@ void CoreEngine::HandleCleanupTemporaryBranch(const CXXBindTemporaryExpr *BTE,
assert(B->succ_size() == 2);
NodeBuilderContext Ctx(*this, B, Pred);
ExplodedNodeSet Dst;
- SubEng.processCleanupTemporaryBranch(BTE, Ctx, Pred, Dst, *(B->succ_begin()),
+ ExprEng.processCleanupTemporaryBranch(BTE, Ctx, Pred, Dst, *(B->succ_begin()),
*(B->succ_begin() + 1));
// Enqueue the new frontier onto the worklist.
enqueue(Dst);
@@ -449,7 +448,7 @@ void CoreEngine::HandleStaticInit(const DeclStmt *DS, const CFGBlock *B,
assert(B->succ_size() == 2);
NodeBuilderContext Ctx(*this, B, Pred);
ExplodedNodeSet Dst;
- SubEng.processStaticInitializer(DS, Ctx, Pred, Dst,
+ ExprEng.processStaticInitializer(DS, Ctx, Pred, Dst,
*(B->succ_begin()), *(B->succ_begin()+1));
// Enqueue the new frontier onto the worklist.
enqueue(Dst);
@@ -464,7 +463,7 @@ void CoreEngine::HandlePostStmt(const CFGBlock *B, unsigned StmtIdx,
HandleBlockExit(B, Pred);
else {
NodeBuilderContext Ctx(*this, B, Pred);
- SubEng.processCFGElement((*B)[StmtIdx], Pred, StmtIdx, &Ctx);
+ ExprEng.processCFGElement((*B)[StmtIdx], Pred, StmtIdx, &Ctx);
}
}
diff --git a/clang/lib/StaticAnalyzer/Core/DynamicSize.cpp b/clang/lib/StaticAnalyzer/Core/DynamicSize.cpp
new file mode 100644
index 000000000000..8b2172db445c
--- /dev/null
+++ b/clang/lib/StaticAnalyzer/Core/DynamicSize.cpp
@@ -0,0 +1,71 @@
+//===- DynamicSize.cpp - Dynamic size related APIs --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines APIs that track and query dynamic size information.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+
+namespace clang {
+namespace ento {
+
+DefinedOrUnknownSVal getDynamicSize(ProgramStateRef State, const MemRegion *MR,
+ SValBuilder &SVB) {
+ return MR->getMemRegionManager().getStaticSize(MR, SVB);
+}
+
+DefinedOrUnknownSVal getDynamicElementCount(ProgramStateRef State,
+ const MemRegion *MR,
+ SValBuilder &SVB,
+ QualType ElementTy) {
+ MemRegionManager &MemMgr = MR->getMemRegionManager();
+ ASTContext &Ctx = MemMgr.getContext();
+
+ DefinedOrUnknownSVal Size = getDynamicSize(State, MR, SVB);
+ SVal ElementSizeV = SVB.makeIntVal(
+ Ctx.getTypeSizeInChars(ElementTy).getQuantity(), SVB.getArrayIndexType());
+
+ SVal DivisionV =
+ SVB.evalBinOp(State, BO_Div, Size, ElementSizeV, SVB.getArrayIndexType());
+
+ return DivisionV.castAs<DefinedOrUnknownSVal>();
+}
+
+SVal getDynamicSizeWithOffset(ProgramStateRef State, const SVal &BufV) {
+ SValBuilder &SvalBuilder = State->getStateManager().getSValBuilder();
+ const MemRegion *MRegion = BufV.getAsRegion();
+ if (!MRegion)
+ return UnknownVal();
+ RegionOffset Offset = MRegion->getAsOffset();
+ if (Offset.hasSymbolicOffset())
+ return UnknownVal();
+ const MemRegion *BaseRegion = MRegion->getBaseRegion();
+ if (!BaseRegion)
+ return UnknownVal();
+
+ NonLoc OffsetInBytes = SvalBuilder.makeArrayIndex(
+ Offset.getOffset() /
+ MRegion->getMemRegionManager().getContext().getCharWidth());
+ DefinedOrUnknownSVal ExtentInBytes =
+ getDynamicSize(State, BaseRegion, SvalBuilder);
+
+ return SvalBuilder.evalBinOp(State, BinaryOperator::Opcode::BO_Sub,
+ ExtentInBytes, OffsetInBytes,
+ SvalBuilder.getArrayIndexType());
+}
+
+} // namespace ento
+} // namespace clang
diff --git a/clang/lib/StaticAnalyzer/Core/DynamicType.cpp b/clang/lib/StaticAnalyzer/Core/DynamicType.cpp
index a78e0e05e903..e9b64fd79614 100644
--- a/clang/lib/StaticAnalyzer/Core/DynamicType.cpp
+++ b/clang/lib/StaticAnalyzer/Core/DynamicType.cpp
@@ -34,6 +34,10 @@ REGISTER_SET_FACTORY_WITH_PROGRAMSTATE(CastSet, clang::ento::DynamicCastInfo)
REGISTER_MAP_WITH_PROGRAMSTATE(DynamicCastMap, const clang::ento::MemRegion *,
CastSet)
+// A map from Class object symbols to the most likely pointed-to type.
+REGISTER_MAP_WITH_PROGRAMSTATE(DynamicClassObjectMap, clang::ento::SymbolRef,
+ clang::ento::DynamicTypeInfo)
+
namespace clang {
namespace ento {
@@ -76,6 +80,12 @@ const DynamicCastInfo *getDynamicCastInfo(ProgramStateRef State,
return nullptr;
}
+DynamicTypeInfo getClassObjectDynamicTypeInfo(ProgramStateRef State,
+ SymbolRef Sym) {
+ const DynamicTypeInfo *DTI = State->get<DynamicClassObjectMap>(Sym);
+ return DTI ? *DTI : DynamicTypeInfo{};
+}
+
ProgramStateRef setDynamicTypeInfo(ProgramStateRef State, const MemRegion *MR,
DynamicTypeInfo NewTy) {
State = State->set<DynamicTypeMap>(MR->StripCasts(), NewTy);
@@ -118,111 +128,165 @@ ProgramStateRef setDynamicTypeAndCastInfo(ProgramStateRef State,
return State;
}
+// Records \p NewTy as the dynamic type information for the Class object
+// symbol \p Sym and returns the updated program state.
+ProgramStateRef setClassObjectDynamicTypeInfo(ProgramStateRef State,
+ SymbolRef Sym,
+ DynamicTypeInfo NewTy) {
+ State = State->set<DynamicClassObjectMap>(Sym, NewTy);
+ return State;
+}
+
+// Convenience overload: wraps \p NewTy and \p CanBeSubClassed into a
+// DynamicTypeInfo and records it for \p Sym.
+ProgramStateRef setClassObjectDynamicTypeInfo(ProgramStateRef State,
+ SymbolRef Sym, QualType NewTy,
+ bool CanBeSubClassed) {
+ return setClassObjectDynamicTypeInfo(State, Sym,
+ DynamicTypeInfo(NewTy, CanBeSubClassed));
+}
+
+// Liveness queries for the two kinds of map keys. These overloads let
+// removeDeadImpl below handle region-keyed and symbol-keyed program-state
+// maps uniformly.
+static bool isLive(SymbolReaper &SR, const MemRegion *MR) {
+ return SR.isLiveRegion(MR);
+}
+
+static bool isLive(SymbolReaper &SR, SymbolRef Sym) { return SR.isLive(Sym); }
+
template <typename MapTy>
-ProgramStateRef removeDead(ProgramStateRef State, const MapTy &Map,
- SymbolReaper &SR) {
+static ProgramStateRef removeDeadImpl(ProgramStateRef State, SymbolReaper &SR) {
+ const auto &Map = State->get<MapTy>();
+
for (const auto &Elem : Map)
- if (!SR.isLiveRegion(Elem.first))
- State = State->remove<DynamicCastMap>(Elem.first);
+ if (!isLive(SR, Elem.first))
+ State = State->remove<MapTy>(Elem.first);
return State;
}
ProgramStateRef removeDeadTypes(ProgramStateRef State, SymbolReaper &SR) {
- return removeDead(State, State->get<DynamicTypeMap>(), SR);
+ return removeDeadImpl<DynamicTypeMap>(State, SR);
}
ProgramStateRef removeDeadCasts(ProgramStateRef State, SymbolReaper &SR) {
- return removeDead(State, State->get<DynamicCastMap>(), SR);
+ return removeDeadImpl<DynamicCastMap>(State, SR);
}
-static void printDynamicTypesJson(raw_ostream &Out, ProgramStateRef State,
- const char *NL, unsigned int Space,
- bool IsDot) {
- Indent(Out, Space, IsDot) << "\"dynamic_types\": ";
+// Drops Class object type information whose symbols are no longer live.
+ProgramStateRef removeDeadClassObjectTypes(ProgramStateRef State,
+ SymbolReaper &SR) {
+ return removeDeadImpl<DynamicClassObjectMap>(State, SR);
+}
- const DynamicTypeMapTy &Map = State->get<DynamicTypeMap>();
- if (Map.isEmpty()) {
- Out << "null," << NL;
- return;
- }
+//===----------------------------------------------------------------------===//
+// Implementation of the 'printer-to-JSON' function
+//===----------------------------------------------------------------------===//
- ++Space;
- Out << '[' << NL;
- for (DynamicTypeMapTy::iterator I = Map.begin(); I != Map.end(); ++I) {
- const MemRegion *MR = I->first;
- const DynamicTypeInfo &DTI = I->second;
- Indent(Out, Space, IsDot)
- << "{ \"region\": \"" << MR << "\", \"dyn_type\": ";
- if (!DTI.isValid()) {
- Out << "null";
- } else {
- Out << '\"' << DTI.getType()->getPointeeType().getAsString()
- << "\", \"sub_classable\": "
- << (DTI.canBeASubClass() ? "true" : "false");
- }
- Out << " }";
-
- if (std::next(I) != Map.end())
- Out << ',';
- Out << NL;
+// Prints a memory region as a JSON key/value pair. NL/Space/IsDot are
+// unused here; they are kept so every printJson overload shares the same
+// signature and can be called generically from the pair printer below.
+static raw_ostream &printJson(const MemRegion *Region, raw_ostream &Out,
+ const char *NL, unsigned int Space, bool IsDot) {
+ return Out << "\"region\": \"" << Region << "\"";
+}
+
+// Prints a symbol as a JSON key/value pair; the unused parameters keep
+// the signature uniform with the other printJson overloads.
+static raw_ostream &printJson(const SymExpr *Symbol, raw_ostream &Out,
+ const char *NL, unsigned int Space, bool IsDot) {
+ return Out << "\"symbol\": \"" << Symbol << "\"";
+}
+
+static raw_ostream &printJson(const DynamicTypeInfo &DTI, raw_ostream &Out,
+ const char *NL, unsigned int Space, bool IsDot) {
+ Out << "\"dyn_type\": ";
+ if (!DTI.isValid()) {
+ Out << "null";
+ } else {
+ QualType ToPrint = DTI.getType();
+ if (ToPrint->isAnyPointerType())
+ ToPrint = ToPrint->getPointeeType();
+
+ Out << '\"' << ToPrint.getAsString() << "\", \"sub_classable\": "
+ << (DTI.canBeASubClass() ? "true" : "false");
}
+ return Out;
+}
- --Space;
- Indent(Out, Space, IsDot) << "]," << NL;
+static raw_ostream &printJson(const DynamicCastInfo &DCI, raw_ostream &Out,
+ const char *NL, unsigned int Space, bool IsDot) {
+ return Out << "\"from\": \"" << DCI.from().getAsString() << "\", \"to\": \""
+ << DCI.to().getAsString() << "\", \"kind\": \""
+ << (DCI.succeeds() ? "success" : "fail") << "\"";
}
-static void printDynamicCastsJson(raw_ostream &Out, ProgramStateRef State,
- const char *NL, unsigned int Space,
- bool IsDot) {
- Indent(Out, Space, IsDot) << "\"dynamic_casts\": ";
+// Prints one map entry: first the key, then the value, comma-separated.
+// Dispatches to the matching printJson overloads for each half.
+template <class T, class U>
+static raw_ostream &printJson(const std::pair<T, U> &Pair, raw_ostream &Out,
+ const char *NL, unsigned int Space, bool IsDot) {
+ printJson(Pair.first, Out, NL, Space, IsDot) << ", ";
+ return printJson(Pair.second, Out, NL, Space, IsDot);
+}
- const DynamicCastMapTy &Map = State->get<DynamicCastMap>();
- if (Map.isEmpty()) {
- Out << "null," << NL;
- return;
+template <class ContainerTy>
+static raw_ostream &printJsonContainer(const ContainerTy &Container,
+ raw_ostream &Out, const char *NL,
+ unsigned int Space, bool IsDot) {
+ if (Container.isEmpty()) {
+ return Out << "null";
}
++Space;
Out << '[' << NL;
- for (DynamicCastMapTy::iterator I = Map.begin(); I != Map.end(); ++I) {
- const MemRegion *MR = I->first;
- const CastSet &Set = I->second;
-
- Indent(Out, Space, IsDot) << "{ \"region\": \"" << MR << "\", \"casts\": ";
- if (Set.isEmpty()) {
- Out << "null ";
- } else {
- ++Space;
- Out << '[' << NL;
- for (CastSet::iterator SI = Set.begin(); SI != Set.end(); ++SI) {
- Indent(Out, Space, IsDot)
- << "{ \"from\": \"" << SI->from().getAsString() << "\", \"to\": \""
- << SI->to().getAsString() << "\", \"kind\": \""
- << (SI->succeeds() ? "success" : "fail") << "\" }";
-
- if (std::next(SI) != Set.end())
- Out << ',';
- Out << NL;
- }
- --Space;
- Indent(Out, Space, IsDot) << ']';
- }
- Out << '}';
-
- if (std::next(I) != Map.end())
+ for (auto I = Container.begin(); I != Container.end(); ++I) {
+ const auto &Element = *I;
+
+ Indent(Out, Space, IsDot) << "{ ";
+ printJson(Element, Out, NL, Space, IsDot) << " }";
+
+ if (std::next(I) != Container.end())
Out << ',';
Out << NL;
}
--Space;
- Indent(Out, Space, IsDot) << "]," << NL;
+ return Indent(Out, Space, IsDot) << "]";
+}
+
+// Prints the set of casts recorded for a region as a JSON array under the
+// "casts" key.
+static raw_ostream &printJson(const CastSet &Set, raw_ostream &Out,
+ const char *NL, unsigned int Space, bool IsDot) {
+ Out << "\"casts\": ";
+ return printJsonContainer(Set, Out, NL, Space, IsDot);
+}
+
+// Shared driver for the JSON dumpers below: fetches the program-state map
+// \p MapTy, prints it under the JSON key \p Name, and appends a trailing
+// comma. When \p PrintEvenIfEmpty is false, an empty map produces no
+// output at all.
+template <class MapTy>
+static void printJsonImpl(raw_ostream &Out, ProgramStateRef State,
+ const char *Name, const char *NL, unsigned int Space,
+ bool IsDot, bool PrintEvenIfEmpty = true) {
+ const auto &Map = State->get<MapTy>();
+ if (Map.isEmpty() && !PrintEvenIfEmpty)
+ return;
+
+ Indent(Out, Space, IsDot) << "\"" << Name << "\": ";
+ printJsonContainer(Map, Out, NL, Space, IsDot) << "," << NL;
+}
+
+// Dumps the region -> dynamic-type map under the "dynamic_types" key.
+static void printDynamicTypesJson(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, unsigned int Space,
+ bool IsDot) {
+ printJsonImpl<DynamicTypeMap>(Out, State, "dynamic_types", NL, Space, IsDot);
+}
+
+// Dumps the region -> cast-set map under the "dynamic_casts" key.
+static void printDynamicCastsJson(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, unsigned int Space,
+ bool IsDot) {
+ printJsonImpl<DynamicCastMap>(Out, State, "dynamic_casts", NL, Space, IsDot);
+}
+
+// Dumps the Class-object-symbol -> type map under "class_object_types".
+static void printClassObjectDynamicTypesJson(raw_ostream &Out,
+ ProgramStateRef State,
+ const char *NL, unsigned int Space,
+ bool IsDot) {
+ // Let's print Class object type information only if we have something
+ // meaningful to print.
+ printJsonImpl<DynamicClassObjectMap>(Out, State, "class_object_types", NL,
+ Space, IsDot,
+ /*PrintEvenIfEmpty=*/false);
}
void printDynamicTypeInfoJson(raw_ostream &Out, ProgramStateRef State,
const char *NL, unsigned int Space, bool IsDot) {
printDynamicTypesJson(Out, State, NL, Space, IsDot);
printDynamicCastsJson(Out, State, NL, Space, IsDot);
+ printClassObjectDynamicTypesJson(Out, State, NL, Space, IsDot);
}
} // namespace ento
diff --git a/clang/lib/StaticAnalyzer/Core/Environment.cpp b/clang/lib/StaticAnalyzer/Core/Environment.cpp
index 1ccf4c6104a6..9e6d79bb7dcc 100644
--- a/clang/lib/StaticAnalyzer/Core/Environment.cpp
+++ b/clang/lib/StaticAnalyzer/Core/Environment.cpp
@@ -183,12 +183,18 @@ EnvironmentManager::removeDeadBindings(Environment Env,
F.getTreeFactory());
// Iterate over the block-expr bindings.
- for (Environment::iterator I = Env.begin(), E = Env.end();
- I != E; ++I) {
+ for (Environment::iterator I = Env.begin(), E = Env.end(); I != E; ++I) {
const EnvironmentEntry &BlkExpr = I.getKey();
const SVal &X = I.getData();
- if (SymReaper.isLive(BlkExpr.getStmt(), BlkExpr.getLocationContext())) {
+ const bool IsBlkExprLive =
+ SymReaper.isLive(BlkExpr.getStmt(), BlkExpr.getLocationContext());
+
+ assert((isa<Expr>(BlkExpr.getStmt()) || !IsBlkExprLive) &&
+ "Only Exprs can be live, LivenessAnalysis argues about the liveness "
+ "of *values*!");
+
+ if (IsBlkExprLive) {
// Copy the binding to the new map.
EBMapRef = EBMapRef.add(BlkExpr, X);
diff --git a/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp b/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
index c4838492271c..635495e9bf60 100644
--- a/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
@@ -50,9 +50,8 @@ ExplodedGraph::~ExplodedGraph() = default;
bool ExplodedGraph::isInterestingLValueExpr(const Expr *Ex) {
if (!Ex->isLValue())
return false;
- return isa<DeclRefExpr>(Ex) ||
- isa<MemberExpr>(Ex) ||
- isa<ObjCIvarRefExpr>(Ex);
+ return isa<DeclRefExpr>(Ex) || isa<MemberExpr>(Ex) ||
+ isa<ObjCIvarRefExpr>(Ex) || isa<ArraySubscriptExpr>(Ex);
}
bool ExplodedGraph::shouldCollect(const ExplodedNode *node) {
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
index f917a4c8637b..265dcd134213 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -1210,9 +1210,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
switch (S->getStmtClass()) {
// C++, OpenMP and ARC stuff we don't support yet.
- case Expr::ObjCIndirectCopyRestoreExprClass:
case Stmt::CXXDependentScopeMemberExprClass:
- case Stmt::CXXInheritedCtorInitExprClass:
case Stmt::CXXTryStmtClass:
case Stmt::CXXTypeidExprClass:
case Stmt::CXXUuidofExprClass:
@@ -1226,6 +1224,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::UnresolvedLookupExprClass:
case Stmt::UnresolvedMemberExprClass:
case Stmt::TypoExprClass:
+ case Stmt::RecoveryExprClass:
case Stmt::CXXNoexceptExprClass:
case Stmt::PackExpansionExprClass:
case Stmt::SubstNonTypeTemplateParmPackExprClass:
@@ -1258,6 +1257,8 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::OMPTaskwaitDirectiveClass:
case Stmt::OMPTaskgroupDirectiveClass:
case Stmt::OMPFlushDirectiveClass:
+ case Stmt::OMPDepobjDirectiveClass:
+ case Stmt::OMPScanDirectiveClass:
case Stmt::OMPOrderedDirectiveClass:
case Stmt::OMPAtomicDirectiveClass:
case Stmt::OMPTargetDirectiveClass:
@@ -1386,6 +1387,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::AsTypeExprClass:
case Stmt::ConceptSpecializationExprClass:
case Stmt::CXXRewrittenBinaryOperatorClass:
+ case Stmt::RequiresExprClass:
// Fall through.
// Cases we intentionally don't evaluate, since they don't need
@@ -1410,6 +1412,8 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::SubstNonTypeTemplateParmExprClass:
case Stmt::CXXNullPtrLiteralExprClass:
case Stmt::OMPArraySectionExprClass:
+ case Stmt::OMPArrayShapingExprClass:
+ case Stmt::OMPIteratorExprClass:
case Stmt::TypeTraitExprClass: {
Bldr.takeNodes(Pred);
ExplodedNodeSet preVisit;
@@ -1510,6 +1514,10 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
Bldr.addNodes(Dst);
break;
+ case Stmt::MatrixSubscriptExprClass:
+ llvm_unreachable("Support for MatrixSubscriptExpr is not implemented.");
+ break;
+
case Stmt::GCCAsmStmtClass:
Bldr.takeNodes(Pred);
VisitGCCAsmStmt(cast<GCCAsmStmt>(S), Pred, Dst);
@@ -1617,6 +1625,13 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
Bldr.addNodes(Dst);
break;
+ case Stmt::CXXInheritedCtorInitExprClass:
+ Bldr.takeNodes(Pred);
+ VisitCXXInheritedCtorInitExpr(cast<CXXInheritedCtorInitExpr>(S), Pred,
+ Dst);
+ Bldr.addNodes(Dst);
+ break;
+
case Stmt::CXXNewExprClass: {
Bldr.takeNodes(Pred);
@@ -1637,8 +1652,10 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
ExplodedNodeSet PreVisit;
const auto *CDE = cast<CXXDeleteExpr>(S);
getCheckerManager().runCheckersForPreStmt(PreVisit, Pred, S, *this);
+ ExplodedNodeSet PostVisit;
+ getCheckerManager().runCheckersForPostStmt(PostVisit, PreVisit, S, *this);
- for (const auto i : PreVisit)
+ for (const auto i : PostVisit)
VisitCXXDeleteExpr(CDE, i, Dst);
Bldr.addNodes(Dst);
@@ -1704,7 +1721,8 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::CXXConstCastExprClass:
case Stmt::CXXFunctionalCastExprClass:
case Stmt::BuiltinBitCastExprClass:
- case Stmt::ObjCBridgedCastExprClass: {
+ case Stmt::ObjCBridgedCastExprClass:
+ case Stmt::CXXAddrspaceCastExprClass: {
Bldr.takeNodes(Pred);
const auto *C = cast<CastExpr>(S);
ExplodedNodeSet dstExpr;
@@ -1851,6 +1869,21 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
Bldr.addNodes(Dst);
break;
}
+
+ case Expr::ObjCIndirectCopyRestoreExprClass: {
+ // ObjCIndirectCopyRestoreExpr implies passing a temporary for
+ // correctness of lifetime management. Due to limited analysis
+ // of ARC, this is implemented as direct arg passing.
+ Bldr.takeNodes(Pred);
+ ProgramStateRef state = Pred->getState();
+ const auto *OIE = cast<ObjCIndirectCopyRestoreExpr>(S);
+ const Expr *E = OIE->getSubExpr();
+ SVal V = state->getSVal(E, Pred->getLocationContext());
+ Bldr.generateNode(S, Pred,
+ state->BindExpr(S, Pred->getLocationContext(), V));
+ Bldr.addNodes(Dst);
+ break;
+ }
}
}
@@ -3160,11 +3193,13 @@ std::string ExprEngine::DumpGraph(bool trim, StringRef Filename) {
return DumpGraph(Src, Filename);
} else {
return llvm::WriteGraph(&G, "ExprEngine", /*ShortNames=*/false,
- /*Title=*/"Exploded Graph", /*Filename=*/Filename);
+ /*Title=*/"Exploded Graph",
+ /*Filename=*/std::string(Filename));
}
-#endif
+#else
llvm::errs() << "Warning: dumping graph requires assertions" << "\n";
return "";
+#endif
}
std::string ExprEngine::DumpGraph(ArrayRef<const ExplodedNode*> Nodes,
@@ -3178,7 +3213,7 @@ std::string ExprEngine::DumpGraph(ArrayRef<const ExplodedNode*> Nodes,
return llvm::WriteGraph(TrimmedG.get(), "TrimmedExprEngine",
/*ShortNames=*/false,
/*Title=*/"Trimmed Exploded Graph",
- /*Filename=*/Filename);
+ /*Filename=*/std::string(Filename));
}
#endif
llvm::errs() << "Warning: dumping graph requires assertions" << "\n";
@@ -3189,3 +3224,5 @@ void *ProgramStateTrait<ReplayWithoutInlining>::GDMIndex() {
static int index = 0;
return &index;
}
+
+void ExprEngine::anchor() { }
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
index b17f26aa9c53..c5e38cc7423d 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -218,7 +218,7 @@ void ExprEngine::VisitBlockExpr(const BlockExpr *BE, ExplodedNode *Pred,
auto CE = BD->capture_end();
for (; I != E; ++I) {
const VarRegion *capturedR = I.getCapturedRegion();
- const VarRegion *originalR = I.getOriginalRegion();
+ const TypedValueRegion *originalR = I.getOriginalRegion();
// If the capture had a copy expression, use the result of evaluating
// that expression, otherwise use the original value.
@@ -573,6 +573,18 @@ void ExprEngine::VisitCompoundLiteralExpr(const CompoundLiteralExpr *CL,
void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
+ if (isa<TypedefNameDecl>(*DS->decl_begin())) {
+ // C99 6.7.7 "Any array size expressions associated with variable length
+ // array declarators are evaluated each time the declaration of the typedef
+ // name is reached in the order of execution."
+ // The checkers should know about typedef to be able to handle VLA size
+ // expressions.
+ ExplodedNodeSet DstPre;
+ getCheckerManager().runCheckersForPreStmt(DstPre, Pred, DS, *this);
+ getCheckerManager().runCheckersForPostStmt(Dst, DstPre, DS, *this);
+ return;
+ }
+
// Assumption: The CFG has one DeclStmt per Decl.
const VarDecl *VD = dyn_cast_or_null<VarDecl>(*DS->decl_begin());
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
index b816aab7c18f..38a680eb04c0 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -109,15 +109,14 @@ SVal ExprEngine::makeZeroElementRegion(ProgramStateRef State, SVal LValue,
return LValue;
}
-std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
+SVal ExprEngine::computeObjectUnderConstruction(
const Expr *E, ProgramStateRef State, const LocationContext *LCtx,
const ConstructionContext *CC, EvalCallOptions &CallOpts) {
SValBuilder &SVB = getSValBuilder();
MemRegionManager &MRMgr = SVB.getRegionManager();
ASTContext &ACtx = SVB.getContext();
- // See if we're constructing an existing region by looking at the
- // current construction context.
+ // Compute the target region by exploring the construction context.
if (CC) {
switch (CC->getKind()) {
case ConstructionContext::CXX17ElidedCopyVariableKind:
@@ -125,13 +124,9 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
const auto *DSCC = cast<VariableConstructionContext>(CC);
const auto *DS = DSCC->getDeclStmt();
const auto *Var = cast<VarDecl>(DS->getSingleDecl());
- SVal LValue = State->getLValue(Var, LCtx);
QualType Ty = Var->getType();
- LValue =
- makeZeroElementRegion(State, LValue, Ty, CallOpts.IsArrayCtorOrDtor);
- State =
- addObjectUnderConstruction(State, DSCC->getDeclStmt(), LCtx, LValue);
- return std::make_pair(State, LValue);
+ return makeZeroElementRegion(State, State->getLValue(Var, LCtx), Ty,
+ CallOpts.IsArrayCtorOrDtor);
}
case ConstructionContext::CXX17ElidedCopyConstructorInitializerKind:
case ConstructionContext::SimpleConstructorInitializerKind: {
@@ -139,8 +134,7 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
const auto *Init = ICC->getCXXCtorInitializer();
assert(Init->isAnyMemberInitializer());
const CXXMethodDecl *CurCtor = cast<CXXMethodDecl>(LCtx->getDecl());
- Loc ThisPtr =
- SVB.getCXXThis(CurCtor, LCtx->getStackFrame());
+ Loc ThisPtr = SVB.getCXXThis(CurCtor, LCtx->getStackFrame());
SVal ThisVal = State->getSVal(ThisPtr);
const ValueDecl *Field;
@@ -154,10 +148,8 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
}
QualType Ty = Field->getType();
- FieldVal = makeZeroElementRegion(State, FieldVal, Ty,
- CallOpts.IsArrayCtorOrDtor);
- State = addObjectUnderConstruction(State, Init, LCtx, FieldVal);
- return std::make_pair(State, FieldVal);
+ return makeZeroElementRegion(State, FieldVal, Ty,
+ CallOpts.IsArrayCtorOrDtor);
}
case ConstructionContext::NewAllocatedObjectKind: {
if (AMgr.getAnalyzerOptions().MayInlineCXXAllocator) {
@@ -170,11 +162,10 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
// TODO: In fact, we need to call the constructor for every
// allocated element, not just the first one!
CallOpts.IsArrayCtorOrDtor = true;
- return std::make_pair(
- State, loc::MemRegionVal(getStoreManager().GetElementZeroRegion(
- MR, NE->getType()->getPointeeType())));
+ return loc::MemRegionVal(getStoreManager().GetElementZeroRegion(
+ MR, NE->getType()->getPointeeType()));
}
- return std::make_pair(State, V);
+ return V;
}
// TODO: Detect when the allocator returns a null pointer.
// Constructor shall not be called in this case.
@@ -202,7 +193,7 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
CallerLCtx = CallerLCtx->getParent();
assert(!isa<BlockInvocationContext>(CallerLCtx));
}
- return prepareForObjectConstruction(
+ return computeObjectUnderConstruction(
cast<Expr>(SFC->getCallSite()), State, CallerLCtx,
RTC->getConstructionContext(), CallOpts);
} else {
@@ -223,64 +214,46 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
assert(RetE && "Void returns should not have a construction context");
QualType ReturnTy = RetE->getType();
QualType RegionTy = ACtx.getPointerType(ReturnTy);
- SVal V = SVB.conjureSymbolVal(&TopLevelSymRegionTag, RetE, SFC,
- RegionTy, currBldrCtx->blockCount());
- return std::make_pair(State, V);
+ return SVB.conjureSymbolVal(&TopLevelSymRegionTag, RetE, SFC, RegionTy,
+ currBldrCtx->blockCount());
}
llvm_unreachable("Unhandled return value construction context!");
}
case ConstructionContext::ElidedTemporaryObjectKind: {
assert(AMgr.getAnalyzerOptions().ShouldElideConstructors);
const auto *TCC = cast<ElidedTemporaryObjectConstructionContext>(CC);
- const CXXBindTemporaryExpr *BTE = TCC->getCXXBindTemporaryExpr();
- const MaterializeTemporaryExpr *MTE = TCC->getMaterializedTemporaryExpr();
- const CXXConstructExpr *CE = TCC->getConstructorAfterElision();
// Support pre-C++17 copy elision. We'll have the elidable copy
// constructor in the AST and in the CFG, but we'll skip it
// and construct directly into the final object. This call
// also sets the CallOpts flags for us.
- SVal V;
// If the elided copy/move constructor is not supported, there's still
// benefit in trying to model the non-elided constructor.
// Stash our state before trying to elide, as it'll get overwritten.
ProgramStateRef PreElideState = State;
EvalCallOptions PreElideCallOpts = CallOpts;
- std::tie(State, V) = prepareForObjectConstruction(
- CE, State, LCtx, TCC->getConstructionContextAfterElision(), CallOpts);
+ SVal V = computeObjectUnderConstruction(
+ TCC->getConstructorAfterElision(), State, LCtx,
+ TCC->getConstructionContextAfterElision(), CallOpts);
// FIXME: This definition of "copy elision has not failed" is unreliable.
// It doesn't indicate that the constructor will actually be inlined
- // later; it is still up to evalCall() to decide.
- if (!CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion) {
- // Remember that we've elided the constructor.
- State = addObjectUnderConstruction(State, CE, LCtx, V);
-
- // Remember that we've elided the destructor.
- if (BTE)
- State = elideDestructor(State, BTE, LCtx);
-
- // Instead of materialization, shamelessly return
- // the final object destination.
- if (MTE)
- State = addObjectUnderConstruction(State, MTE, LCtx, V);
-
- return std::make_pair(State, V);
- } else {
- // Copy elision failed. Revert the changes and proceed as if we have
- // a simple temporary.
- State = PreElideState;
- CallOpts = PreElideCallOpts;
- }
+ // later; this is still up to evalCall() to decide.
+ if (!CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
+ return V;
+
+ // Copy elision failed. Revert the changes and proceed as if we have
+ // a simple temporary.
+ CallOpts = PreElideCallOpts;
+ CallOpts.IsElidableCtorThatHasNotBeenElided = true;
LLVM_FALLTHROUGH;
}
case ConstructionContext::SimpleTemporaryObjectKind: {
const auto *TCC = cast<TemporaryObjectConstructionContext>(CC);
- const CXXBindTemporaryExpr *BTE = TCC->getCXXBindTemporaryExpr();
const MaterializeTemporaryExpr *MTE = TCC->getMaterializedTemporaryExpr();
- SVal V = UnknownVal();
+ CallOpts.IsTemporaryCtorOrDtor = true;
if (MTE) {
if (const ValueDecl *VD = MTE->getExtendingDecl()) {
assert(MTE->getStorageDuration() != SD_FullExpression);
@@ -296,20 +269,10 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
if (MTE->getStorageDuration() == SD_Static ||
MTE->getStorageDuration() == SD_Thread)
- V = loc::MemRegionVal(MRMgr.getCXXStaticTempObjectRegion(E));
+ return loc::MemRegionVal(MRMgr.getCXXStaticTempObjectRegion(E));
}
- if (V.isUnknown())
- V = loc::MemRegionVal(MRMgr.getCXXTempObjectRegion(E, LCtx));
-
- if (BTE)
- State = addObjectUnderConstruction(State, BTE, LCtx, V);
-
- if (MTE)
- State = addObjectUnderConstruction(State, MTE, LCtx, V);
-
- CallOpts.IsTemporaryCtorOrDtor = true;
- return std::make_pair(State, V);
+ return loc::MemRegionVal(MRMgr.getCXXTempObjectRegion(E, LCtx));
}
case ConstructionContext::ArgumentKind: {
// Arguments are technically temporaries.
@@ -318,10 +281,8 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
const auto *ACC = cast<ArgumentConstructionContext>(CC);
const Expr *E = ACC->getCallLikeExpr();
unsigned Idx = ACC->getIndex();
- const CXXBindTemporaryExpr *BTE = ACC->getCXXBindTemporaryExpr();
CallEventManager &CEMgr = getStateManager().getCallEventManager();
- SVal V = UnknownVal();
auto getArgLoc = [&](CallEventRef<> Caller) -> Optional<SVal> {
const LocationContext *FutureSFC =
Caller->getCalleeStackFrame(currBldrCtx->blockCount());
@@ -342,76 +303,171 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
// Operator arguments do not correspond to operator parameters
// because this-argument is implemented as a normal argument in
// operator call expressions but not in operator declarations.
- const VarRegion *VR = Caller->getParameterLocation(
+ const TypedValueRegion *TVR = Caller->getParameterLocation(
*Caller->getAdjustedParameterIndex(Idx), currBldrCtx->blockCount());
- if (!VR)
+ if (!TVR)
return None;
- return loc::MemRegionVal(VR);
+ return loc::MemRegionVal(TVR);
};
if (const auto *CE = dyn_cast<CallExpr>(E)) {
CallEventRef<> Caller = CEMgr.getSimpleCall(CE, State, LCtx);
- if (auto OptV = getArgLoc(Caller))
- V = *OptV;
+ if (Optional<SVal> V = getArgLoc(Caller))
+ return *V;
else
break;
- State = addObjectUnderConstruction(State, {CE, Idx}, LCtx, V);
} else if (const auto *CCE = dyn_cast<CXXConstructExpr>(E)) {
// Don't bother figuring out the target region for the future
// constructor because we won't need it.
CallEventRef<> Caller =
CEMgr.getCXXConstructorCall(CCE, /*Target=*/nullptr, State, LCtx);
- if (auto OptV = getArgLoc(Caller))
- V = *OptV;
+ if (Optional<SVal> V = getArgLoc(Caller))
+ return *V;
else
break;
- State = addObjectUnderConstruction(State, {CCE, Idx}, LCtx, V);
} else if (const auto *ME = dyn_cast<ObjCMessageExpr>(E)) {
CallEventRef<> Caller = CEMgr.getObjCMethodCall(ME, State, LCtx);
- if (auto OptV = getArgLoc(Caller))
- V = *OptV;
+ if (Optional<SVal> V = getArgLoc(Caller))
+ return *V;
else
break;
- State = addObjectUnderConstruction(State, {ME, Idx}, LCtx, V);
+ }
+ }
+ } // switch (CC->getKind())
+ }
+
+ // If we couldn't find an existing region to construct into, assume we're
+ // constructing a temporary. Notify the caller of our failure.
+ CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion = true;
+ return loc::MemRegionVal(MRMgr.getCXXTempObjectRegion(E, LCtx));
+}
+
+ProgramStateRef ExprEngine::updateObjectsUnderConstruction(
+ SVal V, const Expr *E, ProgramStateRef State, const LocationContext *LCtx,
+ const ConstructionContext *CC, const EvalCallOptions &CallOpts) {
+ if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion) {
+ // Sounds like we failed to find the target region and therefore
+ // copy elision failed. There's nothing we can do about it here.
+ return State;
+ }
+
+ // See if we're constructing an existing region by looking at the
+ // current construction context.
+ assert(CC && "Computed target region without construction context?");
+ switch (CC->getKind()) {
+ case ConstructionContext::CXX17ElidedCopyVariableKind:
+ case ConstructionContext::SimpleVariableKind: {
+ const auto *DSCC = cast<VariableConstructionContext>(CC);
+ return addObjectUnderConstruction(State, DSCC->getDeclStmt(), LCtx, V);
+ }
+ case ConstructionContext::CXX17ElidedCopyConstructorInitializerKind:
+ case ConstructionContext::SimpleConstructorInitializerKind: {
+ const auto *ICC = cast<ConstructorInitializerConstructionContext>(CC);
+ return addObjectUnderConstruction(State, ICC->getCXXCtorInitializer(),
+ LCtx, V);
+ }
+ case ConstructionContext::NewAllocatedObjectKind: {
+ return State;
+ }
+ case ConstructionContext::SimpleReturnedValueKind:
+ case ConstructionContext::CXX17ElidedCopyReturnedValueKind: {
+ const StackFrameContext *SFC = LCtx->getStackFrame();
+ const LocationContext *CallerLCtx = SFC->getParent();
+ if (!CallerLCtx) {
+ // No extra work is necessary in top frame.
+ return State;
}
- assert(!V.isUnknown());
+ auto RTC = (*SFC->getCallSiteBlock())[SFC->getIndex()]
+ .getAs<CFGCXXRecordTypedCall>();
+ assert(RTC && "Could not have had a target region without it");
+ if (isa<BlockInvocationContext>(CallerLCtx)) {
+ // Unwrap block invocation contexts. They're mostly part of
+ // the current stack frame.
+ CallerLCtx = CallerLCtx->getParent();
+ assert(!isa<BlockInvocationContext>(CallerLCtx));
+ }
- if (BTE)
+ return updateObjectsUnderConstruction(V,
+ cast<Expr>(SFC->getCallSite()), State, CallerLCtx,
+ RTC->getConstructionContext(), CallOpts);
+ }
+ case ConstructionContext::ElidedTemporaryObjectKind: {
+ assert(AMgr.getAnalyzerOptions().ShouldElideConstructors);
+ if (!CallOpts.IsElidableCtorThatHasNotBeenElided) {
+ const auto *TCC = cast<ElidedTemporaryObjectConstructionContext>(CC);
+ State = updateObjectsUnderConstruction(
+ V, TCC->getConstructorAfterElision(), State, LCtx,
+ TCC->getConstructionContextAfterElision(), CallOpts);
+
+ // Remember that we've elided the constructor.
+ State = addObjectUnderConstruction(
+ State, TCC->getConstructorAfterElision(), LCtx, V);
+
+ // Remember that we've elided the destructor.
+ if (const auto *BTE = TCC->getCXXBindTemporaryExpr())
+ State = elideDestructor(State, BTE, LCtx);
+
+ // Instead of materialization, shamelessly return
+ // the final object destination.
+ if (const auto *MTE = TCC->getMaterializedTemporaryExpr())
+ State = addObjectUnderConstruction(State, MTE, LCtx, V);
+
+ return State;
+ }
+ // If we decided not to elide the constructor, proceed as if
+ // it's a simple temporary.
+ LLVM_FALLTHROUGH;
+ }
+ case ConstructionContext::SimpleTemporaryObjectKind: {
+ const auto *TCC = cast<TemporaryObjectConstructionContext>(CC);
+ if (const auto *BTE = TCC->getCXXBindTemporaryExpr())
State = addObjectUnderConstruction(State, BTE, LCtx, V);
- return std::make_pair(State, V);
+ if (const auto *MTE = TCC->getMaterializedTemporaryExpr())
+ State = addObjectUnderConstruction(State, MTE, LCtx, V);
+
+ return State;
}
+ case ConstructionContext::ArgumentKind: {
+ const auto *ACC = cast<ArgumentConstructionContext>(CC);
+ if (const auto *BTE = ACC->getCXXBindTemporaryExpr())
+ State = addObjectUnderConstruction(State, BTE, LCtx, V);
+
+ return addObjectUnderConstruction(
+ State, {ACC->getCallLikeExpr(), ACC->getIndex()}, LCtx, V);
}
}
- // If we couldn't find an existing region to construct into, assume we're
- // constructing a temporary. Notify the caller of our failure.
- CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion = true;
- return std::make_pair(
- State, loc::MemRegionVal(MRMgr.getCXXTempObjectRegion(E, LCtx)));
+ llvm_unreachable("Unhandled construction context!");
}
-void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
- ExplodedNode *Pred,
- ExplodedNodeSet &destNodes) {
+void ExprEngine::handleConstructor(const Expr *E,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &destNodes) {
+ const auto *CE = dyn_cast<CXXConstructExpr>(E);
+ const auto *CIE = dyn_cast<CXXInheritedCtorInitExpr>(E);
+ assert(CE || CIE);
+
const LocationContext *LCtx = Pred->getLocationContext();
ProgramStateRef State = Pred->getState();
SVal Target = UnknownVal();
- if (Optional<SVal> ElidedTarget =
- getObjectUnderConstruction(State, CE, LCtx)) {
- // We've previously modeled an elidable constructor by pretending that it in
- // fact constructs into the correct target. This constructor can therefore
- // be skipped.
- Target = *ElidedTarget;
- StmtNodeBuilder Bldr(Pred, destNodes, *currBldrCtx);
- State = finishObjectConstruction(State, CE, LCtx);
- if (auto L = Target.getAs<Loc>())
- State = State->BindExpr(CE, LCtx, State->getSVal(*L, CE->getType()));
- Bldr.generateNode(CE, Pred, State);
- return;
+ if (CE) {
+ if (Optional<SVal> ElidedTarget =
+ getObjectUnderConstruction(State, CE, LCtx)) {
+ // We've previously modeled an elidable constructor by pretending that it
+ // in fact constructs into the correct target. This constructor can
+ // therefore be skipped.
+ Target = *ElidedTarget;
+ StmtNodeBuilder Bldr(Pred, destNodes, *currBldrCtx);
+ State = finishObjectConstruction(State, CE, LCtx);
+ if (auto L = Target.getAs<Loc>())
+ State = State->BindExpr(CE, LCtx, State->getSVal(*L, CE->getType()));
+ Bldr.generateNode(CE, Pred, State);
+ return;
+ }
}
// FIXME: Handle arrays, which run the same constructor for every element.
@@ -423,10 +479,16 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
assert(C || getCurrentCFGElement().getAs<CFGStmt>());
const ConstructionContext *CC = C ? C->getConstructionContext() : nullptr;
- switch (CE->getConstructionKind()) {
+ const CXXConstructExpr::ConstructionKind CK =
+ CE ? CE->getConstructionKind() : CIE->getConstructionKind();
+ switch (CK) {
case CXXConstructExpr::CK_Complete: {
+ // Inherited constructors are always base class constructors.
+ assert(CE && !CIE && "A complete constructor is inherited?!");
+
+ // The target region is found from construction context.
std::tie(State, Target) =
- prepareForObjectConstruction(CE, State, LCtx, CC, CallOpts);
+ handleConstructionContext(CE, State, LCtx, CC, CallOpts);
break;
}
case CXXConstructExpr::CK_VirtualBase: {
@@ -455,9 +517,9 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
// FIXME: Instead of relying on the ParentMap, we should have the
// trigger-statement (InitListExpr in this case) passed down from CFG or
// otherwise always available during construction.
- if (dyn_cast_or_null<InitListExpr>(LCtx->getParentMap().getParent(CE))) {
+ if (dyn_cast_or_null<InitListExpr>(LCtx->getParentMap().getParent(E))) {
MemRegionManager &MRMgr = getSValBuilder().getRegionManager();
- Target = loc::MemRegionVal(MRMgr.getCXXTempObjectRegion(CE, LCtx));
+ Target = loc::MemRegionVal(MRMgr.getCXXTempObjectRegion(E, LCtx));
CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion = true;
break;
}
@@ -468,14 +530,13 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
LCtx->getStackFrame());
SVal ThisVal = State->getSVal(ThisPtr);
- if (CE->getConstructionKind() == CXXConstructExpr::CK_Delegating) {
+ if (CK == CXXConstructExpr::CK_Delegating) {
Target = ThisVal;
} else {
// Cast to the base type.
- bool IsVirtual =
- (CE->getConstructionKind() == CXXConstructExpr::CK_VirtualBase);
- SVal BaseVal = getStoreManager().evalDerivedToBase(ThisVal, CE->getType(),
- IsVirtual);
+ bool IsVirtual = (CK == CXXConstructExpr::CK_VirtualBase);
+ SVal BaseVal =
+ getStoreManager().evalDerivedToBase(ThisVal, E->getType(), IsVirtual);
Target = BaseVal;
}
break;
@@ -487,23 +548,27 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
"Prepare for object construction");
ExplodedNodeSet DstPrepare;
StmtNodeBuilder BldrPrepare(Pred, DstPrepare, *currBldrCtx);
- BldrPrepare.generateNode(CE, Pred, State, &T, ProgramPoint::PreStmtKind);
+ BldrPrepare.generateNode(E, Pred, State, &T, ProgramPoint::PreStmtKind);
assert(DstPrepare.size() <= 1);
if (DstPrepare.size() == 0)
return;
Pred = *BldrPrepare.begin();
}
+ const MemRegion *TargetRegion = Target.getAsRegion();
CallEventManager &CEMgr = getStateManager().getCallEventManager();
- CallEventRef<CXXConstructorCall> Call =
- CEMgr.getCXXConstructorCall(CE, Target.getAsRegion(), State, LCtx);
+ CallEventRef<> Call =
+ CIE ? (CallEventRef<>)CEMgr.getCXXInheritedConstructorCall(
+ CIE, TargetRegion, State, LCtx)
+ : (CallEventRef<>)CEMgr.getCXXConstructorCall(
+ CE, TargetRegion, State, LCtx);
ExplodedNodeSet DstPreVisit;
- getCheckerManager().runCheckersForPreStmt(DstPreVisit, Pred, CE, *this);
+ getCheckerManager().runCheckersForPreStmt(DstPreVisit, Pred, E, *this);
- // FIXME: Is it possible and/or useful to do this before PreStmt?
ExplodedNodeSet PreInitialized;
- {
+ if (CE) {
+ // FIXME: Is it possible and/or useful to do this before PreStmt?
StmtNodeBuilder Bldr(DstPreVisit, PreInitialized, *currBldrCtx);
for (ExplodedNodeSet::iterator I = DstPreVisit.begin(),
E = DstPreVisit.end();
@@ -528,6 +593,8 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
Bldr.generateNode(CE, *I, State, /*tag=*/nullptr,
ProgramPoint::PreStmtKind);
}
+ } else {
+ PreInitialized = DstPreVisit;
}
ExplodedNodeSet DstPreCall;
@@ -537,7 +604,7 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
ExplodedNodeSet DstEvaluated;
StmtNodeBuilder Bldr(DstPreCall, DstEvaluated, *currBldrCtx);
- if (CE->getConstructor()->isTrivial() &&
+ if (CE && CE->getConstructor()->isTrivial() &&
CE->getConstructor()->isCopyOrMoveConstructor() &&
!CallOpts.IsArrayCtorOrDtor) {
// FIXME: Handle other kinds of trivial constructors as well.
@@ -548,7 +615,8 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
} else {
for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
I != E; ++I)
- defaultEvalCall(Bldr, *I, *Call, CallOpts);
+ getCheckerManager().runCheckersForEvalCall(DstEvaluated, *I, *Call, *this,
+ CallOpts);
}
// If the CFG was constructed without elements for temporary destructors
@@ -560,9 +628,10 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
// paths when no-return temporary destructors are used for assertions.
const AnalysisDeclContext *ADC = LCtx->getAnalysisDeclContext();
if (!ADC->getCFGBuildOptions().AddTemporaryDtors) {
- const MemRegion *Target = Call->getCXXThisVal().getAsRegion();
- if (Target && isa<CXXTempObjectRegion>(Target) &&
- Call->getDecl()->getParent()->isAnyDestructorNoReturn()) {
+ if (llvm::isa_and_nonnull<CXXTempObjectRegion>(TargetRegion) &&
+ cast<CXXConstructorDecl>(Call->getDecl())
+ ->getParent()
+ ->isAnyDestructorNoReturn()) {
// If we've inlined the constructor, then DstEvaluated would be empty.
// In this case we still want a sink, which could be implemented
@@ -575,7 +644,7 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
"We should not have inlined this constructor!");
for (ExplodedNode *N : DstEvaluated) {
- Bldr.generateSink(CE, N, N->getState());
+ Bldr.generateSink(E, N, N->getState());
}
// There is no need to run the PostCall and PostStmt checker
@@ -586,7 +655,7 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
}
ExplodedNodeSet DstPostArgumentCleanup;
- for (auto I : DstEvaluated)
+ for (ExplodedNode *I : DstEvaluated)
finishArgumentConstruction(DstPostArgumentCleanup, I, *Call);
// If there were other constructors called for object-type arguments
@@ -595,7 +664,19 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
getCheckerManager().runCheckersForPostCall(DstPostCall,
DstPostArgumentCleanup,
*Call, *this);
- getCheckerManager().runCheckersForPostStmt(destNodes, DstPostCall, CE, *this);
+ getCheckerManager().runCheckersForPostStmt(destNodes, DstPostCall, E, *this);
+}
+
+void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ handleConstructor(CE, Pred, Dst);
+}
+
+void ExprEngine::VisitCXXInheritedCtorInitExpr(
+ const CXXInheritedCtorInitExpr *CE, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ handleConstructor(CE, Pred, Dst);
}
void ExprEngine::VisitCXXDestructor(QualType ObjectType,
@@ -683,7 +764,7 @@ void ExprEngine::VisitCXXNewAllocatorCall(const CXXNewExpr *CNE,
ExplodedNodeSet DstPostCall;
StmtNodeBuilder CallBldr(DstPreCall, DstPostCall, *currBldrCtx);
- for (auto I : DstPreCall) {
+ for (ExplodedNode *I : DstPreCall) {
// FIXME: Provide evalCall for checkers?
defaultEvalCall(CallBldr, I, *Call);
}
@@ -693,7 +774,7 @@ void ExprEngine::VisitCXXNewAllocatorCall(const CXXNewExpr *CNE,
// CXXNewExpr gets processed.
ExplodedNodeSet DstPostValue;
StmtNodeBuilder ValueBldr(DstPostCall, DstPostValue, *currBldrCtx);
- for (auto I : DstPostCall) {
+ for (ExplodedNode *I : DstPostCall) {
// FIXME: Because CNE serves as the "call site" for the allocator (due to
// lack of a better expression in the AST), the conjured return value symbol
// is going to be of the same type (C++ object pointer type). Technically
@@ -727,10 +808,8 @@ void ExprEngine::VisitCXXNewAllocatorCall(const CXXNewExpr *CNE,
ExplodedNodeSet DstPostPostCallCallback;
getCheckerManager().runCheckersForPostCall(DstPostPostCallCallback,
DstPostValue, *Call, *this);
- for (auto I : DstPostPostCallCallback) {
- getCheckerManager().runCheckersForNewAllocator(
- CNE, *getObjectUnderConstruction(I->getState(), CNE, LCtx), Dst, I,
- *this);
+ for (ExplodedNode *I : DstPostPostCallCallback) {
+ getCheckerManager().runCheckersForNewAllocator(*Call, Dst, I, *this);
}
}
@@ -846,13 +925,18 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
void ExprEngine::VisitCXXDeleteExpr(const CXXDeleteExpr *CDE,
ExplodedNode *Pred, ExplodedNodeSet &Dst) {
- StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
- ProgramStateRef state = Pred->getState();
- Bldr.generateNode(CDE, Pred, state);
+
+ CallEventManager &CEMgr = getStateManager().getCallEventManager();
+ CallEventRef<CXXDeallocatorCall> Call = CEMgr.getCXXDeallocatorCall(
+ CDE, Pred->getState(), Pred->getLocationContext());
+
+ ExplodedNodeSet DstPreCall;
+ getCheckerManager().runCheckersForPreCall(DstPreCall, Pred, *Call, *this);
+
+ getCheckerManager().runCheckersForPostCall(Dst, DstPreCall, *Call, *this);
}
-void ExprEngine::VisitCXXCatchStmt(const CXXCatchStmt *CS,
- ExplodedNode *Pred,
+void ExprEngine::VisitCXXCatchStmt(const CXXCatchStmt *CS, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
const VarDecl *VD = CS->getExceptionDecl();
if (!VD) {
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index 01a371e664b2..52ba17d59ae0 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -10,17 +10,19 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/Decl.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "PrettyStackTraceLocationContext.h"
#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/ConstructionContext.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
@@ -324,17 +326,14 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);
ExplodedNodeSet DstPostCall;
- if (const CXXNewExpr *CNE = dyn_cast_or_null<CXXNewExpr>(CE)) {
+ if (llvm::isa_and_nonnull<CXXNewExpr>(CE)) {
ExplodedNodeSet DstPostPostCallCallback;
getCheckerManager().runCheckersForPostCall(DstPostPostCallCallback,
CEENode, *UpdatedCall, *this,
/*wasInlined=*/true);
- for (auto I : DstPostPostCallCallback) {
+ for (ExplodedNode *I : DstPostPostCallCallback) {
getCheckerManager().runCheckersForNewAllocator(
- CNE,
- *getObjectUnderConstruction(I->getState(), CNE,
- calleeCtx->getParent()),
- DstPostCall, I, *this,
+ cast<CXXAllocatorCall>(*UpdatedCall), DstPostCall, I, *this,
/*wasInlined=*/true);
}
} else {
@@ -585,12 +584,12 @@ void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
// defaultEvalCall if all of them fail.
ExplodedNodeSet dstCallEvaluated;
getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
- Call, *this);
+ Call, *this, EvalCallOptions());
// If there were other constructors called for object-type arguments
// of this call, clean them up.
ExplodedNodeSet dstArgumentCleanup;
- for (auto I : dstCallEvaluated)
+ for (ExplodedNode *I : dstCallEvaluated)
finishArgumentConstruction(dstArgumentCleanup, I, Call);
ExplodedNodeSet dstPostCall;
@@ -604,7 +603,7 @@ void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
// Run pointerEscape callback with the newly conjured symbols.
SmallVector<std::pair<SVal, SVal>, 8> Escaped;
- for (auto I : dstPostCall) {
+ for (ExplodedNode *I : dstPostCall) {
NodeBuilder B(I, Dst, *currBldrCtx);
ProgramStateRef State = I->getState();
Escaped.clear();
@@ -668,8 +667,8 @@ ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
assert(RTC->getStmt() == Call.getOriginExpr());
EvalCallOptions CallOpts; // FIXME: We won't really need those.
std::tie(State, Target) =
- prepareForObjectConstruction(Call.getOriginExpr(), State, LCtx,
- RTC->getConstructionContext(), CallOpts);
+ handleConstructionContext(Call.getOriginExpr(), State, LCtx,
+ RTC->getConstructionContext(), CallOpts);
const MemRegion *TargetR = Target.getAsRegion();
assert(TargetR);
// Invalidate the region so that it didn't look uninitialized. If this is
@@ -718,7 +717,7 @@ void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
ExprEngine::CallInlinePolicy
ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
AnalyzerOptions &Opts,
- const ExprEngine::EvalCallOptions &CallOpts) {
+ const EvalCallOptions &CallOpts) {
const LocationContext *CurLC = Pred->getLocationContext();
const StackFrameContext *CallerSFC = CurLC->getStackFrame();
switch (Call.getKind()) {
@@ -742,7 +741,7 @@ ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
const ConstructionContext *CC = CCE ? CCE->getConstructionContext()
: nullptr;
- if (CC && isa<NewAllocatedObjectConstructionContext>(CC) &&
+ if (llvm::isa_and_nonnull<NewAllocatedObjectConstructionContext>(CC) &&
!Opts.MayInlineCXXAllocator)
return CIP_DisallowedOnce;
@@ -789,6 +788,11 @@ ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
break;
}
+ case CE_CXXInheritedConstructor: {
+ // This doesn't really increase the cost of inlining ever, because
+ // the stack frame of the inherited constructor is trivial.
+ return CIP_Allowed;
+ }
case CE_CXXDestructor: {
if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
return CIP_DisallowedAlways;
@@ -814,6 +818,8 @@ ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
return CIP_DisallowedOnce;
break;
}
+ case CE_CXXDeallocator:
+ LLVM_FALLTHROUGH;
case CE_CXXAllocator:
if (Opts.MayInlineCXXAllocator)
break;
diff --git a/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
index a4918d7179ff..bc7c41d039c4 100644
--- a/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
+++ b/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
@@ -66,11 +66,9 @@ class HTMLDiagnostics : public PathDiagnosticConsumer {
const bool SupportsCrossFileDiagnostics;
public:
- HTMLDiagnostics(AnalyzerOptions &AnalyzerOpts,
- const std::string& prefix,
- const Preprocessor &pp,
- bool supportsMultipleFiles)
- : Directory(prefix), PP(pp), AnalyzerOpts(AnalyzerOpts),
+ HTMLDiagnostics(AnalyzerOptions &AnalyzerOpts, const std::string &OutputDir,
+ const Preprocessor &pp, bool supportsMultipleFiles)
+ : Directory(OutputDir), PP(pp), AnalyzerOpts(AnalyzerOpts),
SupportsCrossFileDiagnostics(supportsMultipleFiles) {}
~HTMLDiagnostics() override { FlushDiagnostics(nullptr); }
@@ -136,16 +134,45 @@ private:
void ento::createHTMLDiagnosticConsumer(
AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
- const std::string &prefix, const Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &) {
- C.push_back(new HTMLDiagnostics(AnalyzerOpts, prefix, PP, true));
+ const std::string &OutputDir, const Preprocessor &PP,
+ const cross_tu::CrossTranslationUnitContext &CTU) {
+
+ // FIXME: HTML is currently our default output type, but if the output
+ // directory isn't specified, it acts like if it was in the minimal text
+ // output mode. This doesn't make much sense, we should have the minimal text
+ // as our default. In the case of backward compatibility concerns, this could
+ // be preserved with -analyzer-config-compatibility-mode=true.
+ createTextMinimalPathDiagnosticConsumer(AnalyzerOpts, C, OutputDir, PP, CTU);
+
+ // TODO: Emit an error here.
+ if (OutputDir.empty())
+ return;
+
+ C.push_back(new HTMLDiagnostics(AnalyzerOpts, OutputDir, PP, true));
}
void ento::createHTMLSingleFileDiagnosticConsumer(
AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
+ const std::string &OutputDir, const Preprocessor &PP,
+ const cross_tu::CrossTranslationUnitContext &CTU) {
+
+ // TODO: Emit an error here.
+ if (OutputDir.empty())
+ return;
+
+ C.push_back(new HTMLDiagnostics(AnalyzerOpts, OutputDir, PP, false));
+ createTextMinimalPathDiagnosticConsumer(AnalyzerOpts, C, OutputDir, PP, CTU);
+}
+
+void ento::createPlistHTMLDiagnosticConsumer(
+ AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
const std::string &prefix, const Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &) {
- C.push_back(new HTMLDiagnostics(AnalyzerOpts, prefix, PP, false));
+ const cross_tu::CrossTranslationUnitContext &CTU) {
+ createHTMLDiagnosticConsumer(
+ AnalyzerOpts, C, std::string(llvm::sys::path::parent_path(prefix)), PP,
+ CTU);
+ createPlistMultiFileDiagnosticConsumer(AnalyzerOpts, C, prefix, PP, CTU);
+ createTextMinimalPathDiagnosticConsumer(AnalyzerOpts, C, prefix, PP, CTU);
}
//===----------------------------------------------------------------------===//
@@ -607,10 +634,17 @@ window.addEventListener("keydown", function (event) {
)<<<";
}
+static bool shouldDisplayPopUpRange(const SourceRange &Range) {
+ return !(Range.getBegin().isMacroID() || Range.getEnd().isMacroID());
+}
+
static void
HandlePopUpPieceStartTag(Rewriter &R,
const std::vector<SourceRange> &PopUpRanges) {
for (const auto &Range : PopUpRanges) {
+ if (!shouldDisplayPopUpRange(Range))
+ continue;
+
html::HighlightRange(R, Range.getBegin(), Range.getEnd(), "",
"<table class='variable_popup'><tbody>",
/*IsTokenRange=*/true);
@@ -626,6 +660,8 @@ static void HandlePopUpPieceEndTag(Rewriter &R,
llvm::raw_svector_ostream Out(Buf);
SourceRange Range(Piece.getLocation().asRange());
+ if (!shouldDisplayPopUpRange(Range))
+ return;
// Write out the path indices with a right arrow and the message as a row.
Out << "<tr><td valign='top'><div class='PathIndex PathIndexPopUp'>"
@@ -870,7 +906,7 @@ void HTMLDiagnostics::HandlePiece(Rewriter &R, FileID BugFileID,
<< (num - 1)
<< "\" title=\"Previous event ("
<< (num - 1)
- << ")\">&#x2190;</a></div></td>";
+ << ")\">&#x2190;</a></div>";
}
os << "</td><td>";
@@ -1034,8 +1070,13 @@ StringRef HTMLDiagnostics::generateKeyboardNavigationJavascript() {
<script type='text/javascript'>
var digitMatcher = new RegExp("[0-9]+");
+var querySelectorAllArray = function(selector) {
+ return Array.prototype.slice.call(
+ document.querySelectorAll(selector));
+}
+
document.addEventListener("DOMContentLoaded", function() {
- document.querySelectorAll(".PathNav > a").forEach(
+ querySelectorAllArray(".PathNav > a").forEach(
function(currentValue, currentIndex) {
var hrefValue = currentValue.getAttribute("href");
currentValue.onclick = function() {
@@ -1055,7 +1096,7 @@ var findNum = function() {
};
var scrollTo = function(el) {
- document.querySelectorAll(".selected").forEach(function(s) {
+ querySelectorAllArray(".selected").forEach(function(s) {
s.classList.remove("selected");
});
el.classList.add("selected");
diff --git a/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp b/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
index 1a09a521f116..dc268e562237 100644
--- a/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
+++ b/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
@@ -130,10 +130,10 @@ static internal::Matcher<Stmt> hasSuspiciousStmt(StringRef NodeName) {
// Escaping and not known mutation of the loop counter is handled
// by exclusion of assigning and address-of operators and
// pass-by-ref function calls on the loop counter from the body.
- changeIntBoundNode(equalsBoundNode(NodeName)),
- callByRef(equalsBoundNode(NodeName)),
- getAddrTo(equalsBoundNode(NodeName)),
- assignedToRef(equalsBoundNode(NodeName)))));
+ changeIntBoundNode(equalsBoundNode(std::string(NodeName))),
+ callByRef(equalsBoundNode(std::string(NodeName))),
+ getAddrTo(equalsBoundNode(std::string(NodeName))),
+ assignedToRef(equalsBoundNode(std::string(NodeName))))));
}
static internal::Matcher<Stmt> forLoopMatcher() {
@@ -164,6 +164,11 @@ static bool isPossiblyEscaped(const VarDecl *VD, ExplodedNode *N) {
if (VD->hasGlobalStorage())
return true;
+ const bool isParm = isa<ParmVarDecl>(VD);
+ // Reference parameters are assumed as escaped variables.
+ if (isParm && VD->getType()->isReferenceType())
+ return true;
+
while (!N->pred_empty()) {
// FIXME: getStmtForDiagnostics() does nasty things in order to provide
// a valid statement for body farms, do we need this behavior here?
@@ -193,6 +198,11 @@ static bool isPossiblyEscaped(const VarDecl *VD, ExplodedNode *N) {
N = N->getFirstPred();
}
+
+ // Parameter declaration will not be found.
+ if (isParm)
+ return false;
+
llvm_unreachable("Reached root without finding the declaration of VD");
}
diff --git a/clang/lib/StaticAnalyzer/Core/LoopWidening.cpp b/clang/lib/StaticAnalyzer/Core/LoopWidening.cpp
index 9a7b1a24b819..47e34dd84b9a 100644
--- a/clang/lib/StaticAnalyzer/Core/LoopWidening.cpp
+++ b/clang/lib/StaticAnalyzer/Core/LoopWidening.cpp
@@ -67,8 +67,10 @@ ProgramStateRef getWidenedLoopState(ProgramStateRef PrevState,
}
// References should not be invalidated.
- auto Matches = match(findAll(stmt(hasDescendant(varDecl(hasType(referenceType())).bind(MatchRef)))),
- *LCtx->getDecl()->getBody(), ASTCtx);
+ auto Matches = match(
+ findAll(stmt(hasDescendant(
+ varDecl(hasType(hasCanonicalType(referenceType()))).bind(MatchRef)))),
+ *LCtx->getDecl()->getBody(), ASTCtx);
for (BoundNodes Match : Matches) {
const VarDecl *VD = Match.getNodeAs<VarDecl>(MatchRef);
assert(VD);
diff --git a/clang/lib/StaticAnalyzer/Core/MemRegion.cpp b/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
index a10d7e69ad7e..455adf53ac99 100644
--- a/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ b/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -142,7 +142,7 @@ bool SubRegion::isSubRegionOf(const MemRegion* R) const {
return false;
}
-MemRegionManager* SubRegion::getMemRegionManager() const {
+MemRegionManager &SubRegion::getMemRegionManager() const {
const SubRegion* r = this;
do {
const MemRegion *superRegion = r->getSuperRegion();
@@ -159,62 +159,10 @@ const StackFrameContext *VarRegion::getStackFrame() const {
return SSR ? SSR->getStackFrame() : nullptr;
}
-//===----------------------------------------------------------------------===//
-// Region extents.
-//===----------------------------------------------------------------------===//
-
-DefinedOrUnknownSVal TypedValueRegion::getExtent(SValBuilder &svalBuilder) const {
- ASTContext &Ctx = svalBuilder.getContext();
- QualType T = getDesugaredValueType(Ctx);
-
- if (isa<VariableArrayType>(T))
- return nonloc::SymbolVal(svalBuilder.getSymbolManager().getExtentSymbol(this));
- if (T->isIncompleteType())
- return UnknownVal();
-
- CharUnits size = Ctx.getTypeSizeInChars(T);
- QualType sizeTy = svalBuilder.getArrayIndexType();
- return svalBuilder.makeIntVal(size.getQuantity(), sizeTy);
-}
-
-DefinedOrUnknownSVal FieldRegion::getExtent(SValBuilder &svalBuilder) const {
- // Force callers to deal with bitfields explicitly.
- if (getDecl()->isBitField())
- return UnknownVal();
-
- DefinedOrUnknownSVal Extent = DeclRegion::getExtent(svalBuilder);
-
- // A zero-length array at the end of a struct often stands for dynamically-
- // allocated extra memory.
- if (Extent.isZeroConstant()) {
- QualType T = getDesugaredValueType(svalBuilder.getContext());
-
- if (isa<ConstantArrayType>(T))
- return UnknownVal();
- }
-
- return Extent;
-}
-
-DefinedOrUnknownSVal AllocaRegion::getExtent(SValBuilder &svalBuilder) const {
- return nonloc::SymbolVal(svalBuilder.getSymbolManager().getExtentSymbol(this));
-}
-
-DefinedOrUnknownSVal SymbolicRegion::getExtent(SValBuilder &svalBuilder) const {
- return nonloc::SymbolVal(svalBuilder.getSymbolManager().getExtentSymbol(this));
-}
-
-DefinedOrUnknownSVal StringRegion::getExtent(SValBuilder &svalBuilder) const {
- return svalBuilder.makeIntVal(getStringLiteral()->getByteLength()+1,
- svalBuilder.getArrayIndexType());
-}
-
ObjCIvarRegion::ObjCIvarRegion(const ObjCIvarDecl *ivd, const SubRegion *sReg)
- : DeclRegion(ivd, sReg, ObjCIvarRegionKind) {}
+ : DeclRegion(sReg, ObjCIvarRegionKind), IVD(ivd) {}
-const ObjCIvarDecl *ObjCIvarRegion::getDecl() const {
- return cast<ObjCIvarDecl>(D);
-}
+const ObjCIvarDecl *ObjCIvarRegion::getDecl() const { return IVD; }
QualType ObjCIvarRegion::getValueType() const {
return getDecl()->getType();
@@ -228,6 +176,33 @@ QualType CXXDerivedObjectRegion::getValueType() const {
return QualType(getDecl()->getTypeForDecl(), 0);
}
+QualType ParamVarRegion::getValueType() const {
+ assert(getDecl() &&
+ "`ParamVarRegion` support functions without `Decl` not implemented"
+ " yet.");
+ return getDecl()->getType();
+}
+
+const ParmVarDecl *ParamVarRegion::getDecl() const {
+ const Decl *D = getStackFrame()->getDecl();
+
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ assert(Index < FD->param_size());
+ return FD->parameters()[Index];
+ } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
+ assert(Index < BD->param_size());
+ return BD->parameters()[Index];
+ } else if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ assert(Index < MD->param_size());
+ return MD->parameters()[Index];
+ } else if (const auto *CD = dyn_cast<CXXConstructorDecl>(D)) {
+ assert(Index < CD->param_size());
+ return CD->parameters()[Index];
+ } else {
+ llvm_unreachable("Unexpected Decl kind!");
+ }
+}
+
//===----------------------------------------------------------------------===//
// FoldingSet profiling.
//===----------------------------------------------------------------------===//
@@ -299,25 +274,44 @@ void CXXThisRegion::Profile(llvm::FoldingSetNodeID &ID) const {
CXXThisRegion::ProfileRegion(ID, ThisPointerTy, superRegion);
}
+void FieldRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ ProfileRegion(ID, getDecl(), superRegion);
+}
+
void ObjCIvarRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
const ObjCIvarDecl *ivd,
const MemRegion* superRegion) {
- DeclRegion::ProfileRegion(ID, ivd, superRegion, ObjCIvarRegionKind);
+ ID.AddInteger(static_cast<unsigned>(ObjCIvarRegionKind));
+ ID.AddPointer(ivd);
+ ID.AddPointer(superRegion);
}
-void DeclRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, const Decl *D,
- const MemRegion* superRegion, Kind k) {
- ID.AddInteger(static_cast<unsigned>(k));
- ID.AddPointer(D);
+void ObjCIvarRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ ProfileRegion(ID, getDecl(), superRegion);
+}
+
+void NonParamVarRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
+ const VarDecl *VD,
+ const MemRegion *superRegion) {
+ ID.AddInteger(static_cast<unsigned>(NonParamVarRegionKind));
+ ID.AddPointer(VD);
ID.AddPointer(superRegion);
}
-void DeclRegion::Profile(llvm::FoldingSetNodeID& ID) const {
- DeclRegion::ProfileRegion(ID, D, superRegion, getKind());
+void NonParamVarRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ ProfileRegion(ID, getDecl(), superRegion);
}
-void VarRegion::Profile(llvm::FoldingSetNodeID &ID) const {
- VarRegion::ProfileRegion(ID, getDecl(), superRegion);
+void ParamVarRegion::ProfileRegion(llvm::FoldingSetNodeID &ID, const Expr *OE,
+ unsigned Idx, const MemRegion *SReg) {
+ ID.AddInteger(static_cast<unsigned>(ParamVarRegionKind));
+ ID.AddPointer(OE);
+ ID.AddInteger(Idx);
+ ID.AddPointer(SReg);
+}
+
+void ParamVarRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ ProfileRegion(ID, getOriginExpr(), getIndex(), superRegion);
}
void SymbolicRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, SymbolRef sym,
@@ -529,12 +523,11 @@ void SymbolicRegion::dumpToStream(raw_ostream &os) const {
os << "SymRegion{" << sym << '}';
}
-void VarRegion::dumpToStream(raw_ostream &os) const {
- const auto *VD = cast<VarDecl>(D);
+void NonParamVarRegion::dumpToStream(raw_ostream &os) const {
if (const IdentifierInfo *ID = VD->getIdentifier())
os << ID->getName();
else
- os << "VarRegion{D" << VD->getID() << '}';
+ os << "NonParamVarRegion{D" << VD->getID() << '}';
}
LLVM_DUMP_METHOD void RegionRawOffset::dump() const {
@@ -581,6 +574,18 @@ void StackLocalsSpaceRegion::dumpToStream(raw_ostream &os) const {
os << "StackLocalsSpaceRegion";
}
+void ParamVarRegion::dumpToStream(raw_ostream &os) const {
+ const ParmVarDecl *PVD = getDecl();
+ assert(PVD &&
+ "`ParamVarRegion` support functions without `Decl` not implemented"
+ " yet.");
+ if (const IdentifierInfo *ID = PVD->getIdentifier()) {
+ os << ID->getName();
+ } else {
+ os << "ParamVarRegion{P" << PVD->getID() << '}';
+ }
+}
+
bool MemRegion::canPrintPretty() const {
return canPrintPrettyAsExpr();
}
@@ -600,11 +605,18 @@ void MemRegion::printPrettyAsExpr(raw_ostream &) const {
llvm_unreachable("This region cannot be printed pretty.");
}
-bool VarRegion::canPrintPrettyAsExpr() const {
- return true;
+bool NonParamVarRegion::canPrintPrettyAsExpr() const { return true; }
+
+void NonParamVarRegion::printPrettyAsExpr(raw_ostream &os) const {
+ os << getDecl()->getName();
}
-void VarRegion::printPrettyAsExpr(raw_ostream &os) const {
+bool ParamVarRegion::canPrintPrettyAsExpr() const { return true; }
+
+void ParamVarRegion::printPrettyAsExpr(raw_ostream &os) const {
+ assert(getDecl() &&
+ "`ParamVarRegion` support functions without `Decl` not implemented"
+ " yet.");
os << getDecl()->getName();
}
@@ -717,11 +729,79 @@ SourceRange MemRegion::sourceRange() const {
// MemRegionManager methods.
//===----------------------------------------------------------------------===//
+static DefinedOrUnknownSVal getTypeSize(QualType Ty, ASTContext &Ctx,
+ SValBuilder &SVB) {
+ CharUnits Size = Ctx.getTypeSizeInChars(Ty);
+ QualType SizeTy = SVB.getArrayIndexType();
+ return SVB.makeIntVal(Size.getQuantity(), SizeTy);
+}
+
+DefinedOrUnknownSVal MemRegionManager::getStaticSize(const MemRegion *MR,
+ SValBuilder &SVB) const {
+ const auto *SR = cast<SubRegion>(MR);
+ SymbolManager &SymMgr = SVB.getSymbolManager();
+
+ switch (SR->getKind()) {
+ case MemRegion::AllocaRegionKind:
+ case MemRegion::SymbolicRegionKind:
+ return nonloc::SymbolVal(SymMgr.getExtentSymbol(SR));
+ case MemRegion::StringRegionKind:
+ return SVB.makeIntVal(
+ cast<StringRegion>(SR)->getStringLiteral()->getByteLength() + 1,
+ SVB.getArrayIndexType());
+ case MemRegion::CompoundLiteralRegionKind:
+ case MemRegion::CXXBaseObjectRegionKind:
+ case MemRegion::CXXDerivedObjectRegionKind:
+ case MemRegion::CXXTempObjectRegionKind:
+ case MemRegion::CXXThisRegionKind:
+ case MemRegion::ObjCIvarRegionKind:
+ case MemRegion::NonParamVarRegionKind:
+ case MemRegion::ParamVarRegionKind:
+ case MemRegion::ElementRegionKind:
+ case MemRegion::ObjCStringRegionKind: {
+ QualType Ty = cast<TypedValueRegion>(SR)->getDesugaredValueType(Ctx);
+ if (isa<VariableArrayType>(Ty))
+ return nonloc::SymbolVal(SymMgr.getExtentSymbol(SR));
+
+ if (Ty->isIncompleteType())
+ return UnknownVal();
+
+ return getTypeSize(Ty, Ctx, SVB);
+ }
+ case MemRegion::FieldRegionKind: {
+ // Force callers to deal with bitfields explicitly.
+ if (cast<FieldRegion>(SR)->getDecl()->isBitField())
+ return UnknownVal();
+
+ QualType Ty = cast<TypedValueRegion>(SR)->getDesugaredValueType(Ctx);
+ DefinedOrUnknownSVal Size = getTypeSize(Ty, Ctx, SVB);
+
+ // A zero-length array at the end of a struct often stands for dynamically
+ // allocated extra memory.
+ if (Size.isZeroConstant()) {
+ if (isa<ConstantArrayType>(Ty))
+ return UnknownVal();
+ }
+
+ return Size;
+ }
+ // FIXME: The following are being used in 'SimpleSValBuilder' and in
+ // 'ArrayBoundChecker::checkLocation' because there is no symbol to
+ // represent the regions more appropriately.
+ case MemRegion::BlockDataRegionKind:
+ case MemRegion::BlockCodeRegionKind:
+ case MemRegion::FunctionCodeRegionKind:
+ return nonloc::SymbolVal(SymMgr.getExtentSymbol(SR));
+ default:
+ llvm_unreachable("Unhandled region");
+ }
+}
+
template <typename REG>
const REG *MemRegionManager::LazyAllocate(REG*& region) {
if (!region) {
region = A.Allocate<REG>();
- new (region) REG(this);
+ new (region) REG(*this);
}
return region;
@@ -746,7 +826,7 @@ MemRegionManager::getStackLocalsRegion(const StackFrameContext *STC) {
return R;
R = A.Allocate<StackLocalsSpaceRegion>();
- new (R) StackLocalsSpaceRegion(this, STC);
+ new (R) StackLocalsSpaceRegion(*this, STC);
return R;
}
@@ -759,7 +839,7 @@ MemRegionManager::getStackArgumentsRegion(const StackFrameContext *STC) {
return R;
R = A.Allocate<StackArgumentsSpaceRegion>();
- new (R) StackArgumentsSpaceRegion(this, STC);
+ new (R) StackArgumentsSpaceRegion(*this, STC);
return R;
}
@@ -781,7 +861,7 @@ const GlobalsSpaceRegion
return R;
R = A.Allocate<StaticGlobalSpaceRegion>();
- new (R) StaticGlobalSpaceRegion(this, CR);
+ new (R) StaticGlobalSpaceRegion(*this, CR);
return R;
}
@@ -825,15 +905,16 @@ getStackOrCaptureRegionForDeclContext(const LocationContext *LC,
return SFC;
}
if (const auto *BC = dyn_cast<BlockInvocationContext>(LC)) {
- const auto *BR =
- static_cast<const BlockDataRegion *>(BC->getContextData());
+ const auto *BR = static_cast<const BlockDataRegion *>(BC->getData());
// FIXME: This can be made more efficient.
for (BlockDataRegion::referenced_vars_iterator
I = BR->referenced_vars_begin(),
E = BR->referenced_vars_end(); I != E; ++I) {
- const VarRegion *VR = I.getOriginalRegion();
- if (VR->getDecl() == VD)
- return cast<VarRegion>(I.getCapturedRegion());
+ const TypedValueRegion *OrigR = I.getOriginalRegion();
+ if (const auto *VR = dyn_cast<VarRegion>(OrigR)) {
+ if (VR->getDecl() == VD)
+ return cast<VarRegion>(I.getCapturedRegion());
+ }
}
}
@@ -842,15 +923,37 @@ getStackOrCaptureRegionForDeclContext(const LocationContext *LC,
return (const StackFrameContext *)nullptr;
}
-const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
+const VarRegion *MemRegionManager::getVarRegion(const VarDecl *D,
const LocationContext *LC) {
+ const auto *PVD = dyn_cast<ParmVarDecl>(D);
+ if (PVD) {
+ unsigned Index = PVD->getFunctionScopeIndex();
+ const StackFrameContext *SFC = LC->getStackFrame();
+ const Stmt *CallSite = SFC->getCallSite();
+ if (CallSite) {
+ const Decl *D = SFC->getDecl();
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ if (Index < FD->param_size() && FD->parameters()[Index] == PVD)
+ return getSubRegion<ParamVarRegion>(cast<Expr>(CallSite), Index,
+ getStackArgumentsRegion(SFC));
+ } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
+ if (Index < BD->param_size() && BD->parameters()[Index] == PVD)
+ return getSubRegion<ParamVarRegion>(cast<Expr>(CallSite), Index,
+ getStackArgumentsRegion(SFC));
+ } else {
+ return getSubRegion<ParamVarRegion>(cast<Expr>(CallSite), Index,
+ getStackArgumentsRegion(SFC));
+ }
+ }
+ }
+
D = D->getCanonicalDecl();
const MemRegion *sReg = nullptr;
if (D->hasGlobalStorage() && !D->isStaticLocal()) {
// First handle the globals defined in system headers.
- if (C.getSourceManager().isInSystemHeader(D->getLocation())) {
+ if (Ctx.getSourceManager().isInSystemHeader(D->getLocation())) {
// Whitelist the system globals which often DO GET modified, assume the
// rest are immutable.
if (D->getName().find("errno") != StringRef::npos)
@@ -914,7 +1017,7 @@ const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
T = getContext().getBlockPointerType(T);
const BlockCodeRegion *BTR =
- getBlockCodeRegion(BD, C.getCanonicalType(T),
+ getBlockCodeRegion(BD, Ctx.getCanonicalType(T),
STC->getAnalysisDeclContext());
sReg = getGlobalsRegion(MemRegion::StaticGlobalSpaceRegionKind,
BTR);
@@ -926,13 +1029,23 @@ const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
}
}
- return getSubRegion<VarRegion>(D, sReg);
+ return getSubRegion<NonParamVarRegion>(D, sReg);
}
-const VarRegion *MemRegionManager::getVarRegion(const VarDecl *D,
- const MemRegion *superR) {
+const NonParamVarRegion *
+MemRegionManager::getNonParamVarRegion(const VarDecl *D,
+ const MemRegion *superR) {
D = D->getCanonicalDecl();
- return getSubRegion<VarRegion>(D, superR);
+ return getSubRegion<NonParamVarRegion>(D, superR);
+}
+
+const ParamVarRegion *
+MemRegionManager::getParamVarRegion(const Expr *OriginExpr, unsigned Index,
+ const LocationContext *LC) {
+ const StackFrameContext *SFC = LC->getStackFrame();
+ assert(SFC);
+ return getSubRegion<ParamVarRegion>(OriginExpr, Index,
+ getStackArgumentsRegion(SFC));
}
const BlockDataRegion *
@@ -1325,7 +1438,8 @@ static RegionOffset calculateOffset(const MemRegion *R) {
case MemRegion::CXXThisRegionKind:
case MemRegion::StringRegionKind:
case MemRegion::ObjCStringRegionKind:
- case MemRegion::VarRegionKind:
+ case MemRegion::NonParamVarRegionKind:
+ case MemRegion::ParamVarRegionKind:
case MemRegion::CXXTempObjectRegionKind:
// Usual base regions.
goto Finish;
@@ -1476,12 +1590,12 @@ RegionOffset MemRegion::getAsOffset() const {
std::pair<const VarRegion *, const VarRegion *>
BlockDataRegion::getCaptureRegions(const VarDecl *VD) {
- MemRegionManager &MemMgr = *getMemRegionManager();
+ MemRegionManager &MemMgr = getMemRegionManager();
const VarRegion *VR = nullptr;
const VarRegion *OriginalVR = nullptr;
if (!VD->hasAttr<BlocksAttr>() && VD->hasLocalStorage()) {
- VR = MemMgr.getVarRegion(VD, this);
+ VR = MemMgr.getNonParamVarRegion(VD, this);
OriginalVR = MemMgr.getVarRegion(VD, LC);
}
else {
@@ -1490,7 +1604,7 @@ BlockDataRegion::getCaptureRegions(const VarDecl *VD) {
OriginalVR = VR;
}
else {
- VR = MemMgr.getVarRegion(VD, MemMgr.getUnknownRegion());
+ VR = MemMgr.getNonParamVarRegion(VD, MemMgr.getUnknownRegion());
OriginalVR = MemMgr.getVarRegion(VD, LC);
}
}
@@ -1511,7 +1625,7 @@ void BlockDataRegion::LazyInitializeReferencedVars() {
return;
}
- MemRegionManager &MemMgr = *getMemRegionManager();
+ MemRegionManager &MemMgr = getMemRegionManager();
llvm::BumpPtrAllocator &A = MemMgr.getAllocator();
BumpVectorContext BC(A);
diff --git a/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
index 3a3942a8301b..ed62778623a8 100644
--- a/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
+++ b/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
@@ -45,8 +45,8 @@ namespace {
AnalyzerOptions &AnOpts;
const bool SupportsCrossFileDiagnostics;
public:
- PlistDiagnostics(AnalyzerOptions &AnalyzerOpts, const std::string &prefix,
- const Preprocessor &PP,
+ PlistDiagnostics(AnalyzerOptions &AnalyzerOpts,
+ const std::string &OutputFile, const Preprocessor &PP,
const cross_tu::CrossTranslationUnitContext &CTU,
bool supportsMultipleFiles);
@@ -582,19 +582,32 @@ PlistDiagnostics::PlistDiagnostics(
void ento::createPlistDiagnosticConsumer(
AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
- const std::string &s, const Preprocessor &PP,
+ const std::string &OutputFile, const Preprocessor &PP,
const cross_tu::CrossTranslationUnitContext &CTU) {
- C.push_back(new PlistDiagnostics(AnalyzerOpts, s, PP, CTU,
+
+ // TODO: Emit an error here.
+ if (OutputFile.empty())
+ return;
+
+ C.push_back(new PlistDiagnostics(AnalyzerOpts, OutputFile, PP, CTU,
/*supportsMultipleFiles*/ false));
+ createTextMinimalPathDiagnosticConsumer(AnalyzerOpts, C, OutputFile, PP, CTU);
}
void ento::createPlistMultiFileDiagnosticConsumer(
AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
- const std::string &s, const Preprocessor &PP,
+ const std::string &OutputFile, const Preprocessor &PP,
const cross_tu::CrossTranslationUnitContext &CTU) {
- C.push_back(new PlistDiagnostics(AnalyzerOpts, s, PP, CTU,
+
+ // TODO: Emit an error here.
+ if (OutputFile.empty())
+ return;
+
+ C.push_back(new PlistDiagnostics(AnalyzerOpts, OutputFile, PP, CTU,
/*supportsMultipleFiles*/ true));
+ createTextMinimalPathDiagnosticConsumer(AnalyzerOpts, C, OutputFile, PP, CTU);
}
+
void PlistDiagnostics::FlushDiagnosticsImpl(
std::vector<const PathDiagnostic *> &Diags,
FilesMade *filesMade) {
@@ -939,7 +952,7 @@ getExpandedMacro(SourceLocation MacroLoc, const Preprocessor &PP,
std::string MacroName = getMacroNameAndPrintExpansion(
Printer, MacroLoc, *PPToUse, MacroArgMap{}, AlreadyProcessedTokens);
- return { MacroName, OS.str() };
+ return {MacroName, std::string(OS.str())};
}
static std::string getMacroNameAndPrintExpansion(
@@ -960,9 +973,8 @@ static std::string getMacroNameAndPrintExpansion(
// in this case we don't get the full expansion text in the Plist file. See
// the test file where "value" is expanded to "garbage_" instead of
// "garbage_value".
- if (AlreadyProcessedTokens.find(IDInfo) != AlreadyProcessedTokens.end())
+ if (!AlreadyProcessedTokens.insert(IDInfo).second)
return Info.Name;
- AlreadyProcessedTokens.insert(IDInfo);
if (!Info.MI)
return Info.Name;
diff --git a/clang/lib/StaticAnalyzer/Core/ProgramState.cpp b/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
index 14006f79fd0f..006a4006b7fc 100644
--- a/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -16,8 +16,8 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -76,12 +76,12 @@ ProgramStateManager::ProgramStateManager(ASTContext &Ctx,
StoreManagerCreator CreateSMgr,
ConstraintManagerCreator CreateCMgr,
llvm::BumpPtrAllocator &alloc,
- SubEngine *SubEng)
- : Eng(SubEng), EnvMgr(alloc), GDMFactory(alloc),
+ ExprEngine *ExprEng)
+ : Eng(ExprEng), EnvMgr(alloc), GDMFactory(alloc),
svalBuilder(createSimpleSValBuilder(alloc, Ctx, *this)),
CallEventMgr(new CallEventManager(alloc)), Alloc(alloc) {
StoreMgr = (*CreateSMgr)(*this);
- ConstraintMgr = (*CreateCMgr)(*this, SubEng);
+ ConstraintMgr = (*CreateCMgr)(*this, ExprEng);
}
@@ -189,7 +189,7 @@ ProgramState::invalidateRegionsImpl(ValueList Values,
RegionAndSymbolInvalidationTraits *ITraits,
const CallEvent *Call) const {
ProgramStateManager &Mgr = getStateManager();
- SubEngine &Eng = Mgr.getOwningEngine();
+ ExprEngine &Eng = Mgr.getOwningEngine();
InvalidatedSymbols InvalidatedSyms;
if (!IS)
@@ -240,6 +240,13 @@ ProgramState::enterStackFrame(const CallEvent &Call,
return makeWithStore(NewStore);
}
+SVal ProgramState::getSelfSVal(const LocationContext *LCtx) const {
+ const ImplicitParamDecl *SelfDecl = LCtx->getSelfDecl();
+ if (!SelfDecl)
+ return SVal();
+ return getSVal(getRegion(SelfDecl, LCtx));
+}
+
SVal ProgramState::getSValAsScalarOrLoc(const MemRegion *R) const {
// We only want to do fetches from regions that we can actually bind
// values. For example, SymbolicRegions of type 'id<...>' cannot
diff --git a/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index 9752a0e22832..cb6f61e86ae3 100644
--- a/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -16,6 +16,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValVisitor.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableSet.h"
#include "llvm/Support/raw_ostream.h"
@@ -23,10 +24,89 @@
using namespace clang;
using namespace ento;
+// This class can be extended with other tables which will help to reason
+// about ranges more precisely.
+class OperatorRelationsTable {
+ static_assert(BO_LT < BO_GT && BO_GT < BO_LE && BO_LE < BO_GE &&
+ BO_GE < BO_EQ && BO_EQ < BO_NE,
+ "This class relies on operators order. Rework it otherwise.");
+
+public:
+ enum TriStateKind {
+ False = 0,
+ True,
+ Unknown,
+ };
+
+private:
+ // CmpOpTable holds states which represent the corresponding range for
+ // branching an exploded graph. We can reason about the branch if there is
+ // a previously known fact of the existence of a comparison expression with
+ // operands used in the current expression.
+ // E.g. assuming (x < y) is true that means (x != y) is surely true.
+ // if (x previous_operation y) // < | != | >
+ // if (x operation y) // != | > | <
+ // tristate // True | Unknown | False
+ //
+ // CmpOpTable represents next:
+ // __|< |> |<=|>=|==|!=|UnknownX2|
+ // < |1 |0 |* |0 |0 |* |1 |
+ // > |0 |1 |0 |* |0 |* |1 |
+ // <=|1 |0 |1 |* |1 |* |0 |
+ // >=|0 |1 |* |1 |1 |* |0 |
+ // ==|0 |0 |* |* |1 |0 |1 |
+ // !=|1 |1 |* |* |0 |1 |0 |
+ //
+  // Columns stand for the previous operator.
+  // Rows stand for the current operator.
+ // Each row has exactly two `Unknown` cases.
+ // UnknownX2 means that both `Unknown` previous operators are met in code,
+ // and there is a special column for that, for example:
+ // if (x >= y)
+ // if (x != y)
+ // if (x <= y)
+ // False only
+ static constexpr size_t CmpOpCount = BO_NE - BO_LT + 1;
+ const TriStateKind CmpOpTable[CmpOpCount][CmpOpCount + 1] = {
+ // < > <= >= == != UnknownX2
+ {True, False, Unknown, False, False, Unknown, True}, // <
+ {False, True, False, Unknown, False, Unknown, True}, // >
+ {True, False, True, Unknown, True, Unknown, False}, // <=
+ {False, True, Unknown, True, True, Unknown, False}, // >=
+ {False, False, Unknown, Unknown, True, False, True}, // ==
+ {True, True, Unknown, Unknown, False, True, False}, // !=
+ };
+
+ static size_t getIndexFromOp(BinaryOperatorKind OP) {
+ return static_cast<size_t>(OP - BO_LT);
+ }
+
+public:
+ constexpr size_t getCmpOpCount() const { return CmpOpCount; }
+
+ static BinaryOperatorKind getOpFromIndex(size_t Index) {
+ return static_cast<BinaryOperatorKind>(Index + BO_LT);
+ }
+
+ TriStateKind getCmpOpState(BinaryOperatorKind CurrentOP,
+ BinaryOperatorKind QueriedOP) const {
+ return CmpOpTable[getIndexFromOp(CurrentOP)][getIndexFromOp(QueriedOP)];
+ }
+
+ TriStateKind getCmpOpStateForUnknownX2(BinaryOperatorKind CurrentOP) const {
+ return CmpOpTable[getIndexFromOp(CurrentOP)][CmpOpCount];
+ }
+};
+//===----------------------------------------------------------------------===//
+// RangeSet implementation
+//===----------------------------------------------------------------------===//
+
void RangeSet::IntersectInRange(BasicValueFactory &BV, Factory &F,
- const llvm::APSInt &Lower, const llvm::APSInt &Upper,
- PrimRangeSet &newRanges, PrimRangeSet::iterator &i,
- PrimRangeSet::iterator &e) const {
+ const llvm::APSInt &Lower,
+ const llvm::APSInt &Upper,
+ PrimRangeSet &newRanges,
+ PrimRangeSet::iterator &i,
+ PrimRangeSet::iterator &e) const {
// There are six cases for each range R in the set:
// 1. R is entirely before the intersection range.
// 2. R is entirely after the intersection range.
@@ -62,10 +142,27 @@ void RangeSet::IntersectInRange(BasicValueFactory &BV, Factory &F,
const llvm::APSInt &RangeSet::getMinValue() const {
assert(!isEmpty());
- return ranges.begin()->From();
+ return begin()->From();
+}
+
+const llvm::APSInt &RangeSet::getMaxValue() const {
+ assert(!isEmpty());
+ // NOTE: It's a shame that we can't implement 'getMaxValue' without scanning
+ // the whole tree to get to the last element.
+ // llvm::ImmutableSet should support decrement for 'end' iterators
+ // or reverse order iteration.
+ auto It = begin();
+ for (auto End = end(); std::next(It) != End; ++It) {
+ }
+ return It->To();
}
bool RangeSet::pin(llvm::APSInt &Lower, llvm::APSInt &Upper) const {
+ if (isEmpty()) {
+ // This range is already infeasible.
+ return false;
+ }
+
// This function has nine cases, the cartesian product of range-testing
// both the upper and lower bounds against the symbol's type.
// Each case requires a different pinning operation.
@@ -155,11 +252,11 @@ bool RangeSet::pin(llvm::APSInt &Lower, llvm::APSInt &Upper) const {
// or, alternatively, /removing/ all integers between Upper and Lower.
RangeSet RangeSet::Intersect(BasicValueFactory &BV, Factory &F,
llvm::APSInt Lower, llvm::APSInt Upper) const {
- if (!pin(Lower, Upper))
- return F.getEmptySet();
-
PrimRangeSet newRanges = F.getEmptySet();
+ if (isEmpty() || !pin(Lower, Upper))
+ return newRanges;
+
PrimRangeSet::iterator i = begin(), e = end();
if (Lower <= Upper)
IntersectInRange(BV, F, Lower, Upper, newRanges, i, e);
@@ -190,33 +287,78 @@ RangeSet RangeSet::Intersect(BasicValueFactory &BV, Factory &F,
return newRanges;
}
-// Turn all [A, B] ranges to [-B, -A]. Ranges [MIN, B] are turned to range set
-// [MIN, MIN] U [-B, MAX], when MIN and MAX are the minimal and the maximal
-// signed values of the type.
+// Turn all [A, B] ranges to [-B, -A], when "-" is a C-like unary minus
+// operation under the values of the type.
+//
+// We also handle MIN because applying unary minus to MIN does not change it.
+// Example 1:
+// char x = -128; // -128 is a MIN value in a range of 'char'
+// char y = -x; // y: -128
+// Example 2:
+// unsigned char x = 0; // 0 is a MIN value in a range of 'unsigned char'
+// unsigned char y = -x; // y: 0
+//
+// This forces us to split a range
+// like [MIN, N] into [MIN, MIN] U [-N, MAX].
+// For instance, whole range is {-128..127} and subrange is [-128,-126],
+// thus [-128,-127,-126,.....] negates to [-128,.....,126,127].
+//
+// Negate restores disrupted ranges on bounds,
+// e.g. [MIN, B] => [MIN, MIN] U [-B, MAX] => [MIN, B].
RangeSet RangeSet::Negate(BasicValueFactory &BV, Factory &F) const {
PrimRangeSet newRanges = F.getEmptySet();
- for (iterator i = begin(), e = end(); i != e; ++i) {
- const llvm::APSInt &from = i->From(), &to = i->To();
- const llvm::APSInt &newTo = (from.isMinSignedValue() ?
- BV.getMaxValue(from) :
- BV.getValue(- from));
- if (to.isMaxSignedValue() && !newRanges.isEmpty() &&
- newRanges.begin()->From().isMinSignedValue()) {
- assert(newRanges.begin()->To().isMinSignedValue() &&
- "Ranges should not overlap");
- assert(!from.isMinSignedValue() && "Ranges should not overlap");
- const llvm::APSInt &newFrom = newRanges.begin()->From();
- newRanges =
- F.add(F.remove(newRanges, *newRanges.begin()), Range(newFrom, newTo));
- } else if (!to.isMinSignedValue()) {
- const llvm::APSInt &newFrom = BV.getValue(- to);
- newRanges = F.add(newRanges, Range(newFrom, newTo));
- }
- if (from.isMinSignedValue()) {
- newRanges = F.add(newRanges, Range(BV.getMinValue(from),
- BV.getMinValue(from)));
+ if (isEmpty())
+ return newRanges;
+
+ const llvm::APSInt sampleValue = getMinValue();
+ const llvm::APSInt &MIN = BV.getMinValue(sampleValue);
+ const llvm::APSInt &MAX = BV.getMaxValue(sampleValue);
+
+ // Handle a special case for MIN value.
+ iterator i = begin();
+ const llvm::APSInt &from = i->From();
+ const llvm::APSInt &to = i->To();
+ if (from == MIN) {
+ // If [from, to] are [MIN, MAX], then just return the same [MIN, MAX].
+ if (to == MAX) {
+ newRanges = ranges;
+ } else {
+ // Add separate range for the lowest value.
+ newRanges = F.add(newRanges, Range(MIN, MIN));
+ // Skip adding the second range in case when [from, to] are [MIN, MIN].
+ if (to != MIN) {
+ newRanges = F.add(newRanges, Range(BV.getValue(-to), MAX));
+ }
}
+ // Skip the first range in the loop.
+ ++i;
+ }
+
+ // Negate all other ranges.
+ for (iterator e = end(); i != e; ++i) {
+ // Negate int values.
+ const llvm::APSInt &newFrom = BV.getValue(-i->To());
+ const llvm::APSInt &newTo = BV.getValue(-i->From());
+ // Add a negated range.
+ newRanges = F.add(newRanges, Range(newFrom, newTo));
+ }
+
+ if (newRanges.isSingleton())
+ return newRanges;
+
+ // Try to find and unite next ranges:
+ // [MIN, MIN] & [MIN + 1, N] => [MIN, N].
+ iterator iter1 = newRanges.begin();
+ iterator iter2 = std::next(iter1);
+
+ if (iter1->To() == MIN && (iter2->From() - 1) == MIN) {
+ const llvm::APSInt &to = iter2->To();
+ // remove adjacent ranges
+ newRanges = F.remove(newRanges, *iter1);
+ newRanges = F.remove(newRanges, *newRanges.begin());
+ // add united range
+ newRanges = F.add(newRanges, Range(MIN, to));
}
return newRanges;
@@ -238,10 +380,534 @@ void RangeSet::print(raw_ostream &os) const {
}
namespace {
+
+/// A little component aggregating all of the reasoning we have about
+/// the ranges of symbolic expressions.
+///
+/// Even when we don't know the exact values of the operands, we still
+/// can get a pretty good estimate of the result's range.
+class SymbolicRangeInferrer
+ : public SymExprVisitor<SymbolicRangeInferrer, RangeSet> {
+public:
+ static RangeSet inferRange(BasicValueFactory &BV, RangeSet::Factory &F,
+ ProgramStateRef State, SymbolRef Sym) {
+ SymbolicRangeInferrer Inferrer(BV, F, State);
+ return Inferrer.infer(Sym);
+ }
+
+ RangeSet VisitSymExpr(SymbolRef Sym) {
+ // If we got to this function, the actual type of the symbolic
+ // expression is not supported for advanced inference.
+ // In this case, we simply backoff to the default "let's simply
+ // infer the range from the expression's type".
+ return infer(Sym->getType());
+ }
+
+ RangeSet VisitSymIntExpr(const SymIntExpr *Sym) {
+ return VisitBinaryOperator(Sym);
+ }
+
+ RangeSet VisitIntSymExpr(const IntSymExpr *Sym) {
+ return VisitBinaryOperator(Sym);
+ }
+
+ RangeSet VisitSymSymExpr(const SymSymExpr *Sym) {
+ return VisitBinaryOperator(Sym);
+ }
+
+private:
+ SymbolicRangeInferrer(BasicValueFactory &BV, RangeSet::Factory &F,
+ ProgramStateRef S)
+ : ValueFactory(BV), RangeFactory(F), State(S) {}
+
+ /// Infer range information from the given integer constant.
+ ///
+ /// It's not a real "inference", but is here for operating with
+ /// sub-expressions in a more polymorphic manner.
+ RangeSet inferAs(const llvm::APSInt &Val, QualType) {
+ return {RangeFactory, Val};
+ }
+
+ /// Infer range information from symbol in the context of the given type.
+ RangeSet inferAs(SymbolRef Sym, QualType DestType) {
+ QualType ActualType = Sym->getType();
+ // Check that we can reason about the symbol at all.
+ if (ActualType->isIntegralOrEnumerationType() ||
+ Loc::isLocType(ActualType)) {
+ return infer(Sym);
+ }
+ // Otherwise, let's simply infer from the destination type.
+    // We couldn't figure out anything else about that expression.
+ return infer(DestType);
+ }
+
+ RangeSet infer(SymbolRef Sym) {
+ const RangeSet *AssociatedRange = State->get<ConstraintRange>(Sym);
+
+ // If Sym is a difference of symbols A - B, then maybe we have range set
+ // stored for B - A.
+ const RangeSet *RangeAssociatedWithNegatedSym =
+ getRangeForMinusSymbol(State, Sym);
+
+ // If we have range set stored for both A - B and B - A then calculate the
+ // effective range set by intersecting the range set for A - B and the
+ // negated range set of B - A.
+ if (AssociatedRange && RangeAssociatedWithNegatedSym)
+ return AssociatedRange->Intersect(
+ ValueFactory, RangeFactory,
+ RangeAssociatedWithNegatedSym->Negate(ValueFactory, RangeFactory));
+
+ if (AssociatedRange)
+ return *AssociatedRange;
+
+ if (RangeAssociatedWithNegatedSym)
+ return RangeAssociatedWithNegatedSym->Negate(ValueFactory, RangeFactory);
+
+ // If Sym is a comparison expression (except <=>),
+ // find any other comparisons with the same operands.
+ // See function description.
+ const RangeSet CmpRangeSet = getRangeForComparisonSymbol(State, Sym);
+ if (!CmpRangeSet.isEmpty())
+ return CmpRangeSet;
+
+ return Visit(Sym);
+ }
+
+ /// Infer range information solely from the type.
+ RangeSet infer(QualType T) {
+ // Lazily generate a new RangeSet representing all possible values for the
+ // given symbol type.
+ RangeSet Result(RangeFactory, ValueFactory.getMinValue(T),
+ ValueFactory.getMaxValue(T));
+
+ // References are known to be non-zero.
+ if (T->isReferenceType())
+ return assumeNonZero(Result, T);
+
+ return Result;
+ }
+
+ template <class BinarySymExprTy>
+ RangeSet VisitBinaryOperator(const BinarySymExprTy *Sym) {
+ // TODO #1: VisitBinaryOperator implementation might not make a good
+ // use of the inferred ranges. In this case, we might be calculating
+ // everything for nothing. This being said, we should introduce some
+ // sort of laziness mechanism here.
+ //
+ // TODO #2: We didn't go into the nested expressions before, so it
+ // might cause us spending much more time doing the inference.
+ // This can be a problem for deeply nested expressions that are
+ // involved in conditions and get tested continuously. We definitely
+ // need to address this issue and introduce some sort of caching
+ // in here.
+ QualType ResultType = Sym->getType();
+ return VisitBinaryOperator(inferAs(Sym->getLHS(), ResultType),
+ Sym->getOpcode(),
+ inferAs(Sym->getRHS(), ResultType), ResultType);
+ }
+
+ RangeSet VisitBinaryOperator(RangeSet LHS, BinaryOperator::Opcode Op,
+ RangeSet RHS, QualType T) {
+ switch (Op) {
+ case BO_Or:
+ return VisitBinaryOperator<BO_Or>(LHS, RHS, T);
+ case BO_And:
+ return VisitBinaryOperator<BO_And>(LHS, RHS, T);
+ case BO_Rem:
+ return VisitBinaryOperator<BO_Rem>(LHS, RHS, T);
+ default:
+ return infer(T);
+ }
+ }
+
+ //===----------------------------------------------------------------------===//
+ // Ranges and operators
+ //===----------------------------------------------------------------------===//
+
+ /// Return a rough approximation of the given range set.
+ ///
+ /// For the range set:
+ /// { [x_0, y_0], [x_1, y_1], ... , [x_N, y_N] }
+ /// it will return the range [x_0, y_N].
+ static Range fillGaps(RangeSet Origin) {
+ assert(!Origin.isEmpty());
+ return {Origin.getMinValue(), Origin.getMaxValue()};
+ }
+
+ /// Try to convert given range into the given type.
+ ///
+  /// It will return llvm::None if the trivial conversion is not possible.
+ llvm::Optional<Range> convert(const Range &Origin, APSIntType To) {
+ if (To.testInRange(Origin.From(), false) != APSIntType::RTR_Within ||
+ To.testInRange(Origin.To(), false) != APSIntType::RTR_Within) {
+ return llvm::None;
+ }
+ return Range(ValueFactory.Convert(To, Origin.From()),
+ ValueFactory.Convert(To, Origin.To()));
+ }
+
+ template <BinaryOperator::Opcode Op>
+ RangeSet VisitBinaryOperator(RangeSet LHS, RangeSet RHS, QualType T) {
+ // We should propagate information about unfeasbility of one of the
+ // operands to the resulting range.
+ if (LHS.isEmpty() || RHS.isEmpty()) {
+ return RangeFactory.getEmptySet();
+ }
+
+ Range CoarseLHS = fillGaps(LHS);
+ Range CoarseRHS = fillGaps(RHS);
+
+ APSIntType ResultType = ValueFactory.getAPSIntType(T);
+
+ // We need to convert ranges to the resulting type, so we can compare values
+ // and combine them in a meaningful (in terms of the given operation) way.
+ auto ConvertedCoarseLHS = convert(CoarseLHS, ResultType);
+ auto ConvertedCoarseRHS = convert(CoarseRHS, ResultType);
+
+ // It is hard to reason about ranges when conversion changes
+ // borders of the ranges.
+ if (!ConvertedCoarseLHS || !ConvertedCoarseRHS) {
+ return infer(T);
+ }
+
+ return VisitBinaryOperator<Op>(*ConvertedCoarseLHS, *ConvertedCoarseRHS, T);
+ }
+
+ template <BinaryOperator::Opcode Op>
+ RangeSet VisitBinaryOperator(Range LHS, Range RHS, QualType T) {
+ return infer(T);
+ }
+
+ /// Return a symmetrical range for the given range and type.
+ ///
+ /// If T is signed, return the smallest range [-x..x] that covers the original
+ /// range, or [-min(T), max(T)] if the aforementioned symmetric range doesn't
+ /// exist due to original range covering min(T)).
+ ///
+ /// If T is unsigned, return the smallest range [0..x] that covers the
+ /// original range.
+ Range getSymmetricalRange(Range Origin, QualType T) {
+ APSIntType RangeType = ValueFactory.getAPSIntType(T);
+
+ if (RangeType.isUnsigned()) {
+ return Range(ValueFactory.getMinValue(RangeType), Origin.To());
+ }
+
+ if (Origin.From().isMinSignedValue()) {
+      // If From is the minimal signed value, its absolute value is greater
+ // than the maximal signed value. In order to avoid these
+ // complications, we simply return the whole range.
+ return {ValueFactory.getMinValue(RangeType),
+ ValueFactory.getMaxValue(RangeType)};
+ }
+
+ // At this point, we are sure that the type is signed and we can safely
+ // use unary - operator.
+ //
+ // While calculating absolute maximum, we can use the following formula
+ // because of these reasons:
+ // * If From >= 0 then To >= From and To >= -From.
+ // AbsMax == To == max(To, -From)
+ // * If To <= 0 then -From >= -To and -From >= From.
+ // AbsMax == -From == max(-From, To)
+ // * Otherwise, From <= 0, To >= 0, and
+ // AbsMax == max(abs(From), abs(To))
+ llvm::APSInt AbsMax = std::max(-Origin.From(), Origin.To());
+
+ // Intersection is guaranteed to be non-empty.
+ return {ValueFactory.getValue(-AbsMax), ValueFactory.getValue(AbsMax)};
+ }
+
+ /// Return a range set subtracting zero from \p Domain.
+ RangeSet assumeNonZero(RangeSet Domain, QualType T) {
+ APSIntType IntType = ValueFactory.getAPSIntType(T);
+ return Domain.Intersect(ValueFactory, RangeFactory,
+ ++IntType.getZeroValue(), --IntType.getZeroValue());
+ }
+
+ // FIXME: Once SValBuilder supports unary minus, we should use SValBuilder to
+ // obtain the negated symbolic expression instead of constructing the
+ // symbol manually. This will allow us to support finding ranges of not
+ // only negated SymSymExpr-type expressions, but also of other, simpler
+ // expressions which we currently do not know how to negate.
+ const RangeSet *getRangeForMinusSymbol(ProgramStateRef State, SymbolRef Sym) {
+ if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(Sym)) {
+ if (SSE->getOpcode() == BO_Sub) {
+ QualType T = Sym->getType();
+ SymbolManager &SymMgr = State->getSymbolManager();
+ SymbolRef negSym =
+ SymMgr.getSymSymExpr(SSE->getRHS(), BO_Sub, SSE->getLHS(), T);
+
+ if (const RangeSet *negV = State->get<ConstraintRange>(negSym)) {
+ // Unsigned range set cannot be negated, unless it is [0, 0].
+ if (T->isUnsignedIntegerOrEnumerationType() ||
+ T->isSignedIntegerOrEnumerationType())
+ return negV;
+ }
+ }
+ }
+ return nullptr;
+ }
+
+ // Returns ranges only for binary comparison operators (except <=>)
+ // when left and right operands are symbolic values.
+ // Finds any other comparisons with the same operands.
+ // Then do logical calculations and refuse impossible branches.
+ // E.g. (x < y) and (x > y) at the same time are impossible.
+ // E.g. (x >= y) and (x != y) at the same time makes (x > y) true only.
+ // E.g. (x == y) and (y == x) are just reversed but the same.
+ // It covers all possible combinations (see CmpOpTable description).
+ // Note that `x` and `y` can also stand for subexpressions,
+ // not only for actual symbols.
+ RangeSet getRangeForComparisonSymbol(ProgramStateRef State, SymbolRef Sym) {
+ const RangeSet EmptyRangeSet = RangeFactory.getEmptySet();
+
+ auto SSE = dyn_cast<SymSymExpr>(Sym);
+ if (!SSE)
+ return EmptyRangeSet;
+
+ BinaryOperatorKind CurrentOP = SSE->getOpcode();
+
+ // We currently do not support <=> (C++20).
+ if (!BinaryOperator::isComparisonOp(CurrentOP) || (CurrentOP == BO_Cmp))
+ return EmptyRangeSet;
+
+ static const OperatorRelationsTable CmpOpTable{};
+
+ const SymExpr *LHS = SSE->getLHS();
+ const SymExpr *RHS = SSE->getRHS();
+ QualType T = SSE->getType();
+
+ SymbolManager &SymMgr = State->getSymbolManager();
+ const llvm::APSInt &Zero = ValueFactory.getValue(0, T);
+ const llvm::APSInt &One = ValueFactory.getValue(1, T);
+ const RangeSet TrueRangeSet(RangeFactory, One, One);
+ const RangeSet FalseRangeSet(RangeFactory, Zero, Zero);
+
+ int UnknownStates = 0;
+
+    // Loop goes through all of the columns except the last one ('UnknownX2').
+ // We treat `UnknownX2` column separately at the end of the loop body.
+ for (size_t i = 0; i < CmpOpTable.getCmpOpCount(); ++i) {
+
+ // Let's find an expression e.g. (x < y).
+ BinaryOperatorKind QueriedOP = OperatorRelationsTable::getOpFromIndex(i);
+ const SymSymExpr *SymSym = SymMgr.getSymSymExpr(LHS, QueriedOP, RHS, T);
+ const RangeSet *QueriedRangeSet = State->get<ConstraintRange>(SymSym);
+
+ // If ranges were not previously found,
+ // try to find a reversed expression (y > x).
+ if (!QueriedRangeSet) {
+ const BinaryOperatorKind ROP =
+ BinaryOperator::reverseComparisonOp(QueriedOP);
+ SymSym = SymMgr.getSymSymExpr(RHS, ROP, LHS, T);
+ QueriedRangeSet = State->get<ConstraintRange>(SymSym);
+ }
+
+ if (!QueriedRangeSet || QueriedRangeSet->isEmpty())
+ continue;
+
+ const llvm::APSInt *ConcreteValue = QueriedRangeSet->getConcreteValue();
+ const bool isInFalseBranch =
+ ConcreteValue ? (*ConcreteValue == 0) : false;
+
+ // If it is a false branch, we shall be guided by opposite operator,
+ // because the table is made assuming we are in the true branch.
+ // E.g. when (x <= y) is false, then (x > y) is true.
+ if (isInFalseBranch)
+ QueriedOP = BinaryOperator::negateComparisonOp(QueriedOP);
+
+ OperatorRelationsTable::TriStateKind BranchState =
+ CmpOpTable.getCmpOpState(CurrentOP, QueriedOP);
+
+ if (BranchState == OperatorRelationsTable::Unknown) {
+ if (++UnknownStates == 2)
+ // If we met both Unknown states.
+ // if (x <= y) // assume true
+ // if (x != y) // assume true
+ // if (x < y) // would be also true
+ // Get a state from `UnknownX2` column.
+ BranchState = CmpOpTable.getCmpOpStateForUnknownX2(CurrentOP);
+ else
+ continue;
+ }
+
+ return (BranchState == OperatorRelationsTable::True) ? TrueRangeSet
+ : FalseRangeSet;
+ }
+
+ return EmptyRangeSet;
+ }
+
+ BasicValueFactory &ValueFactory;
+ RangeSet::Factory &RangeFactory;
+ ProgramStateRef State;
+};
+
+template <>
+RangeSet SymbolicRangeInferrer::VisitBinaryOperator<BO_Or>(Range LHS, Range RHS,
+ QualType T) {
+ APSIntType ResultType = ValueFactory.getAPSIntType(T);
+ llvm::APSInt Zero = ResultType.getZeroValue();
+
+ bool IsLHSPositiveOrZero = LHS.From() >= Zero;
+ bool IsRHSPositiveOrZero = RHS.From() >= Zero;
+
+ bool IsLHSNegative = LHS.To() < Zero;
+ bool IsRHSNegative = RHS.To() < Zero;
+
+ // Check if both ranges have the same sign.
+ if ((IsLHSPositiveOrZero && IsRHSPositiveOrZero) ||
+ (IsLHSNegative && IsRHSNegative)) {
+ // The result is definitely greater than or equal to any of the operands.
+ const llvm::APSInt &Min = std::max(LHS.From(), RHS.From());
+
+ // We estimate maximal value for positives as the maximal value for the
+ // given type. For negatives, we estimate it with -1 (i.e. all bits set).
+ //
+ // TODO: We basically limit the resulting range from below, but don't do
+ // anything with the upper bound.
+ //
+ // For positive operands, it can be done as follows: for the upper
+ // bound of LHS and RHS we calculate the most significant bit set.
+ // Let's call it the N-th bit. Then we can estimate the maximal
+ // number to be 2^(N+1)-1, i.e. the number with all the bits up to
+ // the N-th bit set.
+ const llvm::APSInt &Max = IsLHSNegative
+ ? ValueFactory.getValue(--Zero)
+ : ValueFactory.getMaxValue(ResultType);
+
+ return {RangeFactory, ValueFactory.getValue(Min), Max};
+ }
+
+ // Otherwise, let's check if at least one of the operands is negative.
+ if (IsLHSNegative || IsRHSNegative) {
+ // This means that the result is definitely negative as well.
+ return {RangeFactory, ValueFactory.getMinValue(ResultType),
+ ValueFactory.getValue(--Zero)};
+ }
+
+ RangeSet DefaultRange = infer(T);
+
+ // It is pretty hard to reason about operands with different signs
+ // (and especially with possibly different signs). We simply check if it
+ // can be zero. In order to conclude that the result could not be zero,
+ // at least one of the operands should be definitely not zero itself.
+ if (!LHS.Includes(Zero) || !RHS.Includes(Zero)) {
+ return assumeNonZero(DefaultRange, T);
+ }
+
+ // Nothing much else to do here.
+ return DefaultRange;
+}
+
+template <>
+RangeSet SymbolicRangeInferrer::VisitBinaryOperator<BO_And>(Range LHS,
+ Range RHS,
+ QualType T) {
+ APSIntType ResultType = ValueFactory.getAPSIntType(T);
+ llvm::APSInt Zero = ResultType.getZeroValue();
+
+ bool IsLHSPositiveOrZero = LHS.From() >= Zero;
+ bool IsRHSPositiveOrZero = RHS.From() >= Zero;
+
+ bool IsLHSNegative = LHS.To() < Zero;
+ bool IsRHSNegative = RHS.To() < Zero;
+
+ // Check if both ranges have the same sign.
+ if ((IsLHSPositiveOrZero && IsRHSPositiveOrZero) ||
+ (IsLHSNegative && IsRHSNegative)) {
+ // The result is definitely less than or equal to any of the operands.
+ const llvm::APSInt &Max = std::min(LHS.To(), RHS.To());
+
+ // We conservatively estimate lower bound to be the smallest positive
+ // or negative value corresponding to the sign of the operands.
+ const llvm::APSInt &Min = IsLHSNegative
+ ? ValueFactory.getMinValue(ResultType)
+ : ValueFactory.getValue(Zero);
+
+ return {RangeFactory, Min, Max};
+ }
+
+ // Otherwise, let's check if at least one of the operands is positive.
+ if (IsLHSPositiveOrZero || IsRHSPositiveOrZero) {
+ // This makes result definitely positive.
+ //
+ // We can also reason about a maximal value by finding the maximal
+ // value of the positive operand.
+ const llvm::APSInt &Max = IsLHSPositiveOrZero ? LHS.To() : RHS.To();
+
+ // The minimal value on the other hand is much harder to reason about.
+ // The only thing we know for sure is that the result is positive.
+ return {RangeFactory, ValueFactory.getValue(Zero),
+ ValueFactory.getValue(Max)};
+ }
+
+ // Nothing much else to do here.
+ return infer(T);
+}
+
+template <>
+RangeSet SymbolicRangeInferrer::VisitBinaryOperator<BO_Rem>(Range LHS,
+ Range RHS,
+ QualType T) {
+ llvm::APSInt Zero = ValueFactory.getAPSIntType(T).getZeroValue();
+
+ Range ConservativeRange = getSymmetricalRange(RHS, T);
+
+ llvm::APSInt Max = ConservativeRange.To();
+ llvm::APSInt Min = ConservativeRange.From();
+
+ if (Max == Zero) {
+ // It's an undefined behaviour to divide by 0 and it seems like we know
+ // for sure that RHS is 0. Let's say that the resulting range is
+ // simply infeasible for that matter.
+ return RangeFactory.getEmptySet();
+ }
+
+ // At this point, our conservative range is closed. The result, however,
+ // couldn't be greater than the RHS' maximal absolute value. Because of
+ // this reason, we turn the range into open (or half-open in case of
+ // unsigned integers).
+ //
+ // While we operate on integer values, an open interval (a, b) can be easily
+ // represented by the closed interval [a + 1, b - 1]. And this is exactly
+ // what we do next.
+ //
+ // If we are dealing with unsigned case, we shouldn't move the lower bound.
+ if (Min.isSigned()) {
+ ++Min;
+ }
+ --Max;
+
+ bool IsLHSPositiveOrZero = LHS.From() >= Zero;
+ bool IsRHSPositiveOrZero = RHS.From() >= Zero;
+
+ // The result of the remainder operator with negative operands is
+ // implementation-defined. Positive cases are much easier to reason about.
+ if (IsLHSPositiveOrZero && IsRHSPositiveOrZero) {
+ // If maximal value of LHS is less than maximal value of RHS,
+ // the result won't get greater than LHS.To().
+ Max = std::min(LHS.To(), Max);
+ // We want to check if it is a situation similar to the following:
+ //
+ // <------------|---[ LHS ]--------[ RHS ]----->
+ // -INF 0 +INF
+ //
+ // In this situation, we can conclude that (LHS / RHS) == 0 and
+ // (LHS % RHS) == LHS.
+ Min = LHS.To() < RHS.From() ? LHS.From() : Zero;
+ }
+
+ // Nevertheless, the symmetrical range for RHS is a conservative estimate
+ // for any sign of either LHS, or RHS.
+ return {RangeFactory, ValueFactory.getValue(Min), ValueFactory.getValue(Max)};
+}
+
class RangeConstraintManager : public RangedConstraintManager {
public:
- RangeConstraintManager(SubEngine *SE, SValBuilder &SVB)
- : RangedConstraintManager(SE, SVB) {}
+ RangeConstraintManager(ExprEngine *EE, SValBuilder &SVB)
+ : RangedConstraintManager(EE, SVB) {}
//===------------------------------------------------------------------===//
// Implementation for interface from ConstraintManager.
@@ -305,8 +971,6 @@ private:
RangeSet::Factory F;
RangeSet getRange(ProgramStateRef State, SymbolRef Sym);
- const RangeSet* getRangeForMinusSymbol(ProgramStateRef State,
- SymbolRef Sym);
RangeSet getSymLTRange(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
@@ -323,13 +987,13 @@ private:
RangeSet getSymGERange(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
const llvm::APSInt &Adjustment);
-
};
} // end anonymous namespace
std::unique_ptr<ConstraintManager>
-ento::CreateRangeConstraintManager(ProgramStateManager &StMgr, SubEngine *Eng) {
+ento::CreateRangeConstraintManager(ProgramStateManager &StMgr,
+ ExprEngine *Eng) {
return std::make_unique<RangeConstraintManager>(Eng, StMgr.getSValBuilder());
}
@@ -429,113 +1093,9 @@ RangeConstraintManager::removeDeadBindings(ProgramStateRef State,
return Changed ? State->set<ConstraintRange>(CR) : State;
}
-/// Return a range set subtracting zero from \p Domain.
-static RangeSet assumeNonZero(
- BasicValueFactory &BV,
- RangeSet::Factory &F,
- SymbolRef Sym,
- RangeSet Domain) {
- APSIntType IntType = BV.getAPSIntType(Sym->getType());
- return Domain.Intersect(BV, F, ++IntType.getZeroValue(),
- --IntType.getZeroValue());
-}
-
-/// Apply implicit constraints for bitwise OR- and AND-.
-/// For unsigned types, bitwise OR with a constant always returns
-/// a value greater-or-equal than the constant, and bitwise AND
-/// returns a value less-or-equal then the constant.
-///
-/// Pattern matches the expression \p Sym against those rule,
-/// and applies the required constraints.
-/// \p Input Previously established expression range set
-static RangeSet applyBitwiseConstraints(
- BasicValueFactory &BV,
- RangeSet::Factory &F,
- RangeSet Input,
- const SymIntExpr* SIE) {
- QualType T = SIE->getType();
- bool IsUnsigned = T->isUnsignedIntegerType();
- const llvm::APSInt &RHS = SIE->getRHS();
- const llvm::APSInt &Zero = BV.getAPSIntType(T).getZeroValue();
- BinaryOperator::Opcode Operator = SIE->getOpcode();
-
- // For unsigned types, the output of bitwise-or is bigger-or-equal than RHS.
- if (Operator == BO_Or && IsUnsigned)
- return Input.Intersect(BV, F, RHS, BV.getMaxValue(T));
-
- // Bitwise-or with a non-zero constant is always non-zero.
- if (Operator == BO_Or && RHS != Zero)
- return assumeNonZero(BV, F, SIE, Input);
-
- // For unsigned types, or positive RHS,
- // bitwise-and output is always smaller-or-equal than RHS (assuming two's
- // complement representation of signed types).
- if (Operator == BO_And && (IsUnsigned || RHS >= Zero))
- return Input.Intersect(BV, F, BV.getMinValue(T), RHS);
-
- return Input;
-}
-
RangeSet RangeConstraintManager::getRange(ProgramStateRef State,
SymbolRef Sym) {
- ConstraintRangeTy::data_type *V = State->get<ConstraintRange>(Sym);
-
- // If Sym is a difference of symbols A - B, then maybe we have range set
- // stored for B - A.
- BasicValueFactory &BV = getBasicVals();
- const RangeSet *R = getRangeForMinusSymbol(State, Sym);
-
- // If we have range set stored for both A - B and B - A then calculate the
- // effective range set by intersecting the range set for A - B and the
- // negated range set of B - A.
- if (V && R)
- return V->Intersect(BV, F, R->Negate(BV, F));
- if (V)
- return *V;
- if (R)
- return R->Negate(BV, F);
-
- // Lazily generate a new RangeSet representing all possible values for the
- // given symbol type.
- QualType T = Sym->getType();
-
- RangeSet Result(F, BV.getMinValue(T), BV.getMaxValue(T));
-
- // References are known to be non-zero.
- if (T->isReferenceType())
- return assumeNonZero(BV, F, Sym, Result);
-
- // Known constraints on ranges of bitwise expressions.
- if (const SymIntExpr* SIE = dyn_cast<SymIntExpr>(Sym))
- return applyBitwiseConstraints(BV, F, Result, SIE);
-
- return Result;
-}
-
-// FIXME: Once SValBuilder supports unary minus, we should use SValBuilder to
-// obtain the negated symbolic expression instead of constructing the
-// symbol manually. This will allow us to support finding ranges of not
-// only negated SymSymExpr-type expressions, but also of other, simpler
-// expressions which we currently do not know how to negate.
-const RangeSet*
-RangeConstraintManager::getRangeForMinusSymbol(ProgramStateRef State,
- SymbolRef Sym) {
- if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(Sym)) {
- if (SSE->getOpcode() == BO_Sub) {
- QualType T = Sym->getType();
- SymbolManager &SymMgr = State->getSymbolManager();
- SymbolRef negSym = SymMgr.getSymSymExpr(SSE->getRHS(), BO_Sub,
- SSE->getLHS(), T);
- if (const RangeSet *negV = State->get<ConstraintRange>(negSym)) {
- // Unsigned range set cannot be negated, unless it is [0, 0].
- if ((negV->getConcreteValue() &&
- (*negV->getConcreteValue() == 0)) ||
- T->isSignedIntegerOrEnumerationType())
- return negV;
- }
- }
- }
- return nullptr;
+ return SymbolicRangeInferrer::inferRange(getBasicVals(), F, State, Sym);
}
//===------------------------------------------------------------------------===
diff --git a/clang/lib/StaticAnalyzer/Core/RegionStore.cpp b/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
index 4797f564a837..57fde32bc01d 100644
--- a/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -23,10 +23,11 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/Support/raw_ostream.h"
@@ -381,7 +382,7 @@ public:
: StoreManager(mgr), Features(f),
RBFactory(mgr.getAllocator()), CBFactory(mgr.getAllocator()),
SmallStructLimit(0) {
- SubEngine &Eng = StateMgr.getOwningEngine();
+ ExprEngine &Eng = StateMgr.getOwningEngine();
AnalyzerOptions &Options = Eng.getAnalysisManager().options;
SmallStructLimit = Options.RegionStoreSmallStructLimit;
}
@@ -622,15 +623,6 @@ public: // Part of public interface to class.
SymbolReaper& SymReaper) override;
//===------------------------------------------------------------------===//
- // Region "extents".
- //===------------------------------------------------------------------===//
-
- // FIXME: This method will soon be eliminated; see the note in Store.h.
- DefinedOrUnknownSVal getSizeInElements(ProgramStateRef state,
- const MemRegion* R,
- QualType EleTy) override;
-
- //===------------------------------------------------------------------===//
// Utility methods.
//===------------------------------------------------------------------===//
@@ -876,7 +868,7 @@ collectSubRegionBindings(SmallVectorImpl<BindingPair> &Bindings,
// Find the length (in bits) of the region being invalidated.
uint64_t Length = UINT64_MAX;
- SVal Extent = Top->getExtent(SVB);
+ SVal Extent = Top->getMemRegionManager().getStaticSize(Top, SVB);
if (Optional<nonloc::ConcreteInt> ExtentCI =
Extent.getAs<nonloc::ConcreteInt>()) {
const llvm::APSInt &ExtentInt = ExtentCI->getValue();
@@ -1387,37 +1379,6 @@ RegionStoreManager::invalidateRegions(Store store,
}
//===----------------------------------------------------------------------===//
-// Extents for regions.
-//===----------------------------------------------------------------------===//
-
-DefinedOrUnknownSVal
-RegionStoreManager::getSizeInElements(ProgramStateRef state,
- const MemRegion *R,
- QualType EleTy) {
- SVal Size = cast<SubRegion>(R)->getExtent(svalBuilder);
- const llvm::APSInt *SizeInt = svalBuilder.getKnownValue(state, Size);
- if (!SizeInt)
- return UnknownVal();
-
- CharUnits RegionSize = CharUnits::fromQuantity(SizeInt->getSExtValue());
-
- if (Ctx.getAsVariableArrayType(EleTy)) {
- // FIXME: We need to track extra state to properly record the size
- // of VLAs. Returning UnknownVal here, however, is a stop-gap so that
- // we don't have a divide-by-zero below.
- return UnknownVal();
- }
-
- CharUnits EleSize = Ctx.getTypeSizeInChars(EleTy);
-
- // If a variable is reinterpreted as a type that doesn't fit into a larger
- // type evenly, round it down.
- // This is a signed value, since it's used in arithmetic with signed indices.
- return svalBuilder.makeIntVal(RegionSize / EleSize,
- svalBuilder.getArrayIndexType());
-}
-
-//===----------------------------------------------------------------------===//
// Location and region casting.
//===----------------------------------------------------------------------===//
@@ -1667,10 +1628,6 @@ RegionStoreManager::findLazyBinding(RegionBindingsConstRef B,
SVal RegionStoreManager::getBindingForElement(RegionBindingsConstRef B,
const ElementRegion* R) {
- // We do not currently model bindings of the CompoundLiteralregion.
- if (isa<CompoundLiteralRegion>(R->getBaseRegion()))
- return UnknownVal();
-
// Check if the region has a binding.
if (const Optional<SVal> &V = B.getDirectBinding(R))
return *V;
diff --git a/clang/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp b/clang/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp
index 6ad12ca0a688..7395622a659c 100644
--- a/clang/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp
+++ b/clang/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp
@@ -13,6 +13,6 @@ using namespace clang;
using namespace ento;
std::unique_ptr<ConstraintManager>
-ento::CreateZ3ConstraintManager(ProgramStateManager &StMgr, SubEngine *Eng) {
+ento::CreateZ3ConstraintManager(ProgramStateManager &StMgr, ExprEngine *Eng) {
return std::make_unique<SMTConstraintManager>(Eng, StMgr.getSValBuilder());
}
diff --git a/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp b/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
index 3a5841137e1a..c00a2c8ba8a2 100644
--- a/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
+++ b/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -24,12 +24,12 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "llvm/ADT/APSInt.h"
diff --git a/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp b/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
index 12332aaf936f..8c2e85601576 100644
--- a/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
+++ b/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/PathDiagnostic.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/Version.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
@@ -49,8 +50,14 @@ public:
void ento::createSarifDiagnosticConsumer(
AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
const std::string &Output, const Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &) {
+ const cross_tu::CrossTranslationUnitContext &CTU) {
+
+ // TODO: Emit an error here.
+ if (Output.empty())
+ return;
+
C.push_back(new SarifDiagnostics(AnalyzerOpts, Output, PP.getLangOpts()));
+ createTextMinimalPathDiagnosticConsumer(AnalyzerOpts, C, Output, PP, CTU);
}
static StringRef getFileName(const FileEntry &FE) {
@@ -106,7 +113,7 @@ static std::string fileNameToURI(StringRef Filename) {
}
});
- return Ret.str().str();
+ return std::string(Ret);
}
static json::Object createArtifactLocation(const FileEntry &FE) {
@@ -322,7 +329,7 @@ static json::Object createRule(const PathDiagnostic &Diag) {
{"name", CheckName},
{"id", CheckName}};
- std::string RuleURI = getRuleHelpURIStr(CheckName);
+ std::string RuleURI = std::string(getRuleHelpURIStr(CheckName));
if (!RuleURI.empty())
Ret["helpUri"] = RuleURI;
diff --git a/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp b/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
index 85f60231a276..3709106ad44c 100644
--- a/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
+++ b/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
@@ -44,8 +44,8 @@ ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef State,
ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef State,
NonLoc Cond, bool Assumption) {
State = assumeAux(State, Cond, Assumption);
- if (NotifyAssumeClients && SU)
- return SU->processAssume(State, Cond, Assumption);
+ if (NotifyAssumeClients && EE)
+ return EE->processAssume(State, Cond, Assumption);
return State;
}
diff --git a/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
index 84c52f53ca5e..2e269f6a596e 100644
--- a/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
+++ b/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -13,8 +13,8 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValVisitor.h"
using namespace clang;
@@ -652,6 +652,11 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
if (LHSValue == 0)
return evalCastFromNonLoc(lhs, resultTy);
return makeSymExprValNN(op, InputLHS, InputRHS, resultTy);
+ case BO_Rem:
+ // 0 % x == 0
+ if (LHSValue == 0)
+ return makeZeroVal(resultTy);
+ LLVM_FALLTHROUGH;
default:
return makeSymExprValNN(op, InputLHS, InputRHS, resultTy);
}
diff --git a/clang/lib/StaticAnalyzer/Core/Store.cpp b/clang/lib/StaticAnalyzer/Core/Store.cpp
index b33129c88cea..ea617bbeeba1 100644
--- a/clang/lib/StaticAnalyzer/Core/Store.cpp
+++ b/clang/lib/StaticAnalyzer/Core/Store.cpp
@@ -134,7 +134,8 @@ const MemRegion *StoreManager::castRegion(const MemRegion *R, QualType CastToTy)
case MemRegion::FieldRegionKind:
case MemRegion::ObjCIvarRegionKind:
case MemRegion::ObjCStringRegionKind:
- case MemRegion::VarRegionKind:
+ case MemRegion::NonParamVarRegionKind:
+ case MemRegion::ParamVarRegionKind:
case MemRegion::CXXTempObjectRegionKind:
case MemRegion::CXXBaseObjectRegionKind:
case MemRegion::CXXDerivedObjectRegionKind:
diff --git a/clang/lib/StaticAnalyzer/Core/SubEngine.cpp b/clang/lib/StaticAnalyzer/Core/SubEngine.cpp
deleted file mode 100644
index d7ddd9cf4610..000000000000
--- a/clang/lib/StaticAnalyzer/Core/SubEngine.cpp
+++ /dev/null
@@ -1,13 +0,0 @@
-//== SubEngine.cpp - Interface of the subengine of CoreEngine ------*- C++ -*-//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
-
-using namespace clang::ento;
-
-void SubEngine::anchor() { }
diff --git a/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp b/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
index 675209f6fd7e..6ca7aec9caec 100644
--- a/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
+++ b/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
@@ -34,45 +34,27 @@ using namespace ento;
void SymExpr::anchor() {}
-LLVM_DUMP_METHOD void SymExpr::dump() const {
- dumpToStream(llvm::errs());
-}
+LLVM_DUMP_METHOD void SymExpr::dump() const { dumpToStream(llvm::errs()); }
-void SymIntExpr::dumpToStream(raw_ostream &os) const {
- os << '(';
- getLHS()->dumpToStream(os);
- os << ") "
- << BinaryOperator::getOpcodeStr(getOpcode()) << ' ';
- if (getRHS().isUnsigned())
- os << getRHS().getZExtValue();
- else
- os << getRHS().getSExtValue();
- if (getRHS().isUnsigned())
- os << 'U';
+void BinarySymExpr::dumpToStreamImpl(raw_ostream &OS, const SymExpr *Sym) {
+ OS << '(';
+ Sym->dumpToStream(OS);
+ OS << ')';
}
-void IntSymExpr::dumpToStream(raw_ostream &os) const {
- if (getLHS().isUnsigned())
- os << getLHS().getZExtValue();
+void BinarySymExpr::dumpToStreamImpl(raw_ostream &OS,
+ const llvm::APSInt &Value) {
+ if (Value.isUnsigned())
+ OS << Value.getZExtValue();
else
- os << getLHS().getSExtValue();
- if (getLHS().isUnsigned())
- os << 'U';
- os << ' '
- << BinaryOperator::getOpcodeStr(getOpcode())
- << " (";
- getRHS()->dumpToStream(os);
- os << ')';
+ OS << Value.getSExtValue();
+ if (Value.isUnsigned())
+ OS << 'U';
}
-void SymSymExpr::dumpToStream(raw_ostream &os) const {
- os << '(';
- getLHS()->dumpToStream(os);
- os << ") "
- << BinaryOperator::getOpcodeStr(getOpcode())
- << " (";
- getRHS()->dumpToStream(os);
- os << ')';
+void BinarySymExpr::dumpToStreamImpl(raw_ostream &OS,
+ BinaryOperator::Opcode Op) {
+ OS << ' ' << BinaryOperator::getOpcodeStr(Op) << ' ';
}
void SymbolCast::dumpToStream(raw_ostream &os) const {
@@ -329,7 +311,7 @@ QualType SymbolDerived::getType() const {
}
QualType SymbolExtent::getType() const {
- ASTContext &Ctx = R->getMemRegionManager()->getContext();
+ ASTContext &Ctx = R->getMemRegionManager().getContext();
return Ctx.getSizeType();
}
@@ -341,10 +323,6 @@ QualType SymbolRegionValue::getType() const {
return R->getValueType();
}
-SymbolManager::~SymbolManager() {
- llvm::DeleteContainerSeconds(SymbolDependencies);
-}
-
bool SymbolManager::canSymbolicate(QualType T) {
T = T.getCanonicalType();
@@ -362,13 +340,9 @@ bool SymbolManager::canSymbolicate(QualType T) {
void SymbolManager::addSymbolDependency(const SymbolRef Primary,
const SymbolRef Dependent) {
- SymbolDependTy::iterator I = SymbolDependencies.find(Primary);
- SymbolRefSmallVectorTy *dependencies = nullptr;
- if (I == SymbolDependencies.end()) {
- dependencies = new SymbolRefSmallVectorTy();
- SymbolDependencies[Primary] = dependencies;
- } else {
- dependencies = I->second;
+ auto &dependencies = SymbolDependencies[Primary];
+ if (!dependencies) {
+ dependencies = std::make_unique<SymbolRefSmallVectorTy>();
}
dependencies->push_back(Dependent);
}
@@ -378,7 +352,7 @@ const SymbolRefSmallVectorTy *SymbolManager::getDependentSymbols(
SymbolDependTy::const_iterator I = SymbolDependencies.find(Primary);
if (I == SymbolDependencies.end())
return nullptr;
- return I->second;
+ return I->second.get();
}
void SymbolReaper::markDependentsLive(SymbolRef sym) {
@@ -542,6 +516,11 @@ bool SymbolReaper::isLive(const VarRegion *VR, bool includeStoreBindings) const{
if (!Loc)
return true;
+ // Anonymous parameters of an inheriting constructor are live for the entire
+ // duration of the constructor.
+ if (isa<CXXInheritedCtorInitExpr>(Loc))
+ return true;
+
if (LCtx->getAnalysis<RelaxedLiveVariables>()->isLive(Loc, VR->getDecl()))
return true;
diff --git a/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp b/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp
new file mode 100644
index 000000000000..f4c7e5978e19
--- /dev/null
+++ b/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp
@@ -0,0 +1,156 @@
+//===--- TextDiagnostics.cpp - Text Diagnostics for Paths -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TextDiagnostics object.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/PathDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Version.h"
+#include "clang/CrossTU/CrossTranslationUnit.h"
+#include "clang/Frontend/ASTUnit.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Rewrite/Core/Rewriter.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
+#include "clang/Tooling/Core/Replacement.h"
+#include "clang/Tooling/Tooling.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Casting.h"
+
+using namespace clang;
+using namespace ento;
+using namespace tooling;
+
+namespace {
+/// Emits minimal diagnostics (report message + notes) for the 'none' output
+/// type to the standard error, or to complement many others. Emits detailed
+/// diagnostics in textual format for the 'text' output type.
+class TextDiagnostics : public PathDiagnosticConsumer {
+ DiagnosticsEngine &DiagEng;
+ const LangOptions &LO;
+ const bool IncludePath = false;
+ const bool ShouldEmitAsError = false;
+ const bool ApplyFixIts = false;
+ const bool ShouldDisplayCheckerName = false;
+
+public:
+ TextDiagnostics(DiagnosticsEngine &DiagEng, const LangOptions &LO,
+ bool ShouldIncludePath, const AnalyzerOptions &AnOpts)
+ : DiagEng(DiagEng), LO(LO), IncludePath(ShouldIncludePath),
+ ShouldEmitAsError(AnOpts.AnalyzerWerror),
+ ApplyFixIts(AnOpts.ShouldApplyFixIts),
+ ShouldDisplayCheckerName(AnOpts.ShouldDisplayCheckerNameForText) {}
+ ~TextDiagnostics() override {}
+
+ StringRef getName() const override { return "TextDiagnostics"; }
+
+ bool supportsLogicalOpControlFlow() const override { return true; }
+ bool supportsCrossFileDiagnostics() const override { return true; }
+
+ PathGenerationScheme getGenerationScheme() const override {
+ return IncludePath ? Minimal : None;
+ }
+
+ void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
+ FilesMade *filesMade) override {
+ unsigned WarnID =
+ ShouldEmitAsError
+ ? DiagEng.getCustomDiagID(DiagnosticsEngine::Error, "%0")
+ : DiagEng.getCustomDiagID(DiagnosticsEngine::Warning, "%0");
+ unsigned NoteID = DiagEng.getCustomDiagID(DiagnosticsEngine::Note, "%0");
+ SourceManager &SM = DiagEng.getSourceManager();
+
+ Replacements Repls;
+ auto reportPiece = [&](unsigned ID, FullSourceLoc Loc, StringRef String,
+ ArrayRef<SourceRange> Ranges,
+ ArrayRef<FixItHint> Fixits) {
+ if (!ApplyFixIts) {
+ DiagEng.Report(Loc, ID) << String << Ranges << Fixits;
+ return;
+ }
+
+ DiagEng.Report(Loc, ID) << String << Ranges;
+ for (const FixItHint &Hint : Fixits) {
+ Replacement Repl(SM, Hint.RemoveRange, Hint.CodeToInsert);
+
+ if (llvm::Error Err = Repls.add(Repl)) {
+ llvm::errs() << "Error applying replacement " << Repl.toString()
+ << ": " << Err << "\n";
+ }
+ }
+ };
+
+ for (std::vector<const PathDiagnostic *>::iterator I = Diags.begin(),
+ E = Diags.end();
+ I != E; ++I) {
+ const PathDiagnostic *PD = *I;
+ std::string WarningMsg =
+ (ShouldDisplayCheckerName ? " [" + PD->getCheckerName() + "]" : "")
+ .str();
+
+ reportPiece(WarnID, PD->getLocation().asLocation(),
+ (PD->getShortDescription() + WarningMsg).str(),
+ PD->path.back()->getRanges(), PD->path.back()->getFixits());
+
+ // First, add extra notes, even if paths should not be included.
+ for (const auto &Piece : PD->path) {
+ if (!isa<PathDiagnosticNotePiece>(Piece.get()))
+ continue;
+
+ reportPiece(NoteID, Piece->getLocation().asLocation(),
+ Piece->getString(), Piece->getRanges(),
+ Piece->getFixits());
+ }
+
+ if (!IncludePath)
+ continue;
+
+ // Then, add the path notes if necessary.
+ PathPieces FlatPath = PD->path.flatten(/*ShouldFlattenMacros=*/true);
+ for (const auto &Piece : FlatPath) {
+ if (isa<PathDiagnosticNotePiece>(Piece.get()))
+ continue;
+
+ reportPiece(NoteID, Piece->getLocation().asLocation(),
+ Piece->getString(), Piece->getRanges(),
+ Piece->getFixits());
+ }
+ }
+
+ if (!ApplyFixIts || Repls.empty())
+ return;
+
+ Rewriter Rewrite(SM, LO);
+ if (!applyAllReplacements(Repls, Rewrite)) {
+ llvm::errs() << "An error occured during applying fix-it.\n";
+ }
+
+ Rewrite.overwriteChangedFiles();
+ }
+};
+} // end anonymous namespace
+
+void ento::createTextPathDiagnosticConsumer(
+ AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
+ const std::string &Prefix, const clang::Preprocessor &PP,
+ const cross_tu::CrossTranslationUnitContext &CTU) {
+ C.emplace_back(new TextDiagnostics(PP.getDiagnostics(), PP.getLangOpts(),
+ /*ShouldIncludePath*/ true, AnalyzerOpts));
+}
+
+void ento::createTextMinimalPathDiagnosticConsumer(
+ AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
+ const std::string &Prefix, const clang::Preprocessor &PP,
+ const cross_tu::CrossTranslationUnitContext &CTU) {
+ C.emplace_back(new TextDiagnostics(PP.getDiagnostics(), PP.getLangOpts(),
+ /*ShouldIncludePath*/ false,
+ AnalyzerOpts));
+}
diff --git a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index fea8100c3b3b..392049e21c6e 100644
--- a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -12,7 +12,6 @@
#include "clang/StaticAnalyzer/Frontend/AnalysisConsumer.h"
#include "ModelInjector.h"
-#include "clang/Analysis/PathDiagnostic.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
@@ -21,10 +20,12 @@
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CallGraph.h"
#include "clang/Analysis/CodeInjector.h"
+#include "clang/Analysis/PathDiagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/CrossTU/CrossTranslationUnit.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Rewrite/Core/Rewriter.h"
#include "clang/StaticAnalyzer/Checkers/LocalCheckers.h"
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
@@ -32,7 +33,6 @@
#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
-#include "clang/StaticAnalyzer/Frontend/CheckerRegistration.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/FileSystem.h"
@@ -61,114 +61,6 @@ STATISTIC(PercentReachableBlocks, "The % of reachable basic blocks.");
STATISTIC(MaxCFGSize, "The maximum number of basic blocks in a function.");
//===----------------------------------------------------------------------===//
-// Special PathDiagnosticConsumers.
-//===----------------------------------------------------------------------===//
-
-void ento::createPlistHTMLDiagnosticConsumer(
- AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
- const std::string &prefix, const Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &CTU) {
- createHTMLDiagnosticConsumer(AnalyzerOpts, C,
- llvm::sys::path::parent_path(prefix), PP, CTU);
- createPlistMultiFileDiagnosticConsumer(AnalyzerOpts, C, prefix, PP, CTU);
-}
-
-void ento::createTextPathDiagnosticConsumer(
- AnalyzerOptions &AnalyzerOpts, PathDiagnosticConsumers &C,
- const std::string &Prefix, const clang::Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &CTU) {
- llvm_unreachable("'text' consumer should be enabled on ClangDiags");
-}
-
-namespace {
-class ClangDiagPathDiagConsumer : public PathDiagnosticConsumer {
- DiagnosticsEngine &Diag;
- bool IncludePath = false, ShouldEmitAsError = false, FixitsAsRemarks = false;
-
-public:
- ClangDiagPathDiagConsumer(DiagnosticsEngine &Diag)
- : Diag(Diag) {}
- ~ClangDiagPathDiagConsumer() override {}
- StringRef getName() const override { return "ClangDiags"; }
-
- bool supportsLogicalOpControlFlow() const override { return true; }
- bool supportsCrossFileDiagnostics() const override { return true; }
-
- PathGenerationScheme getGenerationScheme() const override {
- return IncludePath ? Minimal : None;
- }
-
- void enablePaths() { IncludePath = true; }
- void enableWerror() { ShouldEmitAsError = true; }
- void enableFixitsAsRemarks() { FixitsAsRemarks = true; }
-
- void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
- FilesMade *filesMade) override {
- unsigned WarnID =
- ShouldEmitAsError
- ? Diag.getCustomDiagID(DiagnosticsEngine::Error, "%0")
- : Diag.getCustomDiagID(DiagnosticsEngine::Warning, "%0");
- unsigned NoteID = Diag.getCustomDiagID(DiagnosticsEngine::Note, "%0");
- unsigned RemarkID = Diag.getCustomDiagID(DiagnosticsEngine::Remark, "%0");
-
- auto reportPiece =
- [&](unsigned ID, SourceLocation Loc, StringRef String,
- ArrayRef<SourceRange> Ranges, ArrayRef<FixItHint> Fixits) {
- if (!FixitsAsRemarks) {
- Diag.Report(Loc, ID) << String << Ranges << Fixits;
- } else {
- Diag.Report(Loc, ID) << String << Ranges;
- for (const FixItHint &Hint : Fixits) {
- SourceManager &SM = Diag.getSourceManager();
- llvm::SmallString<128> Str;
- llvm::raw_svector_ostream OS(Str);
- // FIXME: Add support for InsertFromRange and
- // BeforePreviousInsertion.
- assert(!Hint.InsertFromRange.isValid() && "Not implemented yet!");
- assert(!Hint.BeforePreviousInsertions && "Not implemented yet!");
- OS << SM.getSpellingColumnNumber(Hint.RemoveRange.getBegin())
- << "-" << SM.getSpellingColumnNumber(Hint.RemoveRange.getEnd())
- << ": '" << Hint.CodeToInsert << "'";
- Diag.Report(Loc, RemarkID) << OS.str();
- }
- }
- };
-
- for (std::vector<const PathDiagnostic *>::iterator I = Diags.begin(),
- E = Diags.end();
- I != E; ++I) {
- const PathDiagnostic *PD = *I;
- reportPiece(WarnID, PD->getLocation().asLocation(),
- PD->getShortDescription(), PD->path.back()->getRanges(),
- PD->path.back()->getFixits());
-
- // First, add extra notes, even if paths should not be included.
- for (const auto &Piece : PD->path) {
- if (!isa<PathDiagnosticNotePiece>(Piece.get()))
- continue;
-
- reportPiece(NoteID, Piece->getLocation().asLocation(),
- Piece->getString(), Piece->getRanges(), Piece->getFixits());
- }
-
- if (!IncludePath)
- continue;
-
- // Then, add the path notes if necessary.
- PathPieces FlatPath = PD->path.flatten(/*ShouldFlattenMacros=*/true);
- for (const auto &Piece : FlatPath) {
- if (isa<PathDiagnosticNotePiece>(Piece.get()))
- continue;
-
- reportPiece(NoteID, Piece->getLocation().asLocation(),
- Piece->getString(), Piece->getRanges(), Piece->getFixits());
- }
- }
- }
-};
-} // end anonymous namespace
-
-//===----------------------------------------------------------------------===//
// AnalysisConsumer declaration.
//===----------------------------------------------------------------------===//
@@ -192,7 +84,7 @@ class AnalysisConsumer : public AnalysisASTConsumer,
public:
ASTContext *Ctx;
- const Preprocessor &PP;
+ Preprocessor &PP;
const std::string OutDir;
AnalyzerOptionsRef Opts;
ArrayRef<std::string> Plugins;
@@ -253,31 +145,16 @@ public:
}
void DigestAnalyzerOptions() {
- if (Opts->AnalysisDiagOpt != PD_NONE) {
- // Create the PathDiagnosticConsumer.
- ClangDiagPathDiagConsumer *clangDiags =
- new ClangDiagPathDiagConsumer(PP.getDiagnostics());
- PathConsumers.push_back(clangDiags);
-
- if (Opts->AnalyzerWerror)
- clangDiags->enableWerror();
-
- if (Opts->ShouldEmitFixItHintsAsRemarks)
- clangDiags->enableFixitsAsRemarks();
-
- if (Opts->AnalysisDiagOpt == PD_TEXT) {
- clangDiags->enablePaths();
-
- } else if (!OutDir.empty()) {
- switch (Opts->AnalysisDiagOpt) {
- default:
+ switch (Opts->AnalysisDiagOpt) {
+ case PD_NONE:
+ break;
#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATEFN) \
case PD_##NAME: \
CREATEFN(*Opts.get(), PathConsumers, OutDir, PP, CTU); \
break;
#include "clang/StaticAnalyzer/Core/Analyses.def"
- }
- }
+ default:
+ llvm_unreachable("Unknown analyzer output type!");
}
// Create the analyzer component creators.
@@ -313,30 +190,29 @@ public:
else if (Mode == AM_Path) {
llvm::errs() << " (Path, ";
switch (IMode) {
- case ExprEngine::Inline_Minimal:
- llvm::errs() << " Inline_Minimal";
- break;
- case ExprEngine::Inline_Regular:
- llvm::errs() << " Inline_Regular";
- break;
+ case ExprEngine::Inline_Minimal:
+ llvm::errs() << " Inline_Minimal";
+ break;
+ case ExprEngine::Inline_Regular:
+ llvm::errs() << " Inline_Regular";
+ break;
}
llvm::errs() << ")";
- }
- else
+ } else
assert(Mode == (AM_Syntax | AM_Path) && "Unexpected mode!");
- llvm::errs() << ": " << Loc.getFilename() << ' '
- << getFunctionName(D) << '\n';
+ llvm::errs() << ": " << Loc.getFilename() << ' ' << getFunctionName(D)
+ << '\n';
}
}
void Initialize(ASTContext &Context) override {
Ctx = &Context;
- checkerMgr = createCheckerManager(
- *Ctx, *Opts, Plugins, CheckerRegistrationFns, PP.getDiagnostics());
+ checkerMgr = std::make_unique<CheckerManager>(*Ctx, *Opts, PP, Plugins,
+ CheckerRegistrationFns);
- Mgr = std::make_unique<AnalysisManager>(*Ctx, PathConsumers, CreateStoreMgr,
- CreateConstraintMgr,
+ Mgr = std::make_unique<AnalysisManager>(*Ctx, PP, PathConsumers,
+ CreateStoreMgr, CreateConstraintMgr,
checkerMgr.get(), *Opts, Injector);
}
@@ -469,7 +345,7 @@ private:
/// Print \p S to stderr if \c Opts->AnalyzerDisplayProgress is set.
void reportAnalyzerProgress(StringRef S);
-};
+}; // namespace
} // end anonymous namespace
@@ -503,6 +379,13 @@ static bool shouldSkipFunction(const Decl *D,
if (VisitedAsTopLevel.count(D))
return true;
+ // Skip analysis of inheriting constructors as top-level functions. These
+ // constructors don't even have a body written down in the code, so even if
+ // we find a bug, we won't be able to display it.
+ if (const auto *CD = dyn_cast<CXXConstructorDecl>(D))
+ if (CD->isInheritingConstructor())
+ return true;
+
// We want to re-analyse the functions as top level in the following cases:
// - The 'init' methods should be reanalyzed because
// ObjCNonNilReturnValueChecker assumes that '[super init]' never returns
diff --git a/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp b/clang/lib/StaticAnalyzer/Frontend/AnalyzerHelpFlags.cpp
index f4f06e32cd1d..eb6014a0629d 100644
--- a/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
+++ b/clang/lib/StaticAnalyzer/Frontend/AnalyzerHelpFlags.cpp
@@ -10,8 +10,9 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Frontend/CheckerRegistration.h"
+#include "clang/StaticAnalyzer/Frontend/AnalyzerHelpFlags.h"
#include "clang/Basic/Diagnostic.h"
+#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -24,53 +25,36 @@
using namespace clang;
using namespace ento;
-std::unique_ptr<CheckerManager> ento::createCheckerManager(
- ASTContext &context,
- AnalyzerOptions &opts,
- ArrayRef<std::string> plugins,
- ArrayRef<std::function<void(CheckerRegistry &)>> checkerRegistrationFns,
- DiagnosticsEngine &diags) {
- auto checkerMgr = std::make_unique<CheckerManager>(context, opts);
-
- CheckerRegistry allCheckers(plugins, diags, opts, context.getLangOpts(),
- checkerRegistrationFns);
-
- allCheckers.initializeManager(*checkerMgr);
- allCheckers.validateCheckerOptions();
- checkerMgr->finishedCheckerRegistration();
-
- return checkerMgr;
-}
-
-void ento::printCheckerHelp(raw_ostream &out, ArrayRef<std::string> plugins,
- AnalyzerOptions &anopts,
- DiagnosticsEngine &diags,
- const LangOptions &langOpts) {
+void ento::printCheckerHelp(raw_ostream &out, CompilerInstance &CI) {
out << "OVERVIEW: Clang Static Analyzer Checkers List\n\n";
out << "USAGE: -analyzer-checker <CHECKER or PACKAGE,...>\n\n";
- CheckerRegistry(plugins, diags, anopts, langOpts)
- .printCheckerWithDescList(out);
+ auto CheckerMgr = std::make_unique<CheckerManager>(
+ *CI.getAnalyzerOpts(), CI.getLangOpts(), CI.getDiagnostics(),
+ CI.getFrontendOpts().Plugins);
+
+ CheckerMgr->getCheckerRegistryData().printCheckerWithDescList(
+ *CI.getAnalyzerOpts(), out);
}
-void ento::printEnabledCheckerList(raw_ostream &out,
- ArrayRef<std::string> plugins,
- AnalyzerOptions &anopts,
- DiagnosticsEngine &diags,
- const LangOptions &langOpts) {
+void ento::printEnabledCheckerList(raw_ostream &out, CompilerInstance &CI) {
out << "OVERVIEW: Clang Static Analyzer Enabled Checkers List\n\n";
- CheckerRegistry(plugins, diags, anopts, langOpts)
- .printEnabledCheckerList(out);
+ auto CheckerMgr = std::make_unique<CheckerManager>(
+ *CI.getAnalyzerOpts(), CI.getLangOpts(), CI.getDiagnostics(),
+ CI.getFrontendOpts().Plugins);
+
+ CheckerMgr->getCheckerRegistryData().printEnabledCheckerList(out);
}
-void ento::printCheckerConfigList(raw_ostream &OS,
- ArrayRef<std::string> plugins,
- AnalyzerOptions &opts,
- DiagnosticsEngine &diags,
- const LangOptions &LangOpts) {
- CheckerRegistry(plugins, diags, opts, LangOpts)
- .printCheckerOptionList(OS);
+void ento::printCheckerConfigList(raw_ostream &out, CompilerInstance &CI) {
+
+ auto CheckerMgr = std::make_unique<CheckerManager>(
+ *CI.getAnalyzerOpts(), CI.getLangOpts(), CI.getDiagnostics(),
+ CI.getFrontendOpts().Plugins);
+
+ CheckerMgr->getCheckerRegistryData().printCheckerOptionList(
+ *CI.getAnalyzerOpts(), out);
}
void ento::printAnalyzerConfigList(raw_ostream &out) {
diff --git a/clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp b/clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp
index f5c05281adab..528284ca8985 100644
--- a/clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp
+++ b/clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp
@@ -25,9 +25,12 @@
using namespace clang;
using namespace ento;
+using namespace checker_registry;
using llvm::sys::DynamicLibrary;
-using RegisterCheckersFn = void (*)(CheckerRegistry &);
+//===----------------------------------------------------------------------===//
+// Utilities.
+//===----------------------------------------------------------------------===//
static bool isCompatibleAPIVersion(const char *VersionString) {
// If the version string is null, its not an analyzer plugin.
@@ -39,80 +42,17 @@ static bool isCompatibleAPIVersion(const char *VersionString) {
return strcmp(VersionString, CLANG_ANALYZER_API_VERSION_STRING) == 0;
}
-namespace {
-template <class T> struct FullNameLT {
- bool operator()(const T &Lhs, const T &Rhs) {
- return Lhs.FullName < Rhs.FullName;
- }
-};
-
-using PackageNameLT = FullNameLT<CheckerRegistry::PackageInfo>;
-using CheckerNameLT = FullNameLT<CheckerRegistry::CheckerInfo>;
-} // end of anonymous namespace
-
-template <class CheckerOrPackageInfoList>
-static
- typename std::conditional<std::is_const<CheckerOrPackageInfoList>::value,
- typename CheckerOrPackageInfoList::const_iterator,
- typename CheckerOrPackageInfoList::iterator>::type
- binaryFind(CheckerOrPackageInfoList &Collection, StringRef FullName) {
-
- using CheckerOrPackage = typename CheckerOrPackageInfoList::value_type;
- using CheckerOrPackageFullNameLT = FullNameLT<CheckerOrPackage>;
-
- assert(std::is_sorted(Collection.begin(), Collection.end(),
- CheckerOrPackageFullNameLT{}) &&
- "In order to efficiently gather checkers/packages, this function "
- "expects them to be already sorted!");
-
- return llvm::lower_bound(Collection, CheckerOrPackage(FullName),
- CheckerOrPackageFullNameLT{});
-}
-
static constexpr char PackageSeparator = '.';
-static bool isInPackage(const CheckerRegistry::CheckerInfo &Checker,
- StringRef PackageName) {
- // Does the checker's full name have the package as a prefix?
- if (!Checker.FullName.startswith(PackageName))
- return false;
-
- // Is the package actually just the name of a specific checker?
- if (Checker.FullName.size() == PackageName.size())
- return true;
-
- // Is the checker in the package (or a subpackage)?
- if (Checker.FullName[PackageName.size()] == PackageSeparator)
- return true;
-
- return false;
-}
-
-CheckerRegistry::CheckerInfoListRange
-CheckerRegistry::getMutableCheckersForCmdLineArg(StringRef CmdLineArg) {
- auto It = binaryFind(Checkers, CmdLineArg);
-
- if (!isInPackage(*It, CmdLineArg))
- return {Checkers.end(), Checkers.end()};
-
- // See how large the package is.
- // If the package doesn't exist, assume the option refers to a single
- // checker.
- size_t Size = 1;
- llvm::StringMap<size_t>::const_iterator PackageSize =
- PackageSizes.find(CmdLineArg);
-
- if (PackageSize != PackageSizes.end())
- Size = PackageSize->getValue();
-
- return {It, It + Size};
-}
+//===----------------------------------------------------------------------===//
+// Methods of CheckerRegistry.
+//===----------------------------------------------------------------------===//
CheckerRegistry::CheckerRegistry(
- ArrayRef<std::string> Plugins, DiagnosticsEngine &Diags,
- AnalyzerOptions &AnOpts, const LangOptions &LangOpts,
+ CheckerRegistryData &Data, ArrayRef<std::string> Plugins,
+ DiagnosticsEngine &Diags, AnalyzerOptions &AnOpts,
ArrayRef<std::function<void(CheckerRegistry &)>> CheckerRegistrationFns)
- : Diags(Diags), AnOpts(AnOpts), LangOpts(LangOpts) {
+ : Data(Data), Diags(Diags), AnOpts(AnOpts) {
// Register builtin checkers.
#define GET_CHECKERS
@@ -152,9 +92,10 @@ CheckerRegistry::CheckerRegistry(
continue;
}
+ using RegisterPluginCheckerFn = void (*)(CheckerRegistry &);
// Register its checkers.
- RegisterCheckersFn RegisterPluginCheckers =
- reinterpret_cast<RegisterCheckersFn>(
+ RegisterPluginCheckerFn RegisterPluginCheckers =
+ reinterpret_cast<RegisterPluginCheckerFn>(
Lib.getAddressOfSymbol("clang_registerCheckers"));
if (RegisterPluginCheckers)
RegisterPluginCheckers(*this);
@@ -171,38 +112,67 @@ CheckerRegistry::CheckerRegistry(
// FIXME: Alphabetical sort puts 'experimental' in the middle.
// Would it be better to name it '~experimental' or something else
// that's ASCIIbetically last?
- llvm::sort(Packages, PackageNameLT{});
- llvm::sort(Checkers, CheckerNameLT{});
+ llvm::sort(Data.Packages, checker_registry::PackageNameLT{});
+ llvm::sort(Data.Checkers, checker_registry::CheckerNameLT{});
#define GET_CHECKER_DEPENDENCIES
#define CHECKER_DEPENDENCY(FULLNAME, DEPENDENCY) \
addDependency(FULLNAME, DEPENDENCY);
+#define GET_CHECKER_WEAK_DEPENDENCIES
+
+#define CHECKER_WEAK_DEPENDENCY(FULLNAME, DEPENDENCY) \
+ addWeakDependency(FULLNAME, DEPENDENCY);
+
#define GET_CHECKER_OPTIONS
-#define CHECKER_OPTION(TYPE, FULLNAME, CMDFLAG, DESC, DEFAULT_VAL, DEVELOPMENT_STATUS, IS_HIDDEN) \
- addCheckerOption(TYPE, FULLNAME, CMDFLAG, DEFAULT_VAL, DESC, DEVELOPMENT_STATUS, IS_HIDDEN);
+#define CHECKER_OPTION(TYPE, FULLNAME, CMDFLAG, DESC, DEFAULT_VAL, \
+ DEVELOPMENT_STATUS, IS_HIDDEN) \
+ addCheckerOption(TYPE, FULLNAME, CMDFLAG, DEFAULT_VAL, DESC, \
+ DEVELOPMENT_STATUS, IS_HIDDEN);
#define GET_PACKAGE_OPTIONS
-#define PACKAGE_OPTION(TYPE, FULLNAME, CMDFLAG, DESC, DEFAULT_VAL, DEVELOPMENT_STATUS, IS_HIDDEN) \
- addPackageOption(TYPE, FULLNAME, CMDFLAG, DEFAULT_VAL, DESC, DEVELOPMENT_STATUS, IS_HIDDEN);
+#define PACKAGE_OPTION(TYPE, FULLNAME, CMDFLAG, DESC, DEFAULT_VAL, \
+ DEVELOPMENT_STATUS, IS_HIDDEN) \
+ addPackageOption(TYPE, FULLNAME, CMDFLAG, DEFAULT_VAL, DESC, \
+ DEVELOPMENT_STATUS, IS_HIDDEN);
#include "clang/StaticAnalyzer/Checkers/Checkers.inc"
#undef CHECKER_DEPENDENCY
#undef GET_CHECKER_DEPENDENCIES
+#undef CHECKER_WEAK_DEPENDENCY
+#undef GET_CHECKER_WEAK_DEPENDENCIES
#undef CHECKER_OPTION
#undef GET_CHECKER_OPTIONS
#undef PACKAGE_OPTION
#undef GET_PACKAGE_OPTIONS
- resolveDependencies();
+ resolveDependencies<true>();
+ resolveDependencies<false>();
+
+#ifndef NDEBUG
+ for (auto &DepPair : Data.Dependencies) {
+ for (auto &WeakDepPair : Data.WeakDependencies) {
+ // Some assertions to enforce that strong dependencies are relations in
+ // between purely modeling checkers, and weak dependencies are about
+ // diagnostics.
+ assert(WeakDepPair != DepPair &&
+ "A checker cannot strong and weak depend on the same checker!");
+ assert(WeakDepPair.first != DepPair.second &&
+ "A strong dependency mustn't have weak dependencies!");
+ assert(WeakDepPair.second != DepPair.second &&
+ "A strong dependency mustn't be a weak dependency as well!");
+ }
+ }
+#endif
+
resolveCheckerAndPackageOptions();
// Parse '-analyzer-checker' and '-analyzer-disable-checker' options from the
// command line.
for (const std::pair<std::string, bool> &Opt : AnOpts.CheckersAndPackages) {
CheckerInfoListRange CheckerForCmdLineArg =
- getMutableCheckersForCmdLineArg(Opt.first);
+ Data.getMutableCheckersForCmdLineArg(Opt.first);
if (CheckerForCmdLineArg.begin() == CheckerForCmdLineArg.end()) {
Diags.Report(diag::err_unknown_analyzer_checker_or_package) << Opt.first;
@@ -214,109 +184,169 @@ CheckerRegistry::CheckerRegistry(
: StateFromCmdLine::State_Disabled;
}
}
+ validateCheckerOptions();
}
-/// Collects dependencies in \p ret, returns false on failure.
-static bool
-collectDependenciesImpl(const CheckerRegistry::ConstCheckerInfoList &Deps,
- const LangOptions &LO,
- CheckerRegistry::CheckerInfoSet &Ret);
-
-/// Collects dependenies in \p enabledCheckers. Return None on failure.
-LLVM_NODISCARD
-static llvm::Optional<CheckerRegistry::CheckerInfoSet>
-collectDependencies(const CheckerRegistry::CheckerInfo &checker,
- const LangOptions &LO) {
-
- CheckerRegistry::CheckerInfoSet Ret;
- // Add dependencies to the enabled checkers only if all of them can be
- // enabled.
- if (!collectDependenciesImpl(checker.Dependencies, LO, Ret))
- return None;
-
- return Ret;
-}
-
-static bool
-collectDependenciesImpl(const CheckerRegistry::ConstCheckerInfoList &Deps,
- const LangOptions &LO,
- CheckerRegistry::CheckerInfoSet &Ret) {
+//===----------------------------------------------------------------------===//
+// Dependency resolving.
+//===----------------------------------------------------------------------===//
- for (const CheckerRegistry::CheckerInfo *Dependency : Deps) {
+template <typename IsEnabledFn>
+static bool collectStrongDependencies(const ConstCheckerInfoList &Deps,
+ const CheckerManager &Mgr,
+ CheckerInfoSet &Ret,
+ IsEnabledFn IsEnabled);
+
+/// Collects weak dependencies in \p enabledData.Checkers.
+template <typename IsEnabledFn>
+static void collectWeakDependencies(const ConstCheckerInfoList &Deps,
+ const CheckerManager &Mgr,
+ CheckerInfoSet &Ret, IsEnabledFn IsEnabled);
+
+void CheckerRegistry::initializeRegistry(const CheckerManager &Mgr) {
+ // First, we calculate the list of enabled checkers as specified by the
+ // invocation. Weak dependencies will not enable their unspecified strong
+ // depenencies, but its only after resolving strong dependencies for all
+ // checkers when we know whether they will be enabled.
+ CheckerInfoSet Tmp;
+ auto IsEnabledFromCmdLine = [&](const CheckerInfo *Checker) {
+ return !Checker->isDisabled(Mgr);
+ };
+ for (const CheckerInfo &Checker : Data.Checkers) {
+ if (!Checker.isEnabled(Mgr))
+ continue;
- if (Dependency->isDisabled(LO))
- return false;
+ CheckerInfoSet Deps;
+ if (!collectStrongDependencies(Checker.Dependencies, Mgr, Deps,
+ IsEnabledFromCmdLine)) {
+ // If we failed to enable any of the dependencies, don't enable this
+ // checker.
+ continue;
+ }
- // Collect dependencies recursively.
- if (!collectDependenciesImpl(Dependency->Dependencies, LO, Ret))
- return false;
+ Tmp.insert(Deps.begin(), Deps.end());
- Ret.insert(Dependency);
+ // Enable the checker.
+ Tmp.insert(&Checker);
}
- return true;
-}
-
-CheckerRegistry::CheckerInfoSet CheckerRegistry::getEnabledCheckers() const {
-
- CheckerInfoSet EnabledCheckers;
-
- for (const CheckerInfo &Checker : Checkers) {
- if (!Checker.isEnabled(LangOpts))
+ // Calculate enabled checkers with the correct registration order. As this is
+ // done recursively, it's arguably cheaper, but for sure less error prone to
+ // recalculate from scratch.
+ auto IsEnabled = [&](const CheckerInfo *Checker) {
+ return llvm::is_contained(Tmp, Checker);
+ };
+ for (const CheckerInfo &Checker : Data.Checkers) {
+ if (!Checker.isEnabled(Mgr))
continue;
- // Recursively enable its dependencies.
- llvm::Optional<CheckerInfoSet> Deps =
- collectDependencies(Checker, LangOpts);
+ CheckerInfoSet Deps;
- if (!Deps) {
+ collectWeakDependencies(Checker.WeakDependencies, Mgr, Deps, IsEnabled);
+
+ if (!collectStrongDependencies(Checker.Dependencies, Mgr, Deps,
+ IsEnabledFromCmdLine)) {
// If we failed to enable any of the dependencies, don't enable this
// checker.
continue;
}
// Note that set_union also preserves the order of insertion.
- EnabledCheckers.set_union(*Deps);
+ Data.EnabledCheckers.set_union(Deps);
+ Data.EnabledCheckers.insert(&Checker);
+ }
+}
- // Enable the checker.
- EnabledCheckers.insert(&Checker);
+template <typename IsEnabledFn>
+static bool collectStrongDependencies(const ConstCheckerInfoList &Deps,
+ const CheckerManager &Mgr,
+ CheckerInfoSet &Ret,
+ IsEnabledFn IsEnabled) {
+
+ for (const CheckerInfo *Dependency : Deps) {
+ if (!IsEnabled(Dependency))
+ return false;
+
+ // Collect dependencies recursively.
+ if (!collectStrongDependencies(Dependency->Dependencies, Mgr, Ret,
+ IsEnabled))
+ return false;
+ Ret.insert(Dependency);
}
- return EnabledCheckers;
+ return true;
+}
+
+template <typename IsEnabledFn>
+static void collectWeakDependencies(const ConstCheckerInfoList &WeakDeps,
+ const CheckerManager &Mgr,
+ CheckerInfoSet &Ret,
+ IsEnabledFn IsEnabled) {
+
+ for (const CheckerInfo *Dependency : WeakDeps) {
+ // Don't enable this checker if strong dependencies are unsatisfied, but
+ // assume that weak dependencies are transitive.
+ collectWeakDependencies(Dependency->WeakDependencies, Mgr, Ret, IsEnabled);
+
+ if (IsEnabled(Dependency) &&
+ collectStrongDependencies(Dependency->Dependencies, Mgr, Ret,
+ IsEnabled))
+ Ret.insert(Dependency);
+ }
}
-void CheckerRegistry::resolveDependencies() {
- for (const std::pair<StringRef, StringRef> &Entry : Dependencies) {
- auto CheckerIt = binaryFind(Checkers, Entry.first);
- assert(CheckerIt != Checkers.end() && CheckerIt->FullName == Entry.first &&
+template <bool IsWeak> void CheckerRegistry::resolveDependencies() {
+ for (const std::pair<StringRef, StringRef> &Entry :
+ (IsWeak ? Data.WeakDependencies : Data.Dependencies)) {
+
+ auto CheckerIt = binaryFind(Data.Checkers, Entry.first);
+ assert(CheckerIt != Data.Checkers.end() &&
+ CheckerIt->FullName == Entry.first &&
"Failed to find the checker while attempting to set up its "
"dependencies!");
- auto DependencyIt = binaryFind(Checkers, Entry.second);
- assert(DependencyIt != Checkers.end() &&
+ auto DependencyIt = binaryFind(Data.Checkers, Entry.second);
+ assert(DependencyIt != Data.Checkers.end() &&
DependencyIt->FullName == Entry.second &&
"Failed to find the dependency of a checker!");
- CheckerIt->Dependencies.emplace_back(&*DependencyIt);
+ // We do allow diagnostics from unit test/example dependency checkers.
+ assert((DependencyIt->FullName.startswith("test") ||
+ DependencyIt->FullName.startswith("example") || IsWeak ||
+ DependencyIt->IsHidden) &&
+ "Strong dependencies are modeling checkers, and as such "
+ "non-user facing! Mark them hidden in Checkers.td!");
+
+ if (IsWeak)
+ CheckerIt->WeakDependencies.emplace_back(&*DependencyIt);
+ else
+ CheckerIt->Dependencies.emplace_back(&*DependencyIt);
}
-
- Dependencies.clear();
}
void CheckerRegistry::addDependency(StringRef FullName, StringRef Dependency) {
- Dependencies.emplace_back(FullName, Dependency);
+ Data.Dependencies.emplace_back(FullName, Dependency);
}
+void CheckerRegistry::addWeakDependency(StringRef FullName,
+ StringRef Dependency) {
+ Data.WeakDependencies.emplace_back(FullName, Dependency);
+}
+
+//===----------------------------------------------------------------------===//
+// Checker option resolving and validating.
+//===----------------------------------------------------------------------===//
+
/// Insert the checker/package option to AnalyzerOptions' config table, and
/// validate it, if the user supplied it on the command line.
-static void insertAndValidate(StringRef FullName,
- const CheckerRegistry::CmdLineOption &Option,
+static void insertAndValidate(StringRef FullName, const CmdLineOption &Option,
AnalyzerOptions &AnOpts,
DiagnosticsEngine &Diags) {
std::string FullOption = (FullName + ":" + Option.OptionName).str();
- auto It = AnOpts.Config.insert({FullOption, Option.DefaultValStr});
+ auto It =
+ AnOpts.Config.insert({FullOption, std::string(Option.DefaultValStr)});
// Insertation was successful -- CmdLineOption's constructor will validate
// whether values received from plugins or TableGen files are correct.
@@ -337,7 +367,7 @@ static void insertAndValidate(StringRef FullName,
<< FullOption << "a boolean value";
}
- It.first->setValue(Option.DefaultValStr);
+ It.first->setValue(std::string(Option.DefaultValStr));
}
return;
}
@@ -351,17 +381,17 @@ static void insertAndValidate(StringRef FullName,
<< FullOption << "an integer value";
}
- It.first->setValue(Option.DefaultValStr);
+ It.first->setValue(std::string(Option.DefaultValStr));
}
return;
}
}
template <class T>
-static void
-insertOptionToCollection(StringRef FullName, T &Collection,
- const CheckerRegistry::CmdLineOption &Option,
- AnalyzerOptions &AnOpts, DiagnosticsEngine &Diags) {
+static void insertOptionToCollection(StringRef FullName, T &Collection,
+ const CmdLineOption &Option,
+ AnalyzerOptions &AnOpts,
+ DiagnosticsEngine &Diags) {
auto It = binaryFind(Collection, FullName);
assert(It != Collection.end() &&
"Failed to find the checker while attempting to add a command line "
@@ -374,22 +404,20 @@ insertOptionToCollection(StringRef FullName, T &Collection,
void CheckerRegistry::resolveCheckerAndPackageOptions() {
for (const std::pair<StringRef, CmdLineOption> &CheckerOptEntry :
- CheckerOptions) {
- insertOptionToCollection(CheckerOptEntry.first, Checkers,
+ Data.CheckerOptions) {
+ insertOptionToCollection(CheckerOptEntry.first, Data.Checkers,
CheckerOptEntry.second, AnOpts, Diags);
}
- CheckerOptions.clear();
for (const std::pair<StringRef, CmdLineOption> &PackageOptEntry :
- PackageOptions) {
- insertOptionToCollection(PackageOptEntry.first, Packages,
+ Data.PackageOptions) {
+ insertOptionToCollection(PackageOptEntry.first, Data.Packages,
PackageOptEntry.second, AnOpts, Diags);
}
- PackageOptions.clear();
}
void CheckerRegistry::addPackage(StringRef FullName) {
- Packages.emplace_back(PackageInfo(FullName));
+ Data.Packages.emplace_back(PackageInfo(FullName));
}
void CheckerRegistry::addPackageOption(StringRef OptionType,
@@ -399,22 +427,22 @@ void CheckerRegistry::addPackageOption(StringRef OptionType,
StringRef Description,
StringRef DevelopmentStatus,
bool IsHidden) {
- PackageOptions.emplace_back(
+ Data.PackageOptions.emplace_back(
PackageFullName, CmdLineOption{OptionType, OptionName, DefaultValStr,
Description, DevelopmentStatus, IsHidden});
}
-void CheckerRegistry::addChecker(InitializationFunction Rfn,
+void CheckerRegistry::addChecker(RegisterCheckerFn Rfn,
ShouldRegisterFunction Sfn, StringRef Name,
StringRef Desc, StringRef DocsUri,
bool IsHidden) {
- Checkers.emplace_back(Rfn, Sfn, Name, Desc, DocsUri, IsHidden);
+ Data.Checkers.emplace_back(Rfn, Sfn, Name, Desc, DocsUri, IsHidden);
// Record the presence of the checker in its packages.
StringRef PackageName, LeafName;
std::tie(PackageName, LeafName) = Name.rsplit(PackageSeparator);
while (!LeafName.empty()) {
- PackageSizes[PackageName] += 1;
+ Data.PackageSizes[PackageName] += 1;
std::tie(PackageName, LeafName) = PackageName.rsplit(PackageSeparator);
}
}
@@ -426,37 +454,33 @@ void CheckerRegistry::addCheckerOption(StringRef OptionType,
StringRef Description,
StringRef DevelopmentStatus,
bool IsHidden) {
- CheckerOptions.emplace_back(
+ Data.CheckerOptions.emplace_back(
CheckerFullName, CmdLineOption{OptionType, OptionName, DefaultValStr,
Description, DevelopmentStatus, IsHidden});
}
void CheckerRegistry::initializeManager(CheckerManager &CheckerMgr) const {
- // Collect checkers enabled by the options.
- CheckerInfoSet enabledCheckers = getEnabledCheckers();
-
// Initialize the CheckerManager with all enabled checkers.
- for (const auto *Checker : enabledCheckers) {
+ for (const auto *Checker : Data.EnabledCheckers) {
CheckerMgr.setCurrentCheckerName(CheckerNameRef(Checker->FullName));
Checker->Initialize(CheckerMgr);
}
}
-static void
-isOptionContainedIn(const CheckerRegistry::CmdLineOptionList &OptionList,
- StringRef SuppliedChecker, StringRef SuppliedOption,
- const AnalyzerOptions &AnOpts, DiagnosticsEngine &Diags) {
+static void isOptionContainedIn(const CmdLineOptionList &OptionList,
+ StringRef SuppliedChecker,
+ StringRef SuppliedOption,
+ const AnalyzerOptions &AnOpts,
+ DiagnosticsEngine &Diags) {
if (!AnOpts.ShouldEmitErrorsOnInvalidConfigValue)
return;
- using CmdLineOption = CheckerRegistry::CmdLineOption;
-
auto SameOptName = [SuppliedOption](const CmdLineOption &Opt) {
return Opt.OptionName == SuppliedOption;
};
- auto OptionIt = llvm::find_if(OptionList, SameOptName);
+ const auto *OptionIt = llvm::find_if(OptionList, SameOptName);
if (OptionIt == OptionList.end()) {
Diags.Report(diag::err_analyzer_checker_option_unknown)
@@ -485,16 +509,16 @@ void CheckerRegistry::validateCheckerOptions() const {
// it would return with an iterator to the first checker in the core, so we
// we really have to use find here, which uses operator==.
auto CheckerIt =
- llvm::find(Checkers, CheckerInfo(SuppliedCheckerOrPackage));
- if (CheckerIt != Checkers.end()) {
+ llvm::find(Data.Checkers, CheckerInfo(SuppliedCheckerOrPackage));
+ if (CheckerIt != Data.Checkers.end()) {
isOptionContainedIn(CheckerIt->CmdLineOptions, SuppliedCheckerOrPackage,
SuppliedOption, AnOpts, Diags);
continue;
}
- auto PackageIt =
- llvm::find(Packages, PackageInfo(SuppliedCheckerOrPackage));
- if (PackageIt != Packages.end()) {
+ const auto *PackageIt =
+ llvm::find(Data.Packages, PackageInfo(SuppliedCheckerOrPackage));
+ if (PackageIt != Data.Packages.end()) {
isOptionContainedIn(PackageIt->CmdLineOptions, SuppliedCheckerOrPackage,
SuppliedOption, AnOpts, Diags);
continue;
@@ -505,121 +529,3 @@ void CheckerRegistry::validateCheckerOptions() const {
}
}
-void CheckerRegistry::printCheckerWithDescList(raw_ostream &Out,
- size_t MaxNameChars) const {
- // FIXME: Print available packages.
-
- Out << "CHECKERS:\n";
-
- // Find the maximum option length.
- size_t OptionFieldWidth = 0;
- for (const auto &Checker : Checkers) {
- // Limit the amount of padding we are willing to give up for alignment.
- // Package.Name Description [Hidden]
- size_t NameLength = Checker.FullName.size();
- if (NameLength <= MaxNameChars)
- OptionFieldWidth = std::max(OptionFieldWidth, NameLength);
- }
-
- const size_t InitialPad = 2;
-
- auto Print = [=](llvm::raw_ostream &Out, const CheckerInfo &Checker,
- StringRef Description) {
- AnalyzerOptions::printFormattedEntry(Out, {Checker.FullName, Description},
- InitialPad, OptionFieldWidth);
- Out << '\n';
- };
-
- for (const auto &Checker : Checkers) {
- // The order of this if branches is significant, we wouldn't like to display
- // developer checkers even in the alpha output. For example,
- // alpha.cplusplus.IteratorModeling is a modeling checker, hence it's hidden
- // by default, and users (even when the user is a developer of an alpha
- // checker) shouldn't normally tinker with whether they should be enabled.
-
- if (Checker.IsHidden) {
- if (AnOpts.ShowCheckerHelpDeveloper)
- Print(Out, Checker, Checker.Desc);
- continue;
- }
-
- if (Checker.FullName.startswith("alpha")) {
- if (AnOpts.ShowCheckerHelpAlpha)
- Print(Out, Checker,
- ("(Enable only for development!) " + Checker.Desc).str());
- continue;
- }
-
- if (AnOpts.ShowCheckerHelp)
- Print(Out, Checker, Checker.Desc);
- }
-}
-
-void CheckerRegistry::printEnabledCheckerList(raw_ostream &Out) const {
- // Collect checkers enabled by the options.
- CheckerInfoSet EnabledCheckers = getEnabledCheckers();
-
- for (const auto *i : EnabledCheckers)
- Out << i->FullName << '\n';
-}
-
-void CheckerRegistry::printCheckerOptionList(raw_ostream &Out) const {
- Out << "OVERVIEW: Clang Static Analyzer Checker and Package Option List\n\n";
- Out << "USAGE: -analyzer-config <OPTION1=VALUE,OPTION2=VALUE,...>\n\n";
- Out << " -analyzer-config OPTION1=VALUE, -analyzer-config "
- "OPTION2=VALUE, ...\n\n";
- Out << "OPTIONS:\n\n";
-
- std::multimap<StringRef, const CmdLineOption &> OptionMap;
-
- for (const CheckerInfo &Checker : Checkers) {
- for (const CmdLineOption &Option : Checker.CmdLineOptions) {
- OptionMap.insert({Checker.FullName, Option});
- }
- }
-
- for (const PackageInfo &Package : Packages) {
- for (const CmdLineOption &Option : Package.CmdLineOptions) {
- OptionMap.insert({Package.FullName, Option});
- }
- }
-
- auto Print = [] (llvm::raw_ostream &Out, StringRef FullOption, StringRef Desc) {
- AnalyzerOptions::printFormattedEntry(Out, {FullOption, Desc},
- /*InitialPad*/ 2,
- /*EntryWidth*/ 50,
- /*MinLineWidth*/ 90);
- Out << "\n\n";
- };
- for (const std::pair<const StringRef, const CmdLineOption &> &Entry :
- OptionMap) {
- const CmdLineOption &Option = Entry.second;
- std::string FullOption = (Entry.first + ":" + Option.OptionName).str();
-
- std::string Desc =
- ("(" + Option.OptionType + ") " + Option.Description + " (default: " +
- (Option.DefaultValStr.empty() ? "\"\"" : Option.DefaultValStr) + ")")
- .str();
-
- // The list of these if branches is significant, we wouldn't like to
- // display hidden alpha checker options for
- // -analyzer-checker-option-help-alpha.
-
- if (Option.IsHidden) {
- if (AnOpts.ShowCheckerOptionDeveloperList)
- Print(Out, FullOption, Desc);
- continue;
- }
-
- if (Option.DevelopmentStatus == "alpha" ||
- Entry.first.startswith("alpha")) {
- if (AnOpts.ShowCheckerOptionAlphaList)
- Print(Out, FullOption,
- llvm::Twine("(Enable only for development!) " + Desc).str());
- continue;
- }
-
- if (AnOpts.ShowCheckerOptionList)
- Print(Out, FullOption, Desc);
- }
-}
diff --git a/clang/lib/StaticAnalyzer/Frontend/CreateCheckerManager.cpp b/clang/lib/StaticAnalyzer/Frontend/CreateCheckerManager.cpp
new file mode 100644
index 000000000000..21a60785eb52
--- /dev/null
+++ b/clang/lib/StaticAnalyzer/Frontend/CreateCheckerManager.cpp
@@ -0,0 +1,50 @@
+//===- CreateCheckerManager.cpp - Checker Manager constructors --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the Static Analyzer Checker Manager.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Frontend/CheckerRegistry.h"
+#include <memory>
+
+namespace clang {
+namespace ento {
+
+CheckerManager::CheckerManager(
+ ASTContext &Context, AnalyzerOptions &AOptions, const Preprocessor &PP,
+ ArrayRef<std::string> plugins,
+ ArrayRef<std::function<void(CheckerRegistry &)>> checkerRegistrationFns)
+ : Context(&Context), LangOpts(Context.getLangOpts()), AOptions(AOptions),
+ PP(&PP), Diags(Context.getDiagnostics()),
+ RegistryData(std::make_unique<CheckerRegistryData>()) {
+ CheckerRegistry Registry(*RegistryData, plugins, Context.getDiagnostics(),
+ AOptions, checkerRegistrationFns);
+ Registry.initializeRegistry(*this);
+ Registry.initializeManager(*this);
+ finishedCheckerRegistration();
+}
+
+CheckerManager::CheckerManager(AnalyzerOptions &AOptions,
+ const LangOptions &LangOpts,
+ DiagnosticsEngine &Diags,
+ ArrayRef<std::string> plugins)
+ : LangOpts(LangOpts), AOptions(AOptions), Diags(Diags),
+ RegistryData(std::make_unique<CheckerRegistryData>()) {
+ CheckerRegistry Registry(*RegistryData, plugins, Diags, AOptions, {});
+ Registry.initializeRegistry(*this);
+}
+
+CheckerManager::~CheckerManager() {
+ for (const auto &CheckerDtor : CheckerDtors)
+ CheckerDtor();
+}
+
+} // namespace ento
+} // namespace clang
diff --git a/clang/lib/Testing/CommandLineArgs.cpp b/clang/lib/Testing/CommandLineArgs.cpp
new file mode 100644
index 000000000000..cd4d8c188da9
--- /dev/null
+++ b/clang/lib/Testing/CommandLineArgs.cpp
@@ -0,0 +1,70 @@
+//===--- CommandLineArgs.cpp ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Testing/CommandLineArgs.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace clang {
+
+std::vector<std::string> getCommandLineArgsForTesting(TestLanguage Lang) {
+ std::vector<std::string> Args;
+ // Test with basic arguments.
+ switch (Lang) {
+ case Lang_C89:
+ Args = {"-x", "c", "-std=c89"};
+ break;
+ case Lang_C99:
+ Args = {"-x", "c", "-std=c99"};
+ break;
+ case Lang_CXX03:
+ Args = {"-std=c++03", "-frtti"};
+ break;
+ case Lang_CXX11:
+ Args = {"-std=c++11", "-frtti"};
+ break;
+ case Lang_CXX14:
+ Args = {"-std=c++14", "-frtti"};
+ break;
+ case Lang_CXX17:
+ Args = {"-std=c++17", "-frtti"};
+ break;
+ case Lang_CXX20:
+ Args = {"-std=c++20", "-frtti"};
+ break;
+ case Lang_OBJCXX:
+ Args = {"-x", "objective-c++", "-frtti"};
+ break;
+ case Lang_OpenCL:
+ llvm_unreachable("Not implemented yet!");
+ }
+ return Args;
+}
+
+StringRef getFilenameForTesting(TestLanguage Lang) {
+ switch (Lang) {
+ case Lang_C89:
+ case Lang_C99:
+ return "input.c";
+
+ case Lang_CXX03:
+ case Lang_CXX11:
+ case Lang_CXX14:
+ case Lang_CXX17:
+ case Lang_CXX20:
+ return "input.cc";
+
+ case Lang_OpenCL:
+ return "input.cl";
+
+ case Lang_OBJCXX:
+ return "input.mm";
+ }
+ llvm_unreachable("Unhandled TestLanguage enum");
+}
+
+} // end namespace clang
diff --git a/clang/lib/Tooling/ASTDiff/ASTDiff.cpp b/clang/lib/Tooling/ASTDiff/ASTDiff.cpp
index 4d495228cb51..0821863adcc6 100644
--- a/clang/lib/Tooling/ASTDiff/ASTDiff.cpp
+++ b/clang/lib/Tooling/ASTDiff/ASTDiff.cpp
@@ -11,8 +11,9 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/ASTDiff/ASTDiff.h"
-
+#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
#include "llvm/ADT/PriorityQueue.h"
@@ -116,12 +117,12 @@ public:
Impl(SyntaxTree *Parent, Stmt *N, ASTContext &AST);
template <class T>
Impl(SyntaxTree *Parent,
- typename std::enable_if<std::is_base_of<Stmt, T>::value, T>::type *Node,
+ std::enable_if_t<std::is_base_of<Stmt, T>::value, T> *Node,
ASTContext &AST)
: Impl(Parent, dyn_cast<Stmt>(Node), AST) {}
template <class T>
Impl(SyntaxTree *Parent,
- typename std::enable_if<std::is_base_of<Decl, T>::value, T>::type *Node,
+ std::enable_if_t<std::is_base_of<Decl, T>::value, T> *Node,
ASTContext &AST)
: Impl(Parent, dyn_cast<Decl>(Node), AST) {}
@@ -397,7 +398,7 @@ static const DeclContext *getEnclosingDeclContext(ASTContext &AST,
static std::string getInitializerValue(const CXXCtorInitializer *Init,
const PrintingPolicy &TypePP) {
if (Init->isAnyMemberInitializer())
- return Init->getAnyMember()->getName();
+ return std::string(Init->getAnyMember()->getName());
if (Init->isBaseInitializer())
return QualType(Init->getBaseClass(), 0).getAsString(TypePP);
if (Init->isDelegatingInitializer())
@@ -434,36 +435,36 @@ std::string SyntaxTree::Impl::getDeclValue(const Decl *D) const {
T->getTypeForDecl()->getCanonicalTypeInternal().getAsString(TypePP) +
";";
if (auto *U = dyn_cast<UsingDirectiveDecl>(D))
- return U->getNominatedNamespace()->getName();
+ return std::string(U->getNominatedNamespace()->getName());
if (auto *A = dyn_cast<AccessSpecDecl>(D)) {
CharSourceRange Range(A->getSourceRange(), false);
- return Lexer::getSourceText(Range, AST.getSourceManager(),
- AST.getLangOpts());
+ return std::string(
+ Lexer::getSourceText(Range, AST.getSourceManager(), AST.getLangOpts()));
}
return Value;
}
std::string SyntaxTree::Impl::getStmtValue(const Stmt *S) const {
if (auto *U = dyn_cast<UnaryOperator>(S))
- return UnaryOperator::getOpcodeStr(U->getOpcode());
+ return std::string(UnaryOperator::getOpcodeStr(U->getOpcode()));
if (auto *B = dyn_cast<BinaryOperator>(S))
- return B->getOpcodeStr();
+ return std::string(B->getOpcodeStr());
if (auto *M = dyn_cast<MemberExpr>(S))
return getRelativeName(M->getMemberDecl());
if (auto *I = dyn_cast<IntegerLiteral>(S)) {
SmallString<256> Str;
I->getValue().toString(Str, /*Radix=*/10, /*Signed=*/false);
- return Str.str();
+ return std::string(Str.str());
}
if (auto *F = dyn_cast<FloatingLiteral>(S)) {
SmallString<256> Str;
F->getValue().toString(Str);
- return Str.str();
+ return std::string(Str.str());
}
if (auto *D = dyn_cast<DeclRefExpr>(S))
return getRelativeName(D->getDecl(), getEnclosingDeclContext(AST, S));
if (auto *String = dyn_cast<StringLiteral>(S))
- return String->getString();
+ return std::string(String->getString());
if (auto *B = dyn_cast<CXXBoolLiteralExpr>(S))
return B->getValue() ? "true" : "false";
return "";
@@ -683,9 +684,7 @@ private:
}
};
-ast_type_traits::ASTNodeKind Node::getType() const {
- return ASTNode.getNodeKind();
-}
+ASTNodeKind Node::getType() const { return ASTNode.getNodeKind(); }
StringRef Node::getTypeLabel() const { return getType().asStringRef(); }
diff --git a/clang/lib/Tooling/AllTUsExecution.cpp b/clang/lib/Tooling/AllTUsExecution.cpp
index d85075f59607..7707c99c21d0 100644
--- a/clang/lib/Tooling/AllTUsExecution.cpp
+++ b/clang/lib/Tooling/AllTUsExecution.cpp
@@ -8,8 +8,9 @@
#include "clang/Tooling/AllTUsExecution.h"
#include "clang/Tooling/ToolExecutorPluginRegistry.h"
-#include "llvm/Support/Threading.h"
+#include "llvm/Support/Regex.h"
#include "llvm/Support/ThreadPool.h"
+#include "llvm/Support/Threading.h"
#include "llvm/Support/VirtualFileSystem.h"
namespace clang {
@@ -114,8 +115,7 @@ llvm::Error AllTUsToolExecutor::execute(
auto &Action = Actions.front();
{
- llvm::ThreadPool Pool(ThreadCount == 0 ? llvm::hardware_concurrency()
- : ThreadCount);
+ llvm::ThreadPool Pool(llvm::hardware_concurrency(ThreadCount));
for (std::string File : Files) {
Pool.async(
[&](std::string Path) {
diff --git a/clang/lib/Tooling/ArgumentsAdjusters.cpp b/clang/lib/Tooling/ArgumentsAdjusters.cpp
index a609e4ed2469..a857b57fbf7b 100644
--- a/clang/lib/Tooling/ArgumentsAdjusters.cpp
+++ b/clang/lib/Tooling/ArgumentsAdjusters.cpp
@@ -26,7 +26,7 @@ ArgumentsAdjuster getClangSyntaxOnlyAdjuster() {
return [](const CommandLineArguments &Args, StringRef /*unused*/) {
CommandLineArguments AdjustedArgs;
bool HasSyntaxOnly = false;
- const std::vector<llvm::StringRef> OutputCommands = {
+ constexpr llvm::StringRef OutputCommands[] = {
// FIXME: Add other options that generate output.
"-save-temps",
"--save-temps",
@@ -42,6 +42,12 @@ ArgumentsAdjuster getClangSyntaxOnlyAdjuster() {
if (!Arg.startswith("-fcolor-diagnostics") &&
!Arg.startswith("-fdiagnostics-color"))
AdjustedArgs.push_back(Args[i]);
+ // If we strip a color option, make sure we strip any preceding `-Xclang`
+ // option as well.
+ // FIXME: This should be added to most argument adjusters!
+ else if (!AdjustedArgs.empty() && AdjustedArgs.back() == "-Xclang")
+ AdjustedArgs.pop_back();
+
if (Arg == "-fsyntax-only")
HasSyntaxOnly = true;
}
@@ -92,7 +98,8 @@ ArgumentsAdjuster getClangStripDependencyFileAdjuster() {
StringRef Arg = Args[i];
// All dependency-file options begin with -M. These include -MM,
// -MF, -MG, -MP, -MT, -MQ, -MD, and -MMD.
- if (!Arg.startswith("-M")) {
+ if (!Arg.startswith("-M") && !Arg.startswith("/showIncludes") &&
+ !Arg.startswith("-showIncludes")) {
AdjustedArgs.push_back(Args[i]);
continue;
}
diff --git a/clang/lib/Tooling/CompilationDatabase.cpp b/clang/lib/Tooling/CompilationDatabase.cpp
index c453e8d7df19..2b4c26dab96f 100644
--- a/clang/lib/Tooling/CompilationDatabase.cpp
+++ b/clang/lib/Tooling/CompilationDatabase.cpp
@@ -64,16 +64,14 @@ std::unique_ptr<CompilationDatabase>
CompilationDatabase::loadFromDirectory(StringRef BuildDirectory,
std::string &ErrorMessage) {
llvm::raw_string_ostream ErrorStream(ErrorMessage);
- for (CompilationDatabasePluginRegistry::iterator
- It = CompilationDatabasePluginRegistry::begin(),
- Ie = CompilationDatabasePluginRegistry::end();
- It != Ie; ++It) {
+ for (const CompilationDatabasePluginRegistry::entry &Database :
+ CompilationDatabasePluginRegistry::entries()) {
std::string DatabaseErrorMessage;
- std::unique_ptr<CompilationDatabasePlugin> Plugin(It->instantiate());
+ std::unique_ptr<CompilationDatabasePlugin> Plugin(Database.instantiate());
if (std::unique_ptr<CompilationDatabase> DB =
Plugin->loadFromDirectory(BuildDirectory, DatabaseErrorMessage))
return DB;
- ErrorStream << It->getName() << ": " << DatabaseErrorMessage << "\n";
+ ErrorStream << Database.getName() << ": " << DatabaseErrorMessage << "\n";
}
return nullptr;
}
@@ -164,7 +162,7 @@ private:
case driver::Action::InputClass:
if (Collect) {
const auto *IA = cast<driver::InputAction>(A);
- Inputs.push_back(IA->getInputArg().getSpelling());
+ Inputs.push_back(std::string(IA->getInputArg().getSpelling()));
}
break;
@@ -233,7 +231,7 @@ std::string GetClangToolCommand() {
SmallString<128> ClangToolPath;
ClangToolPath = llvm::sys::path::parent_path(ClangExecutable);
llvm::sys::path::append(ClangToolPath, "clang-tool");
- return ClangToolPath.str();
+ return std::string(ClangToolPath.str());
}
} // namespace
@@ -368,8 +366,14 @@ FixedCompilationDatabase::loadFromFile(StringRef Path, std::string &ErrorMsg) {
ErrorMsg = "Error while opening fixed database: " + Result.message();
return nullptr;
}
- std::vector<std::string> Args{llvm::line_iterator(**File),
- llvm::line_iterator()};
+ std::vector<std::string> Args;
+ for (llvm::StringRef Line :
+ llvm::make_range(llvm::line_iterator(**File), llvm::line_iterator())) {
+ // Stray whitespace is almost certainly unintended.
+ Line = Line.trim();
+ if (!Line.empty())
+ Args.push_back(Line.str());
+ }
return std::make_unique<FixedCompilationDatabase>(
llvm::sys::path::parent_path(Path), std::move(Args));
}
@@ -387,8 +391,8 @@ FixedCompilationDatabase(Twine Directory, ArrayRef<std::string> CommandLine) {
std::vector<CompileCommand>
FixedCompilationDatabase::getCompileCommands(StringRef FilePath) const {
std::vector<CompileCommand> Result(CompileCommands);
- Result[0].CommandLine.push_back(FilePath);
- Result[0].Filename = FilePath;
+ Result[0].CommandLine.push_back(std::string(FilePath));
+ Result[0].Filename = std::string(FilePath);
return Result;
}
diff --git a/clang/lib/Tooling/Core/Diagnostic.cpp b/clang/lib/Tooling/Core/Diagnostic.cpp
index 235bd7fc1433..b0c4ea8c5608 100644
--- a/clang/lib/Tooling/Core/Diagnostic.cpp
+++ b/clang/lib/Tooling/Core/Diagnostic.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/Core/Diagnostic.h"
+#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/STLExtras.h"
@@ -25,7 +26,7 @@ DiagnosticMessage::DiagnosticMessage(llvm::StringRef Message,
SourceLocation Loc)
: Message(Message), FileOffset(0) {
assert(Loc.isValid() && Loc.isFileID());
- FilePath = Sources.getFilename(Loc);
+ FilePath = std::string(Sources.getFilename(Loc));
// Don't store offset in the scratch space. It doesn't tell anything to the
// user. Moreover, it depends on the history of macro expansions and thus
@@ -34,6 +35,16 @@ DiagnosticMessage::DiagnosticMessage(llvm::StringRef Message,
FileOffset = Sources.getFileOffset(Loc);
}
+FileByteRange::FileByteRange(
+ const SourceManager &Sources, CharSourceRange Range)
+ : FileOffset(0), Length(0) {
+ FilePath = std::string(Sources.getFilename(Range.getBegin()));
+ if (!FilePath.empty()) {
+ FileOffset = Sources.getFileOffset(Range.getBegin());
+ Length = Sources.getFileOffset(Range.getEnd()) - FileOffset;
+ }
+}
+
Diagnostic::Diagnostic(llvm::StringRef DiagnosticName,
Diagnostic::Level DiagLevel, StringRef BuildDirectory)
: DiagnosticName(DiagnosticName), DiagLevel(DiagLevel),
@@ -42,9 +53,10 @@ Diagnostic::Diagnostic(llvm::StringRef DiagnosticName,
Diagnostic::Diagnostic(llvm::StringRef DiagnosticName,
const DiagnosticMessage &Message,
const SmallVector<DiagnosticMessage, 1> &Notes,
- Level DiagLevel, llvm::StringRef BuildDirectory)
+ Level DiagLevel, llvm::StringRef BuildDirectory,
+ const SmallVector<FileByteRange, 1> &Ranges)
: DiagnosticName(DiagnosticName), Message(Message), Notes(Notes),
- DiagLevel(DiagLevel), BuildDirectory(BuildDirectory) {}
+ DiagLevel(DiagLevel), BuildDirectory(BuildDirectory), Ranges(Ranges) {}
const llvm::StringMap<Replacements> *selectFirstFix(const Diagnostic& D) {
if (!D.Message.Fix.empty())
diff --git a/clang/lib/Tooling/Core/Lookup.cpp b/clang/lib/Tooling/Core/Lookup.cpp
index 735a5df5ed21..712724a268fb 100644
--- a/clang/lib/Tooling/Core/Lookup.cpp
+++ b/clang/lib/Tooling/Core/Lookup.cpp
@@ -11,10 +11,12 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/Core/Lookup.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclarationName.h"
#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/SmallVector.h"
using namespace clang;
using namespace clang::tooling;
@@ -129,7 +131,7 @@ static std::string disambiguateSpellingInScope(StringRef Spelling,
assert(QName.startswith("::"));
assert(QName.endswith(Spelling));
if (Spelling.startswith("::"))
- return Spelling;
+ return std::string(Spelling);
auto UnspelledSpecifier = QName.drop_back(Spelling.size());
llvm::SmallVector<llvm::StringRef, 2> UnspelledScopes;
@@ -168,7 +170,7 @@ static std::string disambiguateSpellingInScope(StringRef Spelling,
};
// Add more qualifiers until the spelling is not ambiguous.
- std::string Disambiguated = Spelling;
+ std::string Disambiguated = std::string(Spelling);
while (IsAmbiguousSpelling(Disambiguated)) {
if (UnspelledScopes.empty()) {
Disambiguated = "::" + Disambiguated;
@@ -206,8 +208,9 @@ std::string tooling::replaceNestedName(const NestedNameSpecifier *Use,
!usingFromDifferentCanonicalNamespace(FromDecl->getDeclContext(),
UseContext)) {
auto Pos = ReplacementString.rfind("::");
- return Pos != StringRef::npos ? ReplacementString.substr(Pos + 2)
- : ReplacementString;
+ return std::string(Pos != StringRef::npos
+ ? ReplacementString.substr(Pos + 2)
+ : ReplacementString);
}
// We did not match this because of a using statement, so we will need to
// figure out how good a namespace match we have with our destination type.
diff --git a/clang/lib/Tooling/Core/Replacement.cpp b/clang/lib/Tooling/Core/Replacement.cpp
index 9ed03655bf2c..ab8e20539559 100644
--- a/clang/lib/Tooling/Core/Replacement.cpp
+++ b/clang/lib/Tooling/Core/Replacement.cpp
@@ -46,8 +46,8 @@ Replacement::Replacement() : FilePath(InvalidLocation) {}
Replacement::Replacement(StringRef FilePath, unsigned Offset, unsigned Length,
StringRef ReplacementText)
- : FilePath(FilePath), ReplacementRange(Offset, Length),
- ReplacementText(ReplacementText) {}
+ : FilePath(std::string(FilePath)), ReplacementRange(Offset, Length),
+ ReplacementText(std::string(ReplacementText)) {}
Replacement::Replacement(const SourceManager &Sources, SourceLocation Start,
unsigned Length, StringRef ReplacementText) {
@@ -123,9 +123,9 @@ void Replacement::setFromSourceLocation(const SourceManager &Sources,
const std::pair<FileID, unsigned> DecomposedLocation =
Sources.getDecomposedLoc(Start);
const FileEntry *Entry = Sources.getFileEntryForID(DecomposedLocation.first);
- this->FilePath = Entry ? Entry->getName() : InvalidLocation;
+ this->FilePath = std::string(Entry ? Entry->getName() : InvalidLocation);
this->ReplacementRange = Range(DecomposedLocation.second, Length);
- this->ReplacementText = ReplacementText;
+ this->ReplacementText = std::string(ReplacementText);
}
// FIXME: This should go into the Lexer, but we need to figure out how
@@ -367,8 +367,8 @@ class MergedReplacement {
public:
MergedReplacement(const Replacement &R, bool MergeSecond, int D)
: MergeSecond(MergeSecond), Delta(D), FilePath(R.getFilePath()),
- Offset(R.getOffset() + (MergeSecond ? 0 : Delta)), Length(R.getLength()),
- Text(R.getReplacementText()) {
+ Offset(R.getOffset() + (MergeSecond ? 0 : Delta)),
+ Length(R.getLength()), Text(std::string(R.getReplacementText())) {
Delta += MergeSecond ? 0 : Text.size() - Length;
DeltaFirst = MergeSecond ? Text.size() - Length : 0;
}
diff --git a/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp b/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
index b4d5a29ca695..b1b87e7fa573 100644
--- a/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
+++ b/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
@@ -106,7 +106,8 @@ DependencyScanningFilesystemSharedCache::
// sharding gives a performance edge by reducing the lock contention.
// FIXME: A better heuristic might also consider the OS to account for
// the different cost of lock contention on different OSes.
- NumShards = std::max(2u, llvm::hardware_concurrency() / 4);
+ NumShards =
+ std::max(2u, llvm::hardware_concurrency().compute_thread_count() / 4);
CacheShards = std::make_unique<CacheShard[]>(NumShards);
}
diff --git a/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp b/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp
index f643c538f8f9..16040c2f4626 100644
--- a/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp
+++ b/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp
@@ -8,24 +8,25 @@
#include "clang/Tooling/DependencyScanning/DependencyScanningTool.h"
#include "clang/Frontend/Utils.h"
-#include "llvm/Support/JSON.h"
-
-static llvm::json::Array toJSONSorted(const llvm::StringSet<> &Set) {
- std::vector<llvm::StringRef> Strings;
- for (auto &&I : Set)
- Strings.push_back(I.getKey());
- std::sort(Strings.begin(), Strings.end());
- return llvm::json::Array(Strings);
-}
namespace clang{
namespace tooling{
namespace dependencies{
+std::vector<std::string> FullDependencies::getAdditionalCommandLine(
+ std::function<StringRef(ClangModuleDep)> LookupPCMPath,
+ std::function<const ModuleDeps &(ClangModuleDep)> LookupModuleDeps) const {
+ std::vector<std::string> Ret = AdditionalNonPathCommandLine;
+
+ dependencies::detail::appendCommonModuleArguments(
+ ClangModuleDeps, LookupPCMPath, LookupModuleDeps, Ret);
+
+ return Ret;
+}
+
DependencyScanningTool::DependencyScanningTool(
DependencyScanningService &Service)
- : Format(Service.getFormat()), Worker(Service) {
-}
+ : Worker(Service) {}
llvm::Expected<std::string> DependencyScanningTool::getDependencyFile(
const tooling::CompilationDatabase &Compilations, StringRef CWD) {
@@ -36,7 +37,7 @@ llvm::Expected<std::string> DependencyScanningTool::getDependencyFile(
StringRef File) override {
if (!this->Opts)
this->Opts = std::make_unique<DependencyOutputOptions>(Opts);
- Dependencies.push_back(File);
+ Dependencies.push_back(std::string(File));
}
void handleModuleDependency(ModuleDeps MD) override {
@@ -75,11 +76,36 @@ llvm::Expected<std::string> DependencyScanningTool::getDependencyFile(
std::vector<std::string> Dependencies;
};
+ // We expect a single command here because if a source file occurs multiple
+ // times in the original CDB, then `computeDependencies` would run the
+ // `DependencyScanningAction` once for every time the input occured in the
+ // CDB. Instead we split up the CDB into single command chunks to avoid this
+ // behavior.
+ assert(Compilations.getAllCompileCommands().size() == 1 &&
+ "Expected a compilation database with a single command!");
+ std::string Input = Compilations.getAllCompileCommands().front().Filename;
+
+ MakeDependencyPrinterConsumer Consumer;
+ auto Result = Worker.computeDependencies(Input, CWD, Compilations, Consumer);
+ if (Result)
+ return std::move(Result);
+ std::string Output;
+ Consumer.printDependencies(Output);
+ return Output;
+}
+
+llvm::Expected<FullDependenciesResult>
+DependencyScanningTool::getFullDependencies(
+ const tooling::CompilationDatabase &Compilations, StringRef CWD,
+ const llvm::StringSet<> &AlreadySeen) {
class FullDependencyPrinterConsumer : public DependencyConsumer {
public:
+ FullDependencyPrinterConsumer(const llvm::StringSet<> &AlreadySeen)
+ : AlreadySeen(AlreadySeen) {}
+
void handleFileDependency(const DependencyOutputOptions &Opts,
StringRef File) override {
- Dependencies.push_back(File);
+ Dependencies.push_back(std::string(File));
}
void handleModuleDependency(ModuleDeps MD) override {
@@ -90,55 +116,41 @@ llvm::Expected<std::string> DependencyScanningTool::getDependencyFile(
ContextHash = std::move(Hash);
}
- void printDependencies(std::string &S, StringRef MainFile) {
- // Sort the modules by name to get a deterministic order.
- std::vector<StringRef> Modules;
- for (auto &&Dep : ClangModuleDeps)
- Modules.push_back(Dep.first);
- std::sort(Modules.begin(), Modules.end());
+ FullDependenciesResult getFullDependencies() const {
+ FullDependencies FD;
- llvm::raw_string_ostream OS(S);
+ FD.ContextHash = std::move(ContextHash);
- using namespace llvm::json;
+ FD.FileDeps.assign(Dependencies.begin(), Dependencies.end());
- Array Imports;
- for (auto &&ModName : Modules) {
- auto &MD = ClangModuleDeps[ModName];
+ for (auto &&M : ClangModuleDeps) {
+ auto &MD = M.second;
if (MD.ImportedByMainFile)
- Imports.push_back(MD.ModuleName);
+ FD.ClangModuleDeps.push_back({MD.ModuleName, ContextHash});
}
- Array Mods;
- for (auto &&ModName : Modules) {
- auto &MD = ClangModuleDeps[ModName];
- Object Mod{
- {"name", MD.ModuleName},
- {"file-deps", toJSONSorted(MD.FileDeps)},
- {"clang-module-deps", toJSONSorted(MD.ClangModuleDeps)},
- {"clang-modulemap-file", MD.ClangModuleMapFile},
- };
- Mods.push_back(std::move(Mod));
- }
+ FullDependenciesResult FDR;
- Object O{
- {"input-file", MainFile},
- {"clang-context-hash", ContextHash},
- {"file-deps", Dependencies},
- {"clang-module-deps", std::move(Imports)},
- {"clang-modules", std::move(Mods)},
- };
+ for (auto &&M : ClangModuleDeps) {
+ // TODO: Avoid handleModuleDependency even being called for modules
+ // we've already seen.
+ if (AlreadySeen.count(M.first))
+ continue;
+ FDR.DiscoveredModules.push_back(std::move(M.second));
+ }
- S = llvm::formatv("{0:2},\n", Value(std::move(O))).str();
- return;
+ FDR.FullDeps = std::move(FD);
+ return FDR;
}
private:
std::vector<std::string> Dependencies;
std::unordered_map<std::string, ModuleDeps> ClangModuleDeps;
std::string ContextHash;
+ std::vector<std::string> OutputPaths;
+ const llvm::StringSet<> &AlreadySeen;
};
-
// We expect a single command here because if a source file occurs multiple
// times in the original CDB, then `computeDependencies` would run the
// `DependencyScanningAction` once for every time the input occured in the
@@ -147,26 +159,13 @@ llvm::Expected<std::string> DependencyScanningTool::getDependencyFile(
assert(Compilations.getAllCompileCommands().size() == 1 &&
"Expected a compilation database with a single command!");
std::string Input = Compilations.getAllCompileCommands().front().Filename;
-
- if (Format == ScanningOutputFormat::Make) {
- MakeDependencyPrinterConsumer Consumer;
- auto Result =
- Worker.computeDependencies(Input, CWD, Compilations, Consumer);
- if (Result)
- return std::move(Result);
- std::string Output;
- Consumer.printDependencies(Output);
- return Output;
- } else {
- FullDependencyPrinterConsumer Consumer;
- auto Result =
- Worker.computeDependencies(Input, CWD, Compilations, Consumer);
- if (Result)
- return std::move(Result);
- std::string Output;
- Consumer.printDependencies(Output, Input);
- return Output;
- }
+
+ FullDependencyPrinterConsumer Consumer(AlreadySeen);
+ llvm::Error Result =
+ Worker.computeDependencies(Input, CWD, Compilations, Consumer);
+ if (Result)
+ return std::move(Result);
+ return Consumer.getFullDependencies();
}
} // end namespace dependencies
diff --git a/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp b/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
index edf2cf8bd70f..32bbc578d2db 100644
--- a/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
+++ b/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
@@ -118,7 +118,7 @@ public:
.ExcludedConditionalDirectiveSkipMappings = PPSkipMappings;
}
- FileMgr->getFileSystemOpts().WorkingDir = WorkingDirectory;
+ FileMgr->getFileSystemOpts().WorkingDir = std::string(WorkingDirectory);
Compiler.setFileManager(FileMgr);
Compiler.createSourceManager(*FileMgr);
@@ -142,12 +142,17 @@ public:
Consumer));
break;
case ScanningOutputFormat::Full:
- Compiler.addDependencyCollector(
- std::make_shared<ModuleDepCollector>(Compiler, Consumer));
+ Compiler.addDependencyCollector(std::make_shared<ModuleDepCollector>(
+ std::move(Opts), Compiler, Consumer));
break;
}
- Consumer.handleContextHash(Compiler.getInvocation().getModuleHash());
+ // Consider different header search and diagnostic options to create
+ // different modules. This avoids the unsound aliasing of module PCMs.
+ //
+ // TODO: Implement diagnostic bucketing and header search pruning to reduce
+ // the impact of strict context hashing.
+ Compiler.getHeaderSearchOpts().ModulesStrictContextHash = true;
auto Action = std::make_unique<PreprocessOnlyAction>();
const bool Result = Compiler.ExecuteAction(*Action);
diff --git a/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp b/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
index 422940047f2d..4f6eff799f22 100644
--- a/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
+++ b/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
@@ -17,12 +17,60 @@ using namespace clang;
using namespace tooling;
using namespace dependencies;
+std::vector<std::string> ModuleDeps::getFullCommandLine(
+ std::function<StringRef(ClangModuleDep)> LookupPCMPath,
+ std::function<const ModuleDeps &(ClangModuleDep)> LookupModuleDeps) const {
+ std::vector<std::string> Ret = NonPathCommandLine;
+
+ // TODO: Build full command line. That also means capturing the original
+ // command line into NonPathCommandLine.
+
+ dependencies::detail::appendCommonModuleArguments(
+ ClangModuleDeps, LookupPCMPath, LookupModuleDeps, Ret);
+
+ return Ret;
+}
+
+void dependencies::detail::appendCommonModuleArguments(
+ llvm::ArrayRef<ClangModuleDep> Modules,
+ std::function<StringRef(ClangModuleDep)> LookupPCMPath,
+ std::function<const ModuleDeps &(ClangModuleDep)> LookupModuleDeps,
+ std::vector<std::string> &Result) {
+ llvm::StringSet<> AlreadyAdded;
+
+ std::function<void(llvm::ArrayRef<ClangModuleDep>)> AddArgs =
+ [&](llvm::ArrayRef<ClangModuleDep> Modules) {
+ for (const ClangModuleDep &CMD : Modules) {
+ if (!AlreadyAdded.insert(CMD.ModuleName + CMD.ContextHash).second)
+ continue;
+ const ModuleDeps &M = LookupModuleDeps(CMD);
+ // Depth first traversal.
+ AddArgs(M.ClangModuleDeps);
+ Result.push_back(("-fmodule-file=" + LookupPCMPath(CMD)).str());
+ if (!M.ClangModuleMapFile.empty()) {
+ Result.push_back("-fmodule-map-file=" + M.ClangModuleMapFile);
+ }
+ }
+ };
+
+ Result.push_back("-fno-implicit-modules");
+ Result.push_back("-fno-implicit-module-maps");
+ AddArgs(Modules);
+}
+
void ModuleDepCollectorPP::FileChanged(SourceLocation Loc,
FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType,
FileID PrevFID) {
if (Reason != PPCallbacks::EnterFile)
return;
+
+ // This has to be delayed as the context hash can change at the start of
+ // `CompilerInstance::ExecuteAction`.
+ if (MDC.ContextHash.empty()) {
+ MDC.ContextHash = Instance.getInvocation().getModuleHash();
+ MDC.Consumer.handleContextHash(MDC.ContextHash);
+ }
SourceManager &SM = Instance.getSourceManager();
@@ -37,7 +85,7 @@ void ModuleDepCollectorPP::FileChanged(SourceLocation Loc,
StringRef FileName =
llvm::sys::path::remove_leading_dotslash(File->getName());
- MDC.MainDeps.push_back(FileName);
+ MDC.MainDeps.push_back(std::string(FileName));
}
void ModuleDepCollectorPP::InclusionDirective(
@@ -48,9 +96,18 @@ void ModuleDepCollectorPP::InclusionDirective(
if (!File && !Imported) {
// This is a non-modular include that HeaderSearch failed to find. Add it
// here as `FileChanged` will never see it.
- MDC.MainDeps.push_back(FileName);
+ MDC.MainDeps.push_back(std::string(FileName));
}
+ handleImport(Imported);
+}
+void ModuleDepCollectorPP::moduleImport(SourceLocation ImportLoc,
+ ModuleIdPath Path,
+ const Module *Imported) {
+ handleImport(Imported);
+}
+
+void ModuleDepCollectorPP::handleImport(const Module *Imported) {
if (!Imported)
return;
@@ -61,8 +118,8 @@ void ModuleDepCollectorPP::InclusionDirective(
void ModuleDepCollectorPP::EndOfMainFile() {
FileID MainFileID = Instance.getSourceManager().getMainFileID();
- MDC.MainFile =
- Instance.getSourceManager().getFileEntryForID(MainFileID)->getName();
+ MDC.MainFile = std::string(
+ Instance.getSourceManager().getFileEntryForID(MainFileID)->getName());
for (const Module *M : DirectDeps) {
handleTopLevelModule(M);
@@ -71,9 +128,8 @@ void ModuleDepCollectorPP::EndOfMainFile() {
for (auto &&I : MDC.Deps)
MDC.Consumer.handleModuleDependency(I.second);
- DependencyOutputOptions Opts;
for (auto &&I : MDC.MainDeps)
- MDC.Consumer.handleFileDependency(Opts, I);
+ MDC.Consumer.handleFileDependency(*MDC.Opts, I);
}
void ModuleDepCollectorPP::handleTopLevelModule(const Module *M) {
@@ -92,9 +148,9 @@ void ModuleDepCollectorPP::handleTopLevelModule(const Module *M) {
.getModuleMap()
.getContainingModuleMapFile(M);
- MD.ClangModuleMapFile = ModuleMap ? ModuleMap->getName() : "";
+ MD.ClangModuleMapFile = std::string(ModuleMap ? ModuleMap->getName() : "");
MD.ModuleName = M->getFullModuleName();
- MD.ModulePCMPath = M->getASTFile()->getName();
+ MD.ImplicitModulePCMPath = std::string(M->getASTFile()->getName());
MD.ContextHash = MDC.ContextHash;
serialization::ModuleFile *MF =
MDC.Instance.getASTReader()->getModuleManager().lookup(M->getASTFile());
@@ -103,30 +159,37 @@ void ModuleDepCollectorPP::handleTopLevelModule(const Module *M) {
MD.FileDeps.insert(IF.getFile()->getName());
});
- addAllSubmoduleDeps(M, MD);
+ llvm::DenseSet<const Module *> AddedModules;
+ addAllSubmoduleDeps(M, MD, AddedModules);
}
-void ModuleDepCollectorPP::addAllSubmoduleDeps(const Module *M,
- ModuleDeps &MD) {
- addModuleDep(M, MD);
+void ModuleDepCollectorPP::addAllSubmoduleDeps(
+ const Module *M, ModuleDeps &MD,
+ llvm::DenseSet<const Module *> &AddedModules) {
+ addModuleDep(M, MD, AddedModules);
for (const Module *SubM : M->submodules())
- addAllSubmoduleDeps(SubM, MD);
+ addAllSubmoduleDeps(SubM, MD, AddedModules);
}
-void ModuleDepCollectorPP::addModuleDep(const Module *M, ModuleDeps &MD) {
+void ModuleDepCollectorPP::addModuleDep(
+ const Module *M, ModuleDeps &MD,
+ llvm::DenseSet<const Module *> &AddedModules) {
for (const Module *Import : M->Imports) {
if (Import->getTopLevelModule() != M->getTopLevelModule()) {
- MD.ClangModuleDeps.insert(Import->getTopLevelModuleName());
+ if (AddedModules.insert(Import->getTopLevelModule()).second)
+ MD.ClangModuleDeps.push_back(
+ {std::string(Import->getTopLevelModuleName()),
+ Instance.getInvocation().getModuleHash()});
handleTopLevelModule(Import->getTopLevelModule());
}
}
}
-ModuleDepCollector::ModuleDepCollector(CompilerInstance &I,
- DependencyConsumer &C)
- : Instance(I), Consumer(C), ContextHash(I.getInvocation().getModuleHash()) {
-}
+ModuleDepCollector::ModuleDepCollector(
+ std::unique_ptr<DependencyOutputOptions> Opts, CompilerInstance &I,
+ DependencyConsumer &C)
+ : Instance(I), Consumer(C), Opts(std::move(Opts)) {}
void ModuleDepCollector::attachToPreprocessor(Preprocessor &PP) {
PP.addPPCallbacks(std::make_unique<ModuleDepCollectorPP>(Instance, *this));
diff --git a/clang/lib/Tooling/Execution.cpp b/clang/lib/Tooling/Execution.cpp
index c39a4fcdac82..247b260b97ed 100644
--- a/clang/lib/Tooling/Execution.cpp
+++ b/clang/lib/Tooling/Execution.cpp
@@ -63,18 +63,16 @@ createExecutorFromCommandLineArgsImpl(int &argc, const char **argv,
/*Overview=*/Overview);
if (!OptionsParser)
return OptionsParser.takeError();
- for (auto I = ToolExecutorPluginRegistry::begin(),
- E = ToolExecutorPluginRegistry::end();
- I != E; ++I) {
- if (I->getName() != ExecutorName) {
+ for (const auto &TEPlugin : ToolExecutorPluginRegistry::entries()) {
+ if (TEPlugin.getName() != ExecutorName) {
continue;
}
- std::unique_ptr<ToolExecutorPlugin> Plugin(I->instantiate());
+ std::unique_ptr<ToolExecutorPlugin> Plugin(TEPlugin.instantiate());
llvm::Expected<std::unique_ptr<ToolExecutor>> Executor =
Plugin->create(*OptionsParser);
if (!Executor) {
return llvm::make_error<llvm::StringError>(
- llvm::Twine("Failed to create '") + I->getName() +
+ llvm::Twine("Failed to create '") + TEPlugin.getName() +
"': " + llvm::toString(Executor.takeError()) + "\n",
llvm::inconvertibleErrorCode());
}
diff --git a/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp b/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp
index 99298316718b..f1ab2aed54c0 100644
--- a/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp
+++ b/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp
@@ -12,6 +12,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/StringSaver.h"
diff --git a/clang/lib/Tooling/FileMatchTrie.cpp b/clang/lib/Tooling/FileMatchTrie.cpp
index 7df5a16fd88f..88dea6bb6c9f 100644
--- a/clang/lib/Tooling/FileMatchTrie.cpp
+++ b/clang/lib/Tooling/FileMatchTrie.cpp
@@ -63,7 +63,7 @@ public:
return;
if (Path.empty()) {
// This is an empty leaf. Store NewPath and return.
- Path = NewPath;
+ Path = std::string(NewPath);
return;
}
if (Children.empty()) {
diff --git a/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp b/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
index 37a0816c803e..681fcc5c762a 100644
--- a/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
+++ b/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/Inclusions/HeaderIncludes.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
#include "llvm/ADT/Optional.h"
@@ -319,7 +320,7 @@ HeaderIncludes::insert(llvm::StringRef IncludeName, bool IsAngled) const {
(!IsAngled && StringRef(Inc.Name).startswith("\"")))
return llvm::None;
std::string Quoted =
- llvm::formatv(IsAngled ? "<{0}>" : "\"{0}\"", IncludeName);
+ std::string(llvm::formatv(IsAngled ? "<{0}>" : "\"{0}\"", IncludeName));
StringRef QuotedName = Quoted;
int Priority = Categories.getIncludePriority(
QuotedName, /*CheckMainHeader=*/FirstIncludeOffset < 0);
@@ -336,7 +337,8 @@ HeaderIncludes::insert(llvm::StringRef IncludeName, bool IsAngled) const {
}
}
assert(InsertOffset <= Code.size());
- std::string NewInclude = llvm::formatv("#include {0}\n", QuotedName);
+ std::string NewInclude =
+ std::string(llvm::formatv("#include {0}\n", QuotedName));
// When inserting headers at end of the code, also append '\n' to the code
// if it does not end with '\n'.
// FIXME: when inserting multiple #includes at the end of code, only one
diff --git a/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp b/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp
index 2cc819a498c6..fa61560e5123 100644
--- a/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp
+++ b/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp
@@ -114,6 +114,9 @@ static types::ID foldType(types::ID Lang) {
case types::TY_ObjCXX:
case types::TY_ObjCXXHeader:
return types::TY_ObjCXX;
+ case types::TY_CUDA:
+ case types::TY_CUDA_DEVICE:
+ return types::TY_CUDA;
default:
return types::TY_INVALID;
}
@@ -203,7 +206,7 @@ struct TransferableCommand {
// Produce a CompileCommand for \p filename, based on this one.
CompileCommand transferTo(StringRef Filename) const {
CompileCommand Result = Cmd;
- Result.Filename = Filename;
+ Result.Filename = std::string(Filename);
bool TypeCertain;
auto TargetType = guessType(Filename, &TypeCertain);
// If the filename doesn't determine the language (.h), transfer with -x.
@@ -217,7 +220,7 @@ struct TransferableCommand {
if (ClangCLMode) {
const StringRef Flag = toCLFlag(TargetType);
if (!Flag.empty())
- Result.CommandLine.push_back(Flag);
+ Result.CommandLine.push_back(std::string(Flag));
} else {
Result.CommandLine.push_back("-x");
Result.CommandLine.push_back(types::getTypeName(TargetType));
@@ -230,7 +233,7 @@ struct TransferableCommand {
llvm::Twine(ClangCLMode ? "/std:" : "-std=") +
LangStandard::getLangStandardForKind(Std).getName()).str());
}
- Result.CommandLine.push_back(Filename);
+ Result.CommandLine.push_back(std::string(Filename));
Result.Heuristic = "inferred from " + Cmd.Filename;
return Result;
}
diff --git a/clang/lib/Tooling/JSONCompilationDatabase.cpp b/clang/lib/Tooling/JSONCompilationDatabase.cpp
index 04dd4dbf6248..4af361f538cb 100644
--- a/clang/lib/Tooling/JSONCompilationDatabase.cpp
+++ b/clang/lib/Tooling/JSONCompilationDatabase.cpp
@@ -305,7 +305,7 @@ nodeToCommandLine(JSONCommandLineSyntax Syntax,
Arguments = unescapeCommandLine(Syntax, Nodes[0]->getValue(Storage));
else
for (const auto *Node : Nodes)
- Arguments.push_back(Node->getValue(Storage));
+ Arguments.push_back(std::string(Node->getValue(Storage)));
// There may be multiple wrappers: using distcc and ccache together is common.
while (unwrapCommand(Arguments))
;
diff --git a/clang/lib/Tooling/Refactoring/ASTSelection.cpp b/clang/lib/Tooling/Refactoring/ASTSelection.cpp
index 64e57af59011..af1eb491a20a 100644
--- a/clang/lib/Tooling/Refactoring/ASTSelection.cpp
+++ b/clang/lib/Tooling/Refactoring/ASTSelection.cpp
@@ -13,7 +13,6 @@
using namespace clang;
using namespace tooling;
-using ast_type_traits::DynTypedNode;
namespace {
diff --git a/clang/lib/Tooling/Refactoring/AtomicChange.cpp b/clang/lib/Tooling/Refactoring/AtomicChange.cpp
index 4cf63306d262..069e9c1eb36e 100644
--- a/clang/lib/Tooling/Refactoring/AtomicChange.cpp
+++ b/clang/lib/Tooling/Refactoring/AtomicChange.cpp
@@ -200,10 +200,16 @@ AtomicChange::AtomicChange(const SourceManager &SM,
FullKeyPosition.getSpellingLoc().getDecomposedLoc();
const FileEntry *FE = SM.getFileEntryForID(FileIDAndOffset.first);
assert(FE && "Cannot create AtomicChange with invalid location.");
- FilePath = FE->getName();
+ FilePath = std::string(FE->getName());
Key = FilePath + ":" + std::to_string(FileIDAndOffset.second);
}
+AtomicChange::AtomicChange(const SourceManager &SM, SourceLocation KeyPosition,
+ llvm::Any M)
+ : AtomicChange(SM, KeyPosition) {
+ Metadata = std::move(M);
+}
+
AtomicChange::AtomicChange(std::string Key, std::string FilePath,
std::string Error,
std::vector<std::string> InsertedHeaders,
@@ -284,11 +290,11 @@ llvm::Error AtomicChange::insert(const SourceManager &SM, SourceLocation Loc,
}
void AtomicChange::addHeader(llvm::StringRef Header) {
- InsertedHeaders.push_back(Header);
+ InsertedHeaders.push_back(std::string(Header));
}
void AtomicChange::removeHeader(llvm::StringRef Header) {
- RemovedHeaders.push_back(Header);
+ RemovedHeaders.push_back(std::string(Header));
}
llvm::Expected<std::string>
diff --git a/clang/lib/Tooling/Refactoring/Rename/RenamingAction.cpp b/clang/lib/Tooling/Refactoring/Rename/RenamingAction.cpp
index b0634912e3fc..72598601d47d 100644
--- a/clang/lib/Tooling/Refactoring/Rename/RenamingAction.cpp
+++ b/clang/lib/Tooling/Refactoring/Rename/RenamingAction.cpp
@@ -170,7 +170,8 @@ static void convertChangesToFileReplacements(
std::map<std::string, tooling::Replacements> *FileToReplaces) {
for (const auto &AtomicChange : AtomicChanges) {
for (const auto &Replace : AtomicChange.getReplacements()) {
- llvm::Error Err = (*FileToReplaces)[Replace.getFilePath()].add(Replace);
+ llvm::Error Err =
+ (*FileToReplaces)[std::string(Replace.getFilePath())].add(Replace);
if (Err) {
llvm::errs() << "Renaming failed in " << Replace.getFilePath() << "! "
<< llvm::toString(std::move(Err)) << "\n";
diff --git a/clang/lib/Tooling/Refactoring/Rename/USRFinder.cpp b/clang/lib/Tooling/Refactoring/Rename/USRFinder.cpp
index 55111202ac88..23f567f1c9ec 100644
--- a/clang/lib/Tooling/Refactoring/Rename/USRFinder.cpp
+++ b/clang/lib/Tooling/Refactoring/Rename/USRFinder.cpp
@@ -15,6 +15,7 @@
#include "clang/AST/AST.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Index/USRGeneration.h"
#include "clang/Lex/Lexer.h"
#include "clang/Tooling/Refactoring/RecursiveSymbolVisitor.h"
diff --git a/clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp b/clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp
index d966a5ef23c2..43dc32e158d3 100644
--- a/clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp
+++ b/clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp
@@ -126,15 +126,24 @@ private:
addUSRsOfCtorDtors(TemplateDecl->getTemplatedDecl());
}
- void addUSRsOfCtorDtors(const CXXRecordDecl *RecordDecl) {
- RecordDecl = RecordDecl->getDefinition();
+ void addUSRsOfCtorDtors(const CXXRecordDecl *RD) {
+ const auto* RecordDecl = RD->getDefinition();
// Skip if the CXXRecordDecl doesn't have definition.
- if (!RecordDecl)
+ if (!RecordDecl) {
+ USRSet.insert(getUSRForDecl(RD));
return;
+ }
for (const auto *CtorDecl : RecordDecl->ctors())
USRSet.insert(getUSRForDecl(CtorDecl));
+ // Add template constructor decls, they are not in ctors() unfortunately.
+ if (RecordDecl->hasUserDeclaredConstructor())
+ for (const auto *D : RecordDecl->decls())
+ if (const auto *FTD = dyn_cast<FunctionTemplateDecl>(D))
+ if (const auto *Ctor =
+ dyn_cast<CXXConstructorDecl>(FTD->getTemplatedDecl()))
+ USRSet.insert(getUSRForDecl(Ctor));
USRSet.insert(getUSRForDecl(RecordDecl->getDestructor()));
USRSet.insert(getUSRForDecl(RecordDecl));
diff --git a/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp b/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
index 408e184f5bf5..dfc319dd0639 100644
--- a/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
+++ b/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
@@ -15,6 +15,7 @@
#include "clang/Tooling/Refactoring/Rename/USRLocFinder.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
@@ -426,8 +427,7 @@ public:
StartLoc,
EndLoc,
TemplateSpecType->getTemplateName().getAsTemplateDecl(),
- getClosestAncestorDecl(
- ast_type_traits::DynTypedNode::create(TargetLoc)),
+ getClosestAncestorDecl(DynTypedNode::create(TargetLoc)),
GetNestedNameForType(TargetLoc),
/*IgnorePrefixQualifers=*/false};
RenameInfos.push_back(Info);
@@ -466,8 +466,7 @@ private:
// FIXME: figure out how to handle it when there are multiple parents.
if (Parents.size() != 1)
return nullptr;
- if (ast_type_traits::ASTNodeKind::getFromNodeKind<Decl>().isBaseOf(
- Parents[0].getNodeKind()))
+ if (ASTNodeKind::getFromNodeKind<Decl>().isBaseOf(Parents[0].getNodeKind()))
return Parents[0].template get<Decl>();
return getClosestAncestorDecl(Parents[0]);
}
@@ -536,7 +535,7 @@ createRenameAtomicChanges(llvm::ArrayRef<std::string> USRs,
// Get the name without prefix qualifiers from NewName.
size_t LastColonPos = NewName.find_last_of(':');
if (LastColonPos != std::string::npos)
- ReplacedName = NewName.substr(LastColonPos + 1);
+ ReplacedName = std::string(NewName.substr(LastColonPos + 1));
} else {
if (RenameInfo.FromDecl && RenameInfo.Context) {
if (!llvm::isa<clang::TranslationUnitDecl>(
diff --git a/clang/lib/Tooling/RefactoringCallbacks.cpp b/clang/lib/Tooling/RefactoringCallbacks.cpp
index 919b83beb357..e3fc91afeb59 100644
--- a/clang/lib/Tooling/RefactoringCallbacks.cpp
+++ b/clang/lib/Tooling/RefactoringCallbacks.cpp
@@ -50,8 +50,8 @@ public:
for (const auto &Callback : Refactoring.Callbacks) {
for (const auto &Replacement : Callback->getReplacements()) {
llvm::Error Err =
- Refactoring.FileToReplaces[Replacement.getFilePath()].add(
- Replacement);
+ Refactoring.FileToReplaces[std::string(Replacement.getFilePath())]
+ .add(Replacement);
if (Err) {
llvm::errs() << "Skipping replacement " << Replacement.toString()
<< " due to this error:\n"
@@ -83,7 +83,7 @@ static Replacement replaceStmtWithStmt(SourceManager &Sources, const Stmt &From,
}
ReplaceStmtWithText::ReplaceStmtWithText(StringRef FromId, StringRef ToText)
- : FromId(FromId), ToText(ToText) {}
+ : FromId(std::string(FromId)), ToText(std::string(ToText)) {}
void ReplaceStmtWithText::run(
const ast_matchers::MatchFinder::MatchResult &Result) {
@@ -101,7 +101,7 @@ void ReplaceStmtWithText::run(
}
ReplaceStmtWithStmt::ReplaceStmtWithStmt(StringRef FromId, StringRef ToId)
- : FromId(FromId), ToId(ToId) {}
+ : FromId(std::string(FromId)), ToId(std::string(ToId)) {}
void ReplaceStmtWithStmt::run(
const ast_matchers::MatchFinder::MatchResult &Result) {
@@ -121,7 +121,7 @@ void ReplaceStmtWithStmt::run(
ReplaceIfStmtWithItsBody::ReplaceIfStmtWithItsBody(StringRef Id,
bool PickTrueBranch)
- : Id(Id), PickTrueBranch(PickTrueBranch) {}
+ : Id(std::string(Id)), PickTrueBranch(PickTrueBranch) {}
void ReplaceIfStmtWithItsBody::run(
const ast_matchers::MatchFinder::MatchResult &Result) {
@@ -153,7 +153,7 @@ void ReplaceIfStmtWithItsBody::run(
ReplaceNodeWithTemplate::ReplaceNodeWithTemplate(
llvm::StringRef FromId, std::vector<TemplateElement> Template)
- : FromId(FromId), Template(std::move(Template)) {}
+ : FromId(std::string(FromId)), Template(std::move(Template)) {}
llvm::Expected<std::unique_ptr<ReplaceNodeWithTemplate>>
ReplaceNodeWithTemplate::create(StringRef FromId, StringRef ToTemplate) {
@@ -172,8 +172,8 @@ ReplaceNodeWithTemplate::create(StringRef FromId, StringRef ToTemplate) {
ToTemplate.substr(Index),
llvm::inconvertibleErrorCode());
}
- std::string SourceNodeName =
- ToTemplate.substr(Index + 2, EndOfIdentifier - Index - 2);
+ std::string SourceNodeName = std::string(
+ ToTemplate.substr(Index + 2, EndOfIdentifier - Index - 2));
ParsedTemplate.push_back(
TemplateElement{TemplateElement::Identifier, SourceNodeName});
Index = EndOfIdentifier + 1;
@@ -185,9 +185,9 @@ ReplaceNodeWithTemplate::create(StringRef FromId, StringRef ToTemplate) {
}
} else {
size_t NextIndex = ToTemplate.find('$', Index + 1);
- ParsedTemplate.push_back(
- TemplateElement{TemplateElement::Literal,
- ToTemplate.substr(Index, NextIndex - Index)});
+ ParsedTemplate.push_back(TemplateElement{
+ TemplateElement::Literal,
+ std::string(ToTemplate.substr(Index, NextIndex - Index))});
Index = NextIndex;
}
}
diff --git a/clang/lib/Tooling/Syntax/BuildTree.cpp b/clang/lib/Tooling/Syntax/BuildTree.cpp
index aa8844771d37..1f192180ec45 100644
--- a/clang/lib/Tooling/Syntax/BuildTree.cpp
+++ b/clang/lib/Tooling/Syntax/BuildTree.cpp
@@ -6,20 +6,32 @@
//
//===----------------------------------------------------------------------===//
#include "clang/Tooling/Syntax/BuildTree.h"
+#include "clang/AST/ASTFwd.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/Stmt.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/TypeLocVisitor.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TokenKinds.h"
#include "clang/Lex/Lexer.h"
+#include "clang/Lex/LiteralSupport.h"
#include "clang/Tooling/Syntax/Nodes.h"
#include "clang/Tooling/Syntax/Tokens.h"
#include "clang/Tooling/Syntax/Tree.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
@@ -27,6 +39,7 @@
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
+#include <cstddef>
#include <map>
using namespace clang;
@@ -34,6 +47,207 @@ using namespace clang;
LLVM_ATTRIBUTE_UNUSED
static bool isImplicitExpr(clang::Expr *E) { return E->IgnoreImplicit() != E; }
+namespace {
+/// Get start location of the Declarator from the TypeLoc.
+/// E.g.:
+/// loc of `(` in `int (a)`
+/// loc of `*` in `int *(a)`
+/// loc of the first `(` in `int (*a)(int)`
+/// loc of the `*` in `int *(a)(int)`
+/// loc of the first `*` in `const int *const *volatile a;`
+///
+/// It is non-trivial to get the start location because TypeLocs are stored
+/// inside out. In the example above `*volatile` is the TypeLoc returned
+/// by `Decl.getTypeSourceInfo()`, and `*const` is what `.getPointeeLoc()`
+/// returns.
+struct GetStartLoc : TypeLocVisitor<GetStartLoc, SourceLocation> {
+ SourceLocation VisitParenTypeLoc(ParenTypeLoc T) {
+ auto L = Visit(T.getInnerLoc());
+ if (L.isValid())
+ return L;
+ return T.getLParenLoc();
+ }
+
+ // Types spelled in the prefix part of the declarator.
+ SourceLocation VisitPointerTypeLoc(PointerTypeLoc T) {
+ return HandlePointer(T);
+ }
+
+ SourceLocation VisitMemberPointerTypeLoc(MemberPointerTypeLoc T) {
+ return HandlePointer(T);
+ }
+
+ SourceLocation VisitBlockPointerTypeLoc(BlockPointerTypeLoc T) {
+ return HandlePointer(T);
+ }
+
+ SourceLocation VisitReferenceTypeLoc(ReferenceTypeLoc T) {
+ return HandlePointer(T);
+ }
+
+ SourceLocation VisitObjCObjectPointerTypeLoc(ObjCObjectPointerTypeLoc T) {
+ return HandlePointer(T);
+ }
+
+ // All other cases are not important, as they are either part of declaration
+ // specifiers (e.g. inheritors of TypeSpecTypeLoc) or introduce modifiers on
+ // existing declarators (e.g. QualifiedTypeLoc). They cannot start the
+ // declarator themselves, but their underlying type can.
+ SourceLocation VisitTypeLoc(TypeLoc T) {
+ auto N = T.getNextTypeLoc();
+ if (!N)
+ return SourceLocation();
+ return Visit(N);
+ }
+
+ SourceLocation VisitFunctionProtoTypeLoc(FunctionProtoTypeLoc T) {
+ if (T.getTypePtr()->hasTrailingReturn())
+ return SourceLocation(); // avoid recursing into the suffix of declarator.
+ return VisitTypeLoc(T);
+ }
+
+private:
+ template <class PtrLoc> SourceLocation HandlePointer(PtrLoc T) {
+ auto L = Visit(T.getPointeeLoc());
+ if (L.isValid())
+ return L;
+ return T.getLocalSourceRange().getBegin();
+ }
+};
+} // namespace
+
+static syntax::NodeKind getOperatorNodeKind(const CXXOperatorCallExpr &E) {
+ switch (E.getOperator()) {
+ // Comparison
+ case OO_EqualEqual:
+ case OO_ExclaimEqual:
+ case OO_Greater:
+ case OO_GreaterEqual:
+ case OO_Less:
+ case OO_LessEqual:
+ case OO_Spaceship:
+ // Assignment
+ case OO_Equal:
+ case OO_SlashEqual:
+ case OO_PercentEqual:
+ case OO_CaretEqual:
+ case OO_PipeEqual:
+ case OO_LessLessEqual:
+ case OO_GreaterGreaterEqual:
+ case OO_PlusEqual:
+ case OO_MinusEqual:
+ case OO_StarEqual:
+ case OO_AmpEqual:
+ // Binary computation
+ case OO_Slash:
+ case OO_Percent:
+ case OO_Caret:
+ case OO_Pipe:
+ case OO_LessLess:
+ case OO_GreaterGreater:
+ case OO_AmpAmp:
+ case OO_PipePipe:
+ case OO_ArrowStar:
+ case OO_Comma:
+ return syntax::NodeKind::BinaryOperatorExpression;
+ case OO_Tilde:
+ case OO_Exclaim:
+ return syntax::NodeKind::PrefixUnaryOperatorExpression;
+ // Prefix/Postfix increment/decrement
+ case OO_PlusPlus:
+ case OO_MinusMinus:
+ switch (E.getNumArgs()) {
+ case 1:
+ return syntax::NodeKind::PrefixUnaryOperatorExpression;
+ case 2:
+ return syntax::NodeKind::PostfixUnaryOperatorExpression;
+ default:
+ llvm_unreachable("Invalid number of arguments for operator");
+ }
+ // Operators that can be unary or binary
+ case OO_Plus:
+ case OO_Minus:
+ case OO_Star:
+ case OO_Amp:
+ switch (E.getNumArgs()) {
+ case 1:
+ return syntax::NodeKind::PrefixUnaryOperatorExpression;
+ case 2:
+ return syntax::NodeKind::BinaryOperatorExpression;
+ default:
+ llvm_unreachable("Invalid number of arguments for operator");
+ }
+ return syntax::NodeKind::BinaryOperatorExpression;
+ // Not yet supported by SyntaxTree
+ case OO_New:
+ case OO_Delete:
+ case OO_Array_New:
+ case OO_Array_Delete:
+ case OO_Coawait:
+ case OO_Call:
+ case OO_Subscript:
+ case OO_Arrow:
+ return syntax::NodeKind::UnknownExpression;
+ case OO_Conditional: // not overloadable
+ case NUM_OVERLOADED_OPERATORS:
+ case OO_None:
+ llvm_unreachable("Not an overloadable operator");
+ }
+ llvm_unreachable("Unknown OverloadedOperatorKind enum");
+}
+
+/// Gets the range of declarator as defined by the C++ grammar. E.g.
+/// `int a;` -> range of `a`,
+/// `int *a;` -> range of `*a`,
+/// `int a[10];` -> range of `a[10]`,
+/// `int a[1][2][3];` -> range of `a[1][2][3]`,
+/// `int *a = nullptr` -> range of `*a = nullptr`.
+/// FIXME: \p Name must be a source range, e.g. for `operator+`.
+static SourceRange getDeclaratorRange(const SourceManager &SM, TypeLoc T,
+ SourceLocation Name,
+ SourceRange Initializer) {
+ SourceLocation Start = GetStartLoc().Visit(T);
+ SourceLocation End = T.getSourceRange().getEnd();
+ assert(End.isValid());
+ if (Name.isValid()) {
+ if (Start.isInvalid())
+ Start = Name;
+ if (SM.isBeforeInTranslationUnit(End, Name))
+ End = Name;
+ }
+ if (Initializer.isValid()) {
+ auto InitializerEnd = Initializer.getEnd();
+ assert(SM.isBeforeInTranslationUnit(End, InitializerEnd) ||
+ End == InitializerEnd);
+ End = InitializerEnd;
+ }
+ return SourceRange(Start, End);
+}
+
+namespace {
+/// All AST hierarchy roots that can be represented as pointers.
+using ASTPtr = llvm::PointerUnion<Stmt *, Decl *>;
+/// Maintains a mapping from AST to syntax tree nodes. This class will get more
+/// complicated as we support more kinds of AST nodes, e.g. TypeLocs.
+/// FIXME: expose this as public API.
+class ASTToSyntaxMapping {
+public:
+ void add(ASTPtr From, syntax::Tree *To) {
+ assert(To != nullptr);
+ assert(!From.isNull());
+
+ bool Added = Nodes.insert({From, To}).second;
+ (void)Added;
+ assert(Added && "mapping added twice");
+ }
+
+ syntax::Tree *find(ASTPtr P) const { return Nodes.lookup(P); }
+
+private:
+ llvm::DenseMap<ASTPtr, syntax::Tree *> Nodes;
+};
+} // namespace
+
/// A helper class for constructing the syntax tree while traversing a clang
/// AST.
///
@@ -57,30 +271,44 @@ public:
}
llvm::BumpPtrAllocator &allocator() { return Arena.allocator(); }
+ const SourceManager &sourceManager() const { return Arena.sourceManager(); }
/// Populate children for \p New node, assuming it covers tokens from \p
/// Range.
- void foldNode(llvm::ArrayRef<syntax::Token> Range, syntax::Tree *New);
-
- /// Must be called with the range of each `DeclaratorDecl`. Ensures the
- /// corresponding declarator nodes are covered by `SimpleDeclaration`.
- void noticeDeclaratorRange(llvm::ArrayRef<syntax::Token> Range);
+ void foldNode(llvm::ArrayRef<syntax::Token> Range, syntax::Tree *New,
+ ASTPtr From) {
+ assert(New);
+ Pending.foldChildren(Arena, Range, New);
+ if (From)
+ Mapping.add(From, New);
+ }
+ void foldNode(llvm::ArrayRef<syntax::Token> Range, syntax::Tree *New,
+ TypeLoc L) {
+ // FIXME: add mapping for TypeLocs
+ foldNode(Range, New, nullptr);
+ }
/// Notifies that we should not consume trailing semicolon when computing
/// token range of \p D.
- void noticeDeclaratorWithoutSemicolon(Decl *D);
+ void noticeDeclWithoutSemicolon(Decl *D);
/// Mark the \p Child node with a corresponding \p Role. All marked children
/// should be consumed by foldNode.
- /// (!) when called on expressions (clang::Expr is derived from clang::Stmt),
- /// wraps expressions into expression statement.
+ /// When called on expressions (clang::Expr is derived from clang::Stmt),
+ /// wraps expressions into expression statement.
void markStmtChild(Stmt *Child, NodeRole Role);
/// Should be called for expressions in non-statement position to avoid
/// wrapping into expression statement.
void markExprChild(Expr *Child, NodeRole Role);
-
/// Set role for a token starting at \p Loc.
void markChildToken(SourceLocation Loc, NodeRole R);
+ /// Set role for \p T.
+ void markChildToken(const syntax::Token *T, NodeRole R);
+
+ /// Set role for \p N.
+ void markChild(syntax::Node *N, NodeRole R);
+ /// Set role for the syntax node matching \p N.
+ void markChild(ASTPtr N, NodeRole R);
/// Finish building the tree and consume the root node.
syntax::TranslationUnit *finalize() && {
@@ -97,8 +325,16 @@ public:
return TU;
}
- /// getRange() finds the syntax tokens corresponding to the passed source
- /// locations.
+ /// Finds a token starting at \p L. The token must exist if \p L is valid.
+ const syntax::Token *findToken(SourceLocation L) const;
+
+ /// Finds the syntax tokens corresponding to the \p SourceRange.
+ llvm::ArrayRef<syntax::Token> getRange(SourceRange Range) const {
+ assert(Range.isValid());
+ return getRange(Range.getBegin(), Range.getEnd());
+ }
+
+ /// Finds the syntax tokens corresponding to the passed source locations.
/// \p First is the start position of the first token and \p Last is the start
/// position of the last token.
llvm::ArrayRef<syntax::Token> getRange(SourceLocation First,
@@ -109,23 +345,62 @@ public:
Arena.sourceManager().isBeforeInTranslationUnit(First, Last));
return llvm::makeArrayRef(findToken(First), std::next(findToken(Last)));
}
- llvm::ArrayRef<syntax::Token> getRange(const Decl *D) const {
- auto Tokens = getRange(D->getBeginLoc(), D->getEndLoc());
- if (llvm::isa<NamespaceDecl>(D))
- return Tokens;
- if (DeclsWithoutSemicolons.count(D))
- return Tokens;
- // FIXME: do not consume trailing semicolon on function definitions.
- // Most declarations own a semicolon in syntax trees, but not in clang AST.
- return withTrailingSemicolon(Tokens);
+
+ llvm::ArrayRef<syntax::Token>
+ getTemplateRange(const ClassTemplateSpecializationDecl *D) const {
+ auto Tokens = getRange(D->getSourceRange());
+ return maybeAppendSemicolon(Tokens, D);
}
+
+ /// Returns true if \p D is the last declarator in a chain and is thus
+ /// responsible for creating SimpleDeclaration for the whole chain.
+ template <class T>
+ bool isResponsibleForCreatingDeclaration(const T *D) const {
+ static_assert((std::is_base_of<DeclaratorDecl, T>::value ||
+ std::is_base_of<TypedefNameDecl, T>::value),
+ "only DeclaratorDecl and TypedefNameDecl are supported.");
+
+ const Decl *Next = D->getNextDeclInContext();
+
+ // There's no next sibling, this one is responsible.
+ if (Next == nullptr) {
+ return true;
+ }
+ const auto *NextT = llvm::dyn_cast<T>(Next);
+
+ // Next sibling is not the same type, this one is responsible.
+ if (NextT == nullptr) {
+ return true;
+ }
+ // Next sibling doesn't begin at the same loc, it must be a different
+ // declaration, so this declarator is responsible.
+ if (NextT->getBeginLoc() != D->getBeginLoc()) {
+ return true;
+ }
+
+ // NextT is a member of the same declaration, and we need the last member to
+ // create declaration. This one is not responsible.
+ return false;
+ }
+
+ llvm::ArrayRef<syntax::Token> getDeclarationRange(Decl *D) {
+ llvm::ArrayRef<clang::syntax::Token> Tokens;
+ // We want to drop the template parameters for specializations.
+ if (const auto *S = llvm::dyn_cast<TagDecl>(D))
+ Tokens = getRange(S->TypeDecl::getBeginLoc(), S->getEndLoc());
+ else
+ Tokens = getRange(D->getSourceRange());
+ return maybeAppendSemicolon(Tokens, D);
+ }
+
llvm::ArrayRef<syntax::Token> getExprRange(const Expr *E) const {
- return getRange(E->getBeginLoc(), E->getEndLoc());
+ return getRange(E->getSourceRange());
}
+
/// Find the adjusted range for the statement, consuming the trailing
/// semicolon when needed.
llvm::ArrayRef<syntax::Token> getStmtRange(const Stmt *S) const {
- auto Tokens = getRange(S->getBeginLoc(), S->getEndLoc());
+ auto Tokens = getRange(S->getSourceRange());
if (isa<CompoundStmt>(S))
return Tokens;
@@ -138,17 +413,31 @@ public:
private:
llvm::ArrayRef<syntax::Token>
+ maybeAppendSemicolon(llvm::ArrayRef<syntax::Token> Tokens,
+ const Decl *D) const {
+ if (llvm::isa<NamespaceDecl>(D))
+ return Tokens;
+ if (DeclsWithoutSemicolons.count(D))
+ return Tokens;
+ // FIXME: do not consume trailing semicolon on function definitions.
+ // Most declarations own a semicolon in syntax trees, but not in clang AST.
+ return withTrailingSemicolon(Tokens);
+ }
+
+ llvm::ArrayRef<syntax::Token>
withTrailingSemicolon(llvm::ArrayRef<syntax::Token> Tokens) const {
assert(!Tokens.empty());
assert(Tokens.back().kind() != tok::eof);
- // (!) we never consume 'eof', so looking at the next token is ok.
+ // We never consume 'eof', so looking at the next token is ok.
if (Tokens.back().kind() != tok::semi && Tokens.end()->kind() == tok::semi)
return llvm::makeArrayRef(Tokens.begin(), Tokens.end() + 1);
return Tokens;
}
- /// Finds a token starting at \p L. The token must exist.
- const syntax::Token *findToken(SourceLocation L) const;
+ void setRole(syntax::Node *N, NodeRole R) {
+ assert(N->role() == NodeRole::Detached);
+ N->setRole(R);
+ }
/// A collection of trees covering the input tokens.
/// When created, each tree corresponds to a single token in the file.
@@ -166,12 +455,10 @@ private:
auto *L = new (A.allocator()) syntax::Leaf(&T);
L->Original = true;
L->CanModify = A.tokenBuffer().spelledForExpanded(T).hasValue();
- Trees.insert(Trees.end(), {&T, NodeAndRole{L}});
+ Trees.insert(Trees.end(), {&T, L});
}
}
- ~Forest() { assert(DelayedFolds.empty()); }
-
void assignRole(llvm::ArrayRef<syntax::Token> Range,
syntax::NodeRole Role) {
assert(!Range.empty());
@@ -181,56 +468,49 @@ private:
assert((std::next(It) == Trees.end() ||
std::next(It)->first == Range.end()) &&
"no child with the specified range");
- It->second.Role = Role;
+ assert(It->second->role() == NodeRole::Detached &&
+ "re-assigning role for a child");
+ It->second->setRole(Role);
}
/// Add \p Node to the forest and attach child nodes based on \p Tokens.
void foldChildren(const syntax::Arena &A,
llvm::ArrayRef<syntax::Token> Tokens,
syntax::Tree *Node) {
- // Execute delayed folds inside `Tokens`.
- auto BeginExecuted = DelayedFolds.lower_bound(Tokens.begin());
- auto It = BeginExecuted;
- for (; It != DelayedFolds.end() && It->second.End <= Tokens.end(); ++It)
- foldChildrenEager(A, llvm::makeArrayRef(It->first, It->second.End),
- It->second.Node);
- DelayedFolds.erase(BeginExecuted, It);
-
// Attach children to `Node`.
- foldChildrenEager(A, Tokens, Node);
- }
+ assert(Node->firstChild() == nullptr && "node already has children");
- /// Schedule a call to `foldChildren` that will only be executed when
- /// containing node is folded. The range of delayed nodes can be extended by
- /// calling `extendDelayedFold`. Only one delayed node for each starting
- /// token is allowed.
- void foldChildrenDelayed(llvm::ArrayRef<syntax::Token> Tokens,
- syntax::Tree *Node) {
- assert(!Tokens.empty());
- bool Inserted =
- DelayedFolds.insert({Tokens.begin(), DelayedFold{Tokens.end(), Node}})
- .second;
- (void)Inserted;
- assert(Inserted && "Multiple delayed folds start at the same token");
- }
+ auto *FirstToken = Tokens.begin();
+ auto BeginChildren = Trees.lower_bound(FirstToken);
- /// If there a delayed fold, starting at `ExtendedRange.begin()`, extends
- /// its endpoint to `ExtendedRange.end()` and returns true.
- /// Otherwise, returns false.
- bool extendDelayedFold(llvm::ArrayRef<syntax::Token> ExtendedRange) {
- assert(!ExtendedRange.empty());
- auto It = DelayedFolds.find(ExtendedRange.data());
- if (It == DelayedFolds.end())
- return false;
- assert(It->second.End <= ExtendedRange.end());
- It->second.End = ExtendedRange.end();
- return true;
+ assert((BeginChildren == Trees.end() ||
+ BeginChildren->first == FirstToken) &&
+ "fold crosses boundaries of existing subtrees");
+ auto EndChildren = Trees.lower_bound(Tokens.end());
+ assert(
+ (EndChildren == Trees.end() || EndChildren->first == Tokens.end()) &&
+ "fold crosses boundaries of existing subtrees");
+
+ // We need to go in reverse order, because we can only prepend.
+ for (auto It = EndChildren; It != BeginChildren; --It) {
+ auto *C = std::prev(It)->second;
+ if (C->role() == NodeRole::Detached)
+ C->setRole(NodeRole::Unknown);
+ Node->prependChildLowLevel(C);
+ }
+
+ // Mark that this node came from the AST and is backed by the source code.
+ Node->Original = true;
+ Node->CanModify = A.tokenBuffer().spelledForExpanded(Tokens).hasValue();
+
+ Trees.erase(BeginChildren, EndChildren);
+ Trees.insert({FirstToken, Node});
}
// EXPECTS: all tokens were consumed and are owned by a single root node.
syntax::Node *finalize() && {
assert(Trees.size() == 1);
- auto *Root = Trees.begin()->second.Node;
+ auto *Root = Trees.begin()->second;
Trees = {};
return Root;
}
@@ -243,66 +523,19 @@ private:
? (std::next(It)->first - It->first)
: A.tokenBuffer().expandedTokens().end() - It->first;
- R += llvm::formatv("- '{0}' covers '{1}'+{2} tokens\n",
- It->second.Node->kind(),
- It->first->text(A.sourceManager()), CoveredTokens);
- R += It->second.Node->dump(A);
+ R += std::string(llvm::formatv(
+ "- '{0}' covers '{1}'+{2} tokens\n", It->second->kind(),
+ It->first->text(A.sourceManager()), CoveredTokens));
+ R += It->second->dump(A);
}
return R;
}
private:
- /// Implementation detail of `foldChildren`, does acutal folding ignoring
- /// delayed folds.
- void foldChildrenEager(const syntax::Arena &A,
- llvm::ArrayRef<syntax::Token> Tokens,
- syntax::Tree *Node) {
- assert(Node->firstChild() == nullptr && "node already has children");
-
- auto *FirstToken = Tokens.begin();
- auto BeginChildren = Trees.lower_bound(FirstToken);
- assert((BeginChildren == Trees.end() ||
- BeginChildren->first == FirstToken) &&
- "fold crosses boundaries of existing subtrees");
- auto EndChildren = Trees.lower_bound(Tokens.end());
- assert(
- (EndChildren == Trees.end() || EndChildren->first == Tokens.end()) &&
- "fold crosses boundaries of existing subtrees");
-
- // (!) we need to go in reverse order, because we can only prepend.
- for (auto It = EndChildren; It != BeginChildren; --It)
- Node->prependChildLowLevel(std::prev(It)->second.Node,
- std::prev(It)->second.Role);
-
- // Mark that this node came from the AST and is backed by the source code.
- Node->Original = true;
- Node->CanModify = A.tokenBuffer().spelledForExpanded(Tokens).hasValue();
-
- Trees.erase(BeginChildren, EndChildren);
- Trees.insert({FirstToken, NodeAndRole(Node)});
- }
- /// A with a role that should be assigned to it when adding to a parent.
- struct NodeAndRole {
- explicit NodeAndRole(syntax::Node *Node)
- : Node(Node), Role(NodeRole::Unknown) {}
-
- syntax::Node *Node;
- NodeRole Role;
- };
-
/// Maps from the start token to a subtree starting at that token.
/// Keys in the map are pointers into the array of expanded tokens, so
/// pointer order corresponds to the order of preprocessor tokens.
- /// FIXME: storing the end tokens is redundant.
- /// FIXME: the key of a map is redundant, it is also stored in NodeForRange.
- std::map<const syntax::Token *, NodeAndRole> Trees;
-
- /// See documentation of `foldChildrenDelayed` for details.
- struct DelayedFold {
- const syntax::Token *End = nullptr;
- syntax::Tree *Node = nullptr;
- };
- std::map<const syntax::Token *, DelayedFold> DelayedFolds;
+ std::map<const syntax::Token *, syntax::Node *> Trees;
};
/// For debugging purposes.
@@ -314,49 +547,91 @@ private:
LocationToToken;
Forest Pending;
llvm::DenseSet<Decl *> DeclsWithoutSemicolons;
+ ASTToSyntaxMapping Mapping;
};
namespace {
class BuildTreeVisitor : public RecursiveASTVisitor<BuildTreeVisitor> {
public:
- explicit BuildTreeVisitor(ASTContext &Ctx, syntax::TreeBuilder &Builder)
- : Builder(Builder), LangOpts(Ctx.getLangOpts()) {}
+ explicit BuildTreeVisitor(ASTContext &Context, syntax::TreeBuilder &Builder)
+ : Builder(Builder), Context(Context) {}
bool shouldTraversePostOrder() const { return true; }
- bool WalkUpFromDeclaratorDecl(DeclaratorDecl *D) {
- // Ensure declarators are covered by SimpleDeclaration.
- Builder.noticeDeclaratorRange(Builder.getRange(D));
- // FIXME: build nodes for the declarator too.
- return true;
+ bool WalkUpFromDeclaratorDecl(DeclaratorDecl *DD) {
+ return processDeclaratorAndDeclaration(DD);
}
- bool WalkUpFromTypedefNameDecl(TypedefNameDecl *D) {
- // Also a declarator.
- Builder.noticeDeclaratorRange(Builder.getRange(D));
- // FIXME: build nodes for the declarator too.
- return true;
+
+ bool WalkUpFromTypedefNameDecl(TypedefNameDecl *TD) {
+ return processDeclaratorAndDeclaration(TD);
}
bool VisitDecl(Decl *D) {
assert(!D->isImplicit());
- Builder.foldNode(Builder.getRange(D),
- new (allocator()) syntax::UnknownDeclaration());
+ Builder.foldNode(Builder.getDeclarationRange(D),
+ new (allocator()) syntax::UnknownDeclaration(), D);
+ return true;
+ }
+
+ // RAV does not call WalkUpFrom* on explicit instantiations, so we have to
+ // override Traverse.
+ // FIXME: make RAV call WalkUpFrom* instead.
+ bool
+ TraverseClassTemplateSpecializationDecl(ClassTemplateSpecializationDecl *C) {
+ if (!RecursiveASTVisitor::TraverseClassTemplateSpecializationDecl(C))
+ return false;
+ if (C->isExplicitSpecialization())
+ return true; // we are only interested in explicit instantiations.
+ auto *Declaration =
+ cast<syntax::SimpleDeclaration>(handleFreeStandingTagDecl(C));
+ foldExplicitTemplateInstantiation(
+ Builder.getTemplateRange(C), Builder.findToken(C->getExternLoc()),
+ Builder.findToken(C->getTemplateKeywordLoc()), Declaration, C);
+ return true;
+ }
+
+ bool WalkUpFromTemplateDecl(TemplateDecl *S) {
+ foldTemplateDeclaration(
+ Builder.getDeclarationRange(S),
+ Builder.findToken(S->getTemplateParameters()->getTemplateLoc()),
+ Builder.getDeclarationRange(S->getTemplatedDecl()), S);
return true;
}
bool WalkUpFromTagDecl(TagDecl *C) {
// FIXME: build the ClassSpecifier node.
- if (C->isFreeStanding()) {
- // Class is a declaration specifier and needs a spanning declaration node.
- Builder.foldNode(Builder.getRange(C),
- new (allocator()) syntax::SimpleDeclaration);
+ if (!C->isFreeStanding()) {
+ assert(C->getNumTemplateParameterLists() == 0);
return true;
}
+ handleFreeStandingTagDecl(C);
return true;
}
+ syntax::Declaration *handleFreeStandingTagDecl(TagDecl *C) {
+ assert(C->isFreeStanding());
+ // Class is a declaration specifier and needs a spanning declaration node.
+ auto DeclarationRange = Builder.getDeclarationRange(C);
+ syntax::Declaration *Result = new (allocator()) syntax::SimpleDeclaration;
+ Builder.foldNode(DeclarationRange, Result, nullptr);
+
+ // Build TemplateDeclaration nodes if we had template parameters.
+ auto ConsumeTemplateParameters = [&](const TemplateParameterList &L) {
+ const auto *TemplateKW = Builder.findToken(L.getTemplateLoc());
+ auto R = llvm::makeArrayRef(TemplateKW, DeclarationRange.end());
+ Result =
+ foldTemplateDeclaration(R, TemplateKW, DeclarationRange, nullptr);
+ DeclarationRange = R;
+ };
+ if (auto *S = llvm::dyn_cast<ClassTemplatePartialSpecializationDecl>(C))
+ ConsumeTemplateParameters(*S->getTemplateParameters());
+ for (unsigned I = C->getNumTemplateParameterLists(); 0 < I; --I)
+ ConsumeTemplateParameters(*C->getTemplateParameterList(I - 1));
+ return Result;
+ }
+
bool WalkUpFromTranslationUnitDecl(TranslationUnitDecl *TU) {
- // (!) we do not want to call VisitDecl(), the declaration for translation
+ // We do not want to call VisitDecl(), the declaration for translation
// unit is built by finalize().
return true;
}
@@ -370,14 +645,14 @@ public:
Builder.markChildToken(S->getRBracLoc(), NodeRole::CloseParen);
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::CompoundStatement);
+ new (allocator()) syntax::CompoundStatement, S);
return true;
}
// Some statements are not yet handled by syntax trees.
bool WalkUpFromStmt(Stmt *S) {
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::UnknownStatement);
+ new (allocator()) syntax::UnknownStatement, S);
return true;
}
@@ -386,27 +661,28 @@ public:
// RAV traverses it as a statement, we produce invalid node kinds in that
// case.
// FIXME: should do this in RAV instead?
- if (S->getInit() && !TraverseStmt(S->getInit()))
- return false;
- if (S->getLoopVariable() && !TraverseDecl(S->getLoopVariable()))
- return false;
- if (S->getRangeInit() && !TraverseStmt(S->getRangeInit()))
- return false;
- if (S->getBody() && !TraverseStmt(S->getBody()))
- return false;
- return true;
+ bool Result = [&, this]() {
+ if (S->getInit() && !TraverseStmt(S->getInit()))
+ return false;
+ if (S->getLoopVariable() && !TraverseDecl(S->getLoopVariable()))
+ return false;
+ if (S->getRangeInit() && !TraverseStmt(S->getRangeInit()))
+ return false;
+ if (S->getBody() && !TraverseStmt(S->getBody()))
+ return false;
+ return true;
+ }();
+ WalkUpFromCXXForRangeStmt(S);
+ return Result;
}
bool TraverseStmt(Stmt *S) {
if (auto *DS = llvm::dyn_cast_or_null<DeclStmt>(S)) {
// We want to consume the semicolon, make sure SimpleDeclaration does not.
for (auto *D : DS->decls())
- Builder.noticeDeclaratorWithoutSemicolon(D);
+ Builder.noticeDeclWithoutSemicolon(D);
} else if (auto *E = llvm::dyn_cast_or_null<Expr>(S)) {
- // (!) do not recurse into subexpressions.
- // we do not have syntax trees for expressions yet, so we only want to see
- // the first top-level expression.
- return WalkUpFromExpr(E->IgnoreImplicit());
+ return RecursiveASTVisitor::TraverseStmt(E->IgnoreImplicit());
}
return RecursiveASTVisitor::TraverseStmt(S);
}
@@ -415,19 +691,306 @@ public:
bool WalkUpFromExpr(Expr *E) {
assert(!isImplicitExpr(E) && "should be handled by TraverseStmt");
Builder.foldNode(Builder.getExprRange(E),
- new (allocator()) syntax::UnknownExpression);
+ new (allocator()) syntax::UnknownExpression, E);
+ return true;
+ }
+
+ syntax::NestedNameSpecifier *
+ BuildNestedNameSpecifier(NestedNameSpecifierLoc QualifierLoc) {
+ if (!QualifierLoc)
+ return nullptr;
+ for (auto it = QualifierLoc; it; it = it.getPrefix()) {
+ auto *NS = new (allocator()) syntax::NameSpecifier;
+ Builder.foldNode(Builder.getRange(it.getLocalSourceRange()), NS, nullptr);
+ Builder.markChild(NS, syntax::NodeRole::NestedNameSpecifier_specifier);
+ }
+ auto *NNS = new (allocator()) syntax::NestedNameSpecifier;
+ Builder.foldNode(Builder.getRange(QualifierLoc.getSourceRange()), NNS,
+ nullptr);
+ return NNS;
+ }
+
+ bool TraverseUserDefinedLiteral(UserDefinedLiteral *S) {
+ // The semantic AST node `UserDefinedLiteral` (UDL) may have one child node
+ // referencing the location of the UDL suffix (`_w` in `1.2_w`). The
+ // UDL suffix location does not point to the beginning of a token, so we
+ // can't represent the UDL suffix as a separate syntax tree node.
+
+ return WalkUpFromUserDefinedLiteral(S);
+ }
+
+ syntax::UserDefinedLiteralExpression *
+ buildUserDefinedLiteral(UserDefinedLiteral *S) {
+ switch (S->getLiteralOperatorKind()) {
+ case clang::UserDefinedLiteral::LOK_Integer:
+ return new (allocator()) syntax::IntegerUserDefinedLiteralExpression;
+ case clang::UserDefinedLiteral::LOK_Floating:
+ return new (allocator()) syntax::FloatUserDefinedLiteralExpression;
+ case clang::UserDefinedLiteral::LOK_Character:
+ return new (allocator()) syntax::CharUserDefinedLiteralExpression;
+ case clang::UserDefinedLiteral::LOK_String:
+ return new (allocator()) syntax::StringUserDefinedLiteralExpression;
+ case clang::UserDefinedLiteral::LOK_Raw:
+ case clang::UserDefinedLiteral::LOK_Template:
+ // For raw literal operator and numeric literal operator template we
+ // cannot get the type of the operand in the semantic AST. We get this
+ // information from the token. As integer and floating point have the same
+ // token kind, we run `NumericLiteralParser` again to distinguish them.
+ auto TokLoc = S->getBeginLoc();
+ auto TokSpelling =
+ Builder.findToken(TokLoc)->text(Context.getSourceManager());
+ auto Literal =
+ NumericLiteralParser(TokSpelling, TokLoc, Context.getSourceManager(),
+ Context.getLangOpts(), Context.getTargetInfo(),
+ Context.getDiagnostics());
+ if (Literal.isIntegerLiteral())
+ return new (allocator()) syntax::IntegerUserDefinedLiteralExpression;
+ else {
+ assert(Literal.isFloatingLiteral());
+ return new (allocator()) syntax::FloatUserDefinedLiteralExpression;
+ }
+ }
+ llvm_unreachable("Unknown literal operator kind.");
+ }
+
+ bool WalkUpFromUserDefinedLiteral(UserDefinedLiteral *S) {
+ Builder.markChildToken(S->getBeginLoc(), syntax::NodeRole::LiteralToken);
+ Builder.foldNode(Builder.getExprRange(S), buildUserDefinedLiteral(S), S);
+ return true;
+ }
+
+ bool WalkUpFromDeclRefExpr(DeclRefExpr *S) {
+ if (auto *NNS = BuildNestedNameSpecifier(S->getQualifierLoc()))
+ Builder.markChild(NNS, syntax::NodeRole::IdExpression_qualifier);
+
+ auto *unqualifiedId = new (allocator()) syntax::UnqualifiedId;
+ // Get `UnqualifiedId` from `DeclRefExpr`.
+ // FIXME: Extract this logic so that it can be used by `MemberExpr`,
+ // and other semantic constructs, now it is tied to `DeclRefExpr`.
+ if (!S->hasExplicitTemplateArgs()) {
+ Builder.foldNode(Builder.getRange(S->getNameInfo().getSourceRange()),
+ unqualifiedId, nullptr);
+ } else {
+ auto templateIdSourceRange =
+ SourceRange(S->getNameInfo().getBeginLoc(), S->getRAngleLoc());
+ Builder.foldNode(Builder.getRange(templateIdSourceRange), unqualifiedId,
+ nullptr);
+ }
+ Builder.markChild(unqualifiedId, syntax::NodeRole::IdExpression_id);
+
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::IdExpression, S);
+ return true;
+ }
+
+ bool WalkUpFromParenExpr(ParenExpr *S) {
+ Builder.markChildToken(S->getLParen(), syntax::NodeRole::OpenParen);
+ Builder.markExprChild(S->getSubExpr(),
+ syntax::NodeRole::ParenExpression_subExpression);
+ Builder.markChildToken(S->getRParen(), syntax::NodeRole::CloseParen);
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::ParenExpression, S);
+ return true;
+ }
+
+ bool WalkUpFromIntegerLiteral(IntegerLiteral *S) {
+ Builder.markChildToken(S->getLocation(), syntax::NodeRole::LiteralToken);
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::IntegerLiteralExpression, S);
+ return true;
+ }
+
+ bool WalkUpFromCharacterLiteral(CharacterLiteral *S) {
+ Builder.markChildToken(S->getLocation(), syntax::NodeRole::LiteralToken);
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::CharacterLiteralExpression, S);
+ return true;
+ }
+
+ bool WalkUpFromFloatingLiteral(FloatingLiteral *S) {
+ Builder.markChildToken(S->getLocation(), syntax::NodeRole::LiteralToken);
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::FloatingLiteralExpression, S);
+ return true;
+ }
+
+ bool WalkUpFromStringLiteral(StringLiteral *S) {
+ Builder.markChildToken(S->getBeginLoc(), syntax::NodeRole::LiteralToken);
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::StringLiteralExpression, S);
+ return true;
+ }
+
+ bool WalkUpFromCXXBoolLiteralExpr(CXXBoolLiteralExpr *S) {
+ Builder.markChildToken(S->getLocation(), syntax::NodeRole::LiteralToken);
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::BoolLiteralExpression, S);
+ return true;
+ }
+
+ bool WalkUpFromCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *S) {
+ Builder.markChildToken(S->getLocation(), syntax::NodeRole::LiteralToken);
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::CxxNullPtrExpression, S);
+ return true;
+ }
+
+ bool WalkUpFromUnaryOperator(UnaryOperator *S) {
+ Builder.markChildToken(S->getOperatorLoc(),
+ syntax::NodeRole::OperatorExpression_operatorToken);
+ Builder.markExprChild(S->getSubExpr(),
+ syntax::NodeRole::UnaryOperatorExpression_operand);
+
+ if (S->isPostfix())
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::PostfixUnaryOperatorExpression,
+ S);
+ else
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::PrefixUnaryOperatorExpression,
+ S);
+
+ return true;
+ }
+
+ bool WalkUpFromBinaryOperator(BinaryOperator *S) {
+ Builder.markExprChild(
+ S->getLHS(), syntax::NodeRole::BinaryOperatorExpression_leftHandSide);
+ Builder.markChildToken(S->getOperatorLoc(),
+ syntax::NodeRole::OperatorExpression_operatorToken);
+ Builder.markExprChild(
+ S->getRHS(), syntax::NodeRole::BinaryOperatorExpression_rightHandSide);
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::BinaryOperatorExpression, S);
return true;
}
+ bool TraverseCXXOperatorCallExpr(CXXOperatorCallExpr *S) {
+ if (getOperatorNodeKind(*S) ==
+ syntax::NodeKind::PostfixUnaryOperatorExpression) {
+ // A postfix unary operator is declared as taking two operands. The
+ // second operand is used to distinguish from its prefix counterpart. In
+ // the semantic AST this "phantom" operand is represented as a
+ // `IntegerLiteral` with invalid `SourceLocation`. We skip visiting this
+ // operand because it does not correspond to anything written in source
+ // code
+ for (auto *child : S->children()) {
+ if (child->getSourceRange().isInvalid())
+ continue;
+ if (!TraverseStmt(child))
+ return false;
+ }
+ return WalkUpFromCXXOperatorCallExpr(S);
+ } else
+ return RecursiveASTVisitor::TraverseCXXOperatorCallExpr(S);
+ }
+
+ bool WalkUpFromCXXOperatorCallExpr(CXXOperatorCallExpr *S) {
+ switch (getOperatorNodeKind(*S)) {
+ case syntax::NodeKind::BinaryOperatorExpression:
+ Builder.markExprChild(
+ S->getArg(0),
+ syntax::NodeRole::BinaryOperatorExpression_leftHandSide);
+ Builder.markChildToken(
+ S->getOperatorLoc(),
+ syntax::NodeRole::OperatorExpression_operatorToken);
+ Builder.markExprChild(
+ S->getArg(1),
+ syntax::NodeRole::BinaryOperatorExpression_rightHandSide);
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::BinaryOperatorExpression, S);
+ return true;
+ case syntax::NodeKind::PrefixUnaryOperatorExpression:
+ Builder.markChildToken(
+ S->getOperatorLoc(),
+ syntax::NodeRole::OperatorExpression_operatorToken);
+ Builder.markExprChild(S->getArg(0),
+ syntax::NodeRole::UnaryOperatorExpression_operand);
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::PrefixUnaryOperatorExpression,
+ S);
+ return true;
+ case syntax::NodeKind::PostfixUnaryOperatorExpression:
+ Builder.markChildToken(
+ S->getOperatorLoc(),
+ syntax::NodeRole::OperatorExpression_operatorToken);
+ Builder.markExprChild(S->getArg(0),
+ syntax::NodeRole::UnaryOperatorExpression_operand);
+ Builder.foldNode(Builder.getExprRange(S),
+ new (allocator()) syntax::PostfixUnaryOperatorExpression,
+ S);
+ return true;
+ case syntax::NodeKind::UnknownExpression:
+ return RecursiveASTVisitor::WalkUpFromCXXOperatorCallExpr(S);
+ default:
+ llvm_unreachable("getOperatorNodeKind() does not return this value");
+ }
+ }
+
bool WalkUpFromNamespaceDecl(NamespaceDecl *S) {
- auto Tokens = Builder.getRange(S);
+ auto Tokens = Builder.getDeclarationRange(S);
if (Tokens.front().kind() == tok::coloncolon) {
// Handle nested namespace definitions. Those start at '::' token, e.g.
// namespace a^::b {}
// FIXME: build corresponding nodes for the name of this namespace.
return true;
}
- Builder.foldNode(Tokens, new (allocator()) syntax::NamespaceDefinition);
+ Builder.foldNode(Tokens, new (allocator()) syntax::NamespaceDefinition, S);
+ return true;
+ }
+
+ bool TraverseParenTypeLoc(ParenTypeLoc L) {
+ // We reverse order of traversal to get the proper syntax structure.
+ if (!WalkUpFromParenTypeLoc(L))
+ return false;
+ return TraverseTypeLoc(L.getInnerLoc());
+ }
+
+ bool WalkUpFromParenTypeLoc(ParenTypeLoc L) {
+ Builder.markChildToken(L.getLParenLoc(), syntax::NodeRole::OpenParen);
+ Builder.markChildToken(L.getRParenLoc(), syntax::NodeRole::CloseParen);
+ Builder.foldNode(Builder.getRange(L.getLParenLoc(), L.getRParenLoc()),
+ new (allocator()) syntax::ParenDeclarator, L);
+ return true;
+ }
+
+ // Declarator chunks, they are produced by type locs and some clang::Decls.
+ bool WalkUpFromArrayTypeLoc(ArrayTypeLoc L) {
+ Builder.markChildToken(L.getLBracketLoc(), syntax::NodeRole::OpenParen);
+ Builder.markExprChild(L.getSizeExpr(),
+ syntax::NodeRole::ArraySubscript_sizeExpression);
+ Builder.markChildToken(L.getRBracketLoc(), syntax::NodeRole::CloseParen);
+ Builder.foldNode(Builder.getRange(L.getLBracketLoc(), L.getRBracketLoc()),
+ new (allocator()) syntax::ArraySubscript, L);
+ return true;
+ }
+
+ bool WalkUpFromFunctionTypeLoc(FunctionTypeLoc L) {
+ Builder.markChildToken(L.getLParenLoc(), syntax::NodeRole::OpenParen);
+ for (auto *P : L.getParams()) {
+ Builder.markChild(P, syntax::NodeRole::ParametersAndQualifiers_parameter);
+ }
+ Builder.markChildToken(L.getRParenLoc(), syntax::NodeRole::CloseParen);
+ Builder.foldNode(Builder.getRange(L.getLParenLoc(), L.getEndLoc()),
+ new (allocator()) syntax::ParametersAndQualifiers, L);
+ return true;
+ }
+
+ bool WalkUpFromFunctionProtoTypeLoc(FunctionProtoTypeLoc L) {
+ if (!L.getTypePtr()->hasTrailingReturn())
+ return WalkUpFromFunctionTypeLoc(L);
+
+ auto *TrailingReturnTokens = BuildTrailingReturn(L);
+ // Finish building the node for parameters.
+ Builder.markChild(TrailingReturnTokens,
+ syntax::NodeRole::ParametersAndQualifiers_trailingReturn);
+ return WalkUpFromFunctionTypeLoc(L);
+ }
+
+ bool WalkUpFromMemberPointerTypeLoc(MemberPointerTypeLoc L) {
+ auto SR = L.getLocalSourceRange();
+ Builder.foldNode(Builder.getRange(SR),
+ new (allocator()) syntax::MemberPointer, L);
return true;
}
@@ -436,13 +999,13 @@ public:
// and fold resulting nodes.
bool WalkUpFromDeclStmt(DeclStmt *S) {
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::DeclarationStatement);
+ new (allocator()) syntax::DeclarationStatement, S);
return true;
}
bool WalkUpFromNullStmt(NullStmt *S) {
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::EmptyStatement);
+ new (allocator()) syntax::EmptyStatement, S);
return true;
}
@@ -451,7 +1014,7 @@ public:
syntax::NodeRole::IntroducerKeyword);
Builder.markStmtChild(S->getBody(), syntax::NodeRole::BodyStatement);
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::SwitchStatement);
+ new (allocator()) syntax::SwitchStatement, S);
return true;
}
@@ -461,7 +1024,7 @@ public:
Builder.markExprChild(S->getLHS(), syntax::NodeRole::CaseStatement_value);
Builder.markStmtChild(S->getSubStmt(), syntax::NodeRole::BodyStatement);
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::CaseStatement);
+ new (allocator()) syntax::CaseStatement, S);
return true;
}
@@ -470,7 +1033,7 @@ public:
syntax::NodeRole::IntroducerKeyword);
Builder.markStmtChild(S->getSubStmt(), syntax::NodeRole::BodyStatement);
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::DefaultStatement);
+ new (allocator()) syntax::DefaultStatement, S);
return true;
}
@@ -483,7 +1046,7 @@ public:
Builder.markStmtChild(S->getElse(),
syntax::NodeRole::IfStatement_elseStatement);
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::IfStatement);
+ new (allocator()) syntax::IfStatement, S);
return true;
}
@@ -491,7 +1054,7 @@ public:
Builder.markChildToken(S->getForLoc(), syntax::NodeRole::IntroducerKeyword);
Builder.markStmtChild(S->getBody(), syntax::NodeRole::BodyStatement);
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::ForStatement);
+ new (allocator()) syntax::ForStatement, S);
return true;
}
@@ -500,7 +1063,7 @@ public:
syntax::NodeRole::IntroducerKeyword);
Builder.markStmtChild(S->getBody(), syntax::NodeRole::BodyStatement);
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::WhileStatement);
+ new (allocator()) syntax::WhileStatement, S);
return true;
}
@@ -508,7 +1071,7 @@ public:
Builder.markChildToken(S->getContinueLoc(),
syntax::NodeRole::IntroducerKeyword);
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::ContinueStatement);
+ new (allocator()) syntax::ContinueStatement, S);
return true;
}
@@ -516,7 +1079,7 @@ public:
Builder.markChildToken(S->getBreakLoc(),
syntax::NodeRole::IntroducerKeyword);
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::BreakStatement);
+ new (allocator()) syntax::BreakStatement, S);
return true;
}
@@ -526,7 +1089,7 @@ public:
Builder.markExprChild(S->getRetValue(),
syntax::NodeRole::ReturnStatement_value);
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::ReturnStatement);
+ new (allocator()) syntax::ReturnStatement, S);
return true;
}
@@ -534,13 +1097,13 @@ public:
Builder.markChildToken(S->getForLoc(), syntax::NodeRole::IntroducerKeyword);
Builder.markStmtChild(S->getBody(), syntax::NodeRole::BodyStatement);
Builder.foldNode(Builder.getStmtRange(S),
- new (allocator()) syntax::RangeBasedForStatement);
+ new (allocator()) syntax::RangeBasedForStatement, S);
return true;
}
bool WalkUpFromEmptyDecl(EmptyDecl *S) {
- Builder.foldNode(Builder.getRange(S),
- new (allocator()) syntax::EmptyDeclaration);
+ Builder.foldNode(Builder.getDeclarationRange(S),
+ new (allocator()) syntax::EmptyDeclaration, S);
return true;
}
@@ -549,76 +1112,175 @@ public:
syntax::NodeRole::StaticAssertDeclaration_condition);
Builder.markExprChild(S->getMessage(),
syntax::NodeRole::StaticAssertDeclaration_message);
- Builder.foldNode(Builder.getRange(S),
- new (allocator()) syntax::StaticAssertDeclaration);
+ Builder.foldNode(Builder.getDeclarationRange(S),
+ new (allocator()) syntax::StaticAssertDeclaration, S);
return true;
}
bool WalkUpFromLinkageSpecDecl(LinkageSpecDecl *S) {
- Builder.foldNode(Builder.getRange(S),
- new (allocator()) syntax::LinkageSpecificationDeclaration);
+ Builder.foldNode(Builder.getDeclarationRange(S),
+ new (allocator()) syntax::LinkageSpecificationDeclaration,
+ S);
return true;
}
bool WalkUpFromNamespaceAliasDecl(NamespaceAliasDecl *S) {
- Builder.foldNode(Builder.getRange(S),
- new (allocator()) syntax::NamespaceAliasDefinition);
+ Builder.foldNode(Builder.getDeclarationRange(S),
+ new (allocator()) syntax::NamespaceAliasDefinition, S);
return true;
}
bool WalkUpFromUsingDirectiveDecl(UsingDirectiveDecl *S) {
- Builder.foldNode(Builder.getRange(S),
- new (allocator()) syntax::UsingNamespaceDirective);
+ Builder.foldNode(Builder.getDeclarationRange(S),
+ new (allocator()) syntax::UsingNamespaceDirective, S);
return true;
}
bool WalkUpFromUsingDecl(UsingDecl *S) {
- Builder.foldNode(Builder.getRange(S),
- new (allocator()) syntax::UsingDeclaration);
+ Builder.foldNode(Builder.getDeclarationRange(S),
+ new (allocator()) syntax::UsingDeclaration, S);
return true;
}
bool WalkUpFromUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *S) {
- Builder.foldNode(Builder.getRange(S),
- new (allocator()) syntax::UsingDeclaration);
+ Builder.foldNode(Builder.getDeclarationRange(S),
+ new (allocator()) syntax::UsingDeclaration, S);
return true;
}
bool WalkUpFromUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *S) {
- Builder.foldNode(Builder.getRange(S),
- new (allocator()) syntax::UsingDeclaration);
+ Builder.foldNode(Builder.getDeclarationRange(S),
+ new (allocator()) syntax::UsingDeclaration, S);
return true;
}
bool WalkUpFromTypeAliasDecl(TypeAliasDecl *S) {
- Builder.foldNode(Builder.getRange(S),
- new (allocator()) syntax::TypeAliasDeclaration);
+ Builder.foldNode(Builder.getDeclarationRange(S),
+ new (allocator()) syntax::TypeAliasDeclaration, S);
return true;
}
private:
+ template <class T> SourceLocation getQualifiedNameStart(T *D) {
+ static_assert((std::is_base_of<DeclaratorDecl, T>::value ||
+ std::is_base_of<TypedefNameDecl, T>::value),
+ "only DeclaratorDecl and TypedefNameDecl are supported.");
+
+ auto DN = D->getDeclName();
+ bool IsAnonymous = DN.isIdentifier() && !DN.getAsIdentifierInfo();
+ if (IsAnonymous)
+ return SourceLocation();
+
+ if (const auto *DD = llvm::dyn_cast<DeclaratorDecl>(D)) {
+ if (DD->getQualifierLoc()) {
+ return DD->getQualifierLoc().getBeginLoc();
+ }
+ }
+
+ return D->getLocation();
+ }
+
+ SourceRange getInitializerRange(Decl *D) {
+ if (auto *V = llvm::dyn_cast<VarDecl>(D)) {
+ auto *I = V->getInit();
+ // Initializers in range-based-for are not part of the declarator
+ if (I && !V->isCXXForRangeDecl())
+ return I->getSourceRange();
+ }
+
+ return SourceRange();
+ }
+
+ /// Folds SimpleDeclarator node (if present) and in case this is the last
+ /// declarator in the chain it also folds SimpleDeclaration node.
+ template <class T> bool processDeclaratorAndDeclaration(T *D) {
+ SourceRange Initializer = getInitializerRange(D);
+ auto Range = getDeclaratorRange(Builder.sourceManager(),
+ D->getTypeSourceInfo()->getTypeLoc(),
+ getQualifiedNameStart(D), Initializer);
+
+ // There doesn't have to be a declarator (e.g. `void foo(int)` only has
+ // declaration, but no declarator).
+ if (Range.getBegin().isValid()) {
+ auto *N = new (allocator()) syntax::SimpleDeclarator;
+ Builder.foldNode(Builder.getRange(Range), N, nullptr);
+ Builder.markChild(N, syntax::NodeRole::SimpleDeclaration_declarator);
+ }
+
+ if (Builder.isResponsibleForCreatingDeclaration(D)) {
+ Builder.foldNode(Builder.getDeclarationRange(D),
+ new (allocator()) syntax::SimpleDeclaration, D);
+ }
+ return true;
+ }
+
+ /// Returns the range of the built node.
+ syntax::TrailingReturnType *BuildTrailingReturn(FunctionProtoTypeLoc L) {
+ assert(L.getTypePtr()->hasTrailingReturn());
+
+ auto ReturnedType = L.getReturnLoc();
+ // Build node for the declarator, if any.
+ auto ReturnDeclaratorRange =
+ getDeclaratorRange(this->Builder.sourceManager(), ReturnedType,
+ /*Name=*/SourceLocation(),
+ /*Initializer=*/SourceLocation());
+ syntax::SimpleDeclarator *ReturnDeclarator = nullptr;
+ if (ReturnDeclaratorRange.isValid()) {
+ ReturnDeclarator = new (allocator()) syntax::SimpleDeclarator;
+ Builder.foldNode(Builder.getRange(ReturnDeclaratorRange),
+ ReturnDeclarator, nullptr);
+ }
+
+ // Build node for trailing return type.
+ auto Return = Builder.getRange(ReturnedType.getSourceRange());
+ const auto *Arrow = Return.begin() - 1;
+ assert(Arrow->kind() == tok::arrow);
+ auto Tokens = llvm::makeArrayRef(Arrow, Return.end());
+ Builder.markChildToken(Arrow, syntax::NodeRole::ArrowToken);
+ if (ReturnDeclarator)
+ Builder.markChild(ReturnDeclarator,
+ syntax::NodeRole::TrailingReturnType_declarator);
+ auto *R = new (allocator()) syntax::TrailingReturnType;
+ Builder.foldNode(Tokens, R, L);
+ return R;
+ }
+
+ void foldExplicitTemplateInstantiation(
+ ArrayRef<syntax::Token> Range, const syntax::Token *ExternKW,
+ const syntax::Token *TemplateKW,
+ syntax::SimpleDeclaration *InnerDeclaration, Decl *From) {
+ assert(!ExternKW || ExternKW->kind() == tok::kw_extern);
+ assert(TemplateKW && TemplateKW->kind() == tok::kw_template);
+ Builder.markChildToken(ExternKW, syntax::NodeRole::ExternKeyword);
+ Builder.markChildToken(TemplateKW, syntax::NodeRole::IntroducerKeyword);
+ Builder.markChild(
+ InnerDeclaration,
+ syntax::NodeRole::ExplicitTemplateInstantiation_declaration);
+ Builder.foldNode(
+ Range, new (allocator()) syntax::ExplicitTemplateInstantiation, From);
+ }
+
+ syntax::TemplateDeclaration *foldTemplateDeclaration(
+ ArrayRef<syntax::Token> Range, const syntax::Token *TemplateKW,
+ ArrayRef<syntax::Token> TemplatedDeclaration, Decl *From) {
+ assert(TemplateKW && TemplateKW->kind() == tok::kw_template);
+ Builder.markChildToken(TemplateKW, syntax::NodeRole::IntroducerKeyword);
+
+ auto *N = new (allocator()) syntax::TemplateDeclaration;
+ Builder.foldNode(Range, N, From);
+ Builder.markChild(N, syntax::NodeRole::TemplateDeclaration_declaration);
+ return N;
+ }
+
/// A small helper to save some typing.
llvm::BumpPtrAllocator &allocator() { return Builder.allocator(); }
syntax::TreeBuilder &Builder;
- const LangOptions &LangOpts;
+ const ASTContext &Context;
};
} // namespace
-void syntax::TreeBuilder::foldNode(llvm::ArrayRef<syntax::Token> Range,
- syntax::Tree *New) {
- Pending.foldChildren(Arena, Range, New);
-}
-
-void syntax::TreeBuilder::noticeDeclaratorRange(
- llvm::ArrayRef<syntax::Token> Range) {
- if (Pending.extendDelayedFold(Range))
- return;
- Pending.foldChildrenDelayed(Range,
- new (allocator()) syntax::SimpleDeclaration);
-}
-
-void syntax::TreeBuilder::noticeDeclaratorWithoutSemicolon(Decl *D) {
+void syntax::TreeBuilder::noticeDeclWithoutSemicolon(Decl *D) {
DeclsWithoutSemicolons.insert(D);
}
@@ -628,31 +1290,55 @@ void syntax::TreeBuilder::markChildToken(SourceLocation Loc, NodeRole Role) {
Pending.assignRole(*findToken(Loc), Role);
}
+void syntax::TreeBuilder::markChildToken(const syntax::Token *T, NodeRole R) {
+ if (!T)
+ return;
+ Pending.assignRole(*T, R);
+}
+
+void syntax::TreeBuilder::markChild(syntax::Node *N, NodeRole R) {
+ assert(N);
+ setRole(N, R);
+}
+
+void syntax::TreeBuilder::markChild(ASTPtr N, NodeRole R) {
+ auto *SN = Mapping.find(N);
+ assert(SN != nullptr);
+ setRole(SN, R);
+}
+
void syntax::TreeBuilder::markStmtChild(Stmt *Child, NodeRole Role) {
if (!Child)
return;
- auto Range = getStmtRange(Child);
- // This is an expression in a statement position, consume the trailing
- // semicolon and form an 'ExpressionStatement' node.
- if (auto *E = dyn_cast<Expr>(Child)) {
- Pending.assignRole(getExprRange(E),
- NodeRole::ExpressionStatement_expression);
- // (!) 'getRange(Stmt)' ensures this already covers a trailing semicolon.
- Pending.foldChildren(Arena, Range,
- new (allocator()) syntax::ExpressionStatement);
- }
- Pending.assignRole(Range, Role);
+ syntax::Tree *ChildNode;
+ if (Expr *ChildExpr = dyn_cast<Expr>(Child)) {
+ // This is an expression in a statement position, consume the trailing
+ // semicolon and form an 'ExpressionStatement' node.
+ markExprChild(ChildExpr, NodeRole::ExpressionStatement_expression);
+ ChildNode = new (allocator()) syntax::ExpressionStatement;
+ // (!) 'getStmtRange()' ensures this covers a trailing semicolon.
+ Pending.foldChildren(Arena, getStmtRange(Child), ChildNode);
+ } else {
+ ChildNode = Mapping.find(Child);
+ }
+ assert(ChildNode != nullptr);
+ setRole(ChildNode, Role);
}
void syntax::TreeBuilder::markExprChild(Expr *Child, NodeRole Role) {
if (!Child)
return;
+ Child = Child->IgnoreImplicit();
- Pending.assignRole(getExprRange(Child), Role);
+ syntax::Tree *ChildNode = Mapping.find(Child);
+ assert(ChildNode != nullptr);
+ setRole(ChildNode, Role);
}
const syntax::Token *syntax::TreeBuilder::findToken(SourceLocation L) const {
+ if (L.isInvalid())
+ return nullptr;
auto It = LocationToToken.find(L.getRawEncoding());
assert(It != LocationToToken.end());
return It->second;
diff --git a/clang/lib/Tooling/Syntax/Mutations.cpp b/clang/lib/Tooling/Syntax/Mutations.cpp
index 72458528202e..24048b297a11 100644
--- a/clang/lib/Tooling/Syntax/Mutations.cpp
+++ b/clang/lib/Tooling/Syntax/Mutations.cpp
@@ -35,7 +35,7 @@ public:
assert(!New->isDetached());
assert(Role != NodeRole::Detached);
- New->Role = static_cast<unsigned>(Role);
+ New->setRole(Role);
auto *P = Anchor->parent();
P->replaceChildRangeLowLevel(Anchor, Anchor, New);
diff --git a/clang/lib/Tooling/Syntax/Nodes.cpp b/clang/lib/Tooling/Syntax/Nodes.cpp
index 5b0c5107c134..2435ae0a91dd 100644
--- a/clang/lib/Tooling/Syntax/Nodes.cpp
+++ b/clang/lib/Tooling/Syntax/Nodes.cpp
@@ -18,6 +18,38 @@ llvm::raw_ostream &syntax::operator<<(llvm::raw_ostream &OS, NodeKind K) {
return OS << "TranslationUnit";
case NodeKind::UnknownExpression:
return OS << "UnknownExpression";
+ case NodeKind::ParenExpression:
+ return OS << "ParenExpression";
+ case NodeKind::IntegerLiteralExpression:
+ return OS << "IntegerLiteralExpression";
+ case NodeKind::CharacterLiteralExpression:
+ return OS << "CharacterLiteralExpression";
+ case NodeKind::FloatingLiteralExpression:
+ return OS << "FloatingLiteralExpression";
+ case NodeKind::StringLiteralExpression:
+ return OS << "StringLiteralExpression";
+ case NodeKind::BoolLiteralExpression:
+ return OS << "BoolLiteralExpression";
+ case NodeKind::CxxNullPtrExpression:
+ return OS << "CxxNullPtrExpression";
+ case NodeKind::IntegerUserDefinedLiteralExpression:
+ return OS << "IntegerUserDefinedLiteralExpression";
+ case NodeKind::FloatUserDefinedLiteralExpression:
+ return OS << "FloatUserDefinedLiteralExpression";
+ case NodeKind::CharUserDefinedLiteralExpression:
+ return OS << "CharUserDefinedLiteralExpression";
+ case NodeKind::StringUserDefinedLiteralExpression:
+ return OS << "StringUserDefinedLiteralExpression";
+ case NodeKind::PrefixUnaryOperatorExpression:
+ return OS << "PrefixUnaryOperatorExpression";
+ case NodeKind::PostfixUnaryOperatorExpression:
+ return OS << "PostfixUnaryOperatorExpression";
+ case NodeKind::BinaryOperatorExpression:
+ return OS << "BinaryOperatorExpression";
+ case NodeKind::UnqualifiedId:
+ return OS << "UnqualifiedId";
+ case NodeKind::IdExpression:
+ return OS << "IdExpression";
case NodeKind::UnknownStatement:
return OS << "UnknownStatement";
case NodeKind::DeclarationStatement:
@@ -58,6 +90,10 @@ llvm::raw_ostream &syntax::operator<<(llvm::raw_ostream &OS, NodeKind K) {
return OS << "LinkageSpecificationDeclaration";
case NodeKind::SimpleDeclaration:
return OS << "SimpleDeclaration";
+ case NodeKind::TemplateDeclaration:
+ return OS << "TemplateDeclaration";
+ case NodeKind::ExplicitTemplateInstantiation:
+ return OS << "ExplicitTemplateInstantiation";
case NodeKind::NamespaceDefinition:
return OS << "NamespaceDefinition";
case NodeKind::NamespaceAliasDefinition:
@@ -68,6 +104,22 @@ llvm::raw_ostream &syntax::operator<<(llvm::raw_ostream &OS, NodeKind K) {
return OS << "UsingDeclaration";
case NodeKind::TypeAliasDeclaration:
return OS << "TypeAliasDeclaration";
+ case NodeKind::SimpleDeclarator:
+ return OS << "SimpleDeclarator";
+ case NodeKind::ParenDeclarator:
+ return OS << "ParenDeclarator";
+ case NodeKind::ArraySubscript:
+ return OS << "ArraySubscript";
+ case NodeKind::TrailingReturnType:
+ return OS << "TrailingReturnType";
+ case NodeKind::ParametersAndQualifiers:
+ return OS << "ParametersAndQualifiers";
+ case NodeKind::MemberPointer:
+ return OS << "MemberPointer";
+ case NodeKind::NameSpecifier:
+ return OS << "NameSpecifier";
+ case NodeKind::NestedNameSpecifier:
+ return OS << "NestedNameSpecifier";
}
llvm_unreachable("unknown node kind");
}
@@ -84,6 +136,12 @@ llvm::raw_ostream &syntax::operator<<(llvm::raw_ostream &OS, NodeRole R) {
return OS << "CloseParen";
case syntax::NodeRole::IntroducerKeyword:
return OS << "IntroducerKeyword";
+ case syntax::NodeRole::LiteralToken:
+ return OS << "LiteralToken";
+ case syntax::NodeRole::ArrowToken:
+ return OS << "ArrowToken";
+ case syntax::NodeRole::ExternKeyword:
+ return OS << "ExternKeyword";
case syntax::NodeRole::BodyStatement:
return OS << "BodyStatement";
case syntax::NodeRole::CaseStatement_value:
@@ -94,6 +152,14 @@ llvm::raw_ostream &syntax::operator<<(llvm::raw_ostream &OS, NodeRole R) {
return OS << "IfStatement_elseKeyword";
case syntax::NodeRole::IfStatement_elseStatement:
return OS << "IfStatement_elseStatement";
+ case syntax::NodeRole::OperatorExpression_operatorToken:
+ return OS << "OperatorExpression_operatorToken";
+ case syntax::NodeRole::UnaryOperatorExpression_operand:
+ return OS << "UnaryOperatorExpression_operand";
+ case syntax::NodeRole::BinaryOperatorExpression_leftHandSide:
+ return OS << "BinaryOperatorExpression_leftHandSide";
+ case syntax::NodeRole::BinaryOperatorExpression_rightHandSide:
+ return OS << "BinaryOperatorExpression_rightHandSide";
case syntax::NodeRole::ReturnStatement_value:
return OS << "ReturnStatement_value";
case syntax::NodeRole::ExpressionStatement_expression:
@@ -104,10 +170,126 @@ llvm::raw_ostream &syntax::operator<<(llvm::raw_ostream &OS, NodeRole R) {
return OS << "StaticAssertDeclaration_condition";
case syntax::NodeRole::StaticAssertDeclaration_message:
return OS << "StaticAssertDeclaration_message";
+ case syntax::NodeRole::SimpleDeclaration_declarator:
+ return OS << "SimpleDeclaration_declarator";
+ case syntax::NodeRole::TemplateDeclaration_declaration:
+ return OS << "TemplateDeclaration_declaration";
+ case syntax::NodeRole::ExplicitTemplateInstantiation_declaration:
+ return OS << "ExplicitTemplateInstantiation_declaration";
+ case syntax::NodeRole::ArraySubscript_sizeExpression:
+ return OS << "ArraySubscript_sizeExpression";
+ case syntax::NodeRole::TrailingReturnType_declarator:
+ return OS << "TrailingReturnType_declarator";
+ case syntax::NodeRole::ParametersAndQualifiers_parameter:
+ return OS << "ParametersAndQualifiers_parameter";
+ case syntax::NodeRole::ParametersAndQualifiers_trailingReturn:
+ return OS << "ParametersAndQualifiers_trailingReturn";
+ case syntax::NodeRole::IdExpression_id:
+ return OS << "IdExpression_id";
+ case syntax::NodeRole::IdExpression_qualifier:
+ return OS << "IdExpression_qualifier";
+ case syntax::NodeRole::NestedNameSpecifier_specifier:
+ return OS << "NestedNameSpecifier_specifier";
+ case syntax::NodeRole::ParenExpression_subExpression:
+ return OS << "ParenExpression_subExpression";
}
llvm_unreachable("invalid role");
}
+std::vector<syntax::NameSpecifier *> syntax::NestedNameSpecifier::specifiers() {
+ std::vector<syntax::NameSpecifier *> Children;
+ for (auto *C = firstChild(); C; C = C->nextSibling()) {
+ assert(C->role() == syntax::NodeRole::NestedNameSpecifier_specifier);
+ Children.push_back(llvm::cast<syntax::NameSpecifier>(C));
+ }
+ return Children;
+}
+
+syntax::NestedNameSpecifier *syntax::IdExpression::qualifier() {
+ return llvm::cast_or_null<syntax::NestedNameSpecifier>(
+ findChild(syntax::NodeRole::IdExpression_qualifier));
+}
+
+syntax::UnqualifiedId *syntax::IdExpression::unqualifiedId() {
+ return llvm::cast_or_null<syntax::UnqualifiedId>(
+ findChild(syntax::NodeRole::IdExpression_id));
+}
+
+syntax::Leaf *syntax::ParenExpression::openParen() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::OpenParen));
+}
+
+syntax::Expression *syntax::ParenExpression::subExpression() {
+ return llvm::cast_or_null<syntax::Expression>(
+ findChild(syntax::NodeRole::ParenExpression_subExpression));
+}
+
+syntax::Leaf *syntax::ParenExpression::closeParen() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::CloseParen));
+}
+
+syntax::Leaf *syntax::IntegerLiteralExpression::literalToken() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::LiteralToken));
+}
+
+syntax::Leaf *syntax::CharacterLiteralExpression::literalToken() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::LiteralToken));
+}
+
+syntax::Leaf *syntax::FloatingLiteralExpression::literalToken() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::LiteralToken));
+}
+
+syntax::Leaf *syntax::StringLiteralExpression::literalToken() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::LiteralToken));
+}
+
+syntax::Leaf *syntax::BoolLiteralExpression::literalToken() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::LiteralToken));
+}
+
+syntax::Leaf *syntax::CxxNullPtrExpression::nullPtrKeyword() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::LiteralToken));
+}
+
+syntax::Leaf *syntax::UserDefinedLiteralExpression::literalToken() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::LiteralToken));
+}
+
+syntax::Expression *syntax::BinaryOperatorExpression::lhs() {
+ return llvm::cast_or_null<syntax::Expression>(
+ findChild(syntax::NodeRole::BinaryOperatorExpression_leftHandSide));
+}
+
+syntax::Leaf *syntax::UnaryOperatorExpression::operatorToken() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::OperatorExpression_operatorToken));
+}
+
+syntax::Expression *syntax::UnaryOperatorExpression::operand() {
+ return llvm::cast_or_null<syntax::Expression>(
+ findChild(syntax::NodeRole::UnaryOperatorExpression_operand));
+}
+
+syntax::Leaf *syntax::BinaryOperatorExpression::operatorToken() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::OperatorExpression_operatorToken));
+}
+
+syntax::Expression *syntax::BinaryOperatorExpression::rhs() {
+ return llvm::cast_or_null<syntax::Expression>(
+ findChild(syntax::NodeRole::BinaryOperatorExpression_rightHandSide));
+}
+
syntax::Leaf *syntax::SwitchStatement::switchKeyword() {
return llvm::cast_or_null<syntax::Leaf>(
findChild(syntax::NodeRole::IntroducerKeyword));
@@ -226,8 +408,8 @@ syntax::Leaf *syntax::CompoundStatement::lbrace() {
std::vector<syntax::Statement *> syntax::CompoundStatement::statements() {
std::vector<syntax::Statement *> Children;
for (auto *C = firstChild(); C; C = C->nextSibling()) {
- if (C->role() == syntax::NodeRole::CompoundStatement_statement)
- Children.push_back(llvm::cast<syntax::Statement>(C));
+ assert(C->role() == syntax::NodeRole::CompoundStatement_statement);
+ Children.push_back(llvm::cast<syntax::Statement>(C));
}
return Children;
}
@@ -246,3 +428,98 @@ syntax::Expression *syntax::StaticAssertDeclaration::message() {
return llvm::cast_or_null<syntax::Expression>(
findChild(syntax::NodeRole::StaticAssertDeclaration_message));
}
+
+std::vector<syntax::SimpleDeclarator *>
+syntax::SimpleDeclaration::declarators() {
+ std::vector<syntax::SimpleDeclarator *> Children;
+ for (auto *C = firstChild(); C; C = C->nextSibling()) {
+ if (C->role() == syntax::NodeRole::SimpleDeclaration_declarator)
+ Children.push_back(llvm::cast<syntax::SimpleDeclarator>(C));
+ }
+ return Children;
+}
+
+syntax::Leaf *syntax::TemplateDeclaration::templateKeyword() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::IntroducerKeyword));
+}
+
+syntax::Declaration *syntax::TemplateDeclaration::declaration() {
+ return llvm::cast_or_null<syntax::Declaration>(
+ findChild(syntax::NodeRole::TemplateDeclaration_declaration));
+}
+
+syntax::Leaf *syntax::ExplicitTemplateInstantiation::templateKeyword() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::IntroducerKeyword));
+}
+
+syntax::Leaf *syntax::ExplicitTemplateInstantiation::externKeyword() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::ExternKeyword));
+}
+
+syntax::Declaration *syntax::ExplicitTemplateInstantiation::declaration() {
+ return llvm::cast_or_null<syntax::Declaration>(
+ findChild(syntax::NodeRole::ExplicitTemplateInstantiation_declaration));
+}
+
+syntax::Leaf *syntax::ParenDeclarator::lparen() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::OpenParen));
+}
+
+syntax::Leaf *syntax::ParenDeclarator::rparen() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::CloseParen));
+}
+
+syntax::Leaf *syntax::ArraySubscript::lbracket() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::OpenParen));
+}
+
+syntax::Expression *syntax::ArraySubscript::sizeExpression() {
+ return llvm::cast_or_null<syntax::Expression>(
+ findChild(syntax::NodeRole::ArraySubscript_sizeExpression));
+}
+
+syntax::Leaf *syntax::ArraySubscript::rbracket() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::CloseParen));
+}
+
+syntax::Leaf *syntax::TrailingReturnType::arrowToken() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::ArrowToken));
+}
+
+syntax::SimpleDeclarator *syntax::TrailingReturnType::declarator() {
+ return llvm::cast_or_null<syntax::SimpleDeclarator>(
+ findChild(syntax::NodeRole::TrailingReturnType_declarator));
+}
+
+syntax::Leaf *syntax::ParametersAndQualifiers::lparen() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::OpenParen));
+}
+
+std::vector<syntax::SimpleDeclaration *>
+syntax::ParametersAndQualifiers::parameters() {
+ std::vector<syntax::SimpleDeclaration *> Children;
+ for (auto *C = firstChild(); C; C = C->nextSibling()) {
+ if (C->role() == syntax::NodeRole::ParametersAndQualifiers_parameter)
+ Children.push_back(llvm::cast<syntax::SimpleDeclaration>(C));
+ }
+ return Children;
+}
+
+syntax::Leaf *syntax::ParametersAndQualifiers::rparen() {
+ return llvm::cast_or_null<syntax::Leaf>(
+ findChild(syntax::NodeRole::CloseParen));
+}
+
+syntax::TrailingReturnType *syntax::ParametersAndQualifiers::trailingReturn() {
+ return llvm::cast_or_null<syntax::TrailingReturnType>(
+ findChild(syntax::NodeRole::ParametersAndQualifiers_trailingReturn));
+}
diff --git a/clang/lib/Tooling/Syntax/Tokens.cpp b/clang/lib/Tooling/Syntax/Tokens.cpp
index 3df1c064923a..c6b904822b8b 100644
--- a/clang/lib/Tooling/Syntax/Tokens.cpp
+++ b/clang/lib/Tooling/Syntax/Tokens.cpp
@@ -35,6 +35,69 @@
using namespace clang;
using namespace clang::syntax;
+namespace {
+// Finds the smallest consecutive subsequence of Toks that covers R.
+llvm::ArrayRef<syntax::Token>
+getTokensCovering(llvm::ArrayRef<syntax::Token> Toks, SourceRange R,
+ const SourceManager &SM) {
+ if (R.isInvalid())
+ return {};
+ const syntax::Token *Begin =
+ llvm::partition_point(Toks, [&](const syntax::Token &T) {
+ return SM.isBeforeInTranslationUnit(T.location(), R.getBegin());
+ });
+ const syntax::Token *End =
+ llvm::partition_point(Toks, [&](const syntax::Token &T) {
+ return !SM.isBeforeInTranslationUnit(R.getEnd(), T.location());
+ });
+ if (Begin > End)
+ return {};
+ return {Begin, End};
+}
+
+// Finds the smallest expansion range that contains expanded tokens First and
+// Last, e.g.:
+// #define ID(x) x
+// ID(ID(ID(a1) a2))
+// ~~ -> a1
+// ~~ -> a2
+// ~~~~~~~~~ -> a1 a2
+SourceRange findCommonRangeForMacroArgs(const syntax::Token &First,
+ const syntax::Token &Last,
+ const SourceManager &SM) {
+ SourceRange Res;
+ auto FirstLoc = First.location(), LastLoc = Last.location();
+  // Keep traversing up the spelling chain as long as tokens are part of the
+ // same expansion.
+ while (!FirstLoc.isFileID() && !LastLoc.isFileID()) {
+ auto ExpInfoFirst = SM.getSLocEntry(SM.getFileID(FirstLoc)).getExpansion();
+ auto ExpInfoLast = SM.getSLocEntry(SM.getFileID(LastLoc)).getExpansion();
+ // Stop if expansions have diverged.
+ if (ExpInfoFirst.getExpansionLocStart() !=
+ ExpInfoLast.getExpansionLocStart())
+ break;
+ // Do not continue into macro bodies.
+ if (!ExpInfoFirst.isMacroArgExpansion() ||
+ !ExpInfoLast.isMacroArgExpansion())
+ break;
+ FirstLoc = SM.getImmediateSpellingLoc(FirstLoc);
+ LastLoc = SM.getImmediateSpellingLoc(LastLoc);
+ // Update the result afterwards, as we want the tokens that triggered the
+ // expansion.
+ Res = {FirstLoc, LastLoc};
+ }
+ // Normally mapping back to expansion location here only changes FileID, as
+ // we've already found some tokens expanded from the same macro argument, and
+ // they should map to a consecutive subset of spelled tokens. Unfortunately
+// SourceManager::isBeforeInTranslationUnit discriminates source locations
+ // based on their FileID in addition to offsets. So even though we are
+ // referring to same tokens, SourceManager might tell us that one is before
+ // the other if they've got different FileIDs.
+ return SM.getExpansionRange(CharSourceRange(Res, true)).getAsRange();
+}
+
+} // namespace
+
syntax::Token::Token(SourceLocation Location, unsigned Length,
tok::TokenKind Kind)
: Location(Location), Length(Length), Kind(Kind) {
@@ -67,7 +130,8 @@ FileRange syntax::Token::range(const SourceManager &SM,
auto F = First.range(SM);
auto L = Last.range(SM);
assert(F.file() == L.file() && "tokens from different files");
- assert((F == L || F.endOffset() <= L.beginOffset()) && "wrong order of tokens");
+ assert((F == L || F.endOffset() <= L.beginOffset()) &&
+ "wrong order of tokens");
return FileRange(F.file(), F.beginOffset(), L.endOffset());
}
@@ -120,19 +184,7 @@ llvm::StringRef FileRange::text(const SourceManager &SM) const {
}
llvm::ArrayRef<syntax::Token> TokenBuffer::expandedTokens(SourceRange R) const {
- if (R.isInvalid())
- return {};
- const Token *Begin =
- llvm::partition_point(expandedTokens(), [&](const syntax::Token &T) {
- return SourceMgr->isBeforeInTranslationUnit(T.location(), R.getBegin());
- });
- const Token *End =
- llvm::partition_point(expandedTokens(), [&](const syntax::Token &T) {
- return !SourceMgr->isBeforeInTranslationUnit(R.getEnd(), T.location());
- });
- if (Begin > End)
- return {};
- return {Begin, End};
+ return getTokensCovering(expandedTokens(), R, *SourceMgr);
}
CharSourceRange FileRange::toCharRange(const SourceManager &SM) const {
@@ -161,19 +213,109 @@ TokenBuffer::spelledForExpandedToken(const syntax::Token *Expanded) const {
// Our token could only be produced by the previous mapping.
if (It == File.Mappings.begin()) {
// No previous mapping, no need to modify offsets.
- return {&File.SpelledTokens[ExpandedIndex - File.BeginExpanded], nullptr};
+ return {&File.SpelledTokens[ExpandedIndex - File.BeginExpanded],
+ /*Mapping=*/nullptr};
}
--It; // 'It' now points to last mapping that started before our token.
// Check if the token is part of the mapping.
if (ExpandedIndex < It->EndExpanded)
- return {&File.SpelledTokens[It->BeginSpelled], /*Mapping*/ &*It};
+ return {&File.SpelledTokens[It->BeginSpelled], /*Mapping=*/&*It};
// Not part of the mapping, use the index from previous mapping to compute the
// corresponding spelled token.
return {
&File.SpelledTokens[It->EndSpelled + (ExpandedIndex - It->EndExpanded)],
- /*Mapping*/ nullptr};
+ /*Mapping=*/nullptr};
+}
+
+const TokenBuffer::Mapping *
+TokenBuffer::mappingStartingBeforeSpelled(const MarkedFile &F,
+ const syntax::Token *Spelled) {
+ assert(F.SpelledTokens.data() <= Spelled);
+ unsigned SpelledI = Spelled - F.SpelledTokens.data();
+ assert(SpelledI < F.SpelledTokens.size());
+
+ auto It = llvm::partition_point(F.Mappings, [SpelledI](const Mapping &M) {
+ return M.BeginSpelled <= SpelledI;
+ });
+ if (It == F.Mappings.begin())
+ return nullptr;
+ --It;
+ return &*It;
+}
+
+llvm::SmallVector<llvm::ArrayRef<syntax::Token>, 1>
+TokenBuffer::expandedForSpelled(llvm::ArrayRef<syntax::Token> Spelled) const {
+ if (Spelled.empty())
+ return {};
+ assert(Spelled.front().location().isFileID());
+
+ auto FID = sourceManager().getFileID(Spelled.front().location());
+ auto It = Files.find(FID);
+ assert(It != Files.end());
+
+ const MarkedFile &File = It->second;
+ // `Spelled` must be a subrange of `File.SpelledTokens`.
+ assert(File.SpelledTokens.data() <= Spelled.data());
+ assert(&Spelled.back() <=
+ File.SpelledTokens.data() + File.SpelledTokens.size());
+#ifndef NDEBUG
+ auto T1 = Spelled.back().location();
+ auto T2 = File.SpelledTokens.back().location();
+ assert(T1 == T2 || sourceManager().isBeforeInTranslationUnit(T1, T2));
+#endif
+
+ auto *FrontMapping = mappingStartingBeforeSpelled(File, &Spelled.front());
+ unsigned SpelledFrontI = &Spelled.front() - File.SpelledTokens.data();
+ assert(SpelledFrontI < File.SpelledTokens.size());
+ unsigned ExpandedBegin;
+ if (!FrontMapping) {
+ // No mapping that starts before the first token of Spelled, we don't have
+ // to modify offsets.
+ ExpandedBegin = File.BeginExpanded + SpelledFrontI;
+ } else if (SpelledFrontI < FrontMapping->EndSpelled) {
+ // This mapping applies to Spelled tokens.
+ if (SpelledFrontI != FrontMapping->BeginSpelled) {
+ // Spelled tokens don't cover the entire mapping, returning empty result.
+ return {}; // FIXME: support macro arguments.
+ }
+ // Spelled tokens start at the beginning of this mapping.
+ ExpandedBegin = FrontMapping->BeginExpanded;
+ } else {
+ // Spelled tokens start after the mapping ends (they start in the hole
+ // between 2 mappings, or between a mapping and end of the file).
+ ExpandedBegin =
+ FrontMapping->EndExpanded + (SpelledFrontI - FrontMapping->EndSpelled);
+ }
+
+ auto *BackMapping = mappingStartingBeforeSpelled(File, &Spelled.back());
+ unsigned SpelledBackI = &Spelled.back() - File.SpelledTokens.data();
+ unsigned ExpandedEnd;
+ if (!BackMapping) {
+ // No mapping that starts before the last token of Spelled, we don't have to
+ // modify offsets.
+ ExpandedEnd = File.BeginExpanded + SpelledBackI + 1;
+ } else if (SpelledBackI < BackMapping->EndSpelled) {
+ // This mapping applies to Spelled tokens.
+ if (SpelledBackI + 1 != BackMapping->EndSpelled) {
+ // Spelled tokens don't cover the entire mapping, returning empty result.
+ return {}; // FIXME: support macro arguments.
+ }
+ ExpandedEnd = BackMapping->EndExpanded;
+ } else {
+ // Spelled tokens end after the mapping ends.
+ ExpandedEnd =
+ BackMapping->EndExpanded + (SpelledBackI - BackMapping->EndSpelled) + 1;
+ }
+
+ assert(ExpandedBegin < ExpandedTokens.size());
+ assert(ExpandedEnd < ExpandedTokens.size());
+ // Avoid returning empty ranges.
+ if (ExpandedBegin == ExpandedEnd)
+ return {};
+ return {llvm::makeArrayRef(ExpandedTokens.data() + ExpandedBegin,
+ ExpandedTokens.data() + ExpandedEnd)};
}
llvm::ArrayRef<syntax::Token> TokenBuffer::spelledTokens(FileID FID) const {
@@ -182,9 +324,20 @@ llvm::ArrayRef<syntax::Token> TokenBuffer::spelledTokens(FileID FID) const {
return It->second.SpelledTokens;
}
+const syntax::Token *TokenBuffer::spelledTokenAt(SourceLocation Loc) const {
+ assert(Loc.isFileID());
+ const auto *Tok = llvm::partition_point(
+ spelledTokens(SourceMgr->getFileID(Loc)),
+ [&](const syntax::Token &Tok) { return Tok.location() < Loc; });
+ if (!Tok || Tok->location() != Loc)
+ return nullptr;
+ return Tok;
+}
+
std::string TokenBuffer::Mapping::str() const {
- return llvm::formatv("spelled tokens: [{0},{1}), expanded tokens: [{2},{3})",
- BeginSpelled, EndSpelled, BeginExpanded, EndExpanded);
+ return std::string(
+ llvm::formatv("spelled tokens: [{0},{1}), expanded tokens: [{2},{3})",
+ BeginSpelled, EndSpelled, BeginExpanded, EndExpanded));
}
llvm::Optional<llvm::ArrayRef<syntax::Token>>
@@ -194,8 +347,6 @@ TokenBuffer::spelledForExpanded(llvm::ArrayRef<syntax::Token> Expanded) const {
if (Expanded.empty())
return llvm::None;
- // FIXME: also allow changes uniquely mapping to macro arguments.
-
const syntax::Token *BeginSpelled;
const Mapping *BeginMapping;
std::tie(BeginSpelled, BeginMapping) =
@@ -213,12 +364,28 @@ TokenBuffer::spelledForExpanded(llvm::ArrayRef<syntax::Token> Expanded) const {
const MarkedFile &File = Files.find(FID)->second;
- // Do not allow changes that cross macro expansion boundaries.
+ // If both tokens are coming from a macro argument expansion, try and map to
+ // smallest part of the macro argument. BeginMapping && LastMapping check is
+ // only for performance, they are a prerequisite for Expanded.front() and
+ // Expanded.back() being part of a macro arg expansion.
+ if (BeginMapping && LastMapping &&
+ SourceMgr->isMacroArgExpansion(Expanded.front().location()) &&
+ SourceMgr->isMacroArgExpansion(Expanded.back().location())) {
+ auto CommonRange = findCommonRangeForMacroArgs(Expanded.front(),
+ Expanded.back(), *SourceMgr);
+ // It might be the case that tokens are arguments of different macro calls,
+ // in that case we should continue with the logic below instead of returning
+ // an empty range.
+ if (CommonRange.isValid())
+ return getTokensCovering(File.SpelledTokens, CommonRange, *SourceMgr);
+ }
+
+ // Do not allow changes that doesn't cover full expansion.
unsigned BeginExpanded = Expanded.begin() - ExpandedTokens.data();
unsigned EndExpanded = Expanded.end() - ExpandedTokens.data();
- if (BeginMapping && BeginMapping->BeginExpanded < BeginExpanded)
+ if (BeginMapping && BeginExpanded != BeginMapping->BeginExpanded)
return llvm::None;
- if (LastMapping && EndExpanded < LastMapping->EndExpanded)
+ if (LastMapping && LastMapping->EndExpanded != EndExpanded)
return llvm::None;
// All is good, return the result.
return llvm::makeArrayRef(
@@ -253,24 +420,30 @@ TokenBuffer::expansionStartingAt(const syntax::Token *Spelled) const {
ExpandedTokens.data() + M->EndExpanded);
return E;
}
-
llvm::ArrayRef<syntax::Token>
syntax::spelledTokensTouching(SourceLocation Loc,
- const syntax::TokenBuffer &Tokens) {
+ llvm::ArrayRef<syntax::Token> Tokens) {
assert(Loc.isFileID());
- llvm::ArrayRef<syntax::Token> All =
- Tokens.spelledTokens(Tokens.sourceManager().getFileID(Loc));
+
auto *Right = llvm::partition_point(
- All, [&](const syntax::Token &Tok) { return Tok.location() < Loc; });
- bool AcceptRight = Right != All.end() && Right->location() <= Loc;
- bool AcceptLeft = Right != All.begin() && (Right - 1)->endLocation() >= Loc;
+ Tokens, [&](const syntax::Token &Tok) { return Tok.location() < Loc; });
+ bool AcceptRight = Right != Tokens.end() && Right->location() <= Loc;
+ bool AcceptLeft =
+ Right != Tokens.begin() && (Right - 1)->endLocation() >= Loc;
return llvm::makeArrayRef(Right - (AcceptLeft ? 1 : 0),
Right + (AcceptRight ? 1 : 0));
}
+llvm::ArrayRef<syntax::Token>
+syntax::spelledTokensTouching(SourceLocation Loc,
+ const syntax::TokenBuffer &Tokens) {
+ return spelledTokensTouching(
+ Loc, Tokens.spelledTokens(Tokens.sourceManager().getFileID(Loc)));
+}
+
const syntax::Token *
syntax::spelledIdentifierTouching(SourceLocation Loc,
- const syntax::TokenBuffer &Tokens) {
+ llvm::ArrayRef<syntax::Token> Tokens) {
for (const syntax::Token &Tok : spelledTokensTouching(Loc, Tokens)) {
if (Tok.kind() == tok::identifier)
return &Tok;
@@ -278,6 +451,13 @@ syntax::spelledIdentifierTouching(SourceLocation Loc,
return nullptr;
}
+const syntax::Token *
+syntax::spelledIdentifierTouching(SourceLocation Loc,
+ const syntax::TokenBuffer &Tokens) {
+ return spelledIdentifierTouching(
+ Loc, Tokens.spelledTokens(Tokens.sourceManager().getFileID(Loc)));
+}
+
std::vector<const syntax::Token *>
TokenBuffer::macroExpansions(FileID FID) const {
auto FileIt = Files.find(FID);
@@ -293,7 +473,8 @@ TokenBuffer::macroExpansions(FileID FID) const {
return Expansions;
}
-std::vector<syntax::Token> syntax::tokenize(FileID FID, const SourceManager &SM,
+std::vector<syntax::Token> syntax::tokenize(const FileRange &FR,
+ const SourceManager &SM,
const LangOptions &LO) {
std::vector<syntax::Token> Tokens;
IdentifierTable Identifiers(LO);
@@ -308,18 +489,28 @@ std::vector<syntax::Token> syntax::tokenize(FileID FID, const SourceManager &SM,
Tokens.push_back(syntax::Token(T));
};
- Lexer L(FID, SM.getBuffer(FID), SM, LO);
+ auto SrcBuffer = SM.getBufferData(FR.file());
+ Lexer L(SM.getLocForStartOfFile(FR.file()), LO, SrcBuffer.data(),
+ SrcBuffer.data() + FR.beginOffset(),
+ // We can't make BufEnd point to FR.endOffset, as Lexer requires a
+ // null terminated buffer.
+ SrcBuffer.data() + SrcBuffer.size());
clang::Token T;
- while (!L.LexFromRawLexer(T))
+ while (!L.LexFromRawLexer(T) && L.getCurrentBufferOffset() < FR.endOffset())
AddToken(T);
- // 'eof' is only the last token if the input is null-terminated. Never store
- // it, for consistency.
- if (T.getKind() != tok::eof)
+ // LexFromRawLexer returns true when it parses the last token of the file, add
+ // it iff it starts within the range we are interested in.
+ if (SM.getFileOffset(T.getLocation()) < FR.endOffset())
AddToken(T);
return Tokens;
}
+std::vector<syntax::Token> syntax::tokenize(FileID FID, const SourceManager &SM,
+ const LangOptions &LO) {
+ return tokenize(syntax::FileRange(FID, 0, SM.getFileIDSize(FID)), SM, LO);
+}
+
/// Records information reqired to construct mappings for the token buffer that
/// we are collecting.
class TokenCollector::CollectPPExpansions : public PPCallbacks {
@@ -335,14 +526,38 @@ public:
SourceRange Range, const MacroArgs *Args) override {
if (!Collector)
return;
- // Only record top-level expansions, not those where:
+ const auto &SM = Collector->PP.getSourceManager();
+ // Only record top-level expansions that directly produce expanded tokens.
+ // This excludes those where:
// - the macro use is inside a macro body,
// - the macro appears in an argument to another macro.
- if (!MacroNameTok.getLocation().isFileID() ||
- (LastExpansionEnd.isValid() &&
- Collector->PP.getSourceManager().isBeforeInTranslationUnit(
- Range.getBegin(), LastExpansionEnd)))
+ // However macro expansion isn't really a tree, it's token rewrite rules,
+ // so there are other cases, e.g.
+ // #define B(X) X
+ // #define A 1 + B
+ // A(2)
+ // Both A and B produce expanded tokens, though the macro name 'B' comes
+ // from an expansion. The best we can do is merge the mappings for both.
+
+ // The *last* token of any top-level macro expansion must be in a file.
+ // (In the example above, see the closing paren of the expansion of B).
+ if (!Range.getEnd().isFileID())
+ return;
+ // If there's a current expansion that encloses this one, this one can't be
+ // top-level.
+ if (LastExpansionEnd.isValid() &&
+ !SM.isBeforeInTranslationUnit(LastExpansionEnd, Range.getEnd()))
return;
+
+ // If the macro invocation (B) starts in a macro (A) but ends in a file,
+ // we'll create a merged mapping for A + B by overwriting the endpoint for
+ // A's startpoint.
+ if (!Range.getBegin().isFileID()) {
+ Range.setBegin(SM.getExpansionLoc(Range.getBegin()));
+ assert(Collector->Expansions.count(Range.getBegin().getRawEncoding()) &&
+ "Overlapping macros should have same expansion location");
+ }
+
Collector->Expansions[Range.getBegin().getRawEncoding()] = Range.getEnd();
LastExpansionEnd = Range.getEnd();
}
@@ -399,197 +614,180 @@ public:
}
TokenBuffer build() && {
- buildSpelledTokens();
-
- // Walk over expanded tokens and spelled tokens in parallel, building the
- // mappings between those using source locations.
- // To correctly recover empty macro expansions, we also take locations
- // reported to PPCallbacks::MacroExpands into account as we do not have any
- // expanded tokens with source locations to guide us.
-
- // The 'eof' token is special, it is not part of spelled token stream. We
- // handle it separately at the end.
assert(!Result.ExpandedTokens.empty());
assert(Result.ExpandedTokens.back().kind() == tok::eof);
- for (unsigned I = 0; I < Result.ExpandedTokens.size() - 1; ++I) {
- // (!) I might be updated by the following call.
- processExpandedToken(I);
- }
- // 'eof' not handled in the loop, do it here.
- assert(SM.getMainFileID() ==
- SM.getFileID(Result.ExpandedTokens.back().location()));
- fillGapUntil(Result.Files[SM.getMainFileID()],
- Result.ExpandedTokens.back().location(),
- Result.ExpandedTokens.size() - 1);
- Result.Files[SM.getMainFileID()].EndExpanded = Result.ExpandedTokens.size();
+ // Tokenize every file that contributed tokens to the expanded stream.
+ buildSpelledTokens();
- // Some files might have unaccounted spelled tokens at the end, add an empty
- // mapping for those as they did not have expanded counterparts.
- fillGapsAtEndOfFiles();
+ // The expanded token stream consists of runs of tokens that came from
+ // the same source (a macro expansion, part of a file etc).
+ // Between these runs are the logical positions of spelled tokens that
+ // didn't expand to anything.
+ while (NextExpanded < Result.ExpandedTokens.size() - 1 /* eof */) {
+ // Create empty mappings for spelled tokens that expanded to nothing here.
+ // May advance NextSpelled, but NextExpanded is unchanged.
+ discard();
+ // Create mapping for a contiguous run of expanded tokens.
+ // Advances NextExpanded past the run, and NextSpelled accordingly.
+ unsigned OldPosition = NextExpanded;
+ advance();
+ if (NextExpanded == OldPosition)
+ diagnoseAdvanceFailure();
+ }
+ // If any tokens remain in any of the files, they didn't expand to anything.
+ // Create empty mappings up until the end of the file.
+ for (const auto &File : Result.Files)
+ discard(File.first);
+
+#ifndef NDEBUG
+ for (auto &pair : Result.Files) {
+ auto &mappings = pair.second.Mappings;
+ assert(llvm::is_sorted(mappings, [](const TokenBuffer::Mapping &M1,
+ const TokenBuffer::Mapping &M2) {
+ return M1.BeginSpelled < M2.BeginSpelled &&
+ M1.EndSpelled < M2.EndSpelled &&
+ M1.BeginExpanded < M2.BeginExpanded &&
+ M1.EndExpanded < M2.EndExpanded;
+ }));
+ }
+#endif
return std::move(Result);
}
private:
- /// Process the next token in an expanded stream and move corresponding
- /// spelled tokens, record any mapping if needed.
- /// (!) \p I will be updated if this had to skip tokens, e.g. for macros.
- void processExpandedToken(unsigned &I) {
- auto L = Result.ExpandedTokens[I].location();
- if (L.isMacroID()) {
- processMacroExpansion(SM.getExpansionRange(L), I);
- return;
+ // Consume a sequence of spelled tokens that didn't expand to anything.
+ // In the simplest case, skips spelled tokens until finding one that produced
+ // the NextExpanded token, and creates an empty mapping for them.
+ // If Drain is provided, skips remaining tokens from that file instead.
+ void discard(llvm::Optional<FileID> Drain = llvm::None) {
+ SourceLocation Target =
+ Drain ? SM.getLocForEndOfFile(*Drain)
+ : SM.getExpansionLoc(
+ Result.ExpandedTokens[NextExpanded].location());
+ FileID File = SM.getFileID(Target);
+ const auto &SpelledTokens = Result.Files[File].SpelledTokens;
+ auto &NextSpelled = this->NextSpelled[File];
+
+ TokenBuffer::Mapping Mapping;
+ Mapping.BeginSpelled = NextSpelled;
+ // When dropping trailing tokens from a file, the empty mapping should
+ // be positioned within the file's expanded-token range (at the end).
+ Mapping.BeginExpanded = Mapping.EndExpanded =
+ Drain ? Result.Files[*Drain].EndExpanded : NextExpanded;
+ // We may want to split into several adjacent empty mappings.
+ // FlushMapping() emits the current mapping and starts a new one.
+ auto FlushMapping = [&, this] {
+ Mapping.EndSpelled = NextSpelled;
+ if (Mapping.BeginSpelled != Mapping.EndSpelled)
+ Result.Files[File].Mappings.push_back(Mapping);
+ Mapping.BeginSpelled = NextSpelled;
+ };
+
+ while (NextSpelled < SpelledTokens.size() &&
+ SpelledTokens[NextSpelled].location() < Target) {
+ // If we know mapping bounds at [NextSpelled, KnownEnd] (macro expansion)
+ // then we want to partition our (empty) mapping.
+ // [Start, NextSpelled) [NextSpelled, KnownEnd] (KnownEnd, Target)
+ SourceLocation KnownEnd = CollectedExpansions.lookup(
+ SpelledTokens[NextSpelled].location().getRawEncoding());
+ if (KnownEnd.isValid()) {
+ FlushMapping(); // Emits [Start, NextSpelled)
+ while (NextSpelled < SpelledTokens.size() &&
+ SpelledTokens[NextSpelled].location() <= KnownEnd)
+ ++NextSpelled;
+ FlushMapping(); // Emits [NextSpelled, KnownEnd]
+ // Now the loop contitues and will emit (KnownEnd, Target).
+ } else {
+ ++NextSpelled;
+ }
}
- if (L.isFileID()) {
- auto FID = SM.getFileID(L);
- TokenBuffer::MarkedFile &File = Result.Files[FID];
-
- fillGapUntil(File, L, I);
+ FlushMapping();
+ }
- // Skip the token.
- assert(File.SpelledTokens[NextSpelled[FID]].location() == L &&
- "no corresponding token in the spelled stream");
- ++NextSpelled[FID];
- return;
+ // Consumes the NextExpanded token and others that are part of the same run.
+ // Increases NextExpanded and NextSpelled by at least one, and adds a mapping
+ // (unless this is a run of file tokens, which we represent with no mapping).
+ void advance() {
+ const syntax::Token &Tok = Result.ExpandedTokens[NextExpanded];
+ SourceLocation Expansion = SM.getExpansionLoc(Tok.location());
+ FileID File = SM.getFileID(Expansion);
+ const auto &SpelledTokens = Result.Files[File].SpelledTokens;
+ auto &NextSpelled = this->NextSpelled[File];
+
+ if (Tok.location().isFileID()) {
+ // A run of file tokens continues while the expanded/spelled tokens match.
+ while (NextSpelled < SpelledTokens.size() &&
+ NextExpanded < Result.ExpandedTokens.size() &&
+ SpelledTokens[NextSpelled].location() ==
+ Result.ExpandedTokens[NextExpanded].location()) {
+ ++NextSpelled;
+ ++NextExpanded;
+ }
+ // We need no mapping for file tokens copied to the expanded stream.
+ } else {
+ // We found a new macro expansion. We should have its spelling bounds.
+ auto End = CollectedExpansions.lookup(Expansion.getRawEncoding());
+ assert(End.isValid() && "Macro expansion wasn't captured?");
+
+ // Mapping starts here...
+ TokenBuffer::Mapping Mapping;
+ Mapping.BeginExpanded = NextExpanded;
+ Mapping.BeginSpelled = NextSpelled;
+ // ... consumes spelled tokens within bounds we captured ...
+ while (NextSpelled < SpelledTokens.size() &&
+ SpelledTokens[NextSpelled].location() <= End)
+ ++NextSpelled;
+ // ... consumes expanded tokens rooted at the same expansion ...
+ while (NextExpanded < Result.ExpandedTokens.size() &&
+ SM.getExpansionLoc(
+ Result.ExpandedTokens[NextExpanded].location()) == Expansion)
+ ++NextExpanded;
+ // ... and ends here.
+ Mapping.EndExpanded = NextExpanded;
+ Mapping.EndSpelled = NextSpelled;
+ Result.Files[File].Mappings.push_back(Mapping);
}
}
- /// Skipped expanded and spelled tokens of a macro expansion that covers \p
- /// SpelledRange. Add a corresponding mapping.
- /// (!) \p I will be the index of the last token in an expansion after this
- /// function returns.
- void processMacroExpansion(CharSourceRange SpelledRange, unsigned &I) {
- auto FID = SM.getFileID(SpelledRange.getBegin());
- assert(FID == SM.getFileID(SpelledRange.getEnd()));
- TokenBuffer::MarkedFile &File = Result.Files[FID];
-
- fillGapUntil(File, SpelledRange.getBegin(), I);
-
- // Skip all expanded tokens from the same macro expansion.
- unsigned BeginExpanded = I;
- for (; I + 1 < Result.ExpandedTokens.size(); ++I) {
- auto NextL = Result.ExpandedTokens[I + 1].location();
- if (!NextL.isMacroID() ||
- SM.getExpansionLoc(NextL) != SpelledRange.getBegin())
- break;
+ // advance() is supposed to consume at least one token - if not, we crash.
+ void diagnoseAdvanceFailure() {
+#ifndef NDEBUG
+ // Show the failed-to-map token in context.
+ for (unsigned I = (NextExpanded < 10) ? 0 : NextExpanded - 10;
+ I < NextExpanded + 5 && I < Result.ExpandedTokens.size(); ++I) {
+ const char *L =
+ (I == NextExpanded) ? "!! " : (I < NextExpanded) ? "ok " : " ";
+ llvm::errs() << L << Result.ExpandedTokens[I].dumpForTests(SM) << "\n";
}
- unsigned EndExpanded = I + 1;
- consumeMapping(File, SM.getFileOffset(SpelledRange.getEnd()), BeginExpanded,
- EndExpanded, NextSpelled[FID]);
+#endif
+ llvm_unreachable("Couldn't map expanded token to spelled tokens!");
}
/// Initializes TokenBuffer::Files and fills spelled tokens and expanded
/// ranges for each of the files.
void buildSpelledTokens() {
for (unsigned I = 0; I < Result.ExpandedTokens.size(); ++I) {
- auto FID =
- SM.getFileID(SM.getExpansionLoc(Result.ExpandedTokens[I].location()));
+ const auto &Tok = Result.ExpandedTokens[I];
+ auto FID = SM.getFileID(SM.getExpansionLoc(Tok.location()));
auto It = Result.Files.try_emplace(FID);
TokenBuffer::MarkedFile &File = It.first->second;
- File.EndExpanded = I + 1;
+ // The eof token should not be considered part of the main-file's range.
+ File.EndExpanded = Tok.kind() == tok::eof ? I : I + 1;
+
if (!It.second)
continue; // we have seen this file before.
-
// This is the first time we see this file.
File.BeginExpanded = I;
File.SpelledTokens = tokenize(FID, SM, LangOpts);
}
}
- void consumeEmptyMapping(TokenBuffer::MarkedFile &File, unsigned EndOffset,
- unsigned ExpandedIndex, unsigned &SpelledIndex) {
- consumeMapping(File, EndOffset, ExpandedIndex, ExpandedIndex, SpelledIndex);
- }
-
- /// Consumes spelled tokens that form a macro expansion and adds a entry to
- /// the resulting token buffer.
- /// (!) SpelledIndex is updated in-place.
- void consumeMapping(TokenBuffer::MarkedFile &File, unsigned EndOffset,
- unsigned BeginExpanded, unsigned EndExpanded,
- unsigned &SpelledIndex) {
- // We need to record this mapping before continuing.
- unsigned MappingBegin = SpelledIndex;
- ++SpelledIndex;
-
- bool HitMapping =
- tryConsumeSpelledUntil(File, EndOffset + 1, SpelledIndex).hasValue();
- (void)HitMapping;
- assert(!HitMapping && "recursive macro expansion?");
-
- TokenBuffer::Mapping M;
- M.BeginExpanded = BeginExpanded;
- M.EndExpanded = EndExpanded;
- M.BeginSpelled = MappingBegin;
- M.EndSpelled = SpelledIndex;
-
- File.Mappings.push_back(M);
- }
-
- /// Consumes spelled tokens until location \p L is reached and adds a mapping
- /// covering the consumed tokens. The mapping will point to an empty expanded
- /// range at position \p ExpandedIndex.
- void fillGapUntil(TokenBuffer::MarkedFile &File, SourceLocation L,
- unsigned ExpandedIndex) {
- assert(L.isFileID());
- FileID FID;
- unsigned Offset;
- std::tie(FID, Offset) = SM.getDecomposedLoc(L);
-
- unsigned &SpelledIndex = NextSpelled[FID];
- unsigned MappingBegin = SpelledIndex;
- while (true) {
- auto EndLoc = tryConsumeSpelledUntil(File, Offset, SpelledIndex);
- if (SpelledIndex != MappingBegin) {
- TokenBuffer::Mapping M;
- M.BeginSpelled = MappingBegin;
- M.EndSpelled = SpelledIndex;
- M.BeginExpanded = M.EndExpanded = ExpandedIndex;
- File.Mappings.push_back(M);
- }
- if (!EndLoc)
- break;
- consumeEmptyMapping(File, SM.getFileOffset(*EndLoc), ExpandedIndex,
- SpelledIndex);
-
- MappingBegin = SpelledIndex;
- }
- };
-
- /// Consumes spelled tokens until it reaches Offset or a mapping boundary,
- /// i.e. a name of a macro expansion or the start '#' token of a PP directive.
- /// (!) NextSpelled is updated in place.
- ///
- /// returns None if \p Offset was reached, otherwise returns the end location
- /// of a mapping that starts at \p NextSpelled.
- llvm::Optional<SourceLocation>
- tryConsumeSpelledUntil(TokenBuffer::MarkedFile &File, unsigned Offset,
- unsigned &NextSpelled) {
- for (; NextSpelled < File.SpelledTokens.size(); ++NextSpelled) {
- auto L = File.SpelledTokens[NextSpelled].location();
- if (Offset <= SM.getFileOffset(L))
- return llvm::None; // reached the offset we are looking for.
- auto Mapping = CollectedExpansions.find(L.getRawEncoding());
- if (Mapping != CollectedExpansions.end())
- return Mapping->second; // found a mapping before the offset.
- }
- return llvm::None; // no more tokens, we "reached" the offset.
- }
-
- /// Adds empty mappings for unconsumed spelled tokens at the end of each file.
- void fillGapsAtEndOfFiles() {
- for (auto &F : Result.Files) {
- if (F.second.SpelledTokens.empty())
- continue;
- fillGapUntil(F.second, F.second.SpelledTokens.back().endLocation(),
- F.second.EndExpanded);
- }
- }
-
TokenBuffer Result;
- /// For each file, a position of the next spelled token we will consume.
- llvm::DenseMap<FileID, unsigned> NextSpelled;
+ unsigned NextExpanded = 0; // cursor in ExpandedTokens
+ llvm::DenseMap<FileID, unsigned> NextSpelled; // cursor in SpelledTokens
PPExpansions CollectedExpansions;
const SourceManager &SM;
const LangOptions &LangOpts;
@@ -604,19 +802,20 @@ TokenBuffer TokenCollector::consume() && {
}
std::string syntax::Token::str() const {
- return llvm::formatv("Token({0}, length = {1})", tok::getTokenName(kind()),
- length());
+ return std::string(llvm::formatv("Token({0}, length = {1})",
+ tok::getTokenName(kind()), length()));
}
std::string syntax::Token::dumpForTests(const SourceManager &SM) const {
- return llvm::formatv("{0} {1}", tok::getTokenName(kind()), text(SM));
+ return std::string(llvm::formatv("Token(`{0}`, {1}, length = {2})", text(SM),
+ tok::getTokenName(kind()), length()));
}
std::string TokenBuffer::dumpForTests() const {
auto PrintToken = [this](const syntax::Token &T) -> std::string {
if (T.kind() == tok::eof)
return "<eof>";
- return T.text(*SourceMgr);
+ return std::string(T.text(*SourceMgr));
};
auto DumpTokens = [this, &PrintToken](llvm::raw_ostream &OS,
diff --git a/clang/lib/Tooling/Syntax/Tree.cpp b/clang/lib/Tooling/Syntax/Tree.cpp
index 9a6270ec4cce..37579e6145b6 100644
--- a/clang/lib/Tooling/Syntax/Tree.cpp
+++ b/clang/lib/Tooling/Syntax/Tree.cpp
@@ -58,22 +58,33 @@ bool syntax::Leaf::classof(const Node *N) {
syntax::Node::Node(NodeKind Kind)
: Parent(nullptr), NextSibling(nullptr), Kind(static_cast<unsigned>(Kind)),
- Role(static_cast<unsigned>(NodeRole::Detached)), Original(false),
- CanModify(false) {}
+ Role(0), Original(false), CanModify(false) {
+ this->setRole(NodeRole::Detached);
+}
bool syntax::Node::isDetached() const { return role() == NodeRole::Detached; }
+void syntax::Node::setRole(NodeRole NR) {
+ this->Role = static_cast<unsigned>(NR);
+}
+
bool syntax::Tree::classof(const Node *N) { return N->kind() > NodeKind::Leaf; }
void syntax::Tree::prependChildLowLevel(Node *Child, NodeRole Role) {
- assert(Child->Parent == nullptr);
- assert(Child->NextSibling == nullptr);
assert(Child->role() == NodeRole::Detached);
assert(Role != NodeRole::Detached);
+ Child->setRole(Role);
+ prependChildLowLevel(Child);
+}
+
+void syntax::Tree::prependChildLowLevel(Node *Child) {
+ assert(Child->Parent == nullptr);
+ assert(Child->NextSibling == nullptr);
+ assert(Child->role() != NodeRole::Detached);
+
Child->Parent = this;
Child->NextSibling = this->FirstChild;
- Child->Role = static_cast<unsigned>(Role);
this->FirstChild = Child;
}
@@ -94,7 +105,7 @@ void syntax::Tree::replaceChildRangeLowLevel(Node *BeforeBegin, Node *End,
N != End;) {
auto *Next = N->NextSibling;
- N->Role = static_cast<unsigned>(NodeRole::Detached);
+ N->setRole(NodeRole::Detached);
N->Parent = nullptr;
N->NextSibling = nullptr;
if (N->Original)
diff --git a/clang/lib/Tooling/Tooling.cpp b/clang/lib/Tooling/Tooling.cpp
index 4a0618c50e42..40b6cff0d627 100644
--- a/clang/lib/Tooling/Tooling.cpp
+++ b/clang/lib/Tooling/Tooling.cpp
@@ -141,11 +141,13 @@ namespace clang {
namespace tooling {
/// Returns a clang build invocation initialized from the CC1 flags.
-CompilerInvocation *newInvocation(
- DiagnosticsEngine *Diagnostics, const llvm::opt::ArgStringList &CC1Args) {
+CompilerInvocation *newInvocation(DiagnosticsEngine *Diagnostics,
+ const llvm::opt::ArgStringList &CC1Args,
+ const char *const BinaryName) {
assert(!CC1Args.empty() && "Must at least contain the program name!");
CompilerInvocation *Invocation = new CompilerInvocation;
- CompilerInvocation::CreateFromArgs(*Invocation, CC1Args, *Diagnostics);
+ CompilerInvocation::CreateFromArgs(*Invocation, CC1Args, *Diagnostics,
+ BinaryName);
Invocation->getFrontendOpts().DisableFree = false;
Invocation->getCodeGenOpts().DisableFree = false;
return Invocation;
@@ -234,7 +236,7 @@ llvm::Expected<std::string> getAbsolutePath(llvm::vfs::FileSystem &FS,
if (auto EC = FS.makeAbsolute(AbsolutePath))
return llvm::errorCodeToError(EC);
llvm::sys::path::native(AbsolutePath);
- return AbsolutePath.str();
+ return std::string(AbsolutePath.str());
}
std::string getAbsolutePath(StringRef File) {
@@ -345,7 +347,7 @@ bool ToolInvocation::run() {
if (!CC1Args)
return false;
std::unique_ptr<CompilerInvocation> Invocation(
- newInvocation(&Diagnostics, *CC1Args));
+ newInvocation(&Diagnostics, *CC1Args, BinaryName));
// FIXME: remove this when all users have migrated!
for (const auto &It : MappedFileContents) {
// Inject the code as the given file name into the preprocessor options.
@@ -619,7 +621,8 @@ buildASTFromCode(StringRef Code, StringRef FileName,
std::unique_ptr<ASTUnit> buildASTFromCodeWithArgs(
StringRef Code, const std::vector<std::string> &Args, StringRef FileName,
StringRef ToolName, std::shared_ptr<PCHContainerOperations> PCHContainerOps,
- ArgumentsAdjuster Adjuster, const FileContentMappings &VirtualMappedFiles) {
+ ArgumentsAdjuster Adjuster, const FileContentMappings &VirtualMappedFiles,
+ DiagnosticConsumer *DiagConsumer) {
std::vector<std::unique_ptr<ASTUnit>> ASTs;
ASTBuilderAction Action(ASTs);
llvm::IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> OverlayFileSystem(
@@ -633,6 +636,7 @@ std::unique_ptr<ASTUnit> buildASTFromCodeWithArgs(
ToolInvocation Invocation(
getSyntaxOnlyToolArgs(ToolName, Adjuster(Args, FileName), FileName),
&Action, Files.get(), std::move(PCHContainerOps));
+ Invocation.setDiagnosticConsumer(DiagConsumer);
InMemoryFileSystem->addFile(FileName, 0,
llvm::MemoryBuffer::getMemBufferCopy(Code));
diff --git a/clang/lib/Tooling/Transformer/Parsing.cpp b/clang/lib/Tooling/Transformer/Parsing.cpp
new file mode 100644
index 000000000000..1579115b9313
--- /dev/null
+++ b/clang/lib/Tooling/Transformer/Parsing.cpp
@@ -0,0 +1,279 @@
+//===--- Parsing.cpp - Parsing function implementations ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/Transformer/Parsing.h"
+#include "clang/AST/Expr.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Tooling/Transformer/RangeSelector.h"
+#include "clang/Tooling/Transformer/SourceCode.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/Error.h"
+#include <string>
+#include <utility>
+#include <vector>
+
+using namespace clang;
+using namespace transformer;
+
+// FIXME: This implementation is entirely separate from that of the AST
+// matchers. Given the similarity of the languages and uses of the two parsers,
+// the two should share a common parsing infrastructure, as should other
+// Transformer types. We intend to unify this implementation soon to share as
+// much as possible with the AST Matchers parsing.
+
+namespace {
+using llvm::Error;
+using llvm::Expected;
+
+template <typename... Ts> using RangeSelectorOp = RangeSelector (*)(Ts...);
+
+struct ParseState {
+ // The remaining input to be processed.
+ StringRef Input;
+ // The original input. Not modified during parsing; only for reference in
+ // error reporting.
+ StringRef OriginalInput;
+};
+
+// Represents an intermediate result returned by a parsing function. Functions
+// that don't generate values should use `llvm::None`
+template <typename ResultType> struct ParseProgress {
+ ParseState State;
+ // Intermediate result generated by the Parser.
+ ResultType Value;
+};
+
+template <typename T> using ExpectedProgress = llvm::Expected<ParseProgress<T>>;
+template <typename T> using ParseFunction = ExpectedProgress<T> (*)(ParseState);
+
+class ParseError : public llvm::ErrorInfo<ParseError> {
+public:
+ // Required field for all ErrorInfo derivatives.
+ static char ID;
+
+ ParseError(size_t Pos, std::string ErrorMsg, std::string InputExcerpt)
+ : Pos(Pos), ErrorMsg(std::move(ErrorMsg)),
+ Excerpt(std::move(InputExcerpt)) {}
+
+ void log(llvm::raw_ostream &OS) const override {
+ OS << "parse error at position (" << Pos << "): " << ErrorMsg
+ << ": " + Excerpt;
+ }
+
+ std::error_code convertToErrorCode() const override {
+ return llvm::inconvertibleErrorCode();
+ }
+
+ // Position of the error in the input string.
+ size_t Pos;
+ std::string ErrorMsg;
+ // Excerpt of the input starting at the error position.
+ std::string Excerpt;
+};
+
+char ParseError::ID;
+} // namespace
+
+static const llvm::StringMap<RangeSelectorOp<std::string>> &
+getUnaryStringSelectors() {
+ static const llvm::StringMap<RangeSelectorOp<std::string>> M = {
+ {"name", name},
+ {"node", node},
+ {"statement", statement},
+ {"statements", statements},
+ {"member", member},
+ {"callArgs", callArgs},
+ {"elseBranch", elseBranch},
+ {"initListElements", initListElements}};
+ return M;
+}
+
+static const llvm::StringMap<RangeSelectorOp<RangeSelector>> &
+getUnaryRangeSelectors() {
+ static const llvm::StringMap<RangeSelectorOp<RangeSelector>> M = {
+ {"before", before}, {"after", after}, {"expansion", expansion}};
+ return M;
+}
+
+static const llvm::StringMap<RangeSelectorOp<std::string, std::string>> &
+getBinaryStringSelectors() {
+ static const llvm::StringMap<RangeSelectorOp<std::string, std::string>> M = {
+ {"encloseNodes", range}};
+ return M;
+}
+
+static const llvm::StringMap<RangeSelectorOp<RangeSelector, RangeSelector>> &
+getBinaryRangeSelectors() {
+ static const llvm::StringMap<RangeSelectorOp<RangeSelector, RangeSelector>>
+ M = {{"enclose", range}};
+ return M;
+}
+
+template <typename Element>
+llvm::Optional<Element> findOptional(const llvm::StringMap<Element> &Map,
+ llvm::StringRef Key) {
+ auto it = Map.find(Key);
+ if (it == Map.end())
+ return llvm::None;
+ return it->second;
+}
+
+template <typename ResultType>
+ParseProgress<ResultType> makeParseProgress(ParseState State,
+ ResultType Result) {
+ return ParseProgress<ResultType>{State, std::move(Result)};
+}
+
+static llvm::Error makeParseError(const ParseState &S, std::string ErrorMsg) {
+ size_t Pos = S.OriginalInput.size() - S.Input.size();
+ return llvm::make_error<ParseError>(Pos, std::move(ErrorMsg),
+ S.OriginalInput.substr(Pos, 20).str());
+}
+
+// Returns a new ParseState that advances \c S by \c N characters.
+static ParseState advance(ParseState S, size_t N) {
+ S.Input = S.Input.drop_front(N);
+ return S;
+}
+
+static StringRef consumeWhitespace(StringRef S) {
+ return S.drop_while([](char c) { return c >= 0 && isWhitespace(c); });
+}
+
+// Parses a single expected character \c c from \c State, skipping preceding
+// whitespace. Error if the expected character isn't found.
+static ExpectedProgress<llvm::NoneType> parseChar(char c, ParseState State) {
+ State.Input = consumeWhitespace(State.Input);
+ if (State.Input.empty() || State.Input.front() != c)
+ return makeParseError(State,
+ ("expected char not found: " + llvm::Twine(c)).str());
+ return makeParseProgress(advance(State, 1), llvm::None);
+}
+
+// Parses an identifier "token" -- handles preceding whitespace.
+static ExpectedProgress<std::string> parseId(ParseState State) {
+ State.Input = consumeWhitespace(State.Input);
+ auto Id = State.Input.take_while(
+ [](char c) { return c >= 0 && isIdentifierBody(c); });
+ if (Id.empty())
+ return makeParseError(State, "failed to parse name");
+ return makeParseProgress(advance(State, Id.size()), Id.str());
+}
+
+// For consistency with the AST matcher parser and C++ code, node ids are
+// written as strings. However, we do not support escaping in the string.
+static ExpectedProgress<std::string> parseStringId(ParseState State) {
+ State.Input = consumeWhitespace(State.Input);
+ if (State.Input.empty())
+ return makeParseError(State, "unexpected end of input");
+ if (!State.Input.consume_front("\""))
+ return makeParseError(
+ State,
+ "expecting string, but encountered other character or end of input");
+
+ StringRef Id = State.Input.take_until([](char c) { return c == '"'; });
+ if (State.Input.size() == Id.size())
+ return makeParseError(State, "unterminated string");
+ // Advance past the trailing quote as well.
+ return makeParseProgress(advance(State, Id.size() + 1), Id.str());
+}
+
+// Parses a single element surrounded by parens. `Op` is applied to the parsed
+// result to create the result of this function call.
+template <typename T>
+ExpectedProgress<RangeSelector> parseSingle(ParseFunction<T> ParseElement,
+ RangeSelectorOp<T> Op,
+ ParseState State) {
+ auto P = parseChar('(', State);
+ if (!P)
+ return P.takeError();
+
+ auto E = ParseElement(P->State);
+ if (!E)
+ return E.takeError();
+
+ P = parseChar(')', E->State);
+ if (!P)
+ return P.takeError();
+
+ return makeParseProgress(P->State, Op(std::move(E->Value)));
+}
+
+// Parses a pair of elements surrounded by parens and separated by comma. `Op`
+// is applied to the parsed results to create the result of this function call.
+template <typename T>
+ExpectedProgress<RangeSelector> parsePair(ParseFunction<T> ParseElement,
+ RangeSelectorOp<T, T> Op,
+ ParseState State) {
+ auto P = parseChar('(', State);
+ if (!P)
+ return P.takeError();
+
+ auto Left = ParseElement(P->State);
+ if (!Left)
+ return Left.takeError();
+
+ P = parseChar(',', Left->State);
+ if (!P)
+ return P.takeError();
+
+ auto Right = ParseElement(P->State);
+ if (!Right)
+ return Right.takeError();
+
+ P = parseChar(')', Right->State);
+ if (!P)
+ return P.takeError();
+
+ return makeParseProgress(P->State,
+ Op(std::move(Left->Value), std::move(Right->Value)));
+}
+
+// Parses input for a range-selector operator: an identifier naming the
+// operator, followed by its parenthesized argument(s). Returns the constructed
+// RangeSelector on success and an error if it fails to parse the input.
+static ExpectedProgress<RangeSelector>
+parseRangeSelectorImpl(ParseState State) {
+ auto Id = parseId(State);
+ if (!Id)
+ return Id.takeError();
+
+ std::string OpName = std::move(Id->Value);
+ if (auto Op = findOptional(getUnaryStringSelectors(), OpName))
+ return parseSingle(parseStringId, *Op, Id->State);
+
+ if (auto Op = findOptional(getUnaryRangeSelectors(), OpName))
+ return parseSingle(parseRangeSelectorImpl, *Op, Id->State);
+
+ if (auto Op = findOptional(getBinaryStringSelectors(), OpName))
+ return parsePair(parseStringId, *Op, Id->State);
+
+ if (auto Op = findOptional(getBinaryRangeSelectors(), OpName))
+ return parsePair(parseRangeSelectorImpl, *Op, Id->State);
+
+ return makeParseError(State, "unknown selector name: " + OpName);
+}
+
+Expected<RangeSelector> transformer::parseRangeSelector(llvm::StringRef Input) {
+ ParseState State = {Input, Input};
+ ExpectedProgress<RangeSelector> Result = parseRangeSelectorImpl(State);
+ if (!Result)
+ return Result.takeError();
+ State = Result->State;
+ // Discard any potentially trailing whitespace.
+ State.Input = consumeWhitespace(State.Input);
+ if (State.Input.empty())
+ return Result->Value;
+ return makeParseError(State, "unexpected input after selector");
+}
diff --git a/clang/lib/Tooling/Transformer/RangeSelector.cpp b/clang/lib/Tooling/Transformer/RangeSelector.cpp
index 9f81423c9022..29b1a5b0372e 100644
--- a/clang/lib/Tooling/Transformer/RangeSelector.cpp
+++ b/clang/lib/Tooling/Transformer/RangeSelector.cpp
@@ -23,8 +23,6 @@ using namespace clang;
using namespace transformer;
using ast_matchers::MatchFinder;
-using ast_type_traits::ASTNodeKind;
-using ast_type_traits::DynTypedNode;
using llvm::Error;
using llvm::StringError;
@@ -148,7 +146,7 @@ RangeSelector transformer::statement(std::string ID) {
};
}
-RangeSelector transformer::range(RangeSelector Begin, RangeSelector End) {
+RangeSelector transformer::enclose(RangeSelector Begin, RangeSelector End) {
return [Begin, End](const MatchResult &Result) -> Expected<CharSourceRange> {
Expected<CharSourceRange> BeginRange = Begin(Result);
if (!BeginRange)
@@ -167,8 +165,9 @@ RangeSelector transformer::range(RangeSelector Begin, RangeSelector End) {
};
}
-RangeSelector transformer::range(std::string BeginID, std::string EndID) {
- return transformer::range(node(std::move(BeginID)), node(std::move(EndID)));
+RangeSelector transformer::encloseNodes(std::string BeginID,
+ std::string EndID) {
+ return transformer::enclose(node(std::move(BeginID)), node(std::move(EndID)));
}
RangeSelector transformer::member(std::string ID) {
diff --git a/clang/lib/Tooling/Transformer/RewriteRule.cpp b/clang/lib/Tooling/Transformer/RewriteRule.cpp
index 20d3a371950a..995bec03cd66 100644
--- a/clang/lib/Tooling/Transformer/RewriteRule.cpp
+++ b/clang/lib/Tooling/Transformer/RewriteRule.cpp
@@ -25,16 +25,14 @@ using namespace transformer;
using ast_matchers::MatchFinder;
using ast_matchers::internal::DynTypedMatcher;
-using ast_type_traits::ASTNodeKind;
using MatchResult = MatchFinder::MatchResult;
-Expected<SmallVector<transformer::detail::Transformation, 1>>
-transformer::detail::translateEdits(const MatchResult &Result,
- llvm::ArrayRef<ASTEdit> Edits) {
- SmallVector<transformer::detail::Transformation, 1> Transformations;
- for (const auto &Edit : Edits) {
- Expected<CharSourceRange> Range = Edit.TargetRange(Result);
+static Expected<SmallVector<transformer::Edit, 1>>
+translateEdits(const MatchResult &Result, ArrayRef<ASTEdit> ASTEdits) {
+ SmallVector<transformer::Edit, 1> Edits;
+ for (const auto &E : ASTEdits) {
+ Expected<CharSourceRange> Range = E.TargetRange(Result);
if (!Range)
return Range.takeError();
llvm::Optional<CharSourceRange> EditRange =
@@ -42,21 +40,34 @@ transformer::detail::translateEdits(const MatchResult &Result,
// FIXME: let user specify whether to treat this case as an error or ignore
// it as is currently done.
if (!EditRange)
- return SmallVector<Transformation, 0>();
- auto Replacement = Edit.Replacement->eval(Result);
+ return SmallVector<Edit, 0>();
+ auto Replacement = E.Replacement->eval(Result);
if (!Replacement)
return Replacement.takeError();
- transformer::detail::Transformation T;
+ transformer::Edit T;
T.Range = *EditRange;
T.Replacement = std::move(*Replacement);
- Transformations.push_back(std::move(T));
+ T.Metadata = E.Metadata;
+ Edits.push_back(std::move(T));
}
- return Transformations;
+ return Edits;
}
-ASTEdit transformer::changeTo(RangeSelector S, TextGenerator Replacement) {
+EditGenerator transformer::editList(SmallVector<ASTEdit, 1> Edits) {
+ return [Edits = std::move(Edits)](const MatchResult &Result) {
+ return translateEdits(Result, Edits);
+ };
+}
+
+EditGenerator transformer::edit(ASTEdit Edit) {
+ return [Edit = std::move(Edit)](const MatchResult &Result) {
+ return translateEdits(Result, {Edit});
+ };
+}
+
+ASTEdit transformer::changeTo(RangeSelector Target, TextGenerator Replacement) {
ASTEdit E;
- E.TargetRange = std::move(S);
+ E.TargetRange = std::move(Target);
E.Replacement = std::move(Replacement);
return E;
}
@@ -83,8 +94,9 @@ ASTEdit transformer::remove(RangeSelector S) {
return change(std::move(S), std::make_shared<SimpleTextGenerator>(""));
}
-RewriteRule transformer::makeRule(DynTypedMatcher M, SmallVector<ASTEdit, 1> Edits,
- TextGenerator Explanation) {
+RewriteRule transformer::makeRule(ast_matchers::internal::DynTypedMatcher M,
+ EditGenerator Edits,
+ TextGenerator Explanation) {
return RewriteRule{{RewriteRule::Case{
std::move(M), std::move(Edits), std::move(Explanation), {}}}};
}
@@ -105,10 +117,13 @@ static bool hasValidKind(const DynTypedMatcher &M) {
#endif
// Binds each rule's matcher to a unique (and deterministic) tag based on
-// `TagBase` and the id paired with the case.
+// `TagBase` and the id paired with the case. All of the returned matchers have
+// their traversal kind explicitly set, either based on a pre-set kind or to the
+// provided `DefaultTraversalKind`.
static std::vector<DynTypedMatcher> taggedMatchers(
StringRef TagBase,
- const SmallVectorImpl<std::pair<size_t, RewriteRule::Case>> &Cases) {
+ const SmallVectorImpl<std::pair<size_t, RewriteRule::Case>> &Cases,
+ ast_type_traits::TraversalKind DefaultTraversalKind) {
std::vector<DynTypedMatcher> Matchers;
Matchers.reserve(Cases.size());
for (const auto &Case : Cases) {
@@ -116,8 +131,10 @@ static std::vector<DynTypedMatcher> taggedMatchers(
// HACK: Many matchers are not bindable, so ensure that tryBind will work.
DynTypedMatcher BoundMatcher(Case.second.Matcher);
BoundMatcher.setAllowBind(true);
- auto M = BoundMatcher.tryBind(Tag);
- Matchers.push_back(*std::move(M));
+ auto M = *BoundMatcher.tryBind(Tag);
+ Matchers.push_back(!M.getTraversalKind()
+ ? M.withTraversalKind(DefaultTraversalKind)
+ : std::move(M));
}
return Matchers;
}
@@ -147,14 +164,21 @@ transformer::detail::buildMatchers(const RewriteRule &Rule) {
Buckets[Cases[I].Matcher.getSupportedKind()].emplace_back(I, Cases[I]);
}
+ // Each anyOf explicitly controls the traversal kind. The anyOf itself is set
+ // to `TK_AsIs` to ensure no nodes are skipped, thereby deferring to the kind
+ // of the branches. Then, each branch is either left as is, if the kind is
+ // already set, or explicitly set to `TK_IgnoreUnlessSpelledInSource`. We
+ // choose this setting, because we think it is the one most friendly to
+ // beginners, who are (largely) the target audience of Transformer.
std::vector<DynTypedMatcher> Matchers;
for (const auto &Bucket : Buckets) {
DynTypedMatcher M = DynTypedMatcher::constructVariadic(
DynTypedMatcher::VO_AnyOf, Bucket.first,
- taggedMatchers("Tag", Bucket.second));
+ taggedMatchers("Tag", Bucket.second, TK_IgnoreUnlessSpelledInSource));
M.setAllowBind(true);
// `tryBind` is guaranteed to succeed, because `AllowBind` was set to true.
- Matchers.push_back(*M.tryBind(RewriteRule::RootID));
+ Matchers.push_back(
+ M.tryBind(RewriteRule::RootID)->withTraversalKind(TK_AsIs));
}
return Matchers;
}
diff --git a/clang/lib/Tooling/Transformer/SourceCode.cpp b/clang/lib/Tooling/Transformer/SourceCode.cpp
index 836401d1e605..26b204851f05 100644
--- a/clang/lib/Tooling/Transformer/SourceCode.cpp
+++ b/clang/lib/Tooling/Transformer/SourceCode.cpp
@@ -10,10 +10,24 @@
//
//===----------------------------------------------------------------------===//
#include "clang/Tooling/Transformer/SourceCode.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/Comment.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/Error.h"
+#include <set>
using namespace clang;
+using llvm::errc;
+using llvm::StringError;
+
StringRef clang::tooling::getText(CharSourceRange Range,
const ASTContext &Context) {
return Lexer::getSourceText(Range, Context.getSourceManager(),
@@ -23,11 +37,45 @@ StringRef clang::tooling::getText(CharSourceRange Range,
CharSourceRange clang::tooling::maybeExtendRange(CharSourceRange Range,
tok::TokenKind Next,
ASTContext &Context) {
- Optional<Token> Tok = Lexer::findNextToken(
- Range.getEnd(), Context.getSourceManager(), Context.getLangOpts());
- if (!Tok || !Tok->is(Next))
+ CharSourceRange R = Lexer::getAsCharRange(Range, Context.getSourceManager(),
+ Context.getLangOpts());
+ if (R.isInvalid())
+ return Range;
+ Token Tok;
+ bool Err =
+ Lexer::getRawToken(R.getEnd(), Tok, Context.getSourceManager(),
+ Context.getLangOpts(), /*IgnoreWhiteSpace=*/true);
+ if (Err || !Tok.is(Next))
return Range;
- return CharSourceRange::getTokenRange(Range.getBegin(), Tok->getLocation());
+ return CharSourceRange::getTokenRange(Range.getBegin(), Tok.getLocation());
+}
+
+llvm::Error clang::tooling::validateEditRange(const CharSourceRange &Range,
+ const SourceManager &SM) {
+ if (Range.isInvalid())
+ return llvm::make_error<StringError>(errc::invalid_argument,
+ "Invalid range");
+
+ if (Range.getBegin().isMacroID() || Range.getEnd().isMacroID())
+ return llvm::make_error<StringError>(
+ errc::invalid_argument, "Range starts or ends in a macro expansion");
+
+ if (SM.isInSystemHeader(Range.getBegin()) ||
+ SM.isInSystemHeader(Range.getEnd()))
+ return llvm::make_error<StringError>(errc::invalid_argument,
+ "Range is in system header");
+
+ std::pair<FileID, unsigned> BeginInfo = SM.getDecomposedLoc(Range.getBegin());
+ std::pair<FileID, unsigned> EndInfo = SM.getDecomposedLoc(Range.getEnd());
+ if (BeginInfo.first != EndInfo.first)
+ return llvm::make_error<StringError>(
+ errc::invalid_argument, "Range begins and ends in different files");
+
+ if (BeginInfo.second > EndInfo.second)
+ return llvm::make_error<StringError>(
+ errc::invalid_argument, "Range's begin is past its end");
+
+ return llvm::Error::success();
}
llvm::Optional<CharSourceRange>
@@ -46,20 +94,308 @@ clang::tooling::getRangeForEdit(const CharSourceRange &EditRange,
// foo(DO_NOTHING(6))
// Decide whether the current behavior is desirable and modify if not.
CharSourceRange Range = Lexer::makeFileCharRange(EditRange, SM, LangOpts);
- if (Range.isInvalid())
- return None;
+ bool IsInvalid = llvm::errorToBool(validateEditRange(Range, SM));
+ if (IsInvalid)
+ return llvm::None;
+ return Range;
- if (Range.getBegin().isMacroID() || Range.getEnd().isMacroID())
- return None;
- if (SM.isInSystemHeader(Range.getBegin()) ||
- SM.isInSystemHeader(Range.getEnd()))
- return None;
+}
- std::pair<FileID, unsigned> BeginInfo = SM.getDecomposedLoc(Range.getBegin());
- std::pair<FileID, unsigned> EndInfo = SM.getDecomposedLoc(Range.getEnd());
- if (BeginInfo.first != EndInfo.first ||
- BeginInfo.second > EndInfo.second)
- return None;
+static bool startsWithNewline(const SourceManager &SM, const Token &Tok) {
+ return isVerticalWhitespace(SM.getCharacterData(Tok.getLocation())[0]);
+}
- return Range;
+static bool contains(const std::set<tok::TokenKind> &Terminators,
+ const Token &Tok) {
+ return Terminators.count(Tok.getKind()) > 0;
+}
+
+// Returns the exclusive, *file* end location of the entity whose last token is
+// at location 'EntityLast'. That is, it returns the location one past the last
+// relevant character.
+//
+// Associated tokens include comments, horizontal whitespace and 'Terminators'
+// -- optional tokens, which, if any are found, will be included; if
+// 'Terminators' is empty, we will not include any extra tokens beyond comments
+// and horizontal whitespace.
+static SourceLocation
+getEntityEndLoc(const SourceManager &SM, SourceLocation EntityLast,
+ const std::set<tok::TokenKind> &Terminators,
+ const LangOptions &LangOpts) {
+ assert(EntityLast.isValid() && "Invalid end location found.");
+
+ // We remember the last location of a non-horizontal-whitespace token we have
+ // lexed; this is the location up to which we will want to delete.
+ // FIXME: Support using the spelling loc here for cases where we want to
+ // analyze the macro text.
+
+ CharSourceRange ExpansionRange = SM.getExpansionRange(EntityLast);
+ // FIXME: Should check isTokenRange(), for the (rare) case that
+ // `ExpansionRange` is a character range.
+ std::unique_ptr<Lexer> Lexer = [&]() {
+ bool Invalid = false;
+ auto FileOffset = SM.getDecomposedLoc(ExpansionRange.getEnd());
+ llvm::StringRef File = SM.getBufferData(FileOffset.first, &Invalid);
+ assert(!Invalid && "Cannot get file/offset");
+ return std::make_unique<clang::Lexer>(
+ SM.getLocForStartOfFile(FileOffset.first), LangOpts, File.begin(),
+ File.data() + FileOffset.second, File.end());
+ }();
+
+ // Tell Lexer to return whitespace as pseudo-tokens (kind is tok::unknown).
+ Lexer->SetKeepWhitespaceMode(true);
+
+ // Generally, the code we want to include looks like this ([] are optional),
+ // If Terminators is empty:
+ // [ <comment> ] [ <newline> ]
+ // Otherwise:
+ // ... <terminator> [ <comment> ] [ <newline> ]
+
+ Token Tok;
+ bool Terminated = false;
+
+ // First, lex to the current token (which is the last token of the range that
+ // is definitely associated with the decl). Then, we process the first token
+ // separately from the rest based on conditions that hold specifically for
+ // that first token.
+ //
+ // We do not search for a terminator if none is required or we've already
+ // encountered it. Otherwise, if the original `EntityLast` location was in a
+ // macro expansion, we don't have visibility into the text, so we assume we've
+ // already terminated. However, we note this assumption with
+ // `TerminatedByMacro`, because we'll want to handle it somewhat differently
+ // for the terminators semicolon and comma. These terminators can be safely
+ // associated with the entity when they appear after the macro -- extra
+ // semicolons have no effect on the program and a well-formed program won't
+ // have multiple commas in a row, so we're guaranteed that there is only one.
+ //
+ // FIXME: This handling of macros is more conservative than necessary. When
+ // the end of the expansion coincides with the end of the node, we can still
+ // safely analyze the code. But, it is more complicated, because we need to
+ // start by lexing the spelling loc for the first token and then switch to the
+ // expansion loc.
+ bool TerminatedByMacro = false;
+ Lexer->LexFromRawLexer(Tok);
+ if (Terminators.empty() || contains(Terminators, Tok))
+ Terminated = true;
+ else if (EntityLast.isMacroID()) {
+ Terminated = true;
+ TerminatedByMacro = true;
+ }
+
+ // We save the most recent candidate for the exclusive end location.
+ SourceLocation End = Tok.getEndLoc();
+
+ while (!Terminated) {
+ // Lex the next token we want to possibly expand the range with.
+ Lexer->LexFromRawLexer(Tok);
+
+ switch (Tok.getKind()) {
+ case tok::eof:
+ // Unexpected separators.
+ case tok::l_brace:
+ case tok::r_brace:
+ case tok::comma:
+ return End;
+ // Whitespace pseudo-tokens.
+ case tok::unknown:
+ if (startsWithNewline(SM, Tok))
+ // Include at least until the end of the line.
+ End = Tok.getEndLoc();
+ break;
+ default:
+ if (contains(Terminators, Tok))
+ Terminated = true;
+ End = Tok.getEndLoc();
+ break;
+ }
+ }
+
+ do {
+ // Lex the next token we want to possibly expand the range with.
+ Lexer->LexFromRawLexer(Tok);
+
+ switch (Tok.getKind()) {
+ case tok::unknown:
+ if (startsWithNewline(SM, Tok))
+ // We're done, but include this newline.
+ return Tok.getEndLoc();
+ break;
+ case tok::comment:
+ // Include any comments we find on the way.
+ End = Tok.getEndLoc();
+ break;
+ case tok::semi:
+ case tok::comma:
+ if (TerminatedByMacro && contains(Terminators, Tok)) {
+ End = Tok.getEndLoc();
+ // We've found a real terminator.
+ TerminatedByMacro = false;
+ break;
+ }
+ // Found an unrelated token; stop and don't include it.
+ return End;
+ default:
+ // Found an unrelated token; stop and don't include it.
+ return End;
+ }
+ } while (true);
+}
+
+// Returns the expected terminator tokens for the given declaration.
+//
+// If we do not know the correct terminator token, returns an empty set.
+//
+// There are cases where we have more than one possible terminator (for example,
+// we find either a comma or a semicolon after a VarDecl).
+static std::set<tok::TokenKind> getTerminators(const Decl &D) {
+ if (llvm::isa<RecordDecl>(D) || llvm::isa<UsingDecl>(D))
+ return {tok::semi};
+
+ if (llvm::isa<FunctionDecl>(D) || llvm::isa<LinkageSpecDecl>(D))
+ return {tok::r_brace, tok::semi};
+
+ if (llvm::isa<VarDecl>(D) || llvm::isa<FieldDecl>(D))
+ return {tok::comma, tok::semi};
+
+ return {};
+}
+
+// Starting from `Loc`, skips whitespace up to, and including, a single
+// newline. Returns the (exclusive) end of any skipped whitespace (that is, the
+// location immediately after the whitespace).
+static SourceLocation skipWhitespaceAndNewline(const SourceManager &SM,
+ SourceLocation Loc,
+ const LangOptions &LangOpts) {
+ const char *LocChars = SM.getCharacterData(Loc);
+ int i = 0;
+ while (isHorizontalWhitespace(LocChars[i]))
+ ++i;
+ if (isVerticalWhitespace(LocChars[i]))
+ ++i;
+ return Loc.getLocWithOffset(i);
+}
+
+// Is `Loc` separated from any following decl by something meaningful (e.g. an
+// empty line, a comment), ignoring horizontal whitespace? Since this is a
+// heuristic, we return false when in doubt. `Loc` cannot be the first location
+// in the file.
+static bool atOrBeforeSeparation(const SourceManager &SM, SourceLocation Loc,
+ const LangOptions &LangOpts) {
+ // If the preceding character is a newline, we'll check for an empty line as a
+ // separator. However, we can't identify an empty line using tokens, so we
+ // analyse the characters. If we try to use tokens, we'll just end up with a
+ // whitespace token, whose characters we'd have to analyse anyhow.
+ bool Invalid = false;
+ const char *LocChars =
+ SM.getCharacterData(Loc.getLocWithOffset(-1), &Invalid);
+ assert(!Invalid &&
+ "Loc must be a valid character and not the first of the source file.");
+ if (isVerticalWhitespace(LocChars[0])) {
+ for (int i = 1; isWhitespace(LocChars[i]); ++i)
+ if (isVerticalWhitespace(LocChars[i]))
+ return true;
+ }
+ // We didn't find an empty line, so lex the next token, skipping past any
+ // whitespace we just scanned.
+ Token Tok;
+ bool Failed = Lexer::getRawToken(Loc, Tok, SM, LangOpts,
+ /*IgnoreWhiteSpace=*/true);
+ if (Failed)
+ // Any text that confuses the lexer seems fair to consider a separation.
+ return true;
+
+ switch (Tok.getKind()) {
+ case tok::comment:
+ case tok::l_brace:
+ case tok::r_brace:
+ case tok::eof:
+ return true;
+ default:
+ return false;
+ }
+}
+
+CharSourceRange tooling::getAssociatedRange(const Decl &Decl,
+ ASTContext &Context) {
+ const SourceManager &SM = Context.getSourceManager();
+ const LangOptions &LangOpts = Context.getLangOpts();
+ CharSourceRange Range = CharSourceRange::getTokenRange(Decl.getSourceRange());
+
+ // First, expand to the start of the template<> declaration if necessary.
+ if (const auto *Record = llvm::dyn_cast<CXXRecordDecl>(&Decl)) {
+ if (const auto *T = Record->getDescribedClassTemplate())
+ if (SM.isBeforeInTranslationUnit(T->getBeginLoc(), Range.getBegin()))
+ Range.setBegin(T->getBeginLoc());
+ } else if (const auto *F = llvm::dyn_cast<FunctionDecl>(&Decl)) {
+ if (const auto *T = F->getDescribedFunctionTemplate())
+ if (SM.isBeforeInTranslationUnit(T->getBeginLoc(), Range.getBegin()))
+ Range.setBegin(T->getBeginLoc());
+ }
+
+ // Next, expand the end location past trailing comments to include a potential
+ // newline at the end of the decl's line.
+ Range.setEnd(
+ getEntityEndLoc(SM, Decl.getEndLoc(), getTerminators(Decl), LangOpts));
+ Range.setTokenRange(false);
+
+  // Expand to include preceding associated comments. We ignore any comments
+  // that are not preceding the decl, since we've already skipped trailing
+ // comments with getEntityEndLoc.
+ if (const RawComment *Comment =
+ Decl.getASTContext().getRawCommentForDeclNoCache(&Decl))
+ // Only include a preceding comment if:
+ // * it is *not* separate from the declaration (not including any newline
+ // that immediately follows the comment),
+ // * the decl *is* separate from any following entity (so, there are no
+ // other entities the comment could refer to), and
+ // * it is not a IfThisThenThat lint check.
+ if (SM.isBeforeInTranslationUnit(Comment->getBeginLoc(),
+ Range.getBegin()) &&
+ !atOrBeforeSeparation(
+ SM, skipWhitespaceAndNewline(SM, Comment->getEndLoc(), LangOpts),
+ LangOpts) &&
+ atOrBeforeSeparation(SM, Range.getEnd(), LangOpts)) {
+ const StringRef CommentText = Comment->getRawText(SM);
+ if (!CommentText.contains("LINT.IfChange") &&
+ !CommentText.contains("LINT.ThenChange"))
+ Range.setBegin(Comment->getBeginLoc());
+ }
+ // Add leading attributes.
+ for (auto *Attr : Decl.attrs()) {
+ if (Attr->getLocation().isInvalid() ||
+ !SM.isBeforeInTranslationUnit(Attr->getLocation(), Range.getBegin()))
+ continue;
+ Range.setBegin(Attr->getLocation());
+
+ // Extend to the left '[[' or '__attribute((' if we saw the attribute,
+ // unless it is not a valid location.
+ bool Invalid;
+ StringRef Source =
+ SM.getBufferData(SM.getFileID(Range.getBegin()), &Invalid);
+ if (Invalid)
+ continue;
+ llvm::StringRef BeforeAttr =
+ Source.substr(0, SM.getFileOffset(Range.getBegin()));
+ llvm::StringRef BeforeAttrStripped = BeforeAttr.rtrim();
+
+ for (llvm::StringRef Prefix : {"[[", "__attribute__(("}) {
+ // Handle whitespace between attribute prefix and attribute value.
+ if (BeforeAttrStripped.endswith(Prefix)) {
+ // Move start to start position of prefix, which is
+ // length(BeforeAttr) - length(BeforeAttrStripped) + length(Prefix)
+ // positions to the left.
+ Range.setBegin(Range.getBegin().getLocWithOffset(static_cast<int>(
+ -BeforeAttr.size() + BeforeAttrStripped.size() - Prefix.size())));
+ break;
+ // If we didn't see '[[' or '__attribute' it's probably coming from a
+ // macro expansion which is already handled by makeFileCharRange(),
+ // below.
+ }
+ }
+ }
+
+ // Range.getEnd() is already fully un-expanded by getEntityEndLoc. But,
+ // Range.getBegin() may be inside an expansion.
+ return Lexer::makeFileCharRange(Range, SM, LangOpts);
}
diff --git a/clang/lib/Tooling/Transformer/Stencil.cpp b/clang/lib/Tooling/Transformer/Stencil.cpp
index 8710e3cdf60f..2670bf7adabf 100644
--- a/clang/lib/Tooling/Transformer/Stencil.cpp
+++ b/clang/lib/Tooling/Transformer/Stencil.cpp
@@ -12,12 +12,14 @@
#include "clang/AST/Expr.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/ASTMatchers/ASTMatchers.h"
+#include "clang/Basic/SourceLocation.h"
#include "clang/Lex/Lexer.h"
#include "clang/Tooling/Transformer/SourceCode.h"
#include "clang/Tooling/Transformer/SourceCodeBuilders.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Errc.h"
+#include "llvm/Support/Error.h"
#include <atomic>
#include <memory>
#include <string>
@@ -26,7 +28,6 @@ using namespace clang;
using namespace transformer;
using ast_matchers::MatchFinder;
-using ast_type_traits::DynTypedNode;
using llvm::errc;
using llvm::Error;
using llvm::Expected;
@@ -81,14 +82,14 @@ struct SelectorData {
// A stencil operation to build a member access `e.m` or `e->m`, as appropriate.
struct AccessData {
AccessData(StringRef BaseId, Stencil Member)
- : BaseId(BaseId), Member(std::move(Member)) {}
+ : BaseId(std::string(BaseId)), Member(std::move(Member)) {}
std::string BaseId;
Stencil Member;
};
struct IfBoundData {
IfBoundData(StringRef Id, Stencil TrueStencil, Stencil FalseStencil)
- : Id(Id), TrueStencil(std::move(TrueStencil)),
+ : Id(std::string(Id)), TrueStencil(std::move(TrueStencil)),
FalseStencil(std::move(FalseStencil)) {}
std::string Id;
Stencil TrueStencil;
@@ -227,10 +228,37 @@ Error evalData(const UnaryOperationData &Data,
Error evalData(const SelectorData &Data, const MatchFinder::MatchResult &Match,
std::string *Result) {
- auto Range = Data.Selector(Match);
- if (!Range)
- return Range.takeError();
- *Result += tooling::getText(*Range, *Match.Context);
+ auto RawRange = Data.Selector(Match);
+ if (!RawRange)
+ return RawRange.takeError();
+ CharSourceRange Range = Lexer::makeFileCharRange(
+ *RawRange, *Match.SourceManager, Match.Context->getLangOpts());
+ if (Range.isInvalid()) {
+ // Validate the original range to attempt to get a meaningful error message.
+ // If it's valid, then something else is the cause and we just return the
+ // generic failure message.
+ if (auto Err = tooling::validateEditRange(*RawRange, *Match.SourceManager))
+ return handleErrors(std::move(Err), [](std::unique_ptr<StringError> E) {
+ assert(E->convertToErrorCode() ==
+ llvm::make_error_code(errc::invalid_argument) &&
+ "Validation errors must carry the invalid_argument code");
+ return llvm::createStringError(
+ errc::invalid_argument,
+ "selected range could not be resolved to a valid source range; " +
+ E->getMessage());
+ });
+ return llvm::createStringError(
+ errc::invalid_argument,
+ "selected range could not be resolved to a valid source range");
+ }
+ // Validate `Range`, because `makeFileCharRange` accepts some ranges that
+ // `validateEditRange` rejects.
+ if (auto Err = tooling::validateEditRange(Range, *Match.SourceManager))
+ return joinErrors(
+ llvm::createStringError(errc::invalid_argument,
+ "selected range is not valid for editing"),
+ std::move(Err));
+ *Result += tooling::getText(Range, *Match.Context);
return Error::success();
}
@@ -294,47 +322,41 @@ public:
};
} // namespace
-Stencil transformer::detail::makeStencil(StringRef Text) { return text(Text); }
-
-Stencil transformer::detail::makeStencil(RangeSelector Selector) {
- return selection(std::move(Selector));
+Stencil transformer::detail::makeStencil(StringRef Text) {
+ return std::make_shared<StencilImpl<RawTextData>>(std::string(Text));
}
-Stencil transformer::text(StringRef Text) {
- return std::make_shared<StencilImpl<RawTextData>>(Text);
-}
-
-Stencil transformer::selection(RangeSelector Selector) {
+Stencil transformer::detail::makeStencil(RangeSelector Selector) {
return std::make_shared<StencilImpl<SelectorData>>(std::move(Selector));
}
Stencil transformer::dPrint(StringRef Id) {
- return std::make_shared<StencilImpl<DebugPrintNodeData>>(Id);
+ return std::make_shared<StencilImpl<DebugPrintNodeData>>(std::string(Id));
}
Stencil transformer::expression(llvm::StringRef Id) {
return std::make_shared<StencilImpl<UnaryOperationData>>(
- UnaryNodeOperator::Parens, Id);
+ UnaryNodeOperator::Parens, std::string(Id));
}
Stencil transformer::deref(llvm::StringRef ExprId) {
return std::make_shared<StencilImpl<UnaryOperationData>>(
- UnaryNodeOperator::Deref, ExprId);
+ UnaryNodeOperator::Deref, std::string(ExprId));
}
Stencil transformer::maybeDeref(llvm::StringRef ExprId) {
return std::make_shared<StencilImpl<UnaryOperationData>>(
- UnaryNodeOperator::MaybeDeref, ExprId);
+ UnaryNodeOperator::MaybeDeref, std::string(ExprId));
}
Stencil transformer::addressOf(llvm::StringRef ExprId) {
return std::make_shared<StencilImpl<UnaryOperationData>>(
- UnaryNodeOperator::AddressOf, ExprId);
+ UnaryNodeOperator::AddressOf, std::string(ExprId));
}
Stencil transformer::maybeAddressOf(llvm::StringRef ExprId) {
return std::make_shared<StencilImpl<UnaryOperationData>>(
- UnaryNodeOperator::MaybeAddressOf, ExprId);
+ UnaryNodeOperator::MaybeAddressOf, std::string(ExprId));
}
Stencil transformer::access(StringRef BaseId, Stencil Member) {
diff --git a/clang/lib/Tooling/Transformer/Transformer.cpp b/clang/lib/Tooling/Transformer/Transformer.cpp
index 71f0646f4c0e..e8fc00c4e953 100644
--- a/clang/lib/Tooling/Transformer/Transformer.cpp
+++ b/clang/lib/Tooling/Transformer/Transformer.cpp
@@ -12,6 +12,7 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/Tooling/Refactoring/AtomicChange.h"
#include "llvm/Support/Error.h"
+#include <map>
#include <utility>
#include <vector>
@@ -31,7 +32,7 @@ void Transformer::run(const MatchFinder::MatchResult &Result) {
transformer::RewriteRule::Case Case =
transformer::detail::findSelectedCase(Result, Rule);
- auto Transformations = transformer::detail::translateEdits(Result, Case.Edits);
+ auto Transformations = Case.Edits(Result);
if (!Transformations) {
Consumer(Transformations.takeError());
return;
@@ -45,28 +46,39 @@ void Transformer::run(const MatchFinder::MatchResult &Result) {
return;
}
- // Record the results in the AtomicChange, anchored at the location of the
- // first change.
- AtomicChange AC(*Result.SourceManager,
- (*Transformations)[0].Range.getBegin());
+ // Group the transformations, by file, into AtomicChanges, each anchored by
+ // the location of the first change in that file.
+ std::map<FileID, AtomicChange> ChangesByFileID;
for (const auto &T : *Transformations) {
+ auto ID = Result.SourceManager->getFileID(T.Range.getBegin());
+ auto Iter = ChangesByFileID
+ .emplace(ID, AtomicChange(*Result.SourceManager,
+ T.Range.getBegin(), T.Metadata))
+ .first;
+ auto &AC = Iter->second;
if (auto Err = AC.replace(*Result.SourceManager, T.Range, T.Replacement)) {
Consumer(std::move(Err));
return;
}
}
- for (const auto &I : Case.AddedIncludes) {
- auto &Header = I.first;
- switch (I.second) {
- case transformer::IncludeFormat::Quoted:
- AC.addHeader(Header);
- break;
- case transformer::IncludeFormat::Angled:
- AC.addHeader((llvm::Twine("<") + Header + ">").str());
- break;
+ for (auto &IDChangePair : ChangesByFileID) {
+ auto &AC = IDChangePair.second;
+ // FIXME: this will add includes to *all* changed files, which may not be
+ // the intent. We should upgrade the representation to allow associating
+ // headers with specific edits.
+ for (const auto &I : Case.AddedIncludes) {
+ auto &Header = I.first;
+ switch (I.second) {
+ case transformer::IncludeFormat::Quoted:
+ AC.addHeader(Header);
+ break;
+ case transformer::IncludeFormat::Angled:
+ AC.addHeader((llvm::Twine("<") + Header + ">").str());
+ break;
+ }
}
- }
- Consumer(std::move(AC));
+ Consumer(std::move(AC));
+ }
}